diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..faea70d8996fa8e1190229e9e7fb305dcb90cd0a --- /dev/null +++ b/app.py @@ -0,0 +1,223 @@ +import os + +import torch +import spaces + +import gradio as gr +from artistic_portrait.pipeline import ArtisticPortraitXLPipeline +from diffusers import ControlNetModel, DPMSolverMultistepScheduler +from ip_adapter_diffusers.ip_adapter import * + +from huggingface_hub import hf_hub_download + +style_adapter_path = "models/ip_adapter_art_sdxl_512.pth" +id_adapter_path = "models/pulid_adapter_diffusers_1.1.pth" +if not os.path.exists("models/csd_clip.pth"): + hf_hub_download( + repo_id="AisingioroHao0/IP-Adapter-Art", + filename="csd_clip.pth", + local_dir="models", + ) +if not os.path.exists(style_adapter_path): + hf_hub_download( + repo_id="AisingioroHao0/IP-Adapter-Art", + filename="ip_adapter_art_sdxl_512.pth", + local_dir="models", + ) +if not os.path.exists(id_adapter_path): + hf_hub_download( + repo_id="AisingioroHao0/IP-Adapter-Art", + filename="pulid_adapter_diffusers_1.1.pth", + local_dir="models", + ) + +device = "cuda" if torch.cuda.is_available() else "cpu" +sdxl_repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + +torch_dtype = torch.float16 if "cuda" in device else torch.float32 + +# Load pretrained models. +print("Initializing pipeline...") +controlnet = ControlNetModel.from_pretrained( + "xinsir/controlnet-openpose-sdxl-1.0", + torch_dtype=torch_dtype, +).to(device) +pipe = ArtisticPortraitXLPipeline.from_pretrained( + sdxl_repo_id, + controlnet=controlnet, + safety_checker=None, + torch_dtype=torch_dtype, + style_adapter_path=style_adapter_path, + id_adapter_path=id_adapter_path, + device=device, +).to(device) +pipe.scheduler = DPMSolverMultistepScheduler.from_config( + pipe.scheduler.config, timestep_spacing="trailing" +) +load_ip_adapter( + pipe.controlnet, + style_adapter_path, +) + +example_inputs = [ + [ + "datasets/test/style_dataset/Abstract D'Oyley.jpg", + "datasets/test/id_dataset/lifeifei.jpg", + ], + [ + "datasets/test/style_dataset/Adam Zyglis.jpg", + "datasets/test/id_dataset/lecun.jpg", + ], + [ + "datasets/test/style_dataset/Diffused lighting.jpg", + "datasets/test/id_dataset/liuyifei.jpg", + ], + [ + "datasets/test/style_dataset/Shirley Hughes.jpg", + "datasets/test/id_dataset/rihanna.jpg", + ], + [ + "datasets/test/style_dataset/Winter.jpg", + "datasets/test/id_dataset/hinton.jpg", + ], +] + + +@spaces.GPU(enable_queue=True) +def generation( + style_image=None, + id_image=None, + pose_image=None, + prompt="portrait, solo, looking at viewer, best quality, masterpiece", + negative_prompt="flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality, artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed", + num_inference_steps=20, + guidance_scale=7.0, + style_scale=1.0, + id_scale=1.0, + controlnet_scale=0.9, + seed=42, + height=1024, + width=1024, + artify_controlnet_scale=0.0, +): + set_ip_adapter_scale(pipe.controlnet, artify_controlnet_scale) + result = pipe( + prompt=prompt, + negative_prompt=negative_prompt, + control_image=pose_image, + controlnet_conditioning_scale=controlnet_scale, + width=width, + height=height, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + 
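+ # NOTE: the keyword names in this call must match ArtisticPortraitXLPipeline.__call__, + # whose signature ends in **kwargs, so a mismatched keyword would be + # silently swallowed instead of raising a TypeError.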
style_image=style_image, + id_image=id_image, + generator=torch.Generator(device).manual_seed(seed), + id_adapter_scale=id_scale, + style_adapter_scale=style_scale, + ).images[0] + + return result + + +with gr.Blocks(delete_cache=(3600, 3600)) as demo: + gr.Markdown( + """ + # Artistic Portrait Gen 0.9: Generate Customized Artistic Portraits from Style Reference Images + + **Implementation based on [Art-Adapter](https://github.com/aihao2000/IP-Adapter-Art), [PuLID-Adapter](https://github.com/ToTheBeginning/PuLID), and [InstantStyle](https://github.com/instantX-research/InstantStyle).** + + ## Basic usage: + - Stylized Portrait Generation: Upload the style reference image and the ID reference image, then click "Generate" to generate the artistic portrait directly. + - Text-guided Stylization Generation: Set ID Scale to 0, modify the prompt, and then try text-guided stylized image generation through **Art-Adapter**. **(Note that the ID image cannot be empty in the current version.)** + + _If the style similarity is low, try increasing the Artify ControlNet Scale, or set the ControlNet Scale to 0._ + + ## News + + - 2025.3.24: We released Artistic Portrait Gen 0.9. + """ + ) + with gr.Row(): + with gr.Column(): + + with gr.Row(): + style_image = gr.Image( + label="Style Reference Image", + type="pil", + ) + id_image = gr.Image( + label="ID Reference Image", + type="pil", + ) + pose_image = gr.Image( + label="Pose Reference Image", + type="pil", + value="datasets/test/pose.jpg", + ) + with gr.Row(): + clear_btn = gr.ClearButton() + generation_btn = gr.Button("Generate") + with gr.Row(): + id_scale = gr.Number(label="ID Scale", value=1.0, step=0.1) + style_scale = gr.Number(label="Style Scale", value=1.0, step=0.1) + controlnet_scale = gr.Number( + label="ControlNet Scale", value=0.9, step=0.1 + ) + artify_controlnet_scale = gr.Number( + label="Artify ControlNet Scale", value=0.0, step=0.1 + ) + guidance_scale = gr.Number(label="CFG Scale", value=7.0, step=0.1) + with gr.Row(): + height = gr.Number(label="Height", step=1, maximum=1024, value=1024) + width = gr.Number(label="Width", step=1, maximum=1024, value=1024) + seed = gr.Number(label="Seed", value=42, step=1) + num_inference_steps = gr.Number(label="Steps", value=20, step=1) + prompt = gr.Textbox( + label="Prompt", + value="portrait, solo, looking at viewer, best quality, masterpiece", + ) + negative_prompt = gr.Textbox( + label="Negative Prompt", + value="flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality, artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed", + ) + + with gr.Column(): + output = gr.Image(label="Result", type="pil") + with gr.Row(): + examples = gr.Examples( + examples=example_inputs, + inputs=[style_image, id_image], + outputs=[ + output, + ], + fn=lambda x, y: None, + cache_examples=False, + ) + + clear_btn.add([style_image, id_image, pose_image, output]) + + generation_btn.click( + generation, + inputs=[ + style_image, + id_image, + pose_image, + prompt, + negative_prompt, + num_inference_steps, + guidance_scale, + style_scale, + id_scale, + controlnet_scale, + seed, + height, + width, + artify_controlnet_scale, + ], + outputs=[output], + api_name="artistic_portrait_gen", + ) + +demo.queue().launch(share=True) diff --git a/ip_adapter_art/__init__.py b/artistic_portrait/__init__.py similarity index 100% rename from ip_adapter_art/__init__.py rename to 
artistic_portrait/__init__.py diff --git a/artistic_portrait/pipeline.py b/artistic_portrait/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..36c3841f29c966c2006d83193cf84f24070b72f9 --- /dev/null +++ b/artistic_portrait/pipeline.py @@ -0,0 +1,886 @@ +from diffusers import StableDiffusionXLControlNetPipeline +from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl import * +from .pulid_encoder import PuLIDEncoder +from csd_clip import create_model_and_transforms as create_csd_clip_model_and_transforms +from csd_clip import CSD_CLIP +from ip_adapter_diffusers.ip_adapter import * +from transformers import CLIPVisionModelWithProjection + + +class ArtisticPortraitXLPipeline(StableDiffusionXLControlNetPipeline): + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ + ControlNetModel, + List[ControlNetModel], + Tuple[ControlNetModel], + MultiControlNetModel, + ], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + style_adapter_path=None, + id_adapter_path=None, + style_image_encoder_path="models/h94/IP-Adapter/sdxl_models/image_encoder", + device=None, + ): + super().__init__( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + force_zeros_for_empty_prompt=force_zeros_for_empty_prompt, + add_watermarker=add_watermarker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.id_image_encoder = PuLIDEncoder(device=device) + if "art" in style_adapter_path: + self.style_image_encoder = create_csd_clip_model_and_transforms()[0] + else: + self.style_image_encoder = CLIPVisionModelWithProjection.from_pretrained( + style_image_encoder_path + ) + + self.style_image_processor = CLIPImageProcessor() + load_multi_ip_adapter( + self.unet, + paths=[style_adapter_path, id_adapter_path], + ) + self.style_image_projection_layer = ( + self.unet.encoder_hid_proj.image_projection_layers[0] + ) + self.id_image_projection_layer = ( + self.unet.encoder_hid_proj.image_projection_layers[1] + ) + + def load_style_adapter_to_controlnet(self, style_adapter_path): + load_ip_adapter(self.controlnet, style_adapter_path) + + def get_id_hidden_states(self, image): + if not isinstance(image, list): + image = [image] + image = [ + ( + single_image + if isinstance(single_image, np.ndarray) + else np.array(single_image) + ) + for single_image in image + ] + id_cond, id_vit_hidden, id_uncond, id_vit_hidden_uncond = ( + self.id_image_encoder.get_id_embedding(image) + ) + + id_vit_hidden = [x.to(dtype=self.unet.dtype) for x in id_vit_hidden] + id_vit_hidden_uncond = [ + x.to(dtype=self.unet.dtype) for x in id_vit_hidden_uncond + ] + uncond_id_embedding = self.id_image_projection_layer( + id_uncond.to(self.unet.device, self.unet.dtype), + id_vit_hidden_uncond, + ) + id_embedding = self.id_image_projection_layer( + id_cond.to(self.unet.device, self.unet.dtype), id_vit_hidden + ) + id_hidden_states = torch.concat([uncond_id_embedding, id_embedding], dim=0) + torch.cuda.empty_cache() + return id_hidden_states + + def get_style_hidden_states(self, image): + if 
isinstance(self.style_image_encoder, CSD_CLIP): + self.style_image_encoder = self.style_image_encoder.to( + self._execution_device, dtype=torch.float32 + ) + style_pixel_values = self.style_image_processor.preprocess( + image, return_tensors="pt" + ).pixel_values + _, __, style_image_embeds = self.style_image_encoder( + style_pixel_values.to(self._execution_device, torch.float32) + ) + style_image_embeds = torch.stack( + [ + torch.zeros_like(style_image_embeds).to(self._execution_device), + style_image_embeds, + ] + ).to(self._execution_device, self.unet.dtype) + style_ip_adapter_hidden_states = self.style_image_projection_layer( + style_image_embeds + ) + + elif isinstance(self.style_image_encoder, CLIPVisionModelWithProjection): + self.style_image_encoder = self.style_image_encoder.to( + self._execution_device, dtype=self.unet.dtype + ) + style_pixel_values = self.style_image_processor.preprocess( + image, return_tensors="pt" + ).pixel_values + style_image_embeds = self.style_image_encoder( + style_pixel_values.to(self._execution_device, self.unet.dtype) + ).image_embeds + style_image_embeds = torch.stack( + [ + torch.zeros_like(style_image_embeds).to(self._execution_device), + style_image_embeds, + ] + ).to(self._execution_device, self.unet.dtype) + style_ip_adapter_hidden_states = self.style_image_projection_layer( + style_image_embeds + ) + + torch.cuda.empty_cache() + self.style_image_encoder = self.style_image_encoder.to("cpu") + + return style_ip_adapter_hidden_states + + def set_style_adapter_scale(self, style_adapter_scale): + for name, processor in self.unet.attn_processors.items(): + if ( + isinstance(processor, torch.nn.Module) + and "up_blocks.0.attentions.1" in name + ): + processor.scale = [style_adapter_scale, 0.0] + + def set_id_adapter_scale(self, id_adapter_scale): + for name, processor in self.unet.attn_processors.items(): + if ( + isinstance(processor, torch.nn.Module) + and "up_blocks.0.attentions.1" not in name + ): + processor.scale = [0.0, id_adapter_scale] + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + control_image: PipelineImageInput = None, + style_image: PipelineImageInput = None, + id_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + id_adapter_scale=1.0, + style_adapter_scale=1.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: 
Union[float, List[float]] = 1.0, + style_guidance_start=0.0, + style_guidance_end=1.0, + id_guidance_start=0.0, + id_guidance_end=1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[ + Callable[[int, int, Dict], None], + PipelineCallback, + MultiPipelineCallbacks, + ] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. 
As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. 
If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = ( + self.controlnet._orig_mod + if is_compiled_module(self.controlnet) + else self.controlnet + ) + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance( + control_guidance_end, list + ): + control_guidance_start = len(control_guidance_end) * [ + control_guidance_start + ] + elif not isinstance(control_guidance_end, list) and isinstance( + control_guidance_start, list + ): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance( + control_guidance_end, list + ): + mult = ( + len(controlnet.nets) + if isinstance(controlnet, MultiControlNetModel) + else 1 + ) + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. 
Raise error if not correct + # self.check_inputs( + # prompt, + # prompt_2, + # control_image, + # callback_steps, + # negative_prompt, + # negative_prompt_2, + # prompt_embeds, + # negative_prompt_embeds, + # pooled_prompt_embeds, + # ip_adapter_image, + # ip_adapter_image_embeds, + # negative_pooled_prompt_embeds, + # controlnet_conditioning_scale, + # control_guidance_start, + # control_guidance_end, + # callback_on_step_end_tensor_inputs, + # ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance( + controlnet_conditioning_scale, float + ): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len( + controlnet.nets + ) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) + if self.cross_attention_kwargs is not None + else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + style_hidden_states = self.get_style_hidden_states(style_image) + id_hidden_states = self.get_id_hidden_states(id_image) + set_multi_ip_hidden_states( + self.unet, + [ + style_hidden_states, + id_hidden_states, + ], + ) + set_ip_hidden_states(self.controlnet, style_hidden_states) + self.set_id_adapter_scale(id_adapter_scale) + self.set_style_adapter_scale(style_adapter_scale) + # 4. Prepare image + if isinstance(controlnet, ControlNetModel) and control_image is not None: + control_image = self.prepare_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel) and control_image is not None: + images = [] + + for image_ in control_image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + control_image = images + height, width = control_image[0].shape[-2:] + + # 5. 
Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat( + batch_size * num_images_per_prompt + ) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append( + keeps[0] if isinstance(controlnet, ControlNetModel) else keeps + ) + + # 7.2 Prepare added time ids & embeddings + if control_image is None: + original_size = original_size + original_size = original_size or (height, width) + target_size = target_size or (height, width) + else: + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat( + [negative_pooled_prompt_embeds, add_text_embeds], dim=0 + ) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat( + batch_size * num_images_per_prompt, 1 + ) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len( + list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)) + ) + timesteps = timesteps[:num_inference_steps] + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if ( + is_unet_compiled and is_controlnet_compiled + ) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * 2) + if self.do_classifier_free_guidance + else latents + ) + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t + ) + + added_cond_kwargs = { + "text_embeds": add_text_embeds, + "time_ids": add_time_ids, + } + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input( + control_model_input, t + ) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [ + c * s + for c, s in zip( + controlnet_conditioning_scale, controlnet_keep[i] + ) + ] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + if control_image is not None and controlnet_conditioning_scale != 0.0: + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + else: + down_block_res_samples = None + mid_block_res_sample = None + + if ( + guess_mode + and self.do_classifier_free_guidance + and control_image is not None + ): + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
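+ # Each residual below carries only the conditional batch size; concatenating + # zeros along dim 0 doubles it to match latent_model_input under CFG.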
+ down_block_res_samples = [ + torch.cat([torch.zeros_like(d), d]) + for d in down_block_res_samples + ] + mid_block_res_sample = torch.cat( + [torch.zeros_like(mid_block_res_sample), mid_block_res_sample] + ) + + # if ( + # i / num_inference_steps >= style_guidance_start + # and i / num_inference_steps <= style_guidance_end + # ): + # self.set_style_adapter_scale(style_adapter_scale) + # else: + # self.set_style_adapter_scale(0.0) + + # if ( + # i / num_inference_steps >= id_guidance_start + # and i / num_inference_steps <= id_guidance_end + # ): + # self.set_id_adapter_scale(id_adapter_scale) + # else: + # self.set_id_adapter_scale(0.0) + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, **extra_step_kwargs, return_dict=False + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop( + "negative_prompt_embeds", negative_prompt_embeds + ) + add_text_embeds = callback_outputs.pop( + "add_text_embeds", add_text_embeds + ) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop( + "negative_add_time_ids", negative_add_time_ids + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = ( + self.vae.dtype == torch.float16 and self.vae.config.force_upcast + ) + + if needs_upcasting: + self.upcast_vae() + latents = latents.to( + next(iter(self.vae.post_quant_conv.parameters())).dtype + ) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = ( + hasattr(self.vae.config, "latents_mean") + and self.vae.config.latents_mean is not None + ) + has_latents_std = ( + hasattr(self.vae.config, "latents_std") + and self.vae.config.latents_std is not None + ) + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, 4, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std) + .view(1, 4, 1, 1) + .to(latents.device, latents.dtype) + ) + latents = ( + latents * latents_std / 
self.vae.config.scaling_factor + + latents_mean + ) + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess( + image, output_type=output_type + ) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/artistic_portrait/pulid_encoder.py b/artistic_portrait/pulid_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..14d18445d6ba5f376eca14d005114b500a8d11cd --- /dev/null +++ b/artistic_portrait/pulid_encoder.py @@ -0,0 +1,207 @@ +import gc + +import cv2 +import insightface +import numpy as np +import torch +import torch.nn as nn +from pulid.utils import img2tensor, tensor2img +from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline +from facexlib.parsing import init_parsing_model +from facexlib.utils.face_restoration_helper import FaceRestoreHelper + +from huggingface_hub import hf_hub_download, snapshot_download +from insightface.app import FaceAnalysis +from safetensors.torch import load_file +from torchvision.transforms import InterpolationMode +from torchvision.transforms.functional import normalize, resize + +from eva_clip import create_model_and_transforms +from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD +from pulid.encoders_transformer import IDFormer +from pulid.utils import is_torch2_available, sample_dpmpp_2m, sample_dpmpp_sde + +if is_torch2_available(): + from pulid.attention_processor import AttnProcessor2_0 as AttnProcessor + from pulid.attention_processor import IDAttnProcessor2_0 as IDAttnProcessor +else: + from pulid.attention_processor import AttnProcessor, IDAttnProcessor + +class PuLIDEncoder: + def __init__(self, device): + super().__init__() + self.device = device + + # scheduler + # self.pipe.scheduler = DPMSolverMultistepScheduler.from_config( + # self.pipe.scheduler.config + # ) + + # ID adapters + # self.id_adapter = IDFormer().to(self.device) + + # preprocessors + # face align and parsing + self.face_helper = FaceRestoreHelper( + upscale_factor=1, + face_size=512, + crop_ratio=(1, 1), + det_model="retinaface_resnet50", + save_ext="png", + device=self.device, + ) + self.face_helper.face_parse = None + self.face_helper.face_parse = init_parsing_model( + model_name="bisenet", device=self.device + ) + # clip-vit backbone + model, _, _ = create_model_and_transforms( + "EVA02-CLIP-L-14-336", "eva_clip", force_custom_clip=True + ) + model = model.visual + self.clip_vision_model = model.to(self.device) + eva_transform_mean = getattr( + self.clip_vision_model, "image_mean", OPENAI_DATASET_MEAN + ) + eva_transform_std = getattr( + self.clip_vision_model, "image_std", OPENAI_DATASET_STD + ) + if not isinstance(eva_transform_mean, (list, tuple)): + eva_transform_mean = (eva_transform_mean,) * 3 + if not isinstance(eva_transform_std, (list, tuple)): + eva_transform_std = (eva_transform_std,) * 3 + self.eva_transform_mean = eva_transform_mean + self.eva_transform_std = eva_transform_std + 
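+ # PuLID conditions identity on two signals per face: a global antelopev2 + # embedding from insightface plus multi-layer EVA-CLIP features of the + # parsed face crop; both are consumed downstream by the pipeline's ID + # projection layer.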
local_dir="models/antelopev2") + self.app = FaceAnalysis( + name="antelopev2", + root=".", + providers=["CPUExecutionProvider"], + ) + self.app.prepare(ctx_id=0, det_size=(640, 640)) + self.handler_ante = insightface.model_zoo.get_model( + "models/antelopev2/glintr100.onnx" + ) + self.handler_ante.prepare(ctx_id=0) + + gc.collect() + torch.cuda.empty_cache() + + # self.load_pretrain() + + # other configs + self.debug_img_list = [] + + + def to_gray(self, img): + x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3] + x = x.repeat(1, 3, 1, 1) + return x + + def get_id_embedding(self, image_list): + """ + Args: + image in image_list: numpy rgb image, range [0, 255] + """ + id_cond_list = [] + id_vit_hidden_list = [] + for ii, image in enumerate(image_list): + self.face_helper.clean_all() + image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + # get antelopev2 embedding + face_info = self.app.get(image_bgr) + if len(face_info) > 0: + face_info = sorted( + face_info, + key=lambda x: (x["bbox"][2] - x["bbox"][0]) + * (x["bbox"][3] - x["bbox"][1]), + )[ + -1 + ] # only use the maximum face + id_ante_embedding = face_info["embedding"] + self.debug_img_list.append( + image[ + int(face_info["bbox"][1]) : int(face_info["bbox"][3]), + int(face_info["bbox"][0]) : int(face_info["bbox"][2]), + ] + ) + else: + id_ante_embedding = None + + # using facexlib to detect and align face + self.face_helper.read_image(image_bgr) + self.face_helper.get_face_landmarks_5(only_center_face=True) + self.face_helper.align_warp_face() + if len(self.face_helper.cropped_faces) == 0: + raise RuntimeError("facexlib align face fail") + align_face = self.face_helper.cropped_faces[0] + # incase insightface didn't detect face + if id_ante_embedding is None: + print( + "fail to detect face using insightface, extract embedding on align face" + ) + id_ante_embedding = self.handler_ante.get_feat(align_face) + + id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device) + if id_ante_embedding.ndim == 1: + id_ante_embedding = id_ante_embedding.unsqueeze(0) + + # parsing + input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0 + input = input.to(self.device) + parsing_out = self.face_helper.face_parse( + normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + )[0] + parsing_out = parsing_out.argmax(dim=1, keepdim=True) + bg_label = [0, 16, 18, 7, 8, 9, 14, 15] + bg = sum(parsing_out == i for i in bg_label).bool() + white_image = torch.ones_like(input) + # only keep the face features + face_features_image = torch.where(bg, white_image, self.to_gray(input)) + self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False)) + + # transform img before sending to eva-clip-vit + face_features_image = resize( + face_features_image, + self.clip_vision_model.image_size, + InterpolationMode.BICUBIC, + ) + face_features_image = normalize( + face_features_image, self.eva_transform_mean, self.eva_transform_std + ) + id_cond_vit, id_vit_hidden = self.clip_vision_model( + face_features_image, + return_all_features=False, + return_hidden=True, + shuffle=False, + ) + id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True) + id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm) + + id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1) + + id_cond_list.append(id_cond) + id_vit_hidden_list.append(id_vit_hidden) + + id_uncond = torch.zeros_like(id_cond_list[0]) + id_vit_hidden_uncond = [] + for layer_idx in range(0, len(id_vit_hidden_list[0])): + id_vit_hidden_uncond.append( + 
id_uncond = torch.zeros_like(id_cond_list[0]) + id_vit_hidden_uncond = [] + for layer_idx in range(len(id_vit_hidden_list[0])): + id_vit_hidden_uncond.append( + torch.zeros_like(id_vit_hidden_list[0][layer_idx]) + ) + + id_cond = torch.stack(id_cond_list, dim=1) + id_vit_hidden = id_vit_hidden_list[0] + for i in range(1, len(image_list)): + for j, x in enumerate(id_vit_hidden_list[i]): + id_vit_hidden[j] = torch.cat([id_vit_hidden[j], x], dim=1) + + # id_embedding = self.id_adapter(id_cond, id_vit_hidden) + # uncond_id_embedding = self.id_adapter(id_uncond, id_vit_hidden_uncond) + + # return id_embedding + return id_cond, id_vit_hidden, id_uncond, id_vit_hidden_uncond \ No newline at end of file diff --git a/artistic_portrait_gen.ipynb b/artistic_portrait_gen.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..5eb73c77ee14d1055103cf05bd1d4694fc4da98c --- /dev/null +++ b/artistic_portrait_gen.ipynb @@ -0,0 +1,130 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Artistic Portrait Gen" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from artistic_portrait.pipeline import ArtisticPortraitXLPipeline\n", + "from diffusers import ControlNetModel\n", + "from PIL import Image\n", + "from ip_adapter_diffusers.ip_adapter import *\n", + "from diffusers import DPMSolverMultistepScheduler" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "device = \"cuda\"\n", + "dtype = torch.float16\n", + "style_adapter_path = \"models/ip_adapter_art_sdxl_512.pth\"\n", + "id_adapter_path = \"models/pulid_adapter_diffusers_1.1.pth\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "controlnet = ControlNetModel.from_pretrained(\n", + " \"xinsir/controlnet-openpose-sdxl-1.0\",\n", + " torch_dtype=dtype,\n", + ").to(device)\n", + "pipe = ArtisticPortraitXLPipeline.from_pretrained(\n", + " \"stabilityai/stable-diffusion-xl-base-1.0\",\n", + " controlnet=controlnet,\n", + " safety_checker=None,\n", + " torch_dtype=dtype,\n", + " style_adapter_path=style_adapter_path,\n", + " id_adapter_path=id_adapter_path,\n", + " device=device,\n", + ").to(device)\n", + "pipe.scheduler = DPMSolverMultistepScheduler.from_config(\n", + " pipe.scheduler.config, timestep_spacing=\"trailing\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "height = 1024\n", + "width = 1024\n", + "artify_controlnet_scale = 0.0\n", + "style_scale = 1.0\n", + "id_scale = 1.0\n", + "controlnet_scale = 0.9\n", + "\n", + "if artify_controlnet_scale > 0:\n", + " pipe.load_style_adapter_to_controlnet(style_adapter_path)\n", + " set_ip_adapter_scale(pipe.controlnet, artify_controlnet_scale)\n", + "\n", + "style_image = Image.open(\"datasets/test/style_dataset/Abstract D'Oyley.jpg\")\n", + "id_image = Image.open(\"datasets/test/id_dataset/hinton.jpg\")\n", + "pose_image = Image.open(\"datasets/test/pose.jpg\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = pipe(\n", + " \"portrait, solo, looking at viewer, best quality, masterpiece\",\n", + " negative_prompt=\"flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality, artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed\",\n", + " control_image=pose_image,\n", 
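+ " # NOTE: the pipeline's __call__ accepts **kwargs, so adapter scales must\n", + " # use its exact keyword names (id_adapter_scale / style_adapter_scale).\n",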
+ " controlnet_conditioning_scale=controlnet_scale,\n", + " width=width,\n", + " height=height,\n", + " num_inference_steps=20,\n", + " guidance_scale=7,\n", + " style_image=style_image,\n", + " id_image=id_image,\n", + " generator=torch.Generator(\"cuda\").manual_seed(42),\n", + " id_scale=1.0,\n", + " style_scale=1.0,\n", + " # num_zero=[None, 16],\n", + " # ortho=[None, 'ortho_v2'],\n", + ").images[0]\n", + "result" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ip_adapter_art/utils/csd_clip.py b/csd_clip/__init__.py similarity index 83% rename from ip_adapter_art/utils/csd_clip.py rename to csd_clip/__init__.py index 8c444490993788dc60666ad8af84024039f284aa..37e1243b968556c02b3439777842c00319c9120c 100644 --- a/ip_adapter_art/utils/csd_clip.py +++ b/csd_clip/__init__.py @@ -5,6 +5,7 @@ import copy from torch.autograd import Function from collections import OrderedDict +from torchvision import transforms def convert_state_dict(state_dict): @@ -94,7 +95,7 @@ class CSD_CLIP(nn.Module): self.content_proj_head = content_proj_head if name == "vit_large": if model_path is None: - clipmodel, _ = clip.load("models/ViT-L-14.pt") + clipmodel, _ = clip.load("ViT-L/14") else: clipmodel, _ = clip.load(model_path) self.backbone = clipmodel.visual @@ -143,3 +144,29 @@ class CSD_CLIP(nn.Module): content_output = reverse_feature @ self.last_layer_content content_output = nn.functional.normalize(content_output, dim=1, p=2) return feature, content_output, style_output + + +def create_model_and_transforms(model_path="models/csd_clip.pth"): + # init model + model = CSD_CLIP("vit_large", "default") + + # load model + checkpoint = torch.load(model_path, map_location="cpu") + state_dict = convert_state_dict(checkpoint["model_state_dict"]) + model.load_state_dict(state_dict, strict=False) + + # normalization + normalize = transforms.Normalize( + (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711) + ) + preprocess = transforms.Compose( + [ + transforms.Resize( + size=224, interpolation=transforms.functional.InterpolationMode.BICUBIC + ), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ] + ) + return model, preprocess, preprocess diff --git a/datasets/test/id_dataset/hinton.jpg b/datasets/test/id_dataset/hinton.jpg new file mode 100644 index 0000000000000000000000000000000000000000..441b9c0ca0323e378b2834229835bc8fa9840abc --- /dev/null +++ b/datasets/test/id_dataset/hinton.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f8e0c41abf2f3de47d284c90700c44a12b74d3787345c740a6919044165bbad +size 31904 diff --git a/datasets/test/id_dataset/lecun.jpg b/datasets/test/id_dataset/lecun.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b8c787868211c1fc9d2678f917516479f871bdc --- /dev/null +++ b/datasets/test/id_dataset/lecun.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2730103b6b9ebaf47b44ef9a9d7fbb722de7878a101af09f0b85f8dfadb4c8a4 +size 30572 diff --git a/datasets/test/id_dataset/lifeifei.jpg b/datasets/test/id_dataset/lifeifei.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..87285ec92fe3ac31ba5b20936864b8ce6b9fdea7 --- /dev/null +++ b/datasets/test/id_dataset/lifeifei.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7b26e78b94ccd8c30a40efca3c52c8a04573188b41eb4b3d1fc517ec8577b35 +size 202940 diff --git a/datasets/test/id_dataset/liuyifei.jpg b/datasets/test/id_dataset/liuyifei.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13fe0481f37ceb046a08ae1475fc1e01a0778f14 --- /dev/null +++ b/datasets/test/id_dataset/liuyifei.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6a43925c86a360871aa69863436aa48b917d5a039b7b9a293426328ecb6c67c +size 37695 diff --git a/datasets/test/id_dataset/rihanna.jpg b/datasets/test/id_dataset/rihanna.jpg new file mode 100644 index 0000000000000000000000000000000000000000..214fddef5613e90f8b2fe6512bd15243f4594da4 --- /dev/null +++ b/datasets/test/id_dataset/rihanna.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8df5ffa49f22ca4e0ad3a69fd5648b7810e91d04aa54cfe77dd6f943416dd0ed +size 104818 diff --git a/datasets/test/pose.jpg b/datasets/test/pose.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7dfbaacea0dc40c55b430e34b9acc54fe91b0028 --- /dev/null +++ b/datasets/test/pose.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccc0df26d3c84b2640c0d5996a3e506e062aeeafb92b79e09ddf665103d7987d +size 24153 diff --git a/datasets/test/style_dataset/Abstract D'Oyley.jpg b/datasets/test/style_dataset/Abstract D'Oyley.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb7cdb19c392393618b090f011361dbd9e575835 --- /dev/null +++ b/datasets/test/style_dataset/Abstract D'Oyley.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275d059cb49d75a8184fc767b72e3d13d60be901106d52ee1511f908fd860a21 +size 85634 diff --git a/datasets/test/style_dataset/Adam Zyglis.jpg b/datasets/test/style_dataset/Adam Zyglis.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5a904b86b746072663b3bb032ec7294c9abe108 --- /dev/null +++ b/datasets/test/style_dataset/Adam Zyglis.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:545d4dd0a093d606241e4da1b15fdbde6e6dcfe94eba69563d5c6143c61a9d77 +size 67492 diff --git a/README.assets/example.jpg b/datasets/test/style_dataset/Amigurumi.jpg similarity index 100% rename from README.assets/example.jpg rename to datasets/test/style_dataset/Amigurumi.jpg diff --git a/datasets/test/style_dataset/Diffused lighting.jpg b/datasets/test/style_dataset/Diffused lighting.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e89950e5fabdbbb96fa08cc2f1cb606ed6af089 --- /dev/null +++ b/datasets/test/style_dataset/Diffused lighting.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cfd6d79f511e05d76cb5b65b062dc11b6716098e53581a7809e6b1180cc7a2e +size 15135 diff --git a/datasets/test/style_dataset/Shirley Hughes.jpg b/datasets/test/style_dataset/Shirley Hughes.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ca867d9aa0639fe7f2c3f5855b783061e1e145e --- /dev/null +++ b/datasets/test/style_dataset/Shirley Hughes.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc658ee320b1270636fd117c4abbeb9a42e521d27c0cc4aa8950191d538be4b6 +size 75165 diff --git a/datasets/test/style_dataset/Winter.jpg b/datasets/test/style_dataset/Winter.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e248193822c23dd47df0d2a6a7f0593e11d98a75 --- /dev/null +++ b/datasets/test/style_dataset/Winter.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffb20f8d32e18534f37e9a93f36fb28fe4841cbd45c95f443835a8b05e9d8c91 +size 47416 diff --git a/eva_clip/__init__.py b/eva_clip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2d014bbfe644b1e247758116bbf1b184738fe5 --- /dev/null +++ b/eva_clip/__init__.py @@ -0,0 +1,11 @@ +from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD +from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_transforms +from .factory import list_models, add_model_config, get_model_config, load_checkpoint +from .loss import ClipLoss +from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg,\ + convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype +from .openai import load_openai_model, list_openai_models +from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\ + get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained +from .tokenizer import SimpleTokenizer, tokenize +from .transform import image_transform \ No newline at end of file diff --git a/eva_clip/bpe_simple_vocab_16e6.txt.gz b/eva_clip/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113 --- /dev/null +++ b/eva_clip/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a +size 1356917 diff --git a/eva_clip/constants.py b/eva_clip/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..a670bb3fab442baeb9af53b91c312e6982af57ee --- /dev/null +++ b/eva_clip/constants.py @@ -0,0 +1,2 @@ +OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073) +OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711) diff --git a/eva_clip/eva_vit_model.py b/eva_clip/eva_vit_model.py new file mode 100644 index 0000000000000000000000000000000000000000..51db88cf0c7b5d7a43f2be80bc59abb6c859c4b4 --- /dev/null +++ b/eva_clip/eva_vit_model.py @@ -0,0 +1,548 @@ +# -------------------------------------------------------- +# Adapted from https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- +import math +import os +from functools import partial +import torch +import torch.nn as nn +import torch.nn.functional as F +try: + from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +except: + from timm.layers import drop_path, to_2tuple, trunc_normal_ + +from .transformer import PatchDropout +from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast + +if os.getenv('ENV_TYPE') == 'deepspeed': + try: + from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint + except: + from torch.utils.checkpoint import checkpoint +else: + from torch.utils.checkpoint import checkpoint + +try: + import xformers + import xformers.ops as xops + XFORMERS_IS_AVAILBLE = True +except: + XFORMERS_IS_AVAILBLE = False + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
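+ During inference (module.training is False) this module reduces to the identity.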
+ """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + drop=0., + subln=False, + + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + + self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity() + + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.ffn_ln(x) + + x = self.fc2(x) + x = self.drop(x) + return x + +class SwiGLU(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0., + norm_layer=nn.LayerNorm, subln=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.w1 = nn.Linear(in_features, hidden_features) + self.w2 = nn.Linear(in_features, hidden_features) + + self.act = act_layer() + self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity() + self.w3 = nn.Linear(hidden_features, out_features) + + self.drop = nn.Dropout(drop) + + def forward(self, x): + x1 = self.w1(x) + x2 = self.w2(x) + hidden = self.act(x1) * x2 + x = self.ffn_ln(hidden) + x = self.w3(x) + x = self.drop(x) + return x + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., + proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.subln = subln + if self.subln: + self.q_proj = nn.Linear(dim, all_head_dim, bias=False) + self.k_proj = nn.Linear(dim, all_head_dim, bias=False) + self.v_proj = nn.Linear(dim, all_head_dim, bias=False) + else: + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += 
window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity() + # self.proj = nn.Linear(all_head_dim, all_head_dim) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.xattn = xattn + self.xattn_drop = attn_drop + + self.rope = rope + + def forward(self, x, rel_pos_bias=None, attn_mask=None): + B, N, C = x.shape + if self.subln: + q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias) + k = F.linear(input=x, weight=self.k_proj.weight, bias=None) + v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias) + + q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) # B, num_heads, N, C + k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + else: + + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # 3, B, num_heads, N, C + q, k, v = qkv[0], qkv[1], qkv[2] + + if self.rope: + # slightly fast impl + q_t = q[:, :, 1:, :] + ro_q_t = self.rope(q_t) + q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v) + + k_t = k[:, :, 1:, :] + ro_k_t = self.rope(k_t) + k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v) + + if self.xattn: + q = q.permute(0, 2, 1, 3) # B, num_heads, N, C -> B, N, num_heads, C + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + x = xops.memory_efficient_attention( + q, k, v, + p=self.xattn_drop, + scale=self.scale, + ) + x = x.reshape(B, N, -1) + x = self.inner_attn_ln(x) + x = self.proj(x) + x = self.proj_drop(x) + else: + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0).type_as(attn) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias.type_as(attn) + + if attn_mask is not None: + attn_mask = attn_mask.bool() + attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.inner_attn_ln(x) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, 
norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False, + subln=False, naiveswiglu=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim, + xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + + if naiveswiglu: + self.mlp = SwiGLU( + in_features=dim, + hidden_features=mlp_hidden_dim, + subln=subln, + norm_layer=norm_layer, + ) + else: + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + subln=subln, + drop=drop + ) + + if init_values is not None and init_values > 0: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + self.postnorm = postnorm + + def forward(self, x, rel_pos_bias=None, attn_mask=None): + if self.gamma_1 is None: + if self.postnorm: + x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))) + x = x + self.drop_path(self.norm2(self.mlp(x))) + else: + x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + if self.postnorm: + x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))) + x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x, **kwargs): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
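+ # Shape note (descriptive comment): the conv projection maps (B, C, H, W)
+ # to (B, embed_dim, H/patch, W/patch); flatten(2).transpose(1, 2) then
+ # yields (B, num_patches, embed_dim), i.e. one token per patch.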
+ x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class EVAVisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0., + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False, + use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False, + pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False): + super().__init__() + + if not XFORMERS_IS_AVAILBLE: + xattn = False + + self.image_size = img_size + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) + else: + self.rel_pos_bias = None + + if rope: + half_head_dim = embed_dim // num_heads // 2 + hw_seq_len = img_size // patch_size + self.rope = 
VisionRotaryEmbeddingFast( + dim=half_head_dim, + pt_seq_len=pt_hw_seq_len, + ft_seq_len=hw_seq_len if intp_freq else None, + # patch_dropout=patch_dropout + ) + else: + self.rope = None + + self.naiveswiglu = naiveswiglu + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None, + xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) + + self.apply(self._init_weights) + self.fix_init_weight() + + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn + self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity() + + self.grad_checkpointing = grad_checkpointing + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + if self.naiveswiglu: + rescale(layer.mlp.w3.weight.data, layer_id + 1) + else: + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def get_cast_dtype(self) -> torch.dtype: + return self.blocks[0].mlp.fc2.weight.dtype + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + def lock(self, unlocked_groups=0, freeze_bn_stats=False): + assert unlocked_groups == 0, 'partial locking not currently supported for this model' + for param in self.parameters(): + param.requires_grad = False + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x, return_all_features=False, return_hidden=False, shuffle=False): + + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + if shuffle: + idx = torch.randperm(x.shape[1]) + 1 + zero = torch.LongTensor([0, ]) + idx = torch.cat([zero, idx]) + pos_embed = self.pos_embed[:, idx] + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if shuffle: + x = x + pos_embed + elif 
self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in + if os.getenv('RoPE') == '1': + if self.training and not isinstance(self.patch_dropout, nn.Identity): + x, patch_indices_keep = self.patch_dropout(x) + self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep) + else: + self.rope.forward = partial(self.rope.forward, patch_indices_keep=None) + x = self.patch_dropout(x) + else: + x = self.patch_dropout(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + hidden_states = [] + for idx, blk in enumerate(self.blocks): + if (0 < idx <= 20) and (idx % 4 == 0) and return_hidden: + hidden_states.append(x) + if self.grad_checkpointing: + x = checkpoint(blk, x, (rel_pos_bias,)) + else: + x = blk(x, rel_pos_bias=rel_pos_bias) + + if not return_all_features: + x = self.norm(x) + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)), hidden_states + else: + return x[:, 0], hidden_states + return x + + def forward(self, x, return_all_features=False, return_hidden=False, shuffle=False): + if return_all_features: + return self.forward_features(x, return_all_features, return_hidden, shuffle) + x, hidden_states = self.forward_features(x, return_all_features, return_hidden, shuffle) + x = self.head(x) + if return_hidden: + return x, hidden_states + return x diff --git a/eva_clip/factory.py b/eva_clip/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..ced8999997bf374b69f846bc73ea635fe8a6eb63 --- /dev/null +++ b/eva_clip/factory.py @@ -0,0 +1,517 @@ +import json +import logging +import os +import pathlib +import re +from copy import deepcopy +from pathlib import Path +from typing import Optional, Tuple, Union, Dict, Any +import torch + +from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD +from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\ + get_cast_dtype +from .openai import load_openai_model +from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model +from .transform import image_transform +from .tokenizer import HFTokenizer, tokenize +from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed + + +_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] +_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs + + +def _natural_key(string_): + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def _rescan_model_configs(): + global _MODEL_CONFIGS + + config_ext = ('.json',) + config_files = [] + for config_path in _MODEL_CONFIG_PATHS: + if config_path.is_file() and config_path.suffix in config_ext: + config_files.append(config_path) + elif config_path.is_dir(): + for ext in config_ext: + config_files.extend(config_path.glob(f'*{ext}')) + + for cf in config_files: + with open(cf, "r", encoding="utf8") as f: + model_cfg = json.load(f) + if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')): + _MODEL_CONFIGS[cf.stem] = model_cfg + + _MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))) + + +_rescan_model_configs() # initial populate of model config registry + + +def list_models(): + """ enumerate available model architectures based on config files """ + return list(_MODEL_CONFIGS.keys()) + 
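+# Usage sketch (illustrative; the names are examples, since the registry is
+# populated from whatever JSON files live in eva_clip/model_configs/):
+#
+#   import eva_clip
+#   names = eva_clip.list_models()            # e.g. ['EVA02-CLIP-L-14', ...]
+#   cfg = eva_clip.get_model_config(names[0])
+#   assert {'embed_dim', 'vision_cfg', 'text_cfg'} <= set(cfg)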
+ +def add_model_config(path): + """ add model config path or file and update registry """ + if not isinstance(path, Path): + path = Path(path) + _MODEL_CONFIG_PATHS.append(path) + _rescan_model_configs() + + +def get_model_config(model_name): + if model_name in _MODEL_CONFIGS: + return deepcopy(_MODEL_CONFIGS[model_name]) + else: + return None + + +def get_tokenizer(model_name): + config = get_model_config(model_name) + tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize + return tokenizer + + +# loading openai CLIP weights when is_openai=True for training +def load_state_dict(checkpoint_path: str, map_location: str='cpu', model_key: str='model|module|state_dict', is_openai: bool=False, skip_list: list=[]): + if is_openai: + model = torch.jit.load(checkpoint_path, map_location="cpu").eval() + state_dict = model.state_dict() + for key in ["input_resolution", "context_length", "vocab_size"]: + state_dict.pop(key, None) + else: + checkpoint = torch.load(checkpoint_path, map_location=map_location) + for mk in model_key.split('|'): + if isinstance(checkpoint, dict) and mk in checkpoint: + state_dict = checkpoint[mk] + break + else: + state_dict = checkpoint + if next(iter(state_dict.items()))[0].startswith('module'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + for k in skip_list: + if k in list(state_dict.keys()): + logging.info(f"Removing key {k} from pretrained checkpoint") + del state_dict[k] + + if os.getenv('RoPE') == '1': + for k in list(state_dict.keys()): + if 'freqs_cos' in k or 'freqs_sin' in k: + del state_dict[k] + return state_dict + + + +def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True): + state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False) + # detect old format and make compatible with new format + if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): + state_dict = convert_to_custom_text_state_dict(state_dict) + if 'text.logit_scale' in state_dict and hasattr(model, 'logit_scale'): + state_dict['logit_scale'] = state_dict['text.logit_scale'] + del state_dict['text.logit_scale'] + + # resize_clip_pos_embed for CLIP and open CLIP + if 'visual.positional_embedding' in state_dict: + resize_clip_pos_embed(state_dict, model) + # specified to eva_vit_model + elif 'visual.pos_embed' in state_dict: + resize_evaclip_pos_embed(state_dict, model) + + # resize_clip_pos_embed(state_dict, model) + incompatible_keys = model.load_state_dict(state_dict, strict=strict) + logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}") + return incompatible_keys + +def load_clip_visual_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]): + state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list) + + for k in list(state_dict.keys()): + if not k.startswith('visual.'): + del state_dict[k] + for k in list(state_dict.keys()): + if k.startswith('visual.'): + new_k = k[7:] + state_dict[new_k] = state_dict[k] + del state_dict[k] + return state_dict + +def load_clip_text_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]): + state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list) + + for k in list(state_dict.keys()): + if k.startswith('visual.'): + del state_dict[k] + return state_dict 
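+
+# Relationship sketch (illustrative, assuming a combined CLIP checkpoint that
+# stores vision weights under a "visual." prefix):
+#
+#   visual_sd = load_clip_visual_state_dict(ckpt_path)  # "visual." prefix stripped
+#   text_sd = load_clip_text_state_dict(ckpt_path)      # "visual.*" keys dropped
+#   model.visual.load_state_dict(visual_sd, strict=False)
+#   model.text.load_state_dict(text_sd, strict=False)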
+ +def get_pretrained_tag(pretrained_model): + pretrained_model = pretrained_model.lower() + if "laion" in pretrained_model or "open_clip" in pretrained_model: + return "open_clip" + elif "openai" in pretrained_model: + return "clip" + elif "eva" in pretrained_model and "clip" in pretrained_model: + return "eva_clip" + else: + return "other" + +def load_pretrained_checkpoint( + model, + visual_checkpoint_path, + text_checkpoint_path, + strict=True, + visual_model=None, + text_model=None, + model_key="model|module|state_dict", + skip_list=[]): + visual_tag = get_pretrained_tag(visual_model) + text_tag = get_pretrained_tag(text_model) + + logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}") + visual_incompatible_keys, text_incompatible_keys = None, None + if visual_checkpoint_path: + if visual_tag == "eva_clip" or visual_tag == "open_clip": + visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list) + elif visual_tag == "clip": + visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list) + else: + visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list) + + # resize_clip_pos_embed for CLIP and open CLIP + if 'positional_embedding' in visual_state_dict: + resize_visual_pos_embed(visual_state_dict, model) + # specified to EVA model + elif 'pos_embed' in visual_state_dict: + resize_eva_pos_embed(visual_state_dict, model) + + visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict) + logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}") + logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}") + + if text_checkpoint_path: + if text_tag == "eva_clip" or text_tag == "open_clip": + text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list) + elif text_tag == "clip": + text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list) + else: + text_state_dict = load_state_dict(text_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list) + + text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict) + + logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}") + logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}") + + return visual_incompatible_keys, text_incompatible_keys + +def create_model( + model_name: str, + pretrained: Optional[str] = None, + precision: str = 'fp32', + device: Union[str, torch.device] = 'cpu', + jit: bool = False, + force_quick_gelu: bool = False, + force_custom_clip: bool = False, + force_patch_dropout: Optional[float] = None, + pretrained_image: str = '', + pretrained_text: str = '', + pretrained_hf: bool = True, + pretrained_visual_model: str = None, + pretrained_text_model: str = None, + cache_dir: Optional[str] = None, + skip_list: list = [], +): + model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names + if isinstance(device, str): + device = torch.device(device) + + if pretrained and pretrained.lower() == 'openai': + logging.info(f'Loading pretrained {model_name} from OpenAI.') + model = load_openai_model( + model_name, + precision=precision, + device=device, + jit=jit, + cache_dir=cache_dir, + ) + else: + model_cfg = get_model_config(model_name) + if
model_cfg is not None: + logging.info(f'Loaded {model_name} model config.') + else: + logging.error(f'Model config for {model_name} not found; available models {list_models()}.') + raise RuntimeError(f'Model config for {model_name} not found.') + + if 'rope' in model_cfg.get('vision_cfg', {}): + if model_cfg['vision_cfg']['rope']: + os.environ['RoPE'] = "1" + else: + os.environ['RoPE'] = "0" + + if force_quick_gelu: + # override for use of QuickGELU on non-OpenAI transformer models + model_cfg["quick_gelu"] = True + + if force_patch_dropout is not None: + # override the default patch dropout value + model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout + + cast_dtype = get_cast_dtype(precision) + custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg']) + + + if custom_clip: + if 'hf_model_name' in model_cfg.get('text_cfg', {}): + model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf + model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype) + else: + model = CLIP(**model_cfg, cast_dtype=cast_dtype) + + pretrained_cfg = {} + if pretrained: + checkpoint_path = '' + pretrained_cfg = get_pretrained_cfg(model_name, pretrained) + if pretrained_cfg: + checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) + elif os.path.exists(pretrained): + checkpoint_path = pretrained + + if checkpoint_path: + logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') + load_checkpoint(model, + checkpoint_path, + model_key="model|module|state_dict", + strict=False + ) + else: + error_str = ( + f'Pretrained weights ({pretrained}) not found for model {model_name}. ' + f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.') + logging.warning(error_str) + raise RuntimeError(error_str) + else: + visual_checkpoint_path = '' + text_checkpoint_path = '' + + if pretrained_image: + pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names + pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image) + if 'timm_model_name' in model_cfg.get('vision_cfg', {}): + # pretrained weight loading for timm models set via vision_cfg + model_cfg['vision_cfg']['timm_model_pretrained'] = True + elif pretrained_image_cfg: + visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir) + elif os.path.exists(pretrained_image): + visual_checkpoint_path = pretrained_image + else: + logging.warning(f'Pretrained weights ({pretrained_image}) not found for model {model_name}.visual.') + raise RuntimeError(f'Pretrained weights ({pretrained_image}) not found for model {model_name}.visual.') + + if pretrained_text: + pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names + pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text) + if pretrained_text_cfg: + text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir) + elif os.path.exists(pretrained_text): + text_checkpoint_path = pretrained_text + else: + logging.warning(f'Pretrained weights ({pretrained_text}) not found for model {model_name}.text.') + raise RuntimeError(f'Pretrained weights ({pretrained_text}) not found for model {model_name}.text.') + + if visual_checkpoint_path: + logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).') + if text_checkpoint_path: + logging.info(f'Loading
pretrained {model_name}.text weights ({text_checkpoint_path}).') + + if visual_checkpoint_path or text_checkpoint_path: + load_pretrained_checkpoint( + model, + visual_checkpoint_path, + text_checkpoint_path, + strict=False, + visual_model=pretrained_visual_model, + text_model=pretrained_text_model, + model_key="model|module|state_dict", + skip_list=skip_list + ) + + if "fp16" in precision or "bf16" in precision: + logging.info(f'convert precision to {precision}') + model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16) + + model.to(device=device) + + # set image / mean metadata from pretrained_cfg if available, or use default + model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN + model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD + + if jit: + model = torch.jit.script(model) + + return model + + +def create_model_and_transforms( + model_name: str, + pretrained: Optional[str] = None, + precision: str = 'fp32', + device: Union[str, torch.device] = 'cpu', + jit: bool = False, + force_quick_gelu: bool = False, + force_custom_clip: bool = False, + force_patch_dropout: Optional[float] = None, + pretrained_image: str = '', + pretrained_text: str = '', + pretrained_hf: bool = True, + pretrained_visual_model: str = None, + pretrained_text_model: str = None, + image_mean: Optional[Tuple[float, ...]] = None, + image_std: Optional[Tuple[float, ...]] = None, + cache_dir: Optional[str] = None, + skip_list: list = [], +): + model = create_model( + model_name, + pretrained, + precision=precision, + device=device, + jit=jit, + force_quick_gelu=force_quick_gelu, + force_custom_clip=force_custom_clip, + force_patch_dropout=force_patch_dropout, + pretrained_image=pretrained_image, + pretrained_text=pretrained_text, + pretrained_hf=pretrained_hf, + pretrained_visual_model=pretrained_visual_model, + pretrained_text_model=pretrained_text_model, + cache_dir=cache_dir, + skip_list=skip_list, + ) + + image_mean = image_mean or getattr(model.visual, 'image_mean', None) + image_std = image_std or getattr(model.visual, 'image_std', None) + preprocess_train = image_transform( + model.visual.image_size, + is_train=True, + mean=image_mean, + std=image_std + ) + preprocess_val = image_transform( + model.visual.image_size, + is_train=False, + mean=image_mean, + std=image_std + ) + + return model, preprocess_train, preprocess_val + + +def create_transforms( + model_name: str, + pretrained: Optional[str] = None, + precision: str = 'fp32', + device: Union[str, torch.device] = 'cpu', + jit: bool = False, + force_quick_gelu: bool = False, + force_custom_clip: bool = False, + force_patch_dropout: Optional[float] = None, + pretrained_image: str = '', + pretrained_text: str = '', + pretrained_hf: bool = True, + pretrained_visual_model: str = None, + pretrained_text_model: str = None, + image_mean: Optional[Tuple[float, ...]] = None, + image_std: Optional[Tuple[float, ...]] = None, + cache_dir: Optional[str] = None, + skip_list: list = [], +): + model = create_model( + model_name, + pretrained, + precision=precision, + device=device, + jit=jit, + force_quick_gelu=force_quick_gelu, + force_custom_clip=force_custom_clip, + force_patch_dropout=force_patch_dropout, + pretrained_image=pretrained_image, + pretrained_text=pretrained_text, + pretrained_hf=pretrained_hf, + pretrained_visual_model=pretrained_visual_model, + pretrained_text_model=pretrained_text_model, + cache_dir=cache_dir, + skip_list=skip_list, + ) + + + image_mean = image_mean or 
getattr(model.visual, 'image_mean', None) + image_std = image_std or getattr(model.visual, 'image_std', None) + preprocess_train = image_transform( + model.visual.image_size, + is_train=True, + mean=image_mean, + std=image_std + ) + preprocess_val = image_transform( + model.visual.image_size, + is_train=False, + mean=image_mean, + std=image_std + ) + del model + + return preprocess_train, preprocess_val + +def create_model_from_pretrained( + model_name: str, + pretrained: str, + precision: str = 'fp32', + device: Union[str, torch.device] = 'cpu', + jit: bool = False, + force_quick_gelu: bool = False, + force_custom_clip: bool = False, + force_patch_dropout: Optional[float] = None, + return_transform: bool = True, + image_mean: Optional[Tuple[float, ...]] = None, + image_std: Optional[Tuple[float, ...]] = None, + cache_dir: Optional[str] = None, + is_frozen: bool = False, +): + if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained): + raise RuntimeError( + f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}.' + f' Use open_clip.list_pretrained() to find one.') + + model = create_model( + model_name, + pretrained, + precision=precision, + device=device, + jit=jit, + force_quick_gelu=force_quick_gelu, + force_custom_clip=force_custom_clip, + force_patch_dropout=force_patch_dropout, + cache_dir=cache_dir, + ) + + if is_frozen: + for param in model.parameters(): + param.requires_grad = False + + if not return_transform: + return model + + image_mean = image_mean or getattr(model.visual, 'image_mean', None) + image_std = image_std or getattr(model.visual, 'image_std', None) + preprocess = image_transform( + model.visual.image_size, + is_train=False, + mean=image_mean, + std=image_std + ) + + return model, preprocess diff --git a/eva_clip/hf_configs.py b/eva_clip/hf_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c9b704db1879676aed5cef26796303b65fe987 --- /dev/null +++ b/eva_clip/hf_configs.py @@ -0,0 +1,57 @@ +# HF architecture dict: +arch_dict = { + # https://huggingface.co/docs/transformers/model_doc/roberta#roberta + "roberta": { + "config_names": { + "context_length": "max_position_embeddings", + "vocab_size": "vocab_size", + "width": "hidden_size", + "heads": "num_attention_heads", + "layers": "num_hidden_layers", + "layer_attr": "layer", + "token_embeddings_attr": "embeddings" + }, + "pooler": "mean_pooler", + }, + # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig + "xlm-roberta": { + "config_names": { + "context_length": "max_position_embeddings", + "vocab_size": "vocab_size", + "width": "hidden_size", + "heads": "num_attention_heads", + "layers": "num_hidden_layers", + "layer_attr": "layer", + "token_embeddings_attr": "embeddings" + }, + "pooler": "mean_pooler", + }, + # https://huggingface.co/docs/transformers/model_doc/mt5#mt5 + "mt5": { + "config_names": { + # unlimited seqlen + # https://github.com/google-research/text-to-text-transfer-transformer/issues/273 + # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374 + "context_length": "", + "vocab_size": "vocab_size", + "width": "d_model", + "heads": "num_heads", + "layers": "num_layers", + "layer_attr": "block", + "token_embeddings_attr": "embed_tokens" + }, + "pooler": "mean_pooler", + }, + "bert": { + "config_names": { + "context_length": "max_position_embeddings", + "vocab_size": "vocab_size", + "width": "hidden_size", + "heads": 
"num_attention_heads", + "layers": "num_hidden_layers", + "layer_attr": "layer", + "token_embeddings_attr": "embeddings" + }, + "pooler": "mean_pooler", + } +} diff --git a/eva_clip/hf_model.py b/eva_clip/hf_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c4b9fd85b4066ba31db2bda5767ed1ce15de479d --- /dev/null +++ b/eva_clip/hf_model.py @@ -0,0 +1,248 @@ +""" huggingface model adapter + +Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model. +""" + +import re + +import torch +import torch.nn as nn +from torch.nn import functional as F +from torch import TensorType +try: + import transformers + from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig + from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \ + BaseModelOutputWithPoolingAndCrossAttentions +except ImportError as e: + transformers = None + + + class BaseModelOutput: + pass + + + class PretrainedConfig: + pass + +from .hf_configs import arch_dict + +# utils +def _camel2snake(s): + return re.sub(r'(? TensorType: + # image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(x.device) + # attn_mask = (x != self.config.pad_token_id).long() + # out = self.transformer( + # input_ids=x, + # attention_mask=attn_mask, + # encoder_hidden_states = image_embeds, + # encoder_attention_mask = image_atts, + # ) + # pooled_out = self.pooler(out, attn_mask) + + # return self.itm_proj(pooled_out) + + def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None): + if masked_indices is None: + masked_indices = torch.bernoulli(probability_matrix).bool() + + masked_indices[input_ids == self.tokenizer.pad_token_id] = False + masked_indices[input_ids == self.tokenizer.cls_token_id] = False + + if targets is not None: + targets[~masked_indices] = -100 # We only compute loss on masked tokens + + # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) + indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices + input_ids[indices_replaced] = self.tokenizer.mask_token_id + + # 10% of the time, we replace masked input tokens with random word + indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced + random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device) + input_ids[indices_random] = random_words[indices_random] + # The rest of the time (10% of the time) we keep the masked input tokens unchanged + + if targets is not None: + return input_ids, targets + else: + return input_ids + + def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25): + labels = input_ids.clone() + attn_mask = (input_ids != self.config.pad_token_id).long() + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(input_ids.device) + vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"]) + probability_matrix = torch.full(labels.shape, mlm_probability) + input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels, + probability_matrix = probability_matrix) + mlm_output = self.transformer(input_ids, + attention_mask = attn_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + labels = labels, + ) + return mlm_output.loss + # mlm_output = self.transformer(input_ids, + 
# attention_mask = attn_mask, + # encoder_hidden_states = image_embeds, + # encoder_attention_mask = image_atts, + # return_dict = True, + # ).last_hidden_state + # logits = self.mlm_proj(mlm_output) + + # # logits = logits[:, :-1, :].contiguous().view(-1, vocab_size) + # logits = logits[:, 1:, :].contiguous().view(-1, vocab_size) + # labels = labels[:, 1:].contiguous().view(-1) + + # mlm_loss = F.cross_entropy( + # logits, + # labels, + # # label_smoothing=0.1, + # ) + # return mlm_loss + + + def forward(self, x:TensorType) -> TensorType: + attn_mask = (x != self.config.pad_token_id).long() + out = self.transformer(input_ids=x, attention_mask=attn_mask) + pooled_out = self.pooler(out, attn_mask) + + return self.proj(pooled_out) + + def lock(self, unlocked_layers:int=0, freeze_layer_norm:bool=True): + if not unlocked_layers: # full freezing + for n, p in self.transformer.named_parameters(): + p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False + return + + encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer + layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"]) + print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model") + embeddings = getattr( + self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"]) + modules = [embeddings, *layer_list][:-unlocked_layers] + # freeze layers + for module in modules: + for n, p in module.named_parameters(): + p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False + + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.transformer.gradient_checkpointing_enable() + + def get_num_layers(self): + encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer + layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"]) + return len(layer_list) + + def init_parameters(self): + pass diff --git a/eva_clip/loss.py b/eva_clip/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..473f60d98d501067e85ace2dd089b00e249b6d17 --- /dev/null +++ b/eva_clip/loss.py @@ -0,0 +1,138 @@ +import math +import torch +import torch.nn as nn +from torch.nn import functional as F + +try: + import torch.distributed.nn + from torch import distributed as dist + has_distributed = True +except ImportError: + has_distributed = False + +try: + import horovod.torch as hvd +except ImportError: + hvd = None + +from timm.loss import LabelSmoothingCrossEntropy + + +def gather_features( + image_features, + text_features, + local_loss=False, + gather_with_grad=False, + rank=0, + world_size=1, + use_horovod=False +): + assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.' 
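+ # Backend note (descriptive comment): hvd.allgather is differentiable, so in
+ # the Horovod branch gather_with_grad only toggles the torch.no_grad() context,
+ # while in the torch.distributed branch gradients flow only through
+ # torch.distributed.nn.all_gather; in the non-differentiable paths the local
+ # rank's shard is re-inserted (when local_loss is False) to keep its gradient.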
+ if use_horovod: + assert hvd is not None, 'Please install horovod' + if gather_with_grad: + all_image_features = hvd.allgather(image_features) + all_text_features = hvd.allgather(text_features) + else: + with torch.no_grad(): + all_image_features = hvd.allgather(image_features) + all_text_features = hvd.allgather(text_features) + if not local_loss: + # ensure grads for local rank when all_* features don't have a gradient + gathered_image_features = list(all_image_features.chunk(world_size, dim=0)) + gathered_text_features = list(all_text_features.chunk(world_size, dim=0)) + gathered_image_features[rank] = image_features + gathered_text_features[rank] = text_features + all_image_features = torch.cat(gathered_image_features, dim=0) + all_text_features = torch.cat(gathered_text_features, dim=0) + else: + # We gather tensors from all gpus + if gather_with_grad: + all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0) + all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0) + # all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features, async_op=True), dim=0) + # all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features, async_op=True), dim=0) + else: + gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)] + gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)] + dist.all_gather(gathered_image_features, image_features) + dist.all_gather(gathered_text_features, text_features) + if not local_loss: + # ensure grads for local rank when all_* features don't have a gradient + gathered_image_features[rank] = image_features + gathered_text_features[rank] = text_features + all_image_features = torch.cat(gathered_image_features, dim=0) + all_text_features = torch.cat(gathered_text_features, dim=0) + + return all_image_features, all_text_features + + +class ClipLoss(nn.Module): + + def __init__( + self, + local_loss=False, + gather_with_grad=False, + cache_labels=False, + rank=0, + world_size=1, + use_horovod=False, + smoothing=0., + ): + super().__init__() + self.local_loss = local_loss + self.gather_with_grad = gather_with_grad + self.cache_labels = cache_labels + self.rank = rank + self.world_size = world_size + self.use_horovod = use_horovod + self.label_smoothing_cross_entropy = LabelSmoothingCrossEntropy(smoothing=smoothing) if smoothing > 0 else None + + # cache state + self.prev_num_logits = 0 + self.labels = {} + + def forward(self, image_features, text_features, logit_scale=1.): + device = image_features.device + if self.world_size > 1: + all_image_features, all_text_features = gather_features( + image_features, text_features, + self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod) + + if self.local_loss: + logits_per_image = logit_scale * image_features @ all_text_features.T + logits_per_text = logit_scale * text_features @ all_image_features.T + else: + logits_per_image = logit_scale * all_image_features @ all_text_features.T + logits_per_text = logits_per_image.T + else: + logits_per_image = logit_scale * image_features @ text_features.T + logits_per_text = logit_scale * text_features @ image_features.T + # calculated ground-truth and cache if enabled + num_logits = logits_per_image.shape[0] + if self.prev_num_logits != num_logits or device not in self.labels: + labels = torch.arange(num_logits, device=device, dtype=torch.long) + if self.world_size > 1 and self.local_loss: + labels = labels + 
num_logits * self.rank + if self.cache_labels: + self.labels[device] = labels + self.prev_num_logits = num_logits + else: + labels = self.labels[device] + + if self.label_smoothing_cross_entropy: + total_loss = ( + self.label_smoothing_cross_entropy(logits_per_image, labels) + + self.label_smoothing_cross_entropy(logits_per_text, labels) + ) / 2 + else: + total_loss = ( + F.cross_entropy(logits_per_image, labels) + + F.cross_entropy(logits_per_text, labels) + ) / 2 + + acc = None + i2t_acc = (logits_per_image.argmax(-1) == labels).sum() / len(logits_per_image) + t2i_acc = (logits_per_text.argmax(-1) == labels).sum() / len(logits_per_text) + acc = {"i2t": i2t_acc, "t2i": t2i_acc} + return total_loss, acc \ No newline at end of file diff --git a/eva_clip/model.py b/eva_clip/model.py new file mode 100644 index 0000000000000000000000000000000000000000..da3bbd755799ced672385d1029ba7ce6d5215b0b --- /dev/null +++ b/eva_clip/model.py @@ -0,0 +1,439 @@ +""" CLIP Model + +Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. +""" +import os +from dataclasses import dataclass +from typing import Optional, Tuple, Union +from functools import partial + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +try: + from .hf_model import HFTextEncoder +except: + HFTextEncoder = None +from .modified_resnet import ModifiedResNet +from .timm_model import TimmModel +from .eva_vit_model import EVAVisionTransformer +from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer + +try: + from apex.normalization import FusedLayerNorm +except: + FusedLayerNorm = LayerNorm + print("Please 'pip install apex'") + +try: + import xformers.ops as xops +except ImportError: + xops = None + print("Please 'pip install xformers'") + +@dataclass +class CLIPVisionCfg: + layers: Union[Tuple[int, int, int, int], int] = 12 + width: int = 768 + head_width: int = 64 + mlp_ratio: float = 4.0 + patch_size: int = 16 + image_size: Union[Tuple[int, int], int] = 224 + ls_init_value: Optional[float] = None # layer scale initial value + patch_dropout: float = 0. 
# what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results + global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) + drop_path_rate: Optional[float] = None # drop path rate + timm_model_name: str = None # a valid model name overrides layers, width, patch_size + timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model + timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') + timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') + timm_proj_bias: bool = False # enable bias final projection + eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size + qkv_bias: bool = True + fusedLN: bool = False + xattn: bool = False + postnorm: bool = False + rope: bool = False + pt_hw_seq_len: int = 16 # 224/14 + intp_freq: bool = False + naiveswiglu: bool = False + subln: bool = False + + +@dataclass +class CLIPTextCfg: + context_length: int = 77 + vocab_size: int = 49408 + width: int = 512 + heads: int = 8 + layers: int = 12 + ls_init_value: Optional[float] = None # layer scale initial value + hf_model_name: str = None + hf_tokenizer_name: str = None + hf_model_pretrained: bool = True + proj: str = 'mlp' + pooler_type: str = 'mean_pooler' + masked_language_modeling: bool = False + fusedLN: bool = False + xattn: bool = False + attn_mask: bool = True + +def get_cast_dtype(precision: str): + cast_dtype = None + if precision == 'bf16': + cast_dtype = torch.bfloat16 + elif precision == 'fp16': + cast_dtype = torch.float16 + return cast_dtype + + +def _build_vision_tower( + embed_dim: int, + vision_cfg: CLIPVisionCfg, + quick_gelu: bool = False, + cast_dtype: Optional[torch.dtype] = None +): + if isinstance(vision_cfg, dict): + vision_cfg = CLIPVisionCfg(**vision_cfg) + + # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more + # memory efficient in recent PyTorch releases (>= 1.10). + # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
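+ # Dispatch note (descriptive comment): the branches below choose the vision
+ # tower in precedence order: eva_model_name -> EVAVisionTransformer,
+ # timm_model_name -> TimmModel, tuple/list layers -> ModifiedResNet, else the
+ # plain VisionTransformer, which is the only branch that consumes act_layer.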
+ act_layer = QuickGELU if quick_gelu else nn.GELU + + if vision_cfg.eva_model_name: + vision_heads = vision_cfg.width // vision_cfg.head_width + norm_layer = LayerNorm + + visual = EVAVisionTransformer( + img_size=vision_cfg.image_size, + patch_size=vision_cfg.patch_size, + num_classes=embed_dim, + use_mean_pooling=vision_cfg.global_average_pool, #False + init_values=vision_cfg.ls_init_value, + patch_dropout=vision_cfg.patch_dropout, + embed_dim=vision_cfg.width, + depth=vision_cfg.layers, + num_heads=vision_heads, + mlp_ratio=vision_cfg.mlp_ratio, + qkv_bias=vision_cfg.qkv_bias, + drop_path_rate=vision_cfg.drop_path_rate, + norm_layer= partial(FusedLayerNorm, eps=1e-6) if vision_cfg.fusedLN else partial(norm_layer, eps=1e-6), + xattn=vision_cfg.xattn, + rope=vision_cfg.rope, + postnorm=vision_cfg.postnorm, + pt_hw_seq_len= vision_cfg.pt_hw_seq_len, # 224/14 + intp_freq= vision_cfg.intp_freq, + naiveswiglu= vision_cfg.naiveswiglu, + subln= vision_cfg.subln + ) + elif vision_cfg.timm_model_name: + visual = TimmModel( + vision_cfg.timm_model_name, + pretrained=vision_cfg.timm_model_pretrained, + pool=vision_cfg.timm_pool, + proj=vision_cfg.timm_proj, + proj_bias=vision_cfg.timm_proj_bias, + embed_dim=embed_dim, + image_size=vision_cfg.image_size + ) + act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models + elif isinstance(vision_cfg.layers, (tuple, list)): + vision_heads = vision_cfg.width * 32 // vision_cfg.head_width + visual = ModifiedResNet( + layers=vision_cfg.layers, + output_dim=embed_dim, + heads=vision_heads, + image_size=vision_cfg.image_size, + width=vision_cfg.width + ) + else: + vision_heads = vision_cfg.width // vision_cfg.head_width + norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm + visual = VisionTransformer( + image_size=vision_cfg.image_size, + patch_size=vision_cfg.patch_size, + width=vision_cfg.width, + layers=vision_cfg.layers, + heads=vision_heads, + mlp_ratio=vision_cfg.mlp_ratio, + ls_init_value=vision_cfg.ls_init_value, + patch_dropout=vision_cfg.patch_dropout, + global_average_pool=vision_cfg.global_average_pool, + output_dim=embed_dim, + act_layer=act_layer, + norm_layer=norm_layer, + ) + + return visual + + +def _build_text_tower( + embed_dim: int, + text_cfg: CLIPTextCfg, + quick_gelu: bool = False, + cast_dtype: Optional[torch.dtype] = None, +): + if isinstance(text_cfg, dict): + text_cfg = CLIPTextCfg(**text_cfg) + + if text_cfg.hf_model_name: + text = HFTextEncoder( + text_cfg.hf_model_name, + output_dim=embed_dim, + tokenizer_name=text_cfg.hf_tokenizer_name, + proj=text_cfg.proj, + pooler_type=text_cfg.pooler_type, + masked_language_modeling=text_cfg.masked_language_modeling + ) + else: + act_layer = QuickGELU if quick_gelu else nn.GELU + norm_layer = LayerNorm + + text = TextTransformer( + context_length=text_cfg.context_length, + vocab_size=text_cfg.vocab_size, + width=text_cfg.width, + heads=text_cfg.heads, + layers=text_cfg.layers, + ls_init_value=text_cfg.ls_init_value, + output_dim=embed_dim, + act_layer=act_layer, + norm_layer= FusedLayerNorm if text_cfg.fusedLN else norm_layer, + xattn=text_cfg.xattn, + attn_mask=text_cfg.attn_mask, + ) + return text + +class CLIP(nn.Module): + def __init__( + self, + embed_dim: int, + vision_cfg: CLIPVisionCfg, + text_cfg: CLIPTextCfg, + quick_gelu: bool = False, + cast_dtype: Optional[torch.dtype] = None, + ): + super().__init__() + self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) + + text = 
_build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) + self.transformer = text.transformer + self.vocab_size = text.vocab_size + self.token_embedding = text.token_embedding + self.positional_embedding = text.positional_embedding + self.ln_final = text.ln_final + self.text_projection = text.text_projection + self.register_buffer('attn_mask', text.attn_mask, persistent=False) + + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): + # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 + self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.visual.set_grad_checkpointing(enable) + self.transformer.grad_checkpointing = enable + + @torch.jit.ignore + def no_weight_decay(self): + return {'logit_scale'} + + def encode_image(self, image, normalize: bool = False): + features = self.visual(image) + return F.normalize(features, dim=-1) if normalize else features + + def encode_text(self, text, normalize: bool = False): + cast_dtype = self.transformer.get_cast_dtype() + + x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.to(cast_dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x, attn_mask=self.attn_mask) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + return F.normalize(x, dim=-1) if normalize else x + + def forward(self, image, text): + image_features = self.encode_image(image, normalize=True) + text_features = self.encode_text(text, normalize=True) + return image_features, text_features, self.logit_scale.exp() + + +class CustomCLIP(nn.Module): + def __init__( + self, + embed_dim: int, + vision_cfg: CLIPVisionCfg, + text_cfg: CLIPTextCfg, + quick_gelu: bool = False, + cast_dtype: Optional[torch.dtype] = None, + itm_task: bool = False, + ): + super().__init__() + self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) + self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): + # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 + self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) + + def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True): + self.text.lock(unlocked_layers, freeze_layer_norm) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.visual.set_grad_checkpointing(enable) + self.text.set_grad_checkpointing(enable) + + @torch.jit.ignore + def no_weight_decay(self): + return {'logit_scale'} + + def encode_image(self, image, normalize: bool = False): + features = self.visual(image) + return F.normalize(features, dim=-1) if normalize else features + + def encode_text(self, text, normalize: bool = False): + features = self.text(text) + return F.normalize(features, dim=-1) if normalize else features + + def forward(self, image, text): + image_features = self.encode_image(image, normalize=True) + text_features = self.encode_text(text, normalize=True) + return image_features, text_features, 
self.logit_scale.exp() + + +def convert_weights_to_lp(model: nn.Module, dtype=torch.float16): + """Convert applicable model parameters to low-precision (bf16 or fp16)""" + + def _convert_weights(l): + + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.to(dtype) + if l.bias is not None: + l.bias.data = l.bias.data.to(dtype) + + if isinstance(l, (nn.MultiheadAttention, Attention)): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr, None) + if tensor is not None: + tensor.data = tensor.data.to(dtype) + + if isinstance(l, nn.Parameter): + l.data = l.data.to(dtype) + + for name in ["text_projection", "proj"]: + if hasattr(l, name) and isinstance(l, nn.Parameter): + attr = getattr(l, name, None) + if attr is not None: + attr.data = attr.data.to(dtype) + + model.apply(_convert_weights) + + +convert_weights_to_fp16 = convert_weights_to_lp # backwards compat + + +# used to maintain checkpoint compatibility +def convert_to_custom_text_state_dict(state_dict: dict): + if 'text_projection' in state_dict: + # old format state_dict, move text tower -> .text + new_state_dict = {} + for k, v in state_dict.items(): + if any(k.startswith(p) for p in ( + 'text_projection', + 'positional_embedding', + 'token_embedding', + 'transformer', + 'ln_final', + 'logit_scale' + )): + k = 'text.' + k + new_state_dict[k] = v + return new_state_dict + return state_dict + + +def build_model_from_openai_state_dict( + state_dict: dict, + quick_gelu=True, + cast_dtype=torch.float16, +): + vit = "visual.proj" in state_dict + + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_size = vision_patch_size * grid_size + else: + counts: list = [ + len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_size = output_width * 32 + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + vision_cfg = CLIPVisionCfg( + layers=vision_layers, + width=vision_width, + patch_size=vision_patch_size, + image_size=image_size, + ) + text_cfg = CLIPTextCfg( + context_length=context_length, + vocab_size=vocab_size, + width=transformer_width, + heads=transformer_heads, + layers=transformer_layers + ) + model = CLIP( + embed_dim, + vision_cfg=vision_cfg, + text_cfg=text_cfg, + quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU + cast_dtype=cast_dtype, + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + state_dict.pop(key, None) + + convert_weights_to_fp16(model) # OpenAI state dicts are partially 
converted to float16 + model.load_state_dict(state_dict) + return model.eval() + + +def trace_model(model, batch_size=256, device=torch.device('cpu')): + model.eval() + image_size = model.visual.image_size + example_images = torch.ones((batch_size, 3, image_size, image_size), device=device) + example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device) + model = torch.jit.trace_module( + model, + inputs=dict( + forward=(example_images, example_text), + encode_text=(example_text,), + encode_image=(example_images,) + )) + model.visual.image_size = image_size + return model diff --git a/eva_clip/model_configs/EVA01-CLIP-B-16.json b/eva_clip/model_configs/EVA01-CLIP-B-16.json new file mode 100644 index 0000000000000000000000000000000000000000..aad2058003962a4ab286bf4e1ae956288af34e62 --- /dev/null +++ b/eva_clip/model_configs/EVA01-CLIP-B-16.json @@ -0,0 +1,19 @@ +{ + "embed_dim": 512, + "vision_cfg": { + "image_size": 224, + "layers": 12, + "width": 768, + "patch_size": 16, + "eva_model_name": "eva-clip-b-16", + "ls_init_value": 0.1, + "drop_path_rate": 0.0 + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 512, + "heads": 8, + "layers": 12 + } +} \ No newline at end of file diff --git a/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json b/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json new file mode 100644 index 0000000000000000000000000000000000000000..100279572ff6d1bcca601f0eb526b4d4ff174c7d --- /dev/null +++ b/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json @@ -0,0 +1,24 @@ +{ + "embed_dim": 1024, + "vision_cfg": { + "image_size": 224, + "layers": 40, + "width": 1408, + "head_width": 88, + "mlp_ratio": 4.3637, + "patch_size": 14, + "eva_model_name": "eva-clip-g-14-x", + "drop_path_rate": 0, + "xattn": true, + "fusedLN": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 1024, + "heads": 16, + "layers": 24, + "xattn": false, + "fusedLN": true + } +} \ No newline at end of file diff --git a/eva_clip/model_configs/EVA01-CLIP-g-14.json b/eva_clip/model_configs/EVA01-CLIP-g-14.json new file mode 100644 index 0000000000000000000000000000000000000000..5d338b4e6104241d1f0304ee82400035d5385332 --- /dev/null +++ b/eva_clip/model_configs/EVA01-CLIP-g-14.json @@ -0,0 +1,24 @@ +{ + "embed_dim": 1024, + "vision_cfg": { + "image_size": 224, + "layers": 40, + "width": 1408, + "head_width": 88, + "mlp_ratio": 4.3637, + "patch_size": 14, + "eva_model_name": "eva-clip-g-14-x", + "drop_path_rate": 0.4, + "xattn": true, + "fusedLN": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 768, + "heads": 12, + "layers": 12, + "xattn": false, + "fusedLN": true + } +} \ No newline at end of file diff --git a/eva_clip/model_configs/EVA02-CLIP-B-16.json b/eva_clip/model_configs/EVA02-CLIP-B-16.json new file mode 100644 index 0000000000000000000000000000000000000000..e4a6e723f77033caa341ddf9b5be1787d64ad42c --- /dev/null +++ b/eva_clip/model_configs/EVA02-CLIP-B-16.json @@ -0,0 +1,29 @@ +{ + "embed_dim": 512, + "vision_cfg": { + "image_size": 224, + "layers": 12, + "width": 768, + "head_width": 64, + "patch_size": 16, + "mlp_ratio": 2.6667, + "eva_model_name": "eva-clip-b-16-X", + "drop_path_rate": 0.0, + "xattn": true, + "fusedLN": true, + "rope": true, + "pt_hw_seq_len": 16, + "intp_freq": true, + "naiveswiglu": true, + "subln": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 512, + "heads": 8, + "layers": 12, + "xattn": true, + "fusedLN": true + } +} \ No 
newline at end of file diff --git a/eva_clip/model_configs/EVA02-CLIP-L-14-336.json b/eva_clip/model_configs/EVA02-CLIP-L-14-336.json new file mode 100644 index 0000000000000000000000000000000000000000..3e1d124e1118911c5ad7b1ce85df195aca363ac4 --- /dev/null +++ b/eva_clip/model_configs/EVA02-CLIP-L-14-336.json @@ -0,0 +1,29 @@ +{ + "embed_dim": 768, + "vision_cfg": { + "image_size": 336, + "layers": 24, + "width": 1024, + "drop_path_rate": 0, + "head_width": 64, + "mlp_ratio": 2.6667, + "patch_size": 14, + "eva_model_name": "eva-clip-l-14-336", + "xattn": true, + "fusedLN": true, + "rope": true, + "pt_hw_seq_len": 16, + "intp_freq": true, + "naiveswiglu": true, + "subln": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 768, + "heads": 12, + "layers": 12, + "xattn": false, + "fusedLN": true + } +} \ No newline at end of file diff --git a/eva_clip/model_configs/EVA02-CLIP-L-14.json b/eva_clip/model_configs/EVA02-CLIP-L-14.json new file mode 100644 index 0000000000000000000000000000000000000000..03b22ad3cfb92f9c843b9ec8d672e57e7a9ba4a2 --- /dev/null +++ b/eva_clip/model_configs/EVA02-CLIP-L-14.json @@ -0,0 +1,29 @@ +{ + "embed_dim": 768, + "vision_cfg": { + "image_size": 224, + "layers": 24, + "width": 1024, + "drop_path_rate": 0, + "head_width": 64, + "mlp_ratio": 2.6667, + "patch_size": 14, + "eva_model_name": "eva-clip-l-14", + "xattn": true, + "fusedLN": true, + "rope": true, + "pt_hw_seq_len": 16, + "intp_freq": true, + "naiveswiglu": true, + "subln": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 768, + "heads": 12, + "layers": 12, + "xattn": false, + "fusedLN": true + } +} \ No newline at end of file diff --git a/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json b/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json new file mode 100644 index 0000000000000000000000000000000000000000..aa04e2545ac1e015daae2c10133956ce969524f7 --- /dev/null +++ b/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json @@ -0,0 +1,25 @@ +{ + "embed_dim": 1024, + "vision_cfg": { + "image_size": 224, + "layers": 64, + "width": 1792, + "head_width": 112, + "mlp_ratio": 8.571428571428571, + "patch_size": 14, + "eva_model_name": "eva-clip-4b-14-x", + "drop_path_rate": 0, + "xattn": true, + "postnorm": true, + "fusedLN": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 1280, + "heads": 20, + "layers": 32, + "xattn": false, + "fusedLN": true + } +} diff --git a/eva_clip/model_configs/EVA02-CLIP-bigE-14.json b/eva_clip/model_configs/EVA02-CLIP-bigE-14.json new file mode 100644 index 0000000000000000000000000000000000000000..747ffccc8bd49dbb6701b58e15843b7fe3754e64 --- /dev/null +++ b/eva_clip/model_configs/EVA02-CLIP-bigE-14.json @@ -0,0 +1,25 @@ +{ + "embed_dim": 1024, + "vision_cfg": { + "image_size": 224, + "layers": 64, + "width": 1792, + "head_width": 112, + "mlp_ratio": 8.571428571428571, + "patch_size": 14, + "eva_model_name": "eva-clip-4b-14-x", + "drop_path_rate": 0, + "xattn": true, + "postnorm": true, + "fusedLN": true + }, + "text_cfg": { + "context_length": 77, + "vocab_size": 49408, + "width": 1024, + "heads": 16, + "layers": 24, + "xattn": false, + "fusedLN": true + } +} \ No newline at end of file diff --git a/eva_clip/modified_resnet.py b/eva_clip/modified_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..151bfdd0b052d3db1b160d8b2299c33a6e944a4b --- /dev/null +++ b/eva_clip/modified_resnet.py @@ -0,0 +1,181 @@ +from collections import OrderedDict + +import torch 
+from torch import nn +from torch.nn import functional as F + +from eva_clip.utils import freeze_batch_norm_2d + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super().__init__() + + # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.act2 = nn.ReLU(inplace=True) + + self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() + + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.act3 = nn.ReLU(inplace=True) + + self.downsample = None + self.stride = stride + + if stride > 1 or inplanes != planes * Bottleneck.expansion: + # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 + self.downsample = nn.Sequential(OrderedDict([ + ("-1", nn.AvgPool2d(stride)), + ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), + ("1", nn.BatchNorm2d(planes * self.expansion)) + ])) + + def forward(self, x: torch.Tensor): + identity = x + + out = self.act1(self.bn1(self.conv1(x))) + out = self.act2(self.bn2(self.conv2(out))) + out = self.avgpool(out) + out = self.bn3(self.conv3(out)) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.act3(out) + return out + + +class AttentionPool2d(nn.Module): + def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC + x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC + x, _ = F.multi_head_attention_forward( + query=x, key=x, value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0., + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False + ) + + return x[0] + + +class ModifiedResNet(nn.Module): + """ + A ResNet class that is similar to torchvision's but contains the following changes: + - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. 
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 + - The final pooling layer is a QKV attention instead of an average pool + """ + + def __init__(self, layers, output_dim, heads, image_size=224, width=64): + super().__init__() + self.output_dim = output_dim + self.image_size = image_size + + # the 3-layer stem + self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(width // 2) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(width // 2) + self.act2 = nn.ReLU(inplace=True) + self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) + self.bn3 = nn.BatchNorm2d(width) + self.act3 = nn.ReLU(inplace=True) + self.avgpool = nn.AvgPool2d(2) + + # residual layers + self._inplanes = width # this is a *mutable* variable used during construction + self.layer1 = self._make_layer(width, layers[0]) + self.layer2 = self._make_layer(width * 2, layers[1], stride=2) + self.layer3 = self._make_layer(width * 4, layers[2], stride=2) + self.layer4 = self._make_layer(width * 8, layers[3], stride=2) + + embed_dim = width * 32 # the ResNet feature dimension + self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim) + + self.init_parameters() + + def _make_layer(self, planes, blocks, stride=1): + layers = [Bottleneck(self._inplanes, planes, stride)] + + self._inplanes = planes * Bottleneck.expansion + for _ in range(1, blocks): + layers.append(Bottleneck(self._inplanes, planes)) + + return nn.Sequential(*layers) + + def init_parameters(self): + if self.attnpool is not None: + std = self.attnpool.c_proj.in_features ** -0.5 + nn.init.normal_(self.attnpool.q_proj.weight, std=std) + nn.init.normal_(self.attnpool.k_proj.weight, std=std) + nn.init.normal_(self.attnpool.v_proj.weight, std=std) + nn.init.normal_(self.attnpool.c_proj.weight, std=std) + + for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]: + for name, param in resnet_block.named_parameters(): + if name.endswith("bn3.weight"): + nn.init.zeros_(param) + + def lock(self, unlocked_groups=0, freeze_bn_stats=False): + assert unlocked_groups == 0, 'partial locking not currently supported for this model' + for param in self.parameters(): + param.requires_grad = False + if freeze_bn_stats: + freeze_batch_norm_2d(self) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + # FIXME support for non-transformer + pass + + def stem(self, x): + x = self.act1(self.bn1(self.conv1(x))) + x = self.act2(self.bn2(self.conv2(x))) + x = self.act3(self.bn3(self.conv3(x))) + x = self.avgpool(x) + return x + + def forward(self, x): + x = self.stem(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.attnpool(x) + + return x diff --git a/eva_clip/openai.py b/eva_clip/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..cc4e13e876d6a7a3463b457e62c517cb063b1356 --- /dev/null +++ b/eva_clip/openai.py @@ -0,0 +1,144 @@ +""" OpenAI pretrained model functions + +Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. 
+""" + +import os +import warnings +from typing import List, Optional, Union + +import torch + +from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype +from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url + +__all__ = ["list_openai_models", "load_openai_model"] + + +def list_openai_models() -> List[str]: + """Returns the names of available CLIP models""" + return list_pretrained_models_by_tag('openai') + + +def load_openai_model( + name: str, + precision: Optional[str] = None, + device: Optional[Union[str, torch.device]] = None, + jit: bool = True, + cache_dir: Optional[str] = None, +): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + precision: str + Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'. + device : Union[str, torch.device] + The device to put the loaded model + jit : bool + Whether to load the optimized JIT model (default) or more hackable non-JIT model. + cache_dir : Optional[str] + The directory to cache the downloaded model weights + + Returns + ------- + model : torch.nn.Module + The CLIP model + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + if precision is None: + precision = 'fp32' if device == 'cpu' else 'fp16' + + if get_pretrained_url(name, 'openai'): + model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}") + + try: + # loading JIT archive + model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + + if not jit: + # Build a non-jit model from the OpenAI jitted model state dict + cast_dtype = get_cast_dtype(precision) + try: + model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype) + except KeyError: + sd = {k[7:]: v for k, v in state_dict["state_dict"].items()} + model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype) + + # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use + model = model.to(device) + if precision.startswith('amp') or precision == 'fp32': + model.float() + elif precision == 'bf16': + convert_weights_to_lp(model, dtype=torch.bfloat16) + + return model + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 (typically for CPU) + if precision == 'fp32': + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + model.float() + + # ensure image_size attr available at consistent location for both jit and non-jit + model.visual.image_size = model.input_resolution.item() + return model diff --git a/eva_clip/pretrained.py b/eva_clip/pretrained.py new file mode 100644 index 0000000000000000000000000000000000000000..a1e55dcf36a0e7dbd4c13b4ca2d7cb460e4c3547 --- /dev/null +++ b/eva_clip/pretrained.py @@ -0,0 +1,332 @@ +import hashlib +import os +import urllib +import warnings +from functools import partial +from typing import Dict, Union + +from tqdm import tqdm + +try: + from huggingface_hub import hf_hub_download + _has_hf_hub = True +except ImportError: + hf_hub_download = None + _has_hf_hub = False + + +def _pcfg(url='', hf_hub='', filename='', mean=None, std=None): + return dict( + url=url, + hf_hub=hf_hub, + mean=mean, + std=std, + ) + +_VITB32 = dict( + openai=_pcfg( + "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"), + laion400m_e31=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"), + laion400m_e32=_pcfg( + 
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"), + laion2b_e16=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"), + laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/') +) + +_VITB32_quickgelu = dict( + openai=_pcfg( + "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"), + laion400m_e31=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"), + laion400m_e32=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"), +) + +_VITB16 = dict( + openai=_pcfg( + "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"), + laion400m_e31=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"), + laion400m_e32=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"), + laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'), +) + +_EVAB16 = dict( + eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'), + eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'), + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'), + eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'), +) + +_VITB16_PLUS_240 = dict( + laion400m_e31=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"), + laion400m_e32=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"), +) + +_VITL14 = dict( + openai=_pcfg( + "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"), + laion400m_e31=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"), + laion400m_e32=_pcfg( + "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"), + laion2b_s32b_b82k=_pcfg( + hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), +) + +_EVAL14 = dict( + eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'), + eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'), + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'), + eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'), +) + +_VITL14_336 = dict( + openai=_pcfg( + "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"), +) + +_EVAL14_336 = dict( + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'), + eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'), + eva_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'), + eva02_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'), +) + +_VITH14 = dict( + laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'), +) + +_VITg14 = dict( + laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'), + 
laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'), +) + +_EVAg14 = dict( + eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'), + eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'), + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'), + eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'), +) + +_EVAg14_PLUS = dict( + eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'), + eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'), + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'), + eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'), +) + +_VITbigG14 = dict( + laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'), +) + +_EVAbigE14 = dict( + eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'), + eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'), + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'), + eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'), +) + +_EVAbigE14_PLUS = dict( + eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'), + eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'), + eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'), + eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'), +) + + +_PRETRAINED = { + # "ViT-B-32": _VITB32, + "OpenaiCLIP-B-32": _VITB32, + "OpenCLIP-B-32": _VITB32, + + # "ViT-B-32-quickgelu": _VITB32_quickgelu, + "OpenaiCLIP-B-32-quickgelu": _VITB32_quickgelu, + "OpenCLIP-B-32-quickgelu": _VITB32_quickgelu, + + # "ViT-B-16": _VITB16, + "OpenaiCLIP-B-16": _VITB16, + "OpenCLIP-B-16": _VITB16, + + "EVA02-B-16": _EVAB16, + "EVA02-CLIP-B-16": _EVAB16, + + # "ViT-B-16-plus-240": _VITB16_PLUS_240, + "OpenCLIP-B-16-plus-240": _VITB16_PLUS_240, + + # "ViT-L-14": _VITL14, + "OpenaiCLIP-L-14": _VITL14, + "OpenCLIP-L-14": _VITL14, + + "EVA02-L-14": _EVAL14, + "EVA02-CLIP-L-14": _EVAL14, + + # "ViT-L-14-336": _VITL14_336, + "OpenaiCLIP-L-14-336": _VITL14_336, + + "EVA02-CLIP-L-14-336": _EVAL14_336, + + # "ViT-H-14": _VITH14, + # "ViT-g-14": _VITg14, + "OpenCLIP-H-14": _VITH14, + "OpenCLIP-g-14": _VITg14, + + "EVA01-CLIP-g-14": _EVAg14, + "EVA01-CLIP-g-14-plus": _EVAg14_PLUS, + + # "ViT-bigG-14": _VITbigG14, + "OpenCLIP-bigG-14": _VITbigG14, + + "EVA02-CLIP-bigE-14": _EVAbigE14, + "EVA02-CLIP-bigE-14-plus": _EVAbigE14_PLUS, +} + + +def _clean_tag(tag: str): + # normalize pretrained tags + return tag.lower().replace('-', '_') + + +def list_pretrained(as_str: bool = False): + """ returns list of pretrained models + Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True + """ + return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()] + + +def list_pretrained_models_by_tag(tag: str): + """ return all models having the specified pretrain tag """ + models = [] + tag = _clean_tag(tag) + for k in _PRETRAINED.keys(): + if tag in _PRETRAINED[k]: + models.append(k) + return models + + +def list_pretrained_tags_by_model(model: str): + """ return all pretrain tags for the specified model architecture """ + tags = [] + if model in _PRETRAINED: + tags.extend(_PRETRAINED[model].keys()) + return tags + + +def is_pretrained_cfg(model: str, tag: str): + if model not in _PRETRAINED: + return False + return _clean_tag(tag) in _PRETRAINED[model] + + +def get_pretrained_cfg(model: str, tag: str): + if model not in _PRETRAINED: + return {} + model_pretrained = _PRETRAINED[model] + return 
model_pretrained.get(_clean_tag(tag), {}) + + +def get_pretrained_url(model: str, tag: str): + cfg = get_pretrained_cfg(model, _clean_tag(tag)) + return cfg.get('url', '') + + +def download_pretrained_from_url( + url: str, + cache_dir: Union[str, None] = None, +): + if not cache_dir: + cache_dir = os.path.expanduser("~/.cache/clip") + os.makedirs(cache_dir, exist_ok=True) + filename = os.path.basename(url) + + if 'openaipublic' in url: + expected_sha256 = url.split("/")[-2] + elif 'mlfoundations' in url: + expected_sha256 = os.path.splitext(filename)[0].split("-")[-1] + else: + expected_sha256 = '' + + download_target = os.path.join(cache_dir, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if expected_sha256: + if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256): + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + else: + return download_target + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256): + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def has_hf_hub(necessary=False): + if not _has_hf_hub and necessary: + # if no HF Hub module installed, and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return _has_hf_hub + + +def download_pretrained_from_hf( + model_id: str, + filename: str = 'open_clip_pytorch_model.bin', + revision=None, + cache_dir: Union[str, None] = None, +): + has_hf_hub(True) + cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir) + return cached_file + + +def download_pretrained( + cfg: Dict, + force_hf_hub: bool = False, + cache_dir: Union[str, None] = None, +): + target = '' + if not cfg: + return target + + download_url = cfg.get('url', '') + download_hf_hub = cfg.get('hf_hub', '') + if download_hf_hub and force_hf_hub: + # use HF hub even if url exists + download_url = '' + + if download_url: + target = download_pretrained_from_url(download_url, cache_dir=cache_dir) + elif download_hf_hub: + has_hf_hub(True) + # we assume the hf_hub entries in pretrained config combine model_id + filename in + # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and + # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'. 
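+            # e.g. (entries from the configs above):
+            #   os.path.split('QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt')
+            #       -> ('QuanSun/EVA-CLIP', 'EVA02_CLIP_B_psz16_s8B.pt')
+            #   os.path.split('laion/CLIP-ViT-H-14-laion2B-s32B-b79K/')
+            #       -> ('laion/CLIP-ViT-H-14-laion2B-s32B-b79K', ''), falling back to the default filename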
+ model_id, filename = os.path.split(download_hf_hub) + if filename: + target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir) + else: + target = download_pretrained_from_hf(model_id, cache_dir=cache_dir) + + return target diff --git a/eva_clip/rope.py b/eva_clip/rope.py new file mode 100644 index 0000000000000000000000000000000000000000..69030c35ea7b6b4f298daebbee5717f3fa1254ab --- /dev/null +++ b/eva_clip/rope.py @@ -0,0 +1,137 @@ +from math import pi +import torch +from torch import nn +from einops import rearrange, repeat +import logging + +def broadcat(tensors, dim = -1): + num_tensors = len(tensors) + shape_lens = set(list(map(lambda t: len(t.shape), tensors))) + assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions' + shape_len = list(shape_lens)[0] + dim = (dim + shape_len) if dim < 0 else dim + dims = list(zip(*map(lambda t: list(t.shape), tensors))) + expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim] + assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation' + max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims)) + expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims)) + expanded_dims.insert(dim, (dim, dims[dim])) + expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims))) + tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes))) + return torch.cat(tensors, dim = dim) + +def rotate_half(x): + x = rearrange(x, '... (d r) -> ... d r', r = 2) + x1, x2 = x.unbind(dim = -1) + x = torch.stack((-x2, x1), dim = -1) + return rearrange(x, '... d r -> ... (d r)') + + +class VisionRotaryEmbedding(nn.Module): + def __init__( + self, + dim, + pt_seq_len, + ft_seq_len=None, + custom_freqs = None, + freqs_for = 'lang', + theta = 10000, + max_freq = 10, + num_freqs = 1, + ): + super().__init__() + if custom_freqs: + freqs = custom_freqs + elif freqs_for == 'lang': + freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim)) + elif freqs_for == 'pixel': + freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi + elif freqs_for == 'constant': + freqs = torch.ones(num_freqs).float() + else: + raise ValueError(f'unknown modality {freqs_for}') + + if ft_seq_len is None: ft_seq_len = pt_seq_len + t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len + + freqs_h = torch.einsum('..., f -> ... f', t, freqs) + freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2) + + freqs_w = torch.einsum('..., f -> ... f', t, freqs) + freqs_w = repeat(freqs_w, '... n -> ... 
(n r)', r = 2) + + freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim = -1) + + self.register_buffer("freqs_cos", freqs.cos()) + self.register_buffer("freqs_sin", freqs.sin()) + + logging.info(f'Shape of rope freq: {self.freqs_cos.shape}') + + def forward(self, t, start_index = 0): + rot_dim = self.freqs_cos.shape[-1] + end_index = start_index + rot_dim + assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}' + t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:] + t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin) + + return torch.cat((t_left, t, t_right), dim = -1) + +class VisionRotaryEmbeddingFast(nn.Module): + def __init__( + self, + dim, + pt_seq_len, + ft_seq_len=None, + custom_freqs = None, + freqs_for = 'lang', + theta = 10000, + max_freq = 10, + num_freqs = 1, + patch_dropout = 0. + ): + super().__init__() + if custom_freqs: + freqs = custom_freqs + elif freqs_for == 'lang': + freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim)) + elif freqs_for == 'pixel': + freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi + elif freqs_for == 'constant': + freqs = torch.ones(num_freqs).float() + else: + raise ValueError(f'unknown modality {freqs_for}') + + if ft_seq_len is None: ft_seq_len = pt_seq_len + t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len + + freqs = torch.einsum('..., f -> ... f', t, freqs) + freqs = repeat(freqs, '... n -> ... (n r)', r = 2) + freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1) + + freqs_cos = freqs.cos().view(-1, freqs.shape[-1]) + freqs_sin = freqs.sin().view(-1, freqs.shape[-1]) + + self.patch_dropout = patch_dropout + + self.register_buffer("freqs_cos", freqs_cos) + self.register_buffer("freqs_sin", freqs_sin) + + logging.info(f'Shape of rope freq: {self.freqs_cos.shape}') + + def forward(self, t, patch_indices_keep=None): + if patch_indices_keep is not None: + batch = t.size()[0] + batch_indices = torch.arange(batch) + batch_indices = batch_indices[..., None] + + freqs_cos = repeat(self.freqs_cos, 'i j -> n i m j', n=t.shape[0], m=t.shape[1]) + freqs_sin = repeat(self.freqs_sin, 'i j -> n i m j', n=t.shape[0], m=t.shape[1]) + + freqs_cos = freqs_cos[batch_indices, patch_indices_keep] + freqs_cos = rearrange(freqs_cos, 'n i m j -> n m i j') + freqs_sin = freqs_sin[batch_indices, patch_indices_keep] + freqs_sin = rearrange(freqs_sin, 'n i m j -> n m i j') + + return t * freqs_cos + rotate_half(t) * freqs_sin + + return t * self.freqs_cos + rotate_half(t) * self.freqs_sin \ No newline at end of file diff --git a/eva_clip/timm_model.py b/eva_clip/timm_model.py new file mode 100644 index 0000000000000000000000000000000000000000..b58122c0b84fbda9e51867342823222234e17505 --- /dev/null +++ b/eva_clip/timm_model.py @@ -0,0 +1,122 @@ +""" timm model adapter + +Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model. 
+""" +import logging +from collections import OrderedDict + +import torch +import torch.nn as nn + +try: + import timm + from timm.models.layers import Mlp, to_2tuple + try: + # old timm imports < 0.8.1 + from timm.models.layers.attention_pool2d import RotAttentionPool2d + from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d + except ImportError: + # new timm imports >= 0.8.1 + from timm.layers import RotAttentionPool2d + from timm.layers import AttentionPool2d as AbsAttentionPool2d +except ImportError: + timm = None + +from .utils import freeze_batch_norm_2d + + +class TimmModel(nn.Module): + """ timm model adapter + # FIXME this adapter is a work in progress, may change in ways that break weight compat + """ + + def __init__( + self, + model_name, + embed_dim, + image_size=224, + pool='avg', + proj='linear', + proj_bias=False, + drop=0., + pretrained=False): + super().__init__() + if timm is None: + raise RuntimeError("Please `pip install timm` to use timm models.") + + self.image_size = to_2tuple(image_size) + self.trunk = timm.create_model(model_name, pretrained=pretrained) + feat_size = self.trunk.default_cfg.get('pool_size', None) + feature_ndim = 1 if not feat_size else 2 + if pool in ('abs_attn', 'rot_attn'): + assert feature_ndim == 2 + # if attn pooling used, remove both classifier and default pool + self.trunk.reset_classifier(0, global_pool='') + else: + # reset global pool if pool config set, otherwise leave as network default + reset_kwargs = dict(global_pool=pool) if pool else {} + self.trunk.reset_classifier(0, **reset_kwargs) + prev_chs = self.trunk.num_features + + head_layers = OrderedDict() + if pool == 'abs_attn': + head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim) + prev_chs = embed_dim + elif pool == 'rot_attn': + head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim) + prev_chs = embed_dim + else: + assert proj, 'projection layer needed if non-attention pooling is used.' 
+ + # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used + if proj == 'linear': + head_layers['drop'] = nn.Dropout(drop) + head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias) + elif proj == 'mlp': + head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias)) + + self.head = nn.Sequential(head_layers) + + def lock(self, unlocked_groups=0, freeze_bn_stats=False): + """ lock modules + Args: + unlocked_groups (int): leave last n layer groups unlocked (default: 0) + """ + if not unlocked_groups: + # lock full model + for param in self.trunk.parameters(): + param.requires_grad = False + if freeze_bn_stats: + freeze_batch_norm_2d(self.trunk) + else: + # NOTE: partial freeze requires latest timm (master) branch and is subject to change + try: + # FIXME import here until API stable and in an official release + from timm.models.helpers import group_parameters, group_modules + except ImportError: + raise RuntimeError( + 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`') + matcher = self.trunk.group_matcher() + gparams = group_parameters(self.trunk, matcher) + max_layer_id = max(gparams.keys()) + max_layer_id = max_layer_id - unlocked_groups + for group_idx in range(max_layer_id + 1): + group = gparams[group_idx] + for param in group: + self.trunk.get_parameter(param).requires_grad = False + if freeze_bn_stats: + gmodules = group_modules(self.trunk, matcher, reverse=True) + gmodules = {k for k, v in gmodules.items() if v <= max_layer_id} + freeze_batch_norm_2d(self.trunk, gmodules) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + try: + self.trunk.set_grad_checkpointing(enable) + except Exception as e: + logging.warning('grad checkpointing not supported for this timm image tower, continuing without...') + + def forward(self, x): + x = self.trunk(x) + x = self.head(x) + return x diff --git a/eva_clip/tokenizer.py b/eva_clip/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..41482f82aebbf197f4ee4e6c07c845a0d69dd7d6 --- /dev/null +++ b/eva_clip/tokenizer.py @@ -0,0 +1,201 @@ +""" CLIP tokenizer + +Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. +""" +import gzip +import html +import os +from functools import lru_cache +from typing import Union, List + +import ftfy +import regex as re +import torch + +# https://stackoverflow.com/q/62691279 +import os +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. 
+ """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe(), special_tokens=None): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + if not special_tokens: + special_tokens = ['', ''] + else: + special_tokens = ['', ''] + special_tokens + vocab.extend(special_tokens) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {t:t for t in special_tokens} + special = "|".join(special_tokens) + self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + self.vocab_size = len(self.encoder) + self.all_special_ids = [self.encoder[t] for t in special_tokens] + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text + + +_tokenizer = SimpleTokenizer() + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + 
+        An input string or a list of input strings to tokenize
+    context_length : int
+        The context length to use; all CLIP models use 77 as the context length
+
+    Returns
+    -------
+    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
+    """
+    if isinstance(texts, str):
+        texts = [texts]
+
+    sot_token = _tokenizer.encoder["<start_of_text>"]
+    eot_token = _tokenizer.encoder["<end_of_text>"]
+    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
+    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+
+    for i, tokens in enumerate(all_tokens):
+        if len(tokens) > context_length:
+            tokens = tokens[:context_length]  # Truncate
+            tokens[-1] = eot_token
+        result[i, :len(tokens)] = torch.tensor(tokens)
+
+    return result
+
+
+class HFTokenizer:
+    "HuggingFace tokenizer wrapper"
+    def __init__(self, tokenizer_name:str):
+        from transformers import AutoTokenizer
+        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+
+    def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:
+        # same cleaning as for default tokenizer, except lowercasing
+        # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
+        if isinstance(texts, str):
+            texts = [texts]
+        texts = [whitespace_clean(basic_clean(text)) for text in texts]
+        input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids
+        return input_ids
diff --git a/eva_clip/transform.py b/eva_clip/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..39f3e4cf6cf9985131ae2ef254b59540904b02e7
--- /dev/null
+++ b/eva_clip/transform.py
@@ -0,0 +1,103 @@
+from typing import Optional, Sequence, Tuple
+
+import torch
+import torch.nn as nn
+import torchvision.transforms.functional as F
+
+from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
+    CenterCrop
+
+from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+
+
+class ResizeMaxSize(nn.Module):
+
+    def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
+        super().__init__()
+        if not isinstance(max_size, int):
+            raise TypeError(f"Size should be int.
Got {type(max_size)}") + self.max_size = max_size + self.interpolation = interpolation + self.fn = min if fn == 'min' else min + self.fill = fill + + def forward(self, img): + if isinstance(img, torch.Tensor): + height, width = img.shape[:2] + else: + width, height = img.size + scale = self.max_size / float(max(height, width)) + if scale != 1.0: + new_size = tuple(round(dim * scale) for dim in (height, width)) + img = F.resize(img, new_size, self.interpolation) + pad_h = self.max_size - new_size[0] + pad_w = self.max_size - new_size[1] + img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill) + return img + + +def _convert_to_rgb(image): + return image.convert('RGB') + + +# class CatGen(nn.Module): +# def __init__(self, num=4): +# self.num = num +# def mixgen_batch(image, text): +# batch_size = image.shape[0] +# index = np.random.permutation(batch_size) + +# cat_images = [] +# for i in range(batch_size): +# # image mixup +# image[i,:] = lam * image[i,:] + (1 - lam) * image[index[i],:] +# # text concat +# text[i] = tokenizer((str(text[i]) + " " + str(text[index[i]])))[0] +# text = torch.stack(text) +# return image, text + + +def image_transform( + image_size: int, + is_train: bool, + mean: Optional[Tuple[float, ...]] = None, + std: Optional[Tuple[float, ...]] = None, + resize_longest_max: bool = False, + fill_color: int = 0, +): + mean = mean or OPENAI_DATASET_MEAN + if not isinstance(mean, (list, tuple)): + mean = (mean,) * 3 + + std = std or OPENAI_DATASET_STD + if not isinstance(std, (list, tuple)): + std = (std,) * 3 + + if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]: + # for square size, pass size as int so that Resize() uses aspect preserving shortest edge + image_size = image_size[0] + + normalize = Normalize(mean=mean, std=std) + if is_train: + return Compose([ + RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC), + _convert_to_rgb, + ToTensor(), + normalize, + ]) + else: + if resize_longest_max: + transforms = [ + ResizeMaxSize(image_size, fill=fill_color) + ] + else: + transforms = [ + Resize(image_size, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_size), + ] + transforms.extend([ + _convert_to_rgb, + ToTensor(), + normalize, + ]) + return Compose(transforms) diff --git a/eva_clip/transformer.py b/eva_clip/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..33e89ff7aa8ff60ae65dcfc5d21cf9af4d214510 --- /dev/null +++ b/eva_clip/transformer.py @@ -0,0 +1,737 @@ +import os +import logging +from collections import OrderedDict +import math +from typing import Callable, Optional, Sequence +import numpy as np +import torch +from torch import nn +from torch.nn import functional as F + +try: + from timm.models.layers import trunc_normal_ +except: + from timm.layers import trunc_normal_ + +from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast +from .utils import to_2tuple + +if os.getenv('ENV_TYPE') == 'deepspeed': + try: + import deepspeed + from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint + except: + print("Please 'pip install deepspeed'") + deepspeed = None + from torch.utils.checkpoint import checkpoint +else: + from torch.utils.checkpoint import checkpoint + +try: + import xformers.ops as xops +except ImportError: + xops = None + print("Please 'pip install xformers'") + +class LayerNormFp32(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back).""" 
+ def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x: torch.Tensor): + output = F.layer_norm( + x.float(), + self.normalized_shape, + self.weight.float() if self.weight is not None else None, + self.bias.float() if self.bias is not None else None, + self.eps, + ) + return output.type_as(x) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm (with cast back to input dtype).""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + return x.to(orig_type) + +class QuickGELU(nn.Module): + # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + +class PatchDropout(nn.Module): + """ + https://arxiv.org/abs/2212.00794 + """ + + def __init__(self, prob, exclude_first_token=True): + super().__init__() + assert 0 <= prob < 1. + self.prob = prob + self.exclude_first_token = exclude_first_token # exclude CLS token + logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}") + + def forward(self, x): + if not self.training or self.prob == 0.: + return x + + if self.exclude_first_token: + cls_tokens, x = x[:, :1], x[:, 1:] + else: + cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1]) + + batch = x.size()[0] + num_tokens = x.size()[1] + + batch_indices = torch.arange(batch) + batch_indices = batch_indices[..., None] + + keep_prob = 1 - self.prob + num_patches_keep = max(1, int(num_tokens * keep_prob)) + + rand = torch.randn(batch, num_tokens) + patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices + + x = x[batch_indices, patch_indices_keep] + + if self.exclude_first_token: + x = torch.cat((cls_tokens, x), dim=1) + + if self.training and os.getenv('RoPE') == '1': + return x, patch_indices_keep + + return x + + +def _in_projection_packed( + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + w: torch.Tensor, + b: Optional[torch.Tensor] = None, + ): + """ + https://github.com/pytorch/pytorch/blob/db2a237763eb8693a20788be94f8c192e762baa8/torch/nn/functional.py#L4726 + """ + E = q.size(-1) + if k is v: + if q is k: + # self-attention + return F.linear(q, w, b).chunk(3, dim=-1) + else: + # encoder-decoder attention + w_q, w_kv = w.split([E, E * 2]) + if b is None: + b_q = b_kv = None + else: + b_q, b_kv = b.split([E, E * 2]) + return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1) + else: + w_q, w_k, w_v = w.chunk(3) + if b is None: + b_q = b_k = b_v = None + else: + b_q, b_k, b_v = b.chunk(3) + return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v) + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=True, + scaled_cosine=False, + scale_heads=False, + logit_scale_max=math.log(1. 
/ 0.01), + attn_drop=0., + proj_drop=0., + xattn=False, + rope=False + ): + super().__init__() + self.scaled_cosine = scaled_cosine + self.scale_heads = scale_heads + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.logit_scale_max = logit_scale_max + + # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original + self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale) + if qkv_bias: + self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3)) + else: + self.in_proj_bias = None + + if self.scaled_cosine: + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) + else: + self.logit_scale = None + self.attn_drop = nn.Dropout(attn_drop) + if self.scale_heads: + self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1))) + else: + self.head_scale = None + self.out_proj = nn.Linear(dim, dim) + self.out_drop = nn.Dropout(proj_drop) + self.xattn = xattn + self.xattn_drop = attn_drop + self.rope = rope + + def forward(self, x, attn_mask: Optional[torch.Tensor] = None): + L, N, C = x.shape + q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1) + if self.xattn: + q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1) + k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1) + v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1) + + x = xops.memory_efficient_attention( + q, k, v, + p=self.xattn_drop, + scale=self.scale if self.logit_scale is None else None, + attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None, + ) + else: + q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1) + k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1) + v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1) + + if self.logit_scale is not None: + attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2)) + logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp() + attn = attn.view(N, self.num_heads, L, L) * logit_scale + attn = attn.view(-1, L, L) + else: + q = q * self.scale + attn = torch.bmm(q, k.transpose(-1, -2)) + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype) + new_attn_mask.masked_fill_(attn_mask, float("-inf")) + attn_mask = new_attn_mask + attn += attn_mask + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = torch.bmm(attn, v) + + if self.head_scale is not None: + x = x.view(N, self.num_heads, L, C) * self.head_scale + x = x.view(-1, L, C) + x = x.transpose(0, 1).reshape(L, N, C) + x = self.out_proj(x) + x = self.out_drop(x) + return x + +class CustomAttention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=True, + scaled_cosine=True, + scale_heads=False, + logit_scale_max=math.log(1. 
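# Illustrative sketch (not part of the patch): the scaled-cosine attention path above
# (scaled_cosine=True, as in Swin Transformer V2). Logits are cosine similarities
# rescaled by a learnable, clamped temperature instead of dot products / sqrt(d).
import math
import torch
import torch.nn.functional as F

q = torch.randn(2, 8, 64)                    # (batch * heads, tokens, head_dim)
k = torch.randn(2, 8, 64)
logit_scale = torch.log(10 * torch.ones(1))  # learnable parameter in the module above
attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
attn = attn * torch.clamp(logit_scale, max=math.log(1. / 0.01)).exp()
weights = attn.softmax(dim=-1)               # bounded logits, rows sum to 1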
/ 0.01), + attn_drop=0., + proj_drop=0., + xattn=False + ): + super().__init__() + self.scaled_cosine = scaled_cosine + self.scale_heads = scale_heads + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.logit_scale_max = logit_scale_max + + # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original + self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale) + if qkv_bias: + self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3)) + else: + self.in_proj_bias = None + + if self.scaled_cosine: + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) + else: + self.logit_scale = None + self.attn_drop = nn.Dropout(attn_drop) + if self.scale_heads: + self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1))) + else: + self.head_scale = None + self.out_proj = nn.Linear(dim, dim) + self.out_drop = nn.Dropout(proj_drop) + self.xattn = xattn + self.xattn_drop = attn_drop + + def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): + q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias) + N_q, B_q, C_q = q.shape + N_k, B_k, C_k = k.shape + N_v, B_v, C_v = v.shape + if self.xattn: + # B, N, C -> B, N, num_heads, C + q = q.permute(1, 0, 2).reshape(B_q, N_q, self.num_heads, -1) + k = k.permute(1, 0, 2).reshape(B_k, N_k, self.num_heads, -1) + v = v.permute(1, 0, 2).reshape(B_v, N_v, self.num_heads, -1) + + x = xops.memory_efficient_attention( + q, k, v, + p=self.xattn_drop, + scale=self.scale if self.logit_scale is None else None, + attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None + ) + else: + # B*H, L, C + q = q.contiguous().view(N_q, B_q * self.num_heads, -1).transpose(0, 1) + k = k.contiguous().view(N_k, B_k * self.num_heads, -1).transpose(0, 1) + v = v.contiguous().view(N_v, B_v * self.num_heads, -1).transpose(0, 1) + + if self.logit_scale is not None: + # B*H, N_q, N_k + attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2)) + logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp() + attn = attn.view(B_q, self.num_heads, N_q, N_k) * logit_scale + attn = attn.view(-1, N_q, N_k) + else: + q = q * self.scale + attn = torch.bmm(q, k.transpose(-1, -2)) + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype) + new_attn_mask.masked_fill_(attn_mask, float("-inf")) + attn_mask = new_attn_mask + attn += attn_mask + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = torch.bmm(attn, v) + + if self.head_scale is not None: + x = x.view(B_q, self.num_heads, N_q, C_q) * self.head_scale + x = x.view(-1, N_q, C_q) + x = x.transpose(0, 1).reshape(N_q, B_q, C_q) + x = self.out_proj(x) + x = self.out_drop(x) + return x + +class CustomResidualAttentionBlock(nn.Module): + def __init__( + self, + d_model: int, + n_head: int, + mlp_ratio: float = 4.0, + ls_init_value: float = None, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + scale_cosine_attn: bool = False, + scale_heads: bool = False, + scale_attn: bool = False, + scale_fc: bool = False, + cross_attn: bool = False, + xattn: bool = False, + ): + super().__init__() + + self.ln_1 = norm_layer(d_model) + self.ln_1_k = norm_layer(d_model) if cross_attn else self.ln_1 + self.ln_1_v = 
norm_layer(d_model) if cross_attn else self.ln_1 + self.attn = CustomAttention( + d_model, n_head, + qkv_bias=True, + attn_drop=0., + proj_drop=0., + scaled_cosine=scale_cosine_attn, + scale_heads=scale_heads, + xattn=xattn + ) + + self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity() + self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity() + + self.ln_2 = norm_layer(d_model) + mlp_width = int(d_model * mlp_ratio) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, mlp_width)), + ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()), + ("gelu", act_layer()), + ("c_proj", nn.Linear(mlp_width, d_model)) + ])) + + self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity() + + def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): + q = q + self.ls_1(self.ln_attn(self.attn(self.ln_1(q), self.ln_1_k(k), self.ln_1_v(v), attn_mask=attn_mask))) + q = q + self.ls_2(self.mlp(self.ln_2(q))) + return q + +class CustomTransformer(nn.Module): + def __init__( + self, + width: int, + layers: int, + heads: int, + mlp_ratio: float = 4.0, + ls_init_value: float = None, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + scale_cosine_attn: bool = True, + scale_heads: bool = False, + scale_attn: bool = False, + scale_fc: bool = False, + cross_attn: bool = False, + xattn: bool = False, + ): + super().__init__() + self.width = width + self.layers = layers + self.grad_checkpointing = False + self.xattn = xattn + + self.resblocks = nn.ModuleList([ + CustomResidualAttentionBlock( + width, + heads, + mlp_ratio, + ls_init_value=ls_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + scale_cosine_attn=scale_cosine_attn, + scale_heads=scale_heads, + scale_attn=scale_attn, + scale_fc=scale_fc, + cross_attn=cross_attn, + xattn=xattn) + for _ in range(layers) + ]) + + def get_cast_dtype(self) -> torch.dtype: + return self.resblocks[0].mlp.c_fc.weight.dtype + + def forward(self, q: torch.Tensor, k: torch.Tensor = None, v: torch.Tensor = None, attn_mask: Optional[torch.Tensor] = None): + if k is None and v is None: + k = v = q + for r in self.resblocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + q = checkpoint(r, q, k, v, attn_mask) + else: + q = r(q, k, v, attn_mask=attn_mask) + return q + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, + d_model: int, + n_head: int, + mlp_ratio: float = 4.0, + ls_init_value: float = None, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + xattn: bool = False, + ): + super().__init__() + + self.ln_1 = norm_layer(d_model) + if xattn: + self.attn = Attention(d_model, n_head, xattn=True) + else: + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity() + + self.ln_2 = norm_layer(d_model) + mlp_width = int(d_model * mlp_ratio) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, mlp_width)), + ("gelu", act_layer()), + ("c_proj", nn.Linear(mlp_width, d_model)) + ])) + + self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity() + self.xattn = xattn + + def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): + attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None + if self.xattn: + return self.attn(x, attn_mask=attn_mask) + return self.attn(x, x, x, 
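# Illustrative sketch (not part of the patch): driving the CustomTransformer above as a
# cross-attention stack. Inputs are (seq_len, batch, width); with cross_attn=True the
# queries attend to a separate key/value stream. Assumes the eva_clip package imports.
import torch
from eva_clip.transformer import CustomTransformer

xf = CustomTransformer(width=512, layers=2, heads=8, cross_attn=True)
q = torch.randn(16, 4, 512)   # 16 query tokens, batch of 4
kv = torch.randn(77, 4, 512)  # e.g. 77 text tokens as keys/values
out = xf(q, kv, kv)           # same shape as q: (16, 4, 512)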
need_weights=False, attn_mask=attn_mask)[0] + + def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): + x = x + self.ls_1(self.attention(self.ln_1(x), attn_mask=attn_mask)) + x = x + self.ls_2(self.mlp(self.ln_2(x))) + return x + +class Transformer(nn.Module): + def __init__( + self, + width: int, + layers: int, + heads: int, + mlp_ratio: float = 4.0, + ls_init_value: float = None, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + xattn: bool = False, + ): + super().__init__() + self.width = width + self.layers = layers + self.grad_checkpointing = False + + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn) + for _ in range(layers) + ]) + + def get_cast_dtype(self) -> torch.dtype: + return self.resblocks[0].mlp.c_fc.weight.dtype + + def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): + for r in self.resblocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(r, x, attn_mask) + else: + x = r(x, attn_mask=attn_mask) + return x + + +class VisionTransformer(nn.Module): + def __init__( + self, + image_size: int, + patch_size: int, + width: int, + layers: int, + heads: int, + mlp_ratio: float, + ls_init_value: float = None, + patch_dropout: float = 0., + global_average_pool: bool = False, + output_dim: int = 512, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + xattn: bool = False, + ): + super().__init__() + self.image_size = to_2tuple(image_size) + self.patch_size = to_2tuple(patch_size) + self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1]) + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width)) + + # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn + self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. 
else nn.Identity() + self.ln_pre = norm_layer(width) + + self.transformer = Transformer( + width, + layers, + heads, + mlp_ratio, + ls_init_value=ls_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + xattn=xattn + ) + + self.global_average_pool = global_average_pool + self.ln_post = norm_layer(width) + self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) + + def lock(self, unlocked_groups=0, freeze_bn_stats=False): + for param in self.parameters(): + param.requires_grad = False + + if unlocked_groups != 0: + groups = [ + [ + self.conv1, + self.class_embedding, + self.positional_embedding, + self.ln_pre, + ], + *self.transformer.resblocks[:-1], + [ + self.transformer.resblocks[-1], + self.ln_post, + ], + self.proj, + ] + + def _unlock(x): + if isinstance(x, Sequence): + for g in x: + _unlock(g) + else: + if isinstance(x, torch.nn.Parameter): + x.requires_grad = True + else: + for p in x.parameters(): + p.requires_grad = True + + _unlock(groups[-unlocked_groups:]) + + def get_num_layers(self): + return self.transformer.layers + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.transformer.grad_checkpointing = enable + + @torch.jit.ignore + def no_weight_decay(self): + return {'positional_embedding', 'class_embedding'} + + def forward(self, x: torch.Tensor, return_all_features: bool=False): + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat( + [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), + x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + # a patch_dropout of 0. 
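# Illustrative sketch (not part of the patch): partial freezing with lock() above.
# unlocked_groups counts from the end of [stem, block_1..block_{n-1}, (last block +
# ln_post), proj], so 2 leaves only the projection and the final block trainable.
# Assumes the eva_clip package imports.
import torch
from eva_clip.transformer import VisionTransformer

vit = VisionTransformer(image_size=224, patch_size=16, width=256, layers=4,
                        heads=8, mlp_ratio=4.0, output_dim=512)
vit.lock(unlocked_groups=2)
trainable = [n for n, p in vit.named_parameters() if p.requires_grad]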
would mean it is disabled and this function would do nothing but return what was passed in + x = self.patch_dropout(x) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + + if not return_all_features: + if self.global_average_pool: + x = x.mean(dim=1) #x = x[:,1:,:].mean(dim=1) + else: + x = x[:, 0] + + x = self.ln_post(x) + + if self.proj is not None: + x = x @ self.proj + + return x + + +class TextTransformer(nn.Module): + def __init__( + self, + context_length: int = 77, + vocab_size: int = 49408, + width: int = 512, + heads: int = 8, + layers: int = 12, + ls_init_value: float = None, + output_dim: int = 512, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm, + xattn: bool= False, + attn_mask: bool = True + ): + super().__init__() + self.context_length = context_length + self.vocab_size = vocab_size + self.width = width + self.output_dim = output_dim + + self.token_embedding = nn.Embedding(vocab_size, width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width)) + self.transformer = Transformer( + width=width, + layers=layers, + heads=heads, + ls_init_value=ls_init_value, + act_layer=act_layer, + norm_layer=norm_layer, + xattn=xattn + ) + + self.xattn = xattn + self.ln_final = norm_layer(width) + self.text_projection = nn.Parameter(torch.empty(width, output_dim)) + + if attn_mask: + self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False) + else: + self.attn_mask = None + + self.init_parameters() + + def init_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.transformer.grad_checkpointing = enable + + @torch.jit.ignore + def no_weight_decay(self): + # return {'positional_embedding', 'token_embedding'} + return {'positional_embedding'} + + def get_num_layers(self): + return self.transformer.layers + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + def forward(self, text, return_all_features: bool=False): + cast_dtype = self.transformer.get_cast_dtype() + x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.to(cast_dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x, attn_mask=self.attn_mask) + # x = self.transformer(x) # no attention mask is applied + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x) + + if not return_all_features: + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each 
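# Illustrative sketch (not part of the patch): the additive causal mask built by
# build_attention_mask() above, for a context length of 4.
import torch

mask = torch.empty(4, 4).fill_(float("-inf")).triu_(1)
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])
# row i attends to tokens 0..i; -inf above the diagonal masks out future tokens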
sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + return x diff --git a/eva_clip/utils.py b/eva_clip/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc5a7a451fdf8911ebbc816afbd2664ff348836 --- /dev/null +++ b/eva_clip/utils.py @@ -0,0 +1,326 @@ +from itertools import repeat +import collections.abc +import logging +import math +import numpy as np + +import torch +from torch import nn as nn +from torchvision.ops.misc import FrozenBatchNorm2d +import torch.nn.functional as F + +# open CLIP +def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): + # Rescale the grid of position embeddings when loading from state_dict + old_pos_embed = state_dict.get('visual.positional_embedding', None) + if old_pos_embed is None or not hasattr(model.visual, 'grid_size'): + return + grid_size = to_2tuple(model.visual.grid_size) + extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more) + new_seq_len = grid_size[0] * grid_size[1] + extra_tokens + if new_seq_len == old_pos_embed.shape[0]: + return + + if extra_tokens: + pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:] + else: + pos_emb_tok, pos_emb_img = None, old_pos_embed + old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img)))) + + logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size) + pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2) + pos_emb_img = F.interpolate( + pos_emb_img, + size=grid_size, + mode=interpolation, + align_corners=True, + ) + pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0] + if pos_emb_tok is not None: + new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0) + else: + new_pos_embed = pos_emb_img + state_dict['visual.positional_embedding'] = new_pos_embed + + +def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): + # Rescale the grid of position embeddings when loading from state_dict + old_pos_embed = state_dict.get('positional_embedding', None) + if old_pos_embed is None or not hasattr(model.visual, 'grid_size'): + return + grid_size = to_2tuple(model.visual.grid_size) + extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more) + new_seq_len = grid_size[0] * grid_size[1] + extra_tokens + if new_seq_len == old_pos_embed.shape[0]: + return + + if extra_tokens: + pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:] + else: + pos_emb_tok, pos_emb_img = None, old_pos_embed + old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img)))) + + logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size) + pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2) + pos_emb_img = F.interpolate( + pos_emb_img, + size=grid_size, + mode=interpolation, + align_corners=True, + ) + pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0] + if pos_emb_tok is not None: + new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0) + else: + new_pos_embed = pos_emb_img + state_dict['positional_embedding'] = new_pos_embed + +def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): + all_keys = list(state_dict.keys()) + # interpolate position embedding + if 'visual.pos_embed' in state_dict: + pos_embed_checkpoint = 
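# Illustrative sketch (not part of the patch): the core of the position-embedding
# resizing above. The class token is kept; grid tokens are reshaped to 2D and
# bicubically interpolated, here from a 14x14 to a 16x16 grid.
import torch
import torch.nn.functional as F

old = torch.randn(1 + 14 * 14, 768)                     # CLS token + 14x14 grid
cls_tok, grid = old[:1], old[1:]
grid = grid.reshape(1, 14, 14, -1).permute(0, 3, 1, 2)  # -> (1, 768, 14, 14)
grid = F.interpolate(grid, size=(16, 16), mode='bicubic', align_corners=True)
grid = grid.permute(0, 2, 3, 1).reshape(1, 16 * 16, -1)[0]
new = torch.cat([cls_tok, grid], dim=0)                 # (1 + 16*16, 768)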
state_dict['visual.pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.visual.patch_embed.num_patches + num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + state_dict['visual.pos_embed'] = new_pos_embed + + patch_embed_proj = state_dict['visual.patch_embed.proj.weight'] + patch_size = model.visual.patch_embed.patch_size + state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate( + patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False) + + +def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): + all_keys = list(state_dict.keys()) + # interpolate position embedding + if 'pos_embed' in state_dict: + pos_embed_checkpoint = state_dict['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.visual.patch_embed.num_patches + num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + state_dict['pos_embed'] = new_pos_embed + + patch_embed_proj = state_dict['patch_embed.proj.weight'] + patch_size = model.visual.patch_embed.patch_size + state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate( + patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False) + + +def resize_rel_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): + all_keys = list(state_dict.keys()) + for key in all_keys: + if "relative_position_index" in key: + state_dict.pop(key) + + if "relative_position_bias_table" in key: + rel_pos_bias = state_dict[key] + src_num_pos, num_attn_heads = rel_pos_bias.size() + dst_num_pos, _ = model.visual.state_dict()[key].size() + dst_patch_shape = model.visual.patch_embed.patch_shape + if 
dst_patch_shape[0] != dst_patch_shape[1]:
+                raise NotImplementedError()
+            num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
+            src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
+            dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
+            if src_size != dst_size:
+                print("Position interpolate for %s from %dx%d to %dx%d" % (
+                    key, src_size, src_size, dst_size, dst_size))
+                extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
+                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
+
+                def geometric_progression(a, r, n):
+                    return a * (1.0 - r ** n) / (1.0 - r)
+
+                left, right = 1.01, 1.5
+                while right - left > 1e-6:
+                    q = (left + right) / 2.0
+                    gp = geometric_progression(1, q, src_size // 2)
+                    if gp > dst_size // 2:
+                        right = q
+                    else:
+                        left = q
+
+                # if q > 1.090307:
+                #     q = 1.090307
+
+                dis = []
+                cur = 1
+                for i in range(src_size // 2):
+                    dis.append(cur)
+                    cur += q ** (i + 1)
+
+                r_ids = [-_ for _ in reversed(dis)]
+
+                x = r_ids + [0] + dis
+                y = r_ids + [0] + dis
+
+                t = dst_size // 2.0
+                dx = np.arange(-t, t + 0.1, 1.0)
+                dy = np.arange(-t, t + 0.1, 1.0)
+
+                print("Original positions = %s" % str(x))
+                print("Target positions = %s" % str(dx))
+
+                all_rel_pos_bias = []
+
+                from scipy import interpolate  # interp2d lives in SciPy; torch.nn.functional has no interp2d
+                for i in range(num_attn_heads):
+                    z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
+                    f = interpolate.interp2d(x, y, z, kind='cubic')
+                    all_rel_pos_bias.append(
+                        torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
+
+                rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
+
+                new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
+                state_dict[key] = new_rel_pos_bias
+
+    # interpolate position embedding
+    if 'pos_embed' in state_dict:
+        pos_embed_checkpoint = state_dict['pos_embed']
+        embedding_size = pos_embed_checkpoint.shape[-1]
+        num_patches = model.visual.patch_embed.num_patches
+        num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
+        # height (== width) for the checkpoint position embedding
+        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+        # height (== width) for the new position embedding
+        new_size = int(num_patches ** 0.5)
+        # class_token and dist_token are kept unchanged
+        if orig_size != new_size:
+            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+            # only the position tokens are interpolated
+            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+            pos_tokens = torch.nn.functional.interpolate(
+                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+            state_dict['pos_embed'] = new_pos_embed
+
+            patch_embed_proj = state_dict['patch_embed.proj.weight']
+            patch_size = model.visual.patch_embed.patch_size
+            state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
+                patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
+
+
+def freeze_batch_norm_2d(module, module_match={}, name=''):
+    """
+    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
+    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
+    returned. Otherwise, the module is walked recursively and submodules are converted in place.
+ + Args: + module (torch.nn.Module): Any PyTorch module. + module_match (dict): Dictionary of full module names to freeze (all if empty) + name (str): Full module name (prefix) + + Returns: + torch.nn.Module: Resulting module + + Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 + """ + res = module + is_match = True + if module_match: + is_match = name in module_match + if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)): + res = FrozenBatchNorm2d(module.num_features) + res.num_features = module.num_features + res.affine = module.affine + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for child_name, child in module.named_children(): + full_child_name = '.'.join([name, child_name]) if name else child_name + new_child = freeze_batch_norm_2d(child, module_match, full_child_name) + if new_child is not child: + res.add_module(child_name, new_child) + return res + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = lambda n, x: _ntuple(n)(x) + + +def is_logging(args): + def is_global_master(args): + return args.rank == 0 + + def is_local_master(args): + return args.local_rank == 0 + + def is_master(args, local=False): + return is_local_master(args) if local else is_global_master(args) + return is_master + + +class AllGather(torch.autograd.Function): + """An autograd function that performs allgather on a tensor. + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. 
+ """ + + @staticmethod + def forward(ctx, tensor, rank, world_size): + tensors_gather = [torch.empty_like(tensor) for _ in range(world_size)] + torch.distributed.all_gather(tensors_gather, tensor) + ctx.rank = rank + ctx.batch_size = tensor.shape[0] + return torch.cat(tensors_gather, 0) + + @staticmethod + def backward(ctx, grad_output): + return ( + grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)], + None, + None + ) + +allgather = AllGather.apply \ No newline at end of file diff --git a/ip_adapter_art/utils/ip_adapter.py b/ip_adapter_art/utils/ip_adapter.py deleted file mode 100644 index 582d05856b6d55eb226f3f280ecab5282d2e19f6..0000000000000000000000000000000000000000 --- a/ip_adapter_art/utils/ip_adapter.py +++ /dev/null @@ -1,72 +0,0 @@ -from diffusers.models.attention_processor import IPAdapterAttnProcessor2_0, Attention -from diffusers.models.embeddings import ( - ImageProjection, - MultiIPAdapterImageProjection, - IPAdapterPlusImageProjection, -) -import torch - - -def save_ip_adapter(unet, path): - state_dict = {} - if ( - hasattr(unet, "encoder_hid_proj") - and unet.encoder_hid_proj is not None - and isinstance(unet.encoder_hid_proj, torch.nn.Module) - ): - state_dict["encoder_hid_proj"] = unet.encoder_hid_proj.state_dict() - - for name, module in unet.attn_processors.items(): - if isinstance(module, torch.nn.Module): - state_dict[name] = module.state_dict() - torch.save(state_dict, path) - - -def load_ip_adapter( - unet, - path, -): - state_dict = torch.load(path, map_location="cpu") - - if "encoder_hid_proj" in state_dict.keys(): - num_image_text_embeds = 4 - clip_embeddings_dim = state_dict["encoder_hid_proj"][ - "image_projection_layers.0.image_embeds.weight" - ].shape[-1] - cross_attention_dim = ( - state_dict["encoder_hid_proj"][ - "image_projection_layers.0.image_embeds.weight" - ].shape[0] - // 4 - ) - if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None: - unet.encoder_hid_proj = MultiIPAdapterImageProjection( - [ - ImageProjection( - cross_attention_dim=cross_attention_dim, - image_embed_dim=clip_embeddings_dim, - num_image_text_embeds=num_image_text_embeds, - ) - ] - ).to(unet.device, unet.dtype) - unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"]) - else: - unet.encoder_hid_proj = lambda x: x - cross_attention_dim = state_dict[ - "down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor" - ]["to_k_ip.0.weight"].shape[-1] - - unet.config.encoder_hid_dim_type = "ip_image_proj" - - for name, module in unet.named_modules(): - if "attn2" in name and isinstance(module, Attention): - if not isinstance(module.processor, IPAdapterAttnProcessor2_0): - module.set_processor( - IPAdapterAttnProcessor2_0( - hidden_size=module.query_dim, - cross_attention_dim=cross_attention_dim, - ).to(unet.device, unet.dtype) - ) - module.processor.load_state_dict( - state_dict[f"{name}.processor"], strict=False - ) diff --git a/ip_adapter_art/utils/__init__.py b/ip_adapter_diffusers/__init__.py similarity index 100% rename from ip_adapter_art/utils/__init__.py rename to ip_adapter_diffusers/__init__.py diff --git a/ip_adapter_diffusers/custom_cross_attention_processor.py b/ip_adapter_diffusers/custom_cross_attention_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..aaea37daec2220758a1c842358d88e7c476822d6 --- /dev/null +++ b/ip_adapter_diffusers/custom_cross_attention_processor.py @@ -0,0 +1,297 @@ +import torch +from diffusers.models.attention_processor import IPAdapterAttnProcessor2_0, 
Attention +import torch.nn.functional as F +import torch.nn as nn + + +class ExtraCrossAttnProcessor2_0(torch.nn.Module): + r""" + Attention processor for IP-Adapter for PyTorch 2.0. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): + The context length of the image features. + scale (`float` or `List[float]`, defaults to 1.0): + the weight scale of image prompt. + """ + + def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0): + super().__init__() + + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." + ) + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + + self.scale = scale + self.to_q_ip = nn.Linear(hidden_size, hidden_size, bias=False) + self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + *args, + **kwargs, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + if encoder_hidden_states is not None: + encoder_hidden_states, ip_hidden_states = encoder_hidden_states + if isinstance(ip_hidden_states, list): + ip_hidden_states = ip_hidden_states[0] + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view( + batch_size, attn.heads, -1, attention_mask.shape[-1] + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + ip_query = self.to_q_ip(hidden_states) + + # for ip-adapter + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + ip_query = ip_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 
2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + ip_hidden_states = F.scaled_dot_product_attention( + ip_query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False + ) + # with torch.no_grad(): + # self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1) + # print(self.attn_map.shape) + + ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + ip_hidden_states = ip_hidden_states.to(query.dtype) + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class DecoupledCrossAttnProcessor2_0(torch.nn.Module): + r""" + Attention processor for IP-Adapter for PyTorch 2.0. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): + The context length of the image features. + scale (`float` or `List[float]`, defaults to 1.0): + the weight scale of image prompt. + """ + + def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0): + super().__init__() + + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
+ ) + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + + self.scale = scale + self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + *args, + **kwargs, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + if encoder_hidden_states is not None: + encoder_hidden_states, ip_hidden_states = encoder_hidden_states + if isinstance(ip_hidden_states, list): + ip_hidden_states = ip_hidden_states[0] + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view( + batch_size, attn.heads, -1, attention_mask.shape[-1] + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + # for ip-adapter + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + ip_hidden_states = F.scaled_dot_product_attention( + query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False + ) + # with torch.no_grad(): + # self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1) + # print(self.attn_map.shape) + + ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + ip_hidden_states = ip_hidden_states.to(query.dtype) + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + if attn.residual_connection: + hidden_states = hidden_states 
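# Illustrative sketch (not part of the patch): the decoupled cross-attention idea both
# processors above implement. Text and image tokens get separate key/value projections;
# the same query attends to each stream and the outputs are summed, with a scalar scale
# on the image branch (ExtraCrossAttnProcessor2_0 instead derives a fresh query from
# the text-attended output via to_q_ip).
import torch
import torch.nn.functional as F

q = torch.randn(1, 8, 64, 40)                # (batch, heads, queries, head_dim)
text_k = text_v = torch.randn(1, 8, 77, 40)  # 77 text tokens
ip_k = ip_v = torch.randn(1, 8, 4, 40)       # 4 image tokens
scale = 1.0
out = (F.scaled_dot_product_attention(q, text_k, text_v)
       + scale * F.scaled_dot_product_attention(q, ip_k, ip_v))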
+ residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states diff --git a/ip_adapter_diffusers/custom_ip_adapter.py b/ip_adapter_diffusers/custom_ip_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..9151bcaf6405c628e19abd0d3d28a23324aa09da --- /dev/null +++ b/ip_adapter_diffusers/custom_ip_adapter.py @@ -0,0 +1,58 @@ +from .custom_cross_attention_processor import DecoupledCrossAttnProcessor2_0 +import torch +from diffusers.models.attention_processor import IPAdapterAttnProcessor2_0, Attention + + +def load_custom_ip_adapter( + unet, + path=None, + blocks="full", + Custom_Attn_Type=DecoupledCrossAttnProcessor2_0, + cross_attention_dim=2048, + Image_Proj_Type=None, +): + if path is None: + state_dict = None + else: + state_dict = torch.load(path, map_location="cpu") + + # unet.config.encoder_hid_dim_type = "ip_image_proj" + # if Image_Proj_Type is None: + # unet.encoder_hid_proj = torch.nn.Identity() + # unet.encoder_hid_proj.image_projection_layers = torch.nn.ModuleList( + # [torch.nn.Identity()] + # ) + + for name, module in unet.named_modules(): + if "attn2" in name and isinstance(module, Attention): + if blocks == "midup" and "mid" not in name and "up" not in name: + continue + if not isinstance(module.processor, torch.nn.Module): + module.set_processor( + Custom_Attn_Type( + hidden_size=module.query_dim, + cross_attention_dim=cross_attention_dim, + ).to(unet.device, unet.dtype) + ) + if state_dict is not None: + module.processor.load_state_dict(state_dict[f"{name}.processor"]) + else: + if hasattr(module.processor, "to_q_ip"): + torch.nn.init.kaiming_normal_(module.processor.to_q_ip.weight) + torch.nn.init.kaiming_normal_(module.processor.to_k_ip.weight) + torch.nn.init.kaiming_normal_(module.processor.to_v_ip.weight) + + +def save_custom_ip_adapter(unet, path): + state_dict = {} + for name, module in unet.attn_processors.items(): + if isinstance(module, torch.nn.Module): + state_dict[name] = module.state_dict() + + torch.save(state_dict, path) + + +def set_scale(unet, scale): + for name, module in unet.attn_processors.items(): + if isinstance(module, torch.nn.Module): + module.scale = scale diff --git a/ip_adapter_diffusers/ip_adapter.py b/ip_adapter_diffusers/ip_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..2d0f10dd936a0e15891c9355c101ddf88870d4e2 --- /dev/null +++ b/ip_adapter_diffusers/ip_adapter.py @@ -0,0 +1,821 @@ +from diffusers.models.attention_processor import Attention +from diffusers.models.embeddings import ImageProjection, MultiIPAdapterImageProjection +import torch +import torch.nn as nn +import torch.nn.functional as F +import copy +from .resampler import Resampler +from typing import Optional +from diffusers.image_processor import IPAdapterMaskProcessor +import math +import warnings +from pulid.encoders_transformer import IDFormer + + +def save_ip_adapter(unet, path): + state_dict = {} + if ( + hasattr(unet, "encoder_hid_proj") + and unet.encoder_hid_proj is not None + and isinstance(unet.encoder_hid_proj, torch.nn.Module) + ): + state_dict["encoder_hid_proj"] = unet.encoder_hid_proj.state_dict() + + for name, module in unet.attn_processors.items(): + if isinstance(module, torch.nn.Module): + state_dict[name] = module.state_dict() + + torch.save(state_dict, path) + + +def load_ip_adapter( + unet, + path=None, + clip_embeddings_dim=1280, + cross_attention_dim=2048, + num_image_text_embeds=4, + attn_blocks=["down", "mid", "up"], +): + if path is None: + state_dict = None 
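# Illustrative sketch (not part of the patch): the checkpoint layout produced by
# save_ip_adapter above -- one "encoder_hid_proj" entry for the image-projection
# module, plus one entry per attention processor keyed by its module name.
# The file name is hypothetical.
import torch

ckpt = torch.load("ip_adapter.pth", map_location="cpu")
proj_sd = ckpt["encoder_hid_proj"]  # e.g. contains image_embeds.weight
attn_keys = [k for k in ckpt if k.endswith(".processor")]
# e.g. "down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor"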
+ else: + state_dict = torch.load(path, map_location="cpu") + clip_embeddings_dim = state_dict["encoder_hid_proj"][ + "image_embeds.weight" + ].shape[-1] + num_image_text_embeds = ( + state_dict["encoder_hid_proj"]["image_embeds.weight"].shape[0] + // cross_attention_dim + ) + + if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None: + unet.encoder_hid_proj = ImageProjection( + cross_attention_dim=cross_attention_dim, + image_embed_dim=clip_embeddings_dim, + num_image_text_embeds=num_image_text_embeds, + ).to(unet.device, unet.dtype) + if state_dict is not None: + unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"]) + + for name, module in unet.named_modules(): + if ( + "attn2" in name + and isinstance(module, Attention) + and any([attn in name for attn in attn_blocks]) + ): + if not isinstance(module.processor, IPAttnProcessor2_0): + module.set_processor( + IPAttnProcessor2_0( + hidden_size=module.query_dim, + cross_attention_dim=cross_attention_dim, + ).to(unet.device, unet.dtype) + ) + if state_dict is not None: + module.processor.load_state_dict(state_dict[f"{name}.processor"]) + else: + module.processor.to_k_ip.load_state_dict(module.to_k.state_dict()) + module.processor.to_v_ip.load_state_dict(module.to_v.state_dict()) + + +def parse_clip_embeddings_dim( + path, + state_dict, +): + if "pulid" in path: + return None + else: + return state_dict["encoder_hid_proj"]["image_embeds.weight"].shape[-1] + + +def parse_num_image_text_embeds(path, state_dict, cross_attention_dim=2048): + if "pulid" in path: + return None + else: + return ( + state_dict["encoder_hid_proj"]["image_embeds.weight"].shape[0] + // cross_attention_dim + ) + + +def parse_encoder_hid_proj_module( + path=None, + cross_attention_dim=2048, + image_embed_dim=None, + num_image_text_embeds=None, +): + if "pulid" in path: + return IDFormer() + else: + return ImageProjection( + cross_attention_dim=cross_attention_dim, + image_embed_dim=image_embed_dim, + num_image_text_embeds=num_image_text_embeds, + ) + + +def load_multi_ip_adapter( + unet, + paths=None, + clip_embeddings_dim=[1280], + cross_attention_dim=2048, + num_image_text_embeds=[4], +): + if paths is None: + state_dict = None + else: + state_dict = [torch.load(path, map_location="cpu") for path in paths] + clip_embeddings_dim = [ + parse_clip_embeddings_dim(path=single_path, state_dict=single_state_dict) + for single_path, single_state_dict in zip(paths, state_dict) + ] + num_image_text_embeds = [ + parse_num_image_text_embeds( + path=single_path, + state_dict=single_state_dict, + cross_attention_dim=unet.config.cross_attention_dim, + ) + for single_path, single_state_dict in zip(paths, state_dict) + ] + + if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None: + unet.encoder_hid_proj = MultiIPAdapterImageProjection( + [ + parse_encoder_hid_proj_module( + path=single_path, + cross_attention_dim=unet.config.cross_attention_dim, + image_embed_dim=single_clip_embeddings_dim, + num_image_text_embeds=single_num_image_text_embeds, + ).to(unet.device, unet.dtype) + for single_path, single_clip_embeddings_dim, single_num_image_text_embeds in zip( + paths, clip_embeddings_dim, num_image_text_embeds + ) + ] + ).to(unet.device, unet.dtype) + + if state_dict is not None: + for single_encoder_hid_proj, single_state_dict in zip( + unet.encoder_hid_proj.image_projection_layers, state_dict + ): + single_encoder_hid_proj.load_state_dict( + single_state_dict["encoder_hid_proj"] + ) + + for name, module in unet.named_modules(): + if 
"attn2" in name and isinstance(module, Attention): + if not isinstance(module.processor, MultiIPAttnProcessor2_0): + module.set_processor( + MultiIPAttnProcessor2_0( + hidden_size=module.query_dim, + cross_attention_dim=unet.config.cross_attention_dim, + num_tokens=num_image_text_embeds, + ).to(unet.device, unet.dtype) + ) + if state_dict is not None: + for ( + to_k_ip, + to_v_ip, + single_state_dict, + ) in zip( + module.processor.to_k_ip, + module.processor.to_v_ip, + state_dict, + ): + if f"{name}.processor" in single_state_dict.keys(): + to_k_ip.weight = nn.Parameter( + single_state_dict[f"{name}.processor"]["to_k_ip.weight"] + ) + to_v_ip.weight = nn.Parameter( + single_state_dict[f"{name}.processor"]["to_v_ip.weight"] + ) + module.processor = module.processor.to(unet.device, unet.dtype) + + +def load_ip_adapter_plus( + unet, + path=None, + embed_dims=1664, + depth=4, + dim_head=64, + heads=12, + num_queries=32, + ff_mult=4, + attn_blocks=["down", "mid", "up"], +): + if path is not None: + state_dict = torch.load(path) + else: + state_dict = None + if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None: + unet.encoder_hid_proj = Resampler( + dim=unet.config.cross_attention_dim, + depth=depth, + dim_head=dim_head, + heads=heads, + num_queries=num_queries, + embedding_dim=embed_dims, + output_dim=unet.config.cross_attention_dim, + ff_mult=ff_mult, + ).to(unet.device, unet.dtype) + if state_dict is not None: + unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"]) + + for name, module in unet.named_modules(): + if ( + "attn2" in name + and isinstance(module, Attention) + and any([attn in name for attn in attn_blocks]) + ): + if not isinstance(module.processor, IPAttnProcessor2_0): + module.set_processor( + IPAttnProcessor2_0( + hidden_size=module.query_dim, + cross_attention_dim=unet.config.cross_attention_dim, + ).to(unet.device, unet.dtype) + ) + if state_dict is not None and f"{name}.processor" in state_dict.keys(): + module.processor.load_state_dict(state_dict[f"{name}.processor"]) + else: + module.processor.to_k_ip.load_state_dict(module.to_k.state_dict()) + module.processor.to_v_ip.load_state_dict(module.to_v.state_dict()) + + +def set_ip_hidden_states(unet, image_embeds): + for name, module in unet.attn_processors.items(): + if isinstance(module, IPAttnProcessor2_0) or isinstance( + module, MultiIPAttnProcessor2_0 + ): + module.ip_hidden_states = image_embeds.clone() + + +def set_multi_ip_hidden_states(unet, image_embeds): + for name, module in unet.attn_processors.items(): + if isinstance(module, IPAttnProcessor2_0) or isinstance( + module, MultiIPAttnProcessor2_0 + ): + module.ip_hidden_states = image_embeds + + +def set_multi_ip_attn_masks(unet, attn_masks): + for name, module in unet.attn_processors.items(): + if isinstance(module, IPAttnProcessor2_0) or isinstance( + module, MultiIPAttnProcessor2_0 + ): + module.ip_hidden_states = attn_masks + + +def clear_ip_hidden_states(model): + for name, module in model.named_modules(): + if isinstance(module, IPAttnProcessor2_0): + module.ip_hidden_states = None + + +def set_ip_adapter_scale(unet, scale=1.0, attn_blocks=["down", "mid", "up"]): + for name, module in unet.named_modules(): + if isinstance(module, IPAttnProcessor2_0) and any( + tarhet_module in name for tarhet_module in attn_blocks + ): + module.scale = scale + + +def downsample( + mask: torch.Tensor, batch_size: int, num_queries: int, value_embed_dim: int +): + """ + Downsamples the provided mask tensor to match the expected dimensions for 
scaled dot-product attention. If the
+    aspect ratio of the mask does not match the aspect ratio of the output image, a warning is issued.
+
+    Args:
+        mask (`torch.Tensor`):
+            The input mask tensor generated with `IPAdapterMaskProcessor.preprocess()`.
+        batch_size (`int`):
+            The batch size.
+        num_queries (`int`):
+            The number of queries.
+        value_embed_dim (`int`):
+            The dimensionality of the value embeddings.
+
+    Returns:
+        `torch.Tensor`:
+            The downsampled mask tensor.
+
+    """
+    o_h = mask.shape[2]
+    o_w = mask.shape[3]
+    ratio = o_w / o_h
+    mask_h = int(math.sqrt(num_queries / ratio))
+    mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0)
+    mask_w = num_queries // mask_h
+
+    mask_downsample = F.interpolate(mask, size=(mask_h, mask_w), mode="bicubic")
+
+    # Repeat batch_size times
+    if mask_downsample.shape[0] < batch_size:
+        mask_downsample = mask_downsample.repeat(batch_size, 1, 1, 1)
+
+    mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1)
+
+    downsampled_area = mask_h * mask_w
+    # If the output image and the mask do not have the same aspect ratio, tensor shapes will not match
+    # Pad tensor if downsampled_mask.shape[1] is smaller than num_queries
+    if downsampled_area < num_queries:
+        warnings.warn(
+            "The aspect ratio of the mask does not match the aspect ratio of the output image. "
+            "Please update your masks or adjust the output size for optimal performance.",
+            UserWarning,
+        )
+        mask_downsample = F.pad(
+            mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0
+        )
+    # Discard last embeddings if downsampled_mask.shape[1] is bigger than num_queries
+    if downsampled_area > num_queries:
+        warnings.warn(
+            "The aspect ratio of the mask does not match the aspect ratio of the output image. "
+            "Please update your masks or adjust the output size for optimal performance.",
+            UserWarning,
+        )
+        mask_downsample = mask_downsample[:, :num_queries]
+
+    # Repeat last dimension to match SDPA output shape
+    mask_downsample = mask_downsample.view(
+        mask_downsample.shape[0], mask_downsample.shape[1], 1
+    ).repeat(1, 1, value_embed_dim)
+
+    return mask_downsample
+
+
+class IPAttnProcessor2_0(torch.nn.Module):
+    r"""
+    Attention processor for IP-Adapter for PyTorch 2.0.
+
+    Args:
+        hidden_size (`int`):
+            The hidden size of the attention layer.
+        cross_attention_dim (`int`):
+            The number of channels in the `encoder_hidden_states`.
+        scale (`float`, defaults to 1.0):
+            The weight scale of the image prompt.
+        num_tokens (`int`, defaults to 4; use 16 for IP-Adapter Plus):
+            The context length of the image features.
+    """
+
+    def __init__(
+        self,
+        hidden_size,
+        cross_attention_dim=None,
+        scale=1.0,
+        num_tokens=4,
+        use_align_sem_and_layout_loss=False,
+    ):
+        super().__init__()
+
+        if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError(
+                "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
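# Illustrative sketch (not part of the patch): the grid that downsample() above picks.
# For num_queries = 4096 latent tokens and a square mask (ratio 1.0):
# mask_h = int(sqrt(4096 / 1.0)) = 64; 4096 % 64 == 0, so no bump; mask_w = 4096 // 64 = 64.
import math
import torch

mask = torch.ones(1, 1, 512, 512)
num_queries = 4096                              # a 64x64 latent grid
ratio = mask.shape[3] / mask.shape[2]
mask_h = int(math.sqrt(num_queries / ratio))
mask_h = mask_h + int((num_queries % mask_h) != 0)
mask_w = num_queries // mask_h                  # -> 64 x 64, matching the query grid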
+ ) + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.scale = scale + self.num_tokens = num_tokens + + self.to_k_ip = nn.Linear( + cross_attention_dim or hidden_size, hidden_size, bias=False + ) + self.to_v_ip = nn.Linear( + cross_attention_dim or hidden_size, hidden_size, bias=False + ) + self.ip_hidden_states = None + + self.use_align_sem_and_layout_loss = use_align_sem_and_layout_loss + if self.use_align_sem_and_layout_loss: + self.align_sem_loss = None + self.align_layout_loss = None + self.cache_query = None + self.cache_attn_weights = None + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ip_adapter_masks: Optional[torch.FloatTensor] = None, + *args, + **kwargs, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view( + batch_size, attn.heads, -1, attention_mask.shape[-1] + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states( + encoder_hidden_states + ) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + if self.use_align_sem_and_layout_loss: + if self.cache_query is None: + self.cache_query = query.clone().detach() + self.cache_attn_weights = (key @ query.transpose(-2, -1)) / math.sqrt( + query.size(-1) + ) + self.cache_attn_weights = torch.softmax(self.cache_attn_weights, dim=-1) + else: + self.attn_weights = (key @ query.transpose(-2, -1)) / math.sqrt( + query.size(-1) + ) + self.query = query + self.attn_weights = torch.softmax(self.attn_weights, dim=-1) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + if self.scale != 0.0: + # for ip-adapter + ip_key = self.to_k_ip(self.ip_hidden_states).to(dtype=query.dtype) + ip_value = self.to_v_ip(self.ip_hidden_states).to(dtype=query.dtype) + + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose( + 1, 2 + ) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + 
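# Illustrative sketch (not part of the patch): how this processor is driven at
# inference time, using the helpers defined earlier in this module. The pipeline
# projects the image embedding once, pushes it into every processor, and each
# denoising step then adds scale * image-attention; scale == 0.0 skips that branch.
#
#   image_embeds = unet.encoder_hid_proj(clip_image_embeds)  # e.g. ImageProjection
#   set_ip_hidden_states(unet, image_embeds)
#   set_ip_adapter_scale(unet, 0.0)  # disables the image branch without unloading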
# TODO: add support for attn.scale when we move to Torch 2.1 + ip_hidden_states = F.scaled_dot_product_attention( + query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False + ) + # with torch.no_grad(): + # self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1) + # print(self.attn_map.shape) + + ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + ip_hidden_states = ip_hidden_states.to(query.dtype) + + if ip_adapter_masks is not None: + mask_downsample = downsample( + ip_adapter_masks, + batch_size, + ip_hidden_states.shape[1], + ip_hidden_states.shape[2], + ) + + mask_downsample = mask_downsample.to( + dtype=query.dtype, device=query.device + ) + + ip_hidden_states = ip_hidden_states * mask_downsample + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +def set_ortho(unet, ortho): + for name, module in unet.attn_processors.items(): + if isinstance(module, IPAttnProcessor2_0) or isinstance( + module, MultiIPAttnProcessor2_0 + ): + module.ortho = ortho + + +def set_num_zero(unet, num_zero): + for name, module in unet.attn_processors.items(): + if isinstance(module, IPAttnProcessor2_0) or isinstance( + module, MultiIPAttnProcessor2_0 + ): + module.num_zero = num_zero + + +class MultiIPAttnProcessor2_0(torch.nn.Module): + r""" + Attention processor for IP-Adapater for PyTorch 2.0. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): + The context length of the image features. + scale (`float` or `List[float]`, defaults to 1.0): + the weight scale of image prompt. + """ + + def __init__( + self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0 + ): + super().__init__() + + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." + ) + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + + if not isinstance(num_tokens, (tuple, list)): + num_tokens = [num_tokens] + self.num_tokens = num_tokens + + if not isinstance(scale, list): + scale = [scale] * len(num_tokens) + if len(scale) != len(num_tokens): + raise ValueError( + "`scale` should be a list of integers with the same length as `num_tokens`." 
+ ) + self.scale = scale + + self.to_k_ip = nn.ModuleList( + [ + nn.Linear(cross_attention_dim, hidden_size, bias=False) + for _ in range(len(num_tokens)) + ] + ) + self.to_v_ip = nn.ModuleList( + [ + nn.Linear(cross_attention_dim, hidden_size, bias=False) + for _ in range(len(num_tokens)) + ] + ) + self.ip_hidden_states = None + self.num_zero = [None] * (len(num_tokens)) + self.ortho = [None] * len(num_tokens) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + scale: float = 1.0, + ip_adapter_masks: Optional[torch.FloatTensor] = None, + ): + residual = hidden_states + + ip_hidden_states = self.ip_hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view( + batch_size, attn.heads, -1, attention_mask.shape[-1] + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states( + encoder_hidden_states + ) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + if ip_adapter_masks is not None: + if ( + not isinstance(ip_adapter_masks, torch.Tensor) + or ip_adapter_masks.ndim != 4 + ): + raise ValueError( + " ip_adapter_mask should be a tensor with shape [num_ip_adapter, 1, height, width]." 
+ " Please use `IPAdapterMaskProcessor` to preprocess your mask" + ) + if len(ip_adapter_masks) != len(self.scale): + raise ValueError( + f"Number of ip_adapter_masks ({len(ip_adapter_masks)}) must match number of IP-Adapters ({len(self.scale)})" + ) + else: + ip_adapter_masks = [None] * len(self.scale) + + # for ip-adapter + for ( + current_ip_hidden_states, + scale, + to_k_ip, + to_v_ip, + mask, + num_zero, + ortho, + ) in zip( + ip_hidden_states, + self.scale, + self.to_k_ip, + self.to_v_ip, + ip_adapter_masks, + self.num_zero, + self.ortho, + ): + if scale == 0: + continue + if num_zero is not None: + zero_tensor = torch.zeros( + ( + current_ip_hidden_states.size(0), + num_zero, + current_ip_hidden_states.size(-1), + ), + dtype=current_ip_hidden_states.dtype, + device=current_ip_hidden_states.device, + ) + current_ip_hidden_states = torch.concat( + [current_ip_hidden_states, zero_tensor], dim=1 + ) + ip_key = to_k_ip(current_ip_hidden_states) + ip_value = to_v_ip(current_ip_hidden_states) + + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose( + 1, 2 + ) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + current_ip_hidden_states = F.scaled_dot_product_attention( + query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False + ) + + current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + current_ip_hidden_states = current_ip_hidden_states.to(query.dtype) + + if mask is not None: + mask_downsample = IPAdapterMaskProcessor.downsample( + mask, + batch_size, + current_ip_hidden_states.shape[1], + current_ip_hidden_states.shape[2], + ) + + mask_downsample = mask_downsample.to( + dtype=query.dtype, device=query.device + ) + + current_ip_hidden_states = current_ip_hidden_states * mask_downsample + if ortho is None: + hidden_states = hidden_states + scale * current_ip_hidden_states + elif ortho == "ortho": + orig_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + current_ip_hidden_states = current_ip_hidden_states.to(torch.float32) + projection = ( + torch.sum( + (hidden_states * current_ip_hidden_states), dim=-2, keepdim=True + ) + / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True) + * hidden_states + ) + orthogonal = current_ip_hidden_states - projection + hidden_states = hidden_states + current_ip_hidden_states * orthogonal + hidden_states = hidden_states.to(orig_dtype) + elif ortho == "ortho_v2": + orig_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + current_ip_hidden_states = current_ip_hidden_states.to(torch.float32) + attn_map = query @ ip_key.transpose(-2, -1) + attn_mean = attn_map.softmax(dim=-1).mean(dim=1) + attn_mean = attn_mean[:, :, :5].sum(dim=-1, keepdim=True) + projection = ( + torch.sum( + (hidden_states * current_ip_hidden_states), dim=-2, keepdim=True + ) + / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True) + * hidden_states + ) + orthogonal = current_ip_hidden_states + (attn_mean - 1) * projection + hidden_states = hidden_states + current_ip_hidden_states * orthogonal + hidden_states = hidden_states.to(orig_dtype) + else: + raise ValueError(f"{ortho} not supported") + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = 
hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states diff --git a/ip_adapter_diffusers/ip_adapter_extra_attn.py b/ip_adapter_diffusers/ip_adapter_extra_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..639ee91cfe878783a11fda44e038838db4f51a1e --- /dev/null +++ b/ip_adapter_diffusers/ip_adapter_extra_attn.py @@ -0,0 +1,250 @@ +from typing import Callable, List, Optional, Tuple, Union +from diffusers.models.attention_processor import Attention +from diffusers.models.embeddings import ( + ImageProjection, + Resampler, +) +import torch +import torch.nn as nn +import torch.nn.functional as F +import copy + + +class IPAdapterAttnProcessor2_0(torch.nn.Module): + r""" + Attention processor for IP-Adapter for PyTorch 2.0. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): + The context length of the image features. + scale (`float` or `List[float]`, defaults to 1.0): + the weight scale of image prompt. + """ + + def __init__( + self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0 + ): + super().__init__() + + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." + ) + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + + if not isinstance(num_tokens, (tuple, list)): + num_tokens = [num_tokens] + self.num_tokens = num_tokens + + if not isinstance(scale, list): + scale = [scale] * len(num_tokens) + if len(scale) != len(num_tokens): + raise ValueError( + "`scale` should be a list of integers with the same length as `num_tokens`." 
+ ) + self.scale = scale + + self.to_q_ip = nn.Linear(hidden_size, hidden_size, bias=False) + self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False) + + def __call__( + self, + attn: Attention, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + temb: Optional[torch.Tensor] = None, + scale: float = 1.0, + ip_adapter_masks: Optional[torch.Tensor] = None, + ): + residual = hidden_states + + # separate ip_hidden_states from encoder_hidden_states + if encoder_hidden_states is not None: + if isinstance(encoder_hidden_states, tuple): + encoder_hidden_states, ip_hidden_states = encoder_hidden_states + ip_hidden_states = ip_hidden_states[0] + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view( + batch_size, attn.heads, -1, attention_mask.shape[-1] + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states( + encoder_hidden_states + ) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + ip_query = self.to_q_ip(hidden_states) + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + ip_query = ip_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + current_ip_hidden_states = F.scaled_dot_product_attention( + ip_query, + ip_key, + ip_value, + attn_mask=None, + dropout_p=0.0, + is_causal=False, + ) + + current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + current_ip_hidden_states = 
current_ip_hidden_states.to(query.dtype) + + hidden_states = hidden_states + scale * current_ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +def save_ip_adapter(unet, path): + state_dict = {} + if ( + hasattr(unet, "encoder_hid_proj") + and unet.encoder_hid_proj is not None + and isinstance(unet.encoder_hid_proj, torch.nn.Module) + ): + state_dict["encoder_hid_proj"] = unet.encoder_hid_proj.state_dict() + + for name, module in unet.attn_processors.items(): + if isinstance(module, torch.nn.Module): + state_dict[name] = module.state_dict() + + torch.save(state_dict, path) + + +def load_ip_adapter( + unet, + path=None, + clip_embeddings_dim=1280, + cross_attention_dim=2048, + num_image_text_embeds=4, +): + if path is None: + state_dict = None + else: + state_dict = torch.load(path, map_location="cpu") + clip_embeddings_dim = state_dict["encoder_hid_proj"][ + "image_projection_layers.0.image_embeds.weight" + ].shape[-1] + num_image_text_embeds = ( + state_dict["encoder_hid_proj"][ + "image_projection_layers.0.image_embeds.weight" + ].shape[0] + // cross_attention_dim + ) + + if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None: + unet.encoder_hid_proj = MultiIPAdapterImageProjection( + [ + ImageProjection( + cross_attention_dim=cross_attention_dim, + image_embed_dim=clip_embeddings_dim, + num_image_text_embeds=num_image_text_embeds, + ) + ] + ).to(unet.device, unet.dtype) + if state_dict is not None: + unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"]) + + unet.config.encoder_hid_dim_type = "ip_image_proj" + for name, module in unet.named_modules(): + if "attn2" in name and isinstance(module, Attention): + if not isinstance(module.processor, IPAdapterAttnProcessor2_0): + module.set_processor( + IPAdapterAttnProcessor2_0( + hidden_size=module.query_dim, + cross_attention_dim=cross_attention_dim, + scale=1.0, + ).to(unet.device, unet.dtype) + ) + if state_dict is not None: + module.processor.load_state_dict(state_dict[f"{name}.processor"]) + + +def set_ip_adapter_scale(unet, scale=1.0): + for name, module in unet.named_modules(): + if isinstance(module, IPAdapterAttnProcessor2_0): + module.scale = scale diff --git a/ip_adapter_diffusers/ip_adapter_extra_attn_flux.py b/ip_adapter_diffusers/ip_adapter_extra_attn_flux.py new file mode 100644 index 0000000000000000000000000000000000000000..1140154010e841b00d17a0c94920b5cf960667b2 --- /dev/null +++ b/ip_adapter_diffusers/ip_adapter_extra_attn_flux.py @@ -0,0 +1,365 @@ +from typing import Callable, List, Optional, Tuple, Union +from diffusers.models.attention_processor import Attention +from diffusers.models.embeddings import ( + ImageProjection, + IPAdapterPlusImageProjection, +) +import torch +import torch.nn as nn +import torch.nn.functional as F +import copy +from diffusers import FluxPipeline + + +def apply_rope(xq, xk, freqs_cis): + xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2) + xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2) + xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1] + xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1] + return 
xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
+
+
+class IPAdapterFluxSingleAttnProcessor2_0(nn.Module):
+    r"""
+    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
+    """
+
+    def __init__(
+        self, cross_attention_dim, hidden_size, scale=1.0, num_text_tokens=512
+    ):
+        super().__init__()
+
+        if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError(
+                "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
+            )
+
+        self.scale = scale
+
+        self.to_q_ip = nn.Linear(hidden_size, hidden_size, bias=False)
+        self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
+
+        self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
+        self.norm1 = nn.LayerNorm(hidden_size)
+        self.norm2 = nn.LayerNorm(cross_attention_dim)
+
+        self.ip_hidden_states = None
+        self.num_text_tokens = num_text_tokens
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states: torch.Tensor,
+        encoder_hidden_states: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        image_rotary_emb: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
+        input_ndim = hidden_states.ndim
+
+        if input_ndim == 4:
+            batch_size, channel, height, width = hidden_states.shape
+            hidden_states = hidden_states.view(
+                batch_size, channel, height * width
+            ).transpose(1, 2)
+
+        batch_size, _, _ = (
+            hidden_states.shape
+            if encoder_hidden_states is None
+            else encoder_hidden_states.shape
+        )
+
+        query = attn.to_q(hidden_states)
+        if encoder_hidden_states is None:
+            encoder_hidden_states = hidden_states
+
+        key = attn.to_k(encoder_hidden_states)
+        value = attn.to_v(encoder_hidden_states)
+
+        inner_dim = key.shape[-1]
+        head_dim = inner_dim // attn.heads
+
+        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        if attn.norm_q is not None:
+            query = attn.norm_q(query)
+        if attn.norm_k is not None:
+            key = attn.norm_k(key)
+
+        # Apply RoPE if needed
+        if image_rotary_emb is not None:
+            query, key = apply_rope(query, key, image_rotary_emb)
+
+        # the output of sdp = (batch, num_heads, seq_len, head_dim)
+        # TODO: add support for attn.scale when we move to Torch 2.1
+        hidden_states = F.scaled_dot_product_attention(
+            query, key, value, dropout_p=0.0, is_causal=False
+        )
+
+        hidden_states = hidden_states.transpose(1, 2).reshape(
+            batch_size, -1, attn.heads * head_dim
+        )
+        hidden_states = hidden_states.to(query.dtype)
+
+        ## ip adapter
+        image_hidden_states = self.norm1(hidden_states)
+        ip_hidden_states = self.norm2(self.ip_hidden_states)
+
+        ip_query = self.to_q_ip(image_hidden_states)
+        ip_query = ip_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        # keys/values come from the normalized image-prompt tokens
+        ip_key = self.to_k_ip(ip_hidden_states)
+        ip_value = self.to_v_ip(ip_hidden_states)
+        ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        ip_hidden_states = F.scaled_dot_product_attention(
+            ip_query,
+            ip_key,
+            ip_value,
+            dropout_p=0.0,
+            is_causal=False,
+        )
+        ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(
+            batch_size, -1, attn.heads * head_dim
+        )
+        ip_hidden_states = ip_hidden_states.to(query.dtype)
+
+        hidden_states += self.scale * ip_hidden_states
+        if input_ndim == 4:
+            hidden_states = hidden_states.transpose(-1, -2).reshape(
+                batch_size, channel, height, width
+            )
+
+        return hidden_states
+
+
+class IPAdapterFluxAttnProcessor2_0(nn.Module):
+    """Attention processor used typically in processing the SD3-like self-attention projections."""
+
+    def __init__(self, cross_attention_dim, hidden_size, scale=1.0):
+        super().__init__()
+
+        if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError(
+                "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
+            )
+        self.scale = scale
+
+        self.to_q_ip = nn.Linear(hidden_size, hidden_size, bias=False)
+        self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
+
+        self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
+
+        self.norm1 = nn.LayerNorm(hidden_size)
+        self.norm2 = nn.LayerNorm(cross_attention_dim)
+
+        self.ip_hidden_states = None
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states: torch.FloatTensor,
+        encoder_hidden_states: torch.FloatTensor = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        image_rotary_emb: Optional[torch.Tensor] = None,
+    ) -> torch.FloatTensor:
+        input_ndim = hidden_states.ndim
+        if input_ndim == 4:
+            batch_size, channel, height, width = hidden_states.shape
+            hidden_states = hidden_states.view(
+                batch_size, channel, height * width
+            ).transpose(1, 2)
+        context_input_ndim = encoder_hidden_states.ndim
+        if context_input_ndim == 4:
+            batch_size, channel, height, width = encoder_hidden_states.shape
+            encoder_hidden_states = encoder_hidden_states.view(
+                batch_size, channel, height * width
+            ).transpose(1, 2)
+
+        batch_size = encoder_hidden_states.shape[0]
+
+        # `sample` projections.
+        query = attn.to_q(hidden_states)
+        key = attn.to_k(hidden_states)
+        value = attn.to_v(hidden_states)
+
+        inner_dim = key.shape[-1]
+        head_dim = inner_dim // attn.heads
+
+        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        if attn.norm_q is not None:
+            query = attn.norm_q(query)
+        if attn.norm_k is not None:
+            key = attn.norm_k(key)
+
+        # `context` projections.
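+        # project the text stream with the joint block's added q/k/v, then concatenate with the image stream for joint attention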
+        encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states)
+        encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
+        encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
+
+        encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(
+            batch_size, -1, attn.heads, head_dim
+        ).transpose(1, 2)
+        encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(
+            batch_size, -1, attn.heads, head_dim
+        ).transpose(1, 2)
+        encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(
+            batch_size, -1, attn.heads, head_dim
+        ).transpose(1, 2)
+
+        if attn.norm_added_q is not None:
+            encoder_hidden_states_query_proj = attn.norm_added_q(
+                encoder_hidden_states_query_proj
+            )
+        if attn.norm_added_k is not None:
+            encoder_hidden_states_key_proj = attn.norm_added_k(
+                encoder_hidden_states_key_proj
+            )
+        # attention
+        query = torch.cat([encoder_hidden_states_query_proj, query], dim=2)
+        key = torch.cat([encoder_hidden_states_key_proj, key], dim=2)
+        value = torch.cat([encoder_hidden_states_value_proj, value], dim=2)
+
+        if image_rotary_emb is not None:
+            query, key = apply_rope(query, key, image_rotary_emb)
+
+        hidden_states = F.scaled_dot_product_attention(
+            query, key, value, dropout_p=0.0, is_causal=False
+        )
+        hidden_states = hidden_states.transpose(1, 2).reshape(
+            batch_size, -1, attn.heads * head_dim
+        )
+        hidden_states = hidden_states.to(query.dtype)
+
+        encoder_hidden_states, hidden_states = (
+            hidden_states[:, : encoder_hidden_states.shape[1]],
+            hidden_states[:, encoder_hidden_states.shape[1] :],
+        )
+
+        # ip adapter
+        ip_query = self.to_q_ip(self.norm1(hidden_states))
+        ip_query = ip_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        ip_hidden_states = self.norm2(self.ip_hidden_states)
+        ip_key = self.to_k_ip(ip_hidden_states)
+        ip_value = self.to_v_ip(ip_hidden_states)
+        ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        ip_hidden_states = F.scaled_dot_product_attention(
+            ip_query,
+            ip_key,
+            ip_value,
+            dropout_p=0.0,
+            is_causal=False,
+        )
+        ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(
+            batch_size, -1, attn.heads * head_dim
+        )
+        ip_hidden_states = ip_hidden_states.to(query.dtype)
+        hidden_states = hidden_states + self.scale * ip_hidden_states
+
+        # linear proj
+        hidden_states = attn.to_out[0](hidden_states)
+        # dropout
+        hidden_states = attn.to_out[1](hidden_states)
+        encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
+
+        if input_ndim == 4:
+            hidden_states = hidden_states.transpose(-1, -2).reshape(
+                batch_size, channel, height, width
+            )
+        if context_input_ndim == 4:
+            encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(
+                batch_size, channel, height, width
+            )
+
+        return hidden_states, encoder_hidden_states
+
+
+def save_ip_adapter(dit, path):
+    state_dict = {}
+    state_dict["encoder_hid_proj"] = dit.encoder_hid_proj.state_dict()
+
+    for name, module in dit.named_modules():
+        if isinstance(module, IPAdapterFluxAttnProcessor2_0) or isinstance(
+            module, IPAdapterFluxSingleAttnProcessor2_0
+        ):
+            state_dict[name] = module.state_dict()
+
+    torch.save(state_dict, path)
+
+
+def load_ip_adapter(
+    dit,
+    path=None,
+    clip_embeddings_dim=1024,
+    cross_attention_dim=3072,
+    num_image_text_embeds=8,
+    attn_blocks=["single", "double"],
+):
+    if path is not None:
+        state_dict = torch.load(path, map_location="cpu")
+        clip_embeddings_dim = state_dict["encoder_hid_proj.image_embeds.weight"].shape[
+            1
+        ]
+        num_image_text_embeds = (
+            state_dict["encoder_hid_proj.image_embeds.weight"].shape[0]
+            // cross_attention_dim
+        )
+    dit.encoder_hid_proj = ImageProjection(
+        cross_attention_dim=cross_attention_dim,
+        image_embed_dim=clip_embeddings_dim,
+        num_image_text_embeds=num_image_text_embeds,
+    ).to(dit.device, dit.dtype)
+
+    for name, module in dit.named_modules():
+        if isinstance(module, Attention):
+            if "single" in name:
+                if "single" in attn_blocks:
+                    module.set_processor(
+                        IPAdapterFluxSingleAttnProcessor2_0(
+                            hidden_size=module.query_dim,
+                            cross_attention_dim=cross_attention_dim,
+                        ).to(dit.device, dit.dtype)
+                    )
+            elif "double" in attn_blocks:
+                module.set_processor(
+                    IPAdapterFluxAttnProcessor2_0(
+                        hidden_size=module.query_dim,
+                        cross_attention_dim=cross_attention_dim,
+                    ).to(dit.device, dit.dtype)
+                )
+
+    if path is not None:
+        dit.load_state_dict(state_dict, strict=False)
+
+
+def set_ip_hidden_states(dit, image_embeds):
+    for name, module in dit.named_modules():
+        if isinstance(
+            module,
+            (IPAdapterFluxSingleAttnProcessor2_0, IPAdapterFluxAttnProcessor2_0),
+        ):
+            module.ip_hidden_states = image_embeds.clone()
+
+
+def clear_ip_hidden_states(dit):
+    for name, module in dit.named_modules():
+        if isinstance(
+            module,
+            (IPAdapterFluxSingleAttnProcessor2_0, IPAdapterFluxAttnProcessor2_0),
+        ):
+            module.ip_hidden_states = None
+
+
+def set_ip_adapter_scale(dit, scale=1.0):
+    for name, module in dit.named_modules():
+        if isinstance(module, IPAdapterFluxSingleAttnProcessor2_0) or isinstance(
+            module, IPAdapterFluxAttnProcessor2_0
+        ):
+            module.scale = scale
diff --git a/ip_adapter_diffusers/ip_adapter_flux.py b/ip_adapter_diffusers/ip_adapter_flux.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1786da8d50ef517b7f24dedc485a431fb43ee11
--- /dev/null
+++ b/ip_adapter_diffusers/ip_adapter_flux.py
@@ -0,0 +1,412 @@
+from typing import Callable, List, Optional, Tuple, Union
+from diffusers.models.attention_processor import Attention
+from diffusers.models.embeddings import (
+    ImageProjection,
+    IPAdapterPlusImageProjection,
+    MultiIPAdapterImageProjection,
+)
+# the SDXL-style processor (with to_q_ip) used by load_ip_adapter_plus below
+from ip_adapter_diffusers.ip_adapter_extra_attn import IPAdapterAttnProcessor2_0
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import copy
+from diffusers.models.normalization import RMSNorm
+
+
+def apply_rope(xq, xk, freqs_cis):
+    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
+    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
+    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
+    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
+    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
+
+
+class IPAdapterFluxSingleAttnProcessor2_0(nn.Module):
+    r"""
+    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
+    """
+
+    def __init__(
+        self, cross_attention_dim, hidden_size, scale=1.0, num_text_tokens=512
+    ):
+        super().__init__()
+        self.scale = scale
+
+        self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=True)
+
+        self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=True)
+        if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError(
+                "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
+ ) + self.ip_hidden_states = None + self.num_text_tokens = 512 + + nn.init.zeros_(self.to_k_ip.weight) + nn.init.zeros_(self.to_k_ip.bias) + + nn.init.zeros_(self.to_v_ip.weight) + nn.init.zeros_(self.to_v_ip.bias) + + def __call__( + self, + attn: Attention, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size, _, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + query = attn.to_q(hidden_states) + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + if attn.norm_q is not None: + query = attn.norm_q(query) + if attn.norm_k is not None: + key = attn.norm_k(key) + ip_query = query[:, :, self.num_text_tokens :].clone() + # Apply RoPE if needed + if image_rotary_emb is not None: + query, key = apply_rope(query, key, image_rotary_emb) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + ## ip adapter + + ip_key = self.to_k_ip(self.ip_hidden_states) + ip_value = self.to_v_ip(self.ip_hidden_states) + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_hidden_states = F.scaled_dot_product_attention( + ip_query, + ip_key, + ip_value, + dropout_p=0.0, + is_causal=False, + ) + ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + ip_hidden_states = ip_hidden_states.to(query.dtype) + + hidden_states[:, self.num_text_tokens :] += self.scale * ip_hidden_states + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + return hidden_states + + +class IPAdapterFluxAttnProcessor2_0(nn.Module): + """Attention processor used typically in processing the SD3-like self-attention projections.""" + + def __init__(self, cross_attention_dim, hidden_size, scale=1.0): + super().__init__() + self.scale = scale + + self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=True) + + self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=True) + + self.ip_hidden_states = None + nn.init.zeros_(self.to_k_ip.weight) + nn.init.zeros_(self.to_k_ip.bias) + + nn.init.zeros_(self.to_v_ip.weight) + nn.init.zeros_(self.to_v_ip.bias) + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + "FluxAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
+ ) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: torch.FloatTensor = None, + attention_mask: Optional[torch.FloatTensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + input_ndim = hidden_states.ndim + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + context_input_ndim = encoder_hidden_states.ndim + if context_input_ndim == 4: + batch_size, channel, height, width = encoder_hidden_states.shape + encoder_hidden_states = encoder_hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size = encoder_hidden_states.shape[0] + + # `sample` projections. + query = attn.to_q(hidden_states) + key = attn.to_k(hidden_states) + value = attn.to_v(hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + if attn.norm_q is not None: + query = attn.norm_q(query) + if attn.norm_k is not None: + key = attn.norm_k(key) + + # `context` projections. + encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) + + encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view( + batch_size, -1, attn.heads, head_dim + ).transpose(1, 2) + encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view( + batch_size, -1, attn.heads, head_dim + ).transpose(1, 2) + encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view( + batch_size, -1, attn.heads, head_dim + ).transpose(1, 2) + + if attn.norm_added_q is not None: + encoder_hidden_states_query_proj = attn.norm_added_q( + encoder_hidden_states_query_proj + ) + if attn.norm_added_k is not None: + encoder_hidden_states_key_proj = attn.norm_added_k( + encoder_hidden_states_key_proj + ) + + ip_query = query.clone() + # attention + query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) + key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) + value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) + + if image_rotary_emb is not None: + query, key = apply_rope(query, key, image_rotary_emb) + + hidden_states = F.scaled_dot_product_attention( + query, key, value, dropout_p=0.0, is_causal=False + ) + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + hidden_states = hidden_states.to(query.dtype) + + encoder_hidden_states, hidden_states = ( + hidden_states[:, : encoder_hidden_states.shape[1]], + hidden_states[:, encoder_hidden_states.shape[1] :], + ) + + # ip adapter + + ip_key = self.to_k_ip(self.ip_hidden_states) + ip_value = self.to_v_ip(self.ip_hidden_states) + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_hidden_states = F.scaled_dot_product_attention( + ip_query, + ip_key, + ip_value, + dropout_p=0.0, + is_causal=False, + ) + ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + ip_hidden_states = ip_hidden_states.to(query.dtype) 
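+        # fuse the image-prompt branch into the image stream, weighted by the adapter scale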
+        hidden_states = hidden_states + self.scale * ip_hidden_states
+
+        # linear proj
+        hidden_states = attn.to_out[0](hidden_states)
+        # dropout
+        hidden_states = attn.to_out[1](hidden_states)
+        encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
+
+        if input_ndim == 4:
+            hidden_states = hidden_states.transpose(-1, -2).reshape(
+                batch_size, channel, height, width
+            )
+        if context_input_ndim == 4:
+            encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(
+                batch_size, channel, height, width
+            )
+
+        return hidden_states, encoder_hidden_states
+
+
+def save_ip_adapter(dit, path):
+    state_dict = {}
+    state_dict["encoder_hid_proj"] = dit.encoder_hid_proj.state_dict()
+
+    for name, module in dit.named_modules():
+        if isinstance(module, IPAdapterFluxAttnProcessor2_0) or isinstance(
+            module, IPAdapterFluxSingleAttnProcessor2_0
+        ):
+            state_dict[name] = module.state_dict()
+
+    torch.save(state_dict, path)
+
+
+def load_ip_adapter(
+    dit,
+    path=None,
+    clip_embeddings_dim=1024,
+    cross_attention_dim=3072,
+    num_image_text_embeds=8,
+    attn_blocks=["single", "double"],
+):
+    if path is not None:
+        state_dict = torch.load(path, map_location="cpu")
+        clip_embeddings_dim = state_dict["encoder_hid_proj.image_embeds.weight"].shape[
+            1
+        ]
+        num_image_text_embeds = (
+            state_dict["encoder_hid_proj.image_embeds.weight"].shape[0]
+            // cross_attention_dim
+        )
+    dit.encoder_hid_proj = ImageProjection(
+        cross_attention_dim=cross_attention_dim,
+        image_embed_dim=clip_embeddings_dim,
+        num_image_text_embeds=num_image_text_embeds,
+    ).to(dit.device, dit.dtype)
+
+    for name, module in dit.named_modules():
+        if isinstance(module, Attention):
+            if "single" in name:
+                if "single" in attn_blocks:
+                    module.set_processor(
+                        IPAdapterFluxSingleAttnProcessor2_0(
+                            hidden_size=module.query_dim,
+                            cross_attention_dim=cross_attention_dim,
+                        ).to(dit.device, dit.dtype)
+                    )
+            elif "double" in attn_blocks:
+                module.set_processor(
+                    IPAdapterFluxAttnProcessor2_0(
+                        hidden_size=module.query_dim,
+                        cross_attention_dim=cross_attention_dim,
+                    ).to(dit.device, dit.dtype)
+                )
+
+    if path is not None:
+        dit.load_state_dict(state_dict, strict=False)
+
+
+def set_ip_hidden_states(dit, image_embeds):
+    for name, module in dit.named_modules():
+        if isinstance(
+            module,
+            (IPAdapterFluxSingleAttnProcessor2_0, IPAdapterFluxAttnProcessor2_0),
+        ):
+            module.ip_hidden_states = image_embeds.clone()
+
+
+def clear_ip_hidden_states(dit):
+    for name, module in dit.named_modules():
+        if isinstance(
+            module,
+            (IPAdapterFluxSingleAttnProcessor2_0, IPAdapterFluxAttnProcessor2_0),
+        ):
+            module.ip_hidden_states = None
+
+
+def set_ip_adapter_scale(dit, scale=1.0):
+    for name, module in dit.named_modules():
+        if isinstance(module, IPAdapterFluxSingleAttnProcessor2_0) or isinstance(
+            module, IPAdapterFluxAttnProcessor2_0
+        ):
+            module.scale = scale
+
+
+def load_ip_adapter_plus(
+    dit,
+    path=None,
+    embed_dims=1280,
+    output_dims=2048,
+    hidden_dims=1280,
+    depth=4,
+    dim_head=64,
+    heads=20,
+    num_queries=16,
+    ffn_ratio=4,
+    cross_attention_dim=2048,
+):
+    if path is not None:
+        state_dict = torch.load(path)
+    else:
+        state_dict = None
+    if not hasattr(dit, "encoder_hid_proj") or dit.encoder_hid_proj is None:
+        dit.encoder_hid_proj = MultiIPAdapterImageProjection(
+            [
+                IPAdapterPlusImageProjection(
+                    embed_dims=embed_dims,
+                    output_dims=output_dims,
+                    hidden_dims=hidden_dims,
+                    depth=depth,
+                    dim_head=dim_head,
+                    heads=heads,
+                    num_queries=num_queries,
+                    ffn_ratio=ffn_ratio,
+                )
+            ]
+        ).to(dit.device, dit.dtype)
+    if 
state_dict is not None: + dit.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"]) + + dit.config.encoder_hid_dim_type = "ip_image_proj" + for name, module in dit.named_modules(): + if "attn2" in name and isinstance(module, Attention): + if not isinstance(module.processor, IPAdapterAttnProcessor2_0): + module.set_processor( + IPAdapterAttnProcessor2_0( + hidden_size=module.query_dim, + cross_attention_dim=cross_attention_dim, + ).to(dit.device, dit.dtype) + ) + if state_dict is not None: + module.processor.load_state_dict(state_dict[f"{name}.processor"]) + else: + module.processor.to_k_ip.load_state_dict(module.to_k.state_dict()) + module.processor.to_v_ip.load_state_dict(module.to_v.state_dict()) diff --git a/ip_adapter_diffusers/mm_adapter.py b/ip_adapter_diffusers/mm_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..66f97acf4fb8ff3ea1ad6f675c12fb360bbe7024 --- /dev/null +++ b/ip_adapter_diffusers/mm_adapter.py @@ -0,0 +1,218 @@ +import math + +import torch +import torch.nn as nn + + +# FFN +def FeedForward(dim, mult=4): + inner_dim = int(dim * mult) + return nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, inner_dim, bias=False), + nn.GELU(), + nn.Linear(inner_dim, dim, bias=False), + ) + + +def reshape_tensor(x, heads): + bs, length, width = x.shape + # (bs, length, width) --> (bs, length, n_heads, dim_per_head) + x = x.view(bs, length, heads, -1) + # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) + x = x.transpose(1, 2) + # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) + x = x.reshape(bs, heads, length, -1) + return x + + +class PerceiverAttentionCA(nn.Module): + def __init__(self, *, dim=3072, dim_head=128, heads=16, kv_dim=2048): + super().__init__() + self.scale = dim_head**-0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear( + dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False + ) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x, latents): + """ + Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, seq_len, _ = latents.shape + + q = self.to_q(latents) + k, v = self.to_kv(x).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose( + -2, -1 + ) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1) + + return self.to_out(out) + + +class PerceiverAttention(nn.Module): + def __init__(self, *, dim, dim_head=64, heads=8, kv_dim=None): + super().__init__() + self.scale = dim_head**-0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear( + dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False + ) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x, latents): + """ 
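+        Latents cross-attend to the concatenation of image features and the
+        latents themselves (keys/values are built from ``[x, latents]``).
+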
+ Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, seq_len, _ = latents.shape + + q = self.to_q(latents) + kv_input = torch.cat((x, latents), dim=-2) + k, v = self.to_kv(kv_input).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose( + -2, -1 + ) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1) + + return self.to_out(out) + + +class IDFormer(nn.Module): + """ + - perceiver resampler like arch (compared with previous MLP-like arch) + - we concat id embedding (generated by arcface) and query tokens as latents + - latents will attend each other and interact with vit features through cross-attention + - vit features are multi-scaled and inserted into IDFormer in order, currently, each scale corresponds to two + IDFormer layers + """ + + def __init__( + self, + dim=1024, + depth=10, + dim_head=64, + heads=16, + num_id_token=5, + num_queries=32, + output_dim=2048, + ff_mult=4, + ): + super().__init__() + + self.num_id_token = num_id_token + self.dim = dim + self.num_queries = num_queries + assert depth % 5 == 0 + self.depth = depth // 5 + scale = dim**-0.5 + + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) * scale) + self.proj_out = nn.Parameter(scale * torch.randn(dim, output_dim)) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), + FeedForward(dim=dim, mult=ff_mult), + ] + ) + ) + + for i in range(5): + setattr( + self, + f"mapping_{i}", + nn.Sequential( + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, dim), + ), + ) + + self.id_embedding_mapping = nn.Sequential( + nn.Linear(1280, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, dim * num_id_token), + ) + + def forward(self, x, y): + + latents = self.latents.repeat(x.size(0), 1, 1) + + num_duotu = x.shape[1] if x.ndim == 3 else 1 + + x = self.id_embedding_mapping(x) + x = x.reshape(-1, self.num_id_token * num_duotu, self.dim) + + latents = torch.cat((latents, x), dim=1) + + for i in range(5): + vit_feature = getattr(self, f"mapping_{i}")(y[i]) + ctx_feature = torch.cat((x, vit_feature), dim=1) + for attn, ff in self.layers[i * self.depth : (i + 1) * self.depth]: + latents = attn(ctx_feature, latents) + latents + latents = ff(latents) + latents + + latents = latents[:, : self.num_queries] + latents = latents @ self.proj_out + return latents diff --git a/ip_adapter_diffusers/resampler.py b/ip_adapter_diffusers/resampler.py new file mode 100644 index 0000000000000000000000000000000000000000..0056e4e028fa5d4459eaba1e69bf89ba7de8d074 --- /dev/null +++ b/ip_adapter_diffusers/resampler.py @@ -0,0 +1,166 @@ +# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py +# and https://github.com/lucidrains/imagen-pytorch/blob/main/imagen_pytorch/imagen_pytorch.py + +import math + +import torch +import torch.nn as nn +from einops import rearrange 
+from einops.layers.torch import Rearrange + + +# FFN +def FeedForward(dim, mult=4): + inner_dim = int(dim * mult) + return nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, inner_dim, bias=False), + nn.GELU(), + nn.Linear(inner_dim, dim, bias=False), + ) + + +def reshape_tensor(x, heads): + bs, length, width = x.shape + # (bs, length, width) --> (bs, length, n_heads, dim_per_head) + x = x.view(bs, length, heads, -1) + # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) + x = x.transpose(1, 2) + # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) + x = x.reshape(bs, heads, length, -1) + return x + + +class PerceiverAttention(nn.Module): + def __init__(self, *, dim, dim_head=64, heads=8): + super().__init__() + self.scale = dim_head**-0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x, latents): + """ + Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, l, _ = latents.shape + + q = self.to_q(latents) + kv_input = torch.cat((x, latents), dim=-2) + k, v = self.to_kv(kv_input).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose( + -2, -1 + ) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, l, -1) + + return self.to_out(out) + + +class Resampler(nn.Module): + def __init__( + self, + dim=1024, + depth=8, + dim_head=64, + heads=16, + num_queries=8, + embedding_dim=768, + output_dim=1024, + ff_mult=4, + max_seq_len: int = 257, # CLIP tokens + CLS token + apply_pos_emb: bool = False, + num_latents_mean_pooled: int = 0, # number of latents derived from mean pooled representation of the sequence + ): + super().__init__() + self.pos_emb = ( + nn.Embedding(max_seq_len, embedding_dim) if apply_pos_emb else None + ) + + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) + + self.proj_in = nn.Linear(embedding_dim, dim) + + self.proj_out = nn.Linear(dim, output_dim) + self.norm_out = nn.LayerNorm(output_dim) + + self.to_latents_from_mean_pooled_seq = ( + nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, dim * num_latents_mean_pooled), + Rearrange("b (n d) -> b n d", n=num_latents_mean_pooled), + ) + if num_latents_mean_pooled > 0 + else None + ) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), + FeedForward(dim=dim, mult=ff_mult), + ] + ) + ) + + def forward(self, x): + if self.pos_emb is not None: + n, device = x.shape[1], x.device + pos_emb = self.pos_emb(torch.arange(n, device=device)) + x = x + pos_emb + + latents = self.latents.repeat(x.size(0), 1, 1) + + x = self.proj_in(x) + + if self.to_latents_from_mean_pooled_seq: + meanpooled_seq = masked_mean( + x, + dim=1, + mask=torch.ones(x.shape[:2], device=x.device, dtype=torch.bool), + ) + meanpooled_latents = 
self.to_latents_from_mean_pooled_seq(meanpooled_seq) + latents = torch.cat((meanpooled_latents, latents), dim=-2) + + for attn, ff in self.layers: + latents = attn(x, latents) + latents + latents = ff(latents) + latents + + latents = self.proj_out(latents) + return self.norm_out(latents) + + +def masked_mean(t, *, dim, mask=None): + if mask is None: + return t.mean(dim=dim) + + denom = mask.sum(dim=dim, keepdim=True) + mask = rearrange(mask, "b n -> b n 1") + masked_t = t.masked_fill(~mask, 0.0) + + return masked_t.sum(dim=dim) / denom.clamp(min=1e-5) diff --git a/pulid/attention_processor.py b/pulid/attention_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..fb4f9da179952101e745861c0b112d1e592b2c84 --- /dev/null +++ b/pulid/attention_processor.py @@ -0,0 +1,422 @@ +# modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py +import torch +import torch.nn as nn +import torch.nn.functional as F + +NUM_ZERO = 0 +ORTHO = False +ORTHO_v2 = False + + +class AttnProcessor(nn.Module): + def __init__(self): + super().__init__() + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + id_embedding=None, + id_scale=1.0, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class IDAttnProcessor(nn.Module): + r""" + Attention processor for ID-Adapater. + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + scale (`float`, defaults to 1.0): + the weight scale of image prompt. 
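+
+    The module-level NUM_ZERO and ORTHO flags control zero-token padding of the
+    ID embedding and orthogonal fusion of the ID branch, respectively.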
+ """ + + def __init__(self, hidden_size, cross_attention_dim=None): + super().__init__() + self.id_to_k = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.id_to_v = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + id_embedding=None, + id_scale=1.0, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # for id-adapter + if id_embedding is not None: + if NUM_ZERO == 0: + id_key = self.id_to_k(id_embedding) + id_value = self.id_to_v(id_embedding) + else: + zero_tensor = torch.zeros( + (id_embedding.size(0), NUM_ZERO, id_embedding.size(-1)), + dtype=id_embedding.dtype, + device=id_embedding.device, + ) + id_key = self.id_to_k(torch.cat((id_embedding, zero_tensor), dim=1)) + id_value = self.id_to_v(torch.cat((id_embedding, zero_tensor), dim=1)) + + id_key = attn.head_to_batch_dim(id_key).to(query.dtype) + id_value = attn.head_to_batch_dim(id_value).to(query.dtype) + + id_attention_probs = attn.get_attention_scores(query, id_key, None) + id_hidden_states = torch.bmm(id_attention_probs, id_value) + id_hidden_states = attn.batch_to_head_dim(id_hidden_states) + + if not ORTHO: + hidden_states = hidden_states + id_scale * id_hidden_states + else: + projection = ( + torch.sum((hidden_states * id_hidden_states), dim=-2, keepdim=True) + / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True) + * hidden_states + ) + orthogonal = id_hidden_states - projection + hidden_states = hidden_states + id_scale * orthogonal + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class AttnProcessor2_0(nn.Module): + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). 
+ """ + + def __init__(self): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + id_embedding=None, + id_scale=1.0, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class IDAttnProcessor2_0(torch.nn.Module): + r""" + Attention processor for ID-Adapater for PyTorch 2.0. + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. 
+ """ + + def __init__(self, hidden_size, cross_attention_dim=None): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + + self.id_to_k = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.id_to_v = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + id_embedding=None, + id_scale=1.0, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # for id embedding + if id_embedding is not None: + if NUM_ZERO == 0: + id_key = self.id_to_k(id_embedding).to(query.dtype) + id_value = self.id_to_v(id_embedding).to(query.dtype) + else: + zero_tensor = torch.zeros( + (id_embedding.size(0), NUM_ZERO, id_embedding.size(-1)), + dtype=id_embedding.dtype, + device=id_embedding.device, + ) + id_key = self.id_to_k(torch.cat((id_embedding, zero_tensor), dim=1)).to(query.dtype) + id_value = self.id_to_v(torch.cat((id_embedding, zero_tensor), dim=1)).to(query.dtype) + + id_key = id_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + id_value = id_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + id_hidden_states = F.scaled_dot_product_attention( + query, id_key, id_value, attn_mask=None, dropout_p=0.0, is_causal=False + ) + + id_hidden_states = id_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + id_hidden_states = id_hidden_states.to(query.dtype) + + if not ORTHO and not ORTHO_v2: + hidden_states = hidden_states + id_scale * 
id_hidden_states + elif ORTHO_v2: + orig_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + id_hidden_states = id_hidden_states.to(torch.float32) + attn_map = query @ id_key.transpose(-2, -1) + attn_mean = attn_map.softmax(dim=-1).mean(dim=1) + attn_mean = attn_mean[:, :, :5].sum(dim=-1, keepdim=True) + projection = ( + torch.sum((hidden_states * id_hidden_states), dim=-2, keepdim=True) + / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True) + * hidden_states + ) + orthogonal = id_hidden_states + (attn_mean - 1) * projection + hidden_states = hidden_states + id_scale * orthogonal + hidden_states = hidden_states.to(orig_dtype) + else: + orig_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + id_hidden_states = id_hidden_states.to(torch.float32) + projection = ( + torch.sum((hidden_states * id_hidden_states), dim=-2, keepdim=True) + / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True) + * hidden_states + ) + orthogonal = id_hidden_states - projection + hidden_states = hidden_states + id_scale * orthogonal + hidden_states = hidden_states.to(orig_dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states diff --git a/pulid/encoders.py b/pulid/encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..47dff3dbcb0167beb09aa9df8c4ffdc560042a9f --- /dev/null +++ b/pulid/encoders.py @@ -0,0 +1,64 @@ +import torch +import torch.nn as nn + + +class IDEncoder(nn.Module): + def __init__(self, width=1280, context_dim=2048, num_token=5): + super().__init__() + self.num_token = num_token + self.context_dim = context_dim + h1 = min((context_dim * num_token) // 4, 1024) + h2 = min((context_dim * num_token) // 2, 1024) + self.body = nn.Sequential( + nn.Linear(width, h1), + nn.LayerNorm(h1), + nn.LeakyReLU(), + nn.Linear(h1, h2), + nn.LayerNorm(h2), + nn.LeakyReLU(), + nn.Linear(h2, context_dim * num_token), + ) + + for i in range(5): + setattr( + self, + f'mapping_{i}', + nn.Sequential( + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, context_dim), + ), + ) + + setattr( + self, + f'mapping_patch_{i}', + nn.Sequential( + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, context_dim), + ), + ) + + def forward(self, x, y): + # x shape [N, C] + x = self.body(x) + x = x.reshape(-1, self.num_token, self.context_dim) + + hidden_states = () + for i, emb in enumerate(y): + hidden_state = getattr(self, f'mapping_{i}')(emb[:, :1]) + getattr(self, f'mapping_patch_{i}')( + emb[:, 1:] + ).mean(dim=1, keepdim=True) + hidden_states += (hidden_state,) + hidden_states = torch.cat(hidden_states, dim=1) + + return torch.cat([x, hidden_states], dim=1) diff --git a/pulid/encoders_transformer.py b/pulid/encoders_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..afbdaf36e6cc90fd080c405ded56423f1eaeae11 --- /dev/null +++ b/pulid/encoders_transformer.py @@ -0,0 +1,209 @@ +import math + +import torch +import torch.nn as nn + + +# FFN +def FeedForward(dim, 
mult=4): + inner_dim = int(dim * mult) + return nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, inner_dim, bias=False), + nn.GELU(), + nn.Linear(inner_dim, dim, bias=False), + ) + + +def reshape_tensor(x, heads): + bs, length, width = x.shape + # (bs, length, width) --> (bs, length, n_heads, dim_per_head) + x = x.view(bs, length, heads, -1) + # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) + x = x.transpose(1, 2) + # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) + x = x.reshape(bs, heads, length, -1) + return x + + +class PerceiverAttentionCA(nn.Module): + def __init__(self, *, dim=3072, dim_head=128, heads=16, kv_dim=2048): + super().__init__() + self.scale = dim_head ** -0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x, latents): + """ + Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, seq_len, _ = latents.shape + + q = self.to_q(latents) + k, v = self.to_kv(x).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1) + + return self.to_out(out) + + +class PerceiverAttention(nn.Module): + def __init__(self, *, dim, dim_head=64, heads=8, kv_dim=None): + super().__init__() + self.scale = dim_head ** -0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x, latents): + """ + Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, seq_len, _ = latents.shape + + q = self.to_q(latents) + kv_input = torch.cat((x, latents), dim=-2) + k, v = self.to_kv(kv_input).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1) + + return self.to_out(out) + + +class IDFormer(nn.Module): + """ + - perceiver resampler like arch (compared with previous MLP-like arch) + - we concat id embedding (generated by arcface) and query tokens as latents + - latents will attend each other and interact with vit features through cross-attention + - vit features are 
multi-scaled and inserted into IDFormer in order, currently, each scale corresponds to two + IDFormer layers + """ + def __init__( + self, + dim=1024, + depth=10, + dim_head=64, + heads=16, + num_id_token=5, + num_queries=32, + output_dim=2048, + ff_mult=4, + ): + super().__init__() + + self.num_id_token = num_id_token + self.dim = dim + self.num_queries = num_queries + assert depth % 5 == 0 + self.depth = depth // 5 + scale = dim ** -0.5 + + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) * scale) + self.proj_out = nn.Parameter(scale * torch.randn(dim, output_dim)) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), + FeedForward(dim=dim, mult=ff_mult), + ] + ) + ) + + for i in range(5): + setattr( + self, + f'mapping_{i}', + nn.Sequential( + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, dim), + ), + ) + + self.id_embedding_mapping = nn.Sequential( + nn.Linear(1280, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, 1024), + nn.LayerNorm(1024), + nn.LeakyReLU(), + nn.Linear(1024, dim * num_id_token), + ) + + def forward(self, x, y): + + latents = self.latents.repeat(x.size(0), 1, 1) + + num_duotu = x.shape[1] if x.ndim == 3 else 1 + + x = self.id_embedding_mapping(x) + x = x.reshape(-1, self.num_id_token * num_duotu, self.dim) + + latents = torch.cat((latents, x), dim=1) + + for i in range(5): + vit_feature = getattr(self, f'mapping_{i}')(y[i]) + ctx_feature = torch.cat((x, vit_feature), dim=1) + for attn, ff in self.layers[i * self.depth: (i + 1) * self.depth]: + latents = attn(ctx_feature, latents) + latents + latents = ff(latents) + latents + + latents = latents[:, :self.num_queries] + latents = latents @ self.proj_out + return latents diff --git a/pulid/pipeline.py b/pulid/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..3e83a3e221fb5e302812ab5b5a7aed42d8a295b6 --- /dev/null +++ b/pulid/pipeline.py @@ -0,0 +1,228 @@ +import gc + +import cv2 +import insightface +import torch +import torch.nn as nn +from diffusers import ( + DPMSolverMultistepScheduler, + StableDiffusionXLPipeline, + UNet2DConditionModel, +) +from facexlib.parsing import init_parsing_model +from facexlib.utils.face_restoration_helper import FaceRestoreHelper +from huggingface_hub import hf_hub_download, snapshot_download +from insightface.app import FaceAnalysis +from safetensors.torch import load_file +from torchvision.transforms import InterpolationMode +from torchvision.transforms.functional import normalize, resize + +from eva_clip import create_model_and_transforms +from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD +from pulid.encoders import IDEncoder +from pulid.utils import img2tensor, is_torch2_available, tensor2img + +if is_torch2_available(): + from pulid.attention_processor import AttnProcessor2_0 as AttnProcessor + from pulid.attention_processor import IDAttnProcessor2_0 as IDAttnProcessor +else: + from pulid.attention_processor import AttnProcessor, IDAttnProcessor + + +class PuLIDPipeline: + def __init__(self, *args, **kwargs): + super().__init__() + self.device = 'cuda' + sdxl_base_repo = 'stabilityai/stable-diffusion-xl-base-1.0' + sdxl_lightning_repo = 'ByteDance/SDXL-Lightning' + self.sdxl_base_repo = sdxl_base_repo + + # load base model + unet = UNet2DConditionModel.from_config(sdxl_base_repo, 
subfolder='unet').to(self.device, torch.float16) + unet.load_state_dict( + load_file( + hf_hub_download(sdxl_lightning_repo, 'sdxl_lightning_4step_unet.safetensors'), device=self.device + ) + ) + self.hack_unet_attn_layers(unet) + self.pipe = StableDiffusionXLPipeline.from_pretrained( + sdxl_base_repo, unet=unet, torch_dtype=torch.float16, variant="fp16" + ).to(self.device) + self.pipe.watermark = None + + # scheduler + self.pipe.scheduler = DPMSolverMultistepScheduler.from_config( + self.pipe.scheduler.config, timestep_spacing="trailing" + ) + + # ID adapters + self.id_adapter = IDEncoder().to(self.device) + + # preprocessors + # face align and parsing + self.face_helper = FaceRestoreHelper( + upscale_factor=1, + face_size=512, + crop_ratio=(1, 1), + det_model='retinaface_resnet50', + save_ext='png', + device=self.device, + ) + self.face_helper.face_parse = None + self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device) + # clip-vit backbone + model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True) + model = model.visual + self.clip_vision_model = model.to(self.device) + eva_transform_mean = getattr(self.clip_vision_model, 'image_mean', OPENAI_DATASET_MEAN) + eva_transform_std = getattr(self.clip_vision_model, 'image_std', OPENAI_DATASET_STD) + if not isinstance(eva_transform_mean, (list, tuple)): + eva_transform_mean = (eva_transform_mean,) * 3 + if not isinstance(eva_transform_std, (list, tuple)): + eva_transform_std = (eva_transform_std,) * 3 + self.eva_transform_mean = eva_transform_mean + self.eva_transform_std = eva_transform_std + # antelopev2 + snapshot_download('DIAMONIK7777/antelopev2', local_dir='models/antelopev2') + self.app = FaceAnalysis( + name='antelopev2', root='.', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] + ) + self.app.prepare(ctx_id=0, det_size=(640, 640)) + self.handler_ante = insightface.model_zoo.get_model('models/antelopev2/glintr100.onnx') + self.handler_ante.prepare(ctx_id=0) + + gc.collect() + torch.cuda.empty_cache() + + self.load_pretrain() + + # other configs + self.debug_img_list = [] + + def hack_unet_attn_layers(self, unet): + id_adapter_attn_procs = {} + for name, _ in unet.attn_processors.items(): + cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(unet.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = unet.config.block_out_channels[block_id] + if cross_attention_dim is not None: + id_adapter_attn_procs[name] = IDAttnProcessor( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ).to(unet.device) + else: + id_adapter_attn_procs[name] = AttnProcessor() + unet.set_attn_processor(id_adapter_attn_procs) + self.id_adapter_attn_layers = nn.ModuleList(unet.attn_processors.values()) + + def load_pretrain(self): + hf_hub_download('guozinan/PuLID', 'pulid_v1.bin', local_dir='models') + ckpt_path = 'models/pulid_v1.bin' + state_dict = torch.load(ckpt_path, map_location='cpu') + state_dict_dict = {} + for k, v in state_dict.items(): + module = k.split('.')[0] + state_dict_dict.setdefault(module, {}) + new_k = k[len(module) + 1 :] + state_dict_dict[module][new_k] = v + + for module in state_dict_dict: + print(f'loading from 
{module}') + getattr(self, module).load_state_dict(state_dict_dict[module], strict=True) + + def to_gray(self, img): + x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3] + x = x.repeat(1, 3, 1, 1) + return x + + def get_id_embedding(self, image): + """ + Args: + image: numpy rgb image, range [0, 255] + """ + self.face_helper.clean_all() + image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + # get antelopev2 embedding + face_info = self.app.get(image_bgr) + if len(face_info) > 0: + face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[ + -1 + ] # only use the maximum face + id_ante_embedding = face_info['embedding'] + self.debug_img_list.append( + image[ + int(face_info['bbox'][1]) : int(face_info['bbox'][3]), + int(face_info['bbox'][0]) : int(face_info['bbox'][2]), + ] + ) + else: + id_ante_embedding = None + + # using facexlib to detect and align face + self.face_helper.read_image(image_bgr) + self.face_helper.get_face_landmarks_5(only_center_face=True) + self.face_helper.align_warp_face() + if len(self.face_helper.cropped_faces) == 0: + raise RuntimeError('facexlib align face fail') + align_face = self.face_helper.cropped_faces[0] + # incase insightface didn't detect face + if id_ante_embedding is None: + print('fail to detect face using insightface, extract embedding on align face') + id_ante_embedding = self.handler_ante.get_feat(align_face) + + id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device) + if id_ante_embedding.ndim == 1: + id_ante_embedding = id_ante_embedding.unsqueeze(0) + + # parsing + input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0 + input = input.to(self.device) + parsing_out = self.face_helper.face_parse(normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[0] + parsing_out = parsing_out.argmax(dim=1, keepdim=True) + bg_label = [0, 16, 18, 7, 8, 9, 14, 15] + bg = sum(parsing_out == i for i in bg_label).bool() + white_image = torch.ones_like(input) + # only keep the face features + face_features_image = torch.where(bg, white_image, self.to_gray(input)) + self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False)) + + # transform img before sending to eva-clip-vit + face_features_image = resize(face_features_image, self.clip_vision_model.image_size, InterpolationMode.BICUBIC) + face_features_image = normalize(face_features_image, self.eva_transform_mean, self.eva_transform_std) + id_cond_vit, id_vit_hidden = self.clip_vision_model( + face_features_image, return_all_features=False, return_hidden=True, shuffle=False + ) + id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True) + id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm) + + id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1) + id_uncond = torch.zeros_like(id_cond) + id_vit_hidden_uncond = [] + for layer_idx in range(0, len(id_vit_hidden)): + id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden[layer_idx])) + + id_embedding = self.id_adapter(id_cond, id_vit_hidden) + uncond_id_embedding = self.id_adapter(id_uncond, id_vit_hidden_uncond) + + # return id_embedding + return torch.cat((uncond_id_embedding, id_embedding), dim=0) + + def inference(self, prompt, size, prompt_n='', image_embedding=None, id_scale=1.0, guidance_scale=1.2, steps=4): + images = self.pipe( + prompt=prompt, + negative_prompt=prompt_n, + num_images_per_prompt=size[0], + height=size[1], + width=size[2], + num_inference_steps=steps, + guidance_scale=guidance_scale, + 
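+            # (guidance_scale applies standard CFG inside diffusers:
+            #  eps = eps_uncond + scale * (eps_cond - eps_uncond);
+            #  cross_attention_kwargs below forwards the PuLID id embedding and
+            #  its weight to the IDAttnProcessor layers installed by
+            #  hack_unet_attn_layers)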
cross_attention_kwargs={'id_embedding': image_embedding, 'id_scale': id_scale}, + ).images + + return images diff --git a/pulid/pipeline_flux.py b/pulid/pipeline_flux.py new file mode 100644 index 0000000000000000000000000000000000000000..706514cbef319d3822c2f2fc52bff1bc9383f29b --- /dev/null +++ b/pulid/pipeline_flux.py @@ -0,0 +1,194 @@ +import gc + +import cv2 +import insightface +import torch +import torch.nn as nn +from facexlib.parsing import init_parsing_model +from facexlib.utils.face_restoration_helper import FaceRestoreHelper +from huggingface_hub import hf_hub_download, snapshot_download +from insightface.app import FaceAnalysis +from safetensors.torch import load_file +from torchvision.transforms import InterpolationMode +from torchvision.transforms.functional import normalize, resize + +from eva_clip import create_model_and_transforms +from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD +from pulid.encoders_transformer import IDFormer, PerceiverAttentionCA +from pulid.utils import img2tensor, tensor2img + + +class PuLIDPipeline(nn.Module): + def __init__(self, dit, device, weight_dtype=torch.bfloat16, onnx_provider='gpu', *args, **kwargs): + super().__init__() + self.device = device + self.weight_dtype = weight_dtype + double_interval = 2 + single_interval = 4 + + # init encoder + self.pulid_encoder = IDFormer().to(self.device, self.weight_dtype) + + num_ca = 19 // double_interval + 38 // single_interval + if 19 % double_interval != 0: + num_ca += 1 + if 38 % single_interval != 0: + num_ca += 1 + self.pulid_ca = nn.ModuleList([ + PerceiverAttentionCA().to(self.device, self.weight_dtype) for _ in range(num_ca) + ]) + + dit.pulid_ca = self.pulid_ca + dit.pulid_double_interval = double_interval + dit.pulid_single_interval = single_interval + + # preprocessors + # face align and parsing + self.face_helper = FaceRestoreHelper( + upscale_factor=1, + face_size=512, + crop_ratio=(1, 1), + det_model='retinaface_resnet50', + save_ext='png', + device=self.device, + ) + self.face_helper.face_parse = None + self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device) + # clip-vit backbone + model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True) + model = model.visual + self.clip_vision_model = model.to(self.device, dtype=self.weight_dtype) + eva_transform_mean = getattr(self.clip_vision_model, 'image_mean', OPENAI_DATASET_MEAN) + eva_transform_std = getattr(self.clip_vision_model, 'image_std', OPENAI_DATASET_STD) + if not isinstance(eva_transform_mean, (list, tuple)): + eva_transform_mean = (eva_transform_mean,) * 3 + if not isinstance(eva_transform_std, (list, tuple)): + eva_transform_std = (eva_transform_std,) * 3 + self.eva_transform_mean = eva_transform_mean + self.eva_transform_std = eva_transform_std + # antelopev2 + snapshot_download('DIAMONIK7777/antelopev2', local_dir='models/antelopev2') + providers = ['CPUExecutionProvider'] if onnx_provider == 'cpu' \ + else ['CUDAExecutionProvider', 'CPUExecutionProvider'] + self.app = FaceAnalysis(name='antelopev2', root='.', providers=providers) + self.app.prepare(ctx_id=0, det_size=(640, 640)) + self.handler_ante = insightface.model_zoo.get_model('models/antelopev2/glintr100.onnx', + providers=providers) + self.handler_ante.prepare(ctx_id=0) + + gc.collect() + torch.cuda.empty_cache() + + # self.load_pretrain() + + # other configs + self.debug_img_list = [] + + def components_to_device(self, device): + # everything but pulid_ca + 
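+        # (self.pulid_ca is attached to the FLUX DiT in __init__, so those
+        # PerceiverAttentionCA blocks move together with the transformer's
+        # own weights rather than with these preprocessors)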
self.face_helper.face_det = self.face_helper.face_det.to(device) + self.face_helper.face_parse = self.face_helper.face_parse.to(device) + self.clip_vision_model = self.clip_vision_model.to(device) + self.pulid_encoder = self.pulid_encoder.to(device) + + def load_pretrain(self, pretrain_path=None, version='v0.9.0'): + hf_hub_download('guozinan/PuLID', f'pulid_flux_{version}.safetensors', local_dir='models') + ckpt_path = f'models/pulid_flux_{version}.safetensors' + if pretrain_path is not None: + ckpt_path = pretrain_path + state_dict = load_file(ckpt_path) + state_dict_dict = {} + for k, v in state_dict.items(): + module = k.split('.')[0] + state_dict_dict.setdefault(module, {}) + new_k = k[len(module) + 1:] + state_dict_dict[module][new_k] = v + + for module in state_dict_dict: + print(f'loading from {module}') + getattr(self, module).load_state_dict(state_dict_dict[module], strict=True) + + del state_dict + del state_dict_dict + + def to_gray(self, img): + x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3] + x = x.repeat(1, 3, 1, 1) + return x + + @torch.no_grad() + def get_id_embedding(self, image, cal_uncond=False): + """ + Args: + image: numpy rgb image, range [0, 255] + """ + self.face_helper.clean_all() + self.debug_img_list = [] + image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + # get antelopev2 embedding + face_info = self.app.get(image_bgr) + if len(face_info) > 0: + face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[ + -1 + ] # only use the maximum face + id_ante_embedding = face_info['embedding'] + self.debug_img_list.append( + image[ + int(face_info['bbox'][1]) : int(face_info['bbox'][3]), + int(face_info['bbox'][0]) : int(face_info['bbox'][2]), + ] + ) + else: + id_ante_embedding = None + + # using facexlib to detect and align face + self.face_helper.read_image(image_bgr) + self.face_helper.get_face_landmarks_5(only_center_face=True) + self.face_helper.align_warp_face() + if len(self.face_helper.cropped_faces) == 0: + raise RuntimeError('facexlib align face fail') + align_face = self.face_helper.cropped_faces[0] + # incase insightface didn't detect face + if id_ante_embedding is None: + print('fail to detect face using insightface, extract embedding on align face') + id_ante_embedding = self.handler_ante.get_feat(align_face) + + id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device, self.weight_dtype) + if id_ante_embedding.ndim == 1: + id_ante_embedding = id_ante_embedding.unsqueeze(0) + + # parsing + input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0 + input = input.to(self.device) + parsing_out = self.face_helper.face_parse(normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[0] + parsing_out = parsing_out.argmax(dim=1, keepdim=True) + bg_label = [0, 16, 18, 7, 8, 9, 14, 15] + bg = sum(parsing_out == i for i in bg_label).bool() + white_image = torch.ones_like(input) + # only keep the face features + face_features_image = torch.where(bg, white_image, self.to_gray(input)) + self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False)) + + # transform img before sending to eva-clip-vit + face_features_image = resize(face_features_image, self.clip_vision_model.image_size, InterpolationMode.BICUBIC) + face_features_image = normalize(face_features_image, self.eva_transform_mean, self.eva_transform_std) + id_cond_vit, id_vit_hidden = self.clip_vision_model( + face_features_image.to(self.weight_dtype), return_all_features=False, return_hidden=True, 
shuffle=False + ) + id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True) + id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm) + + id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1) + + id_embedding = self.pulid_encoder(id_cond, id_vit_hidden) + + if not cal_uncond: + return id_embedding, None + + id_uncond = torch.zeros_like(id_cond) + id_vit_hidden_uncond = [] + for layer_idx in range(0, len(id_vit_hidden)): + id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden[layer_idx])) + uncond_id_embedding = self.pulid_encoder(id_uncond, id_vit_hidden_uncond) + + return id_embedding, uncond_id_embedding diff --git a/pulid/pipeline_v1_1.py b/pulid/pipeline_v1_1.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca68cf11f509a2168c6a075a56f73e1ecf0a074 --- /dev/null +++ b/pulid/pipeline_v1_1.py @@ -0,0 +1,324 @@ +import gc + +import cv2 +import insightface +import numpy as np +import torch +import torch.nn as nn +from basicsr.utils import img2tensor, tensor2img +from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline +from facexlib.parsing import init_parsing_model +from facexlib.utils.face_restoration_helper import FaceRestoreHelper + +from huggingface_hub import hf_hub_download, snapshot_download +from insightface.app import FaceAnalysis +from safetensors.torch import load_file +from torchvision.transforms import InterpolationMode +from torchvision.transforms.functional import normalize, resize + +from eva_clip import create_model_and_transforms +from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD +from pulid.encoders_transformer import IDFormer +from pulid.utils import is_torch2_available, sample_dpmpp_2m, sample_dpmpp_sde + +if is_torch2_available(): + from pulid.attention_processor import AttnProcessor2_0 as AttnProcessor + from pulid.attention_processor import IDAttnProcessor2_0 as IDAttnProcessor +else: + from pulid.attention_processor import AttnProcessor, IDAttnProcessor + + +class PuLIDPipeline: + def __init__(self, sdxl_repo='Lykon/dreamshaper-xl-lightning', sampler='dpmpp_sde', *args, **kwargs): + super().__init__() + self.device = 'cuda' + + # load base model + self.pipe = StableDiffusionXLPipeline.from_pretrained(sdxl_repo, torch_dtype=torch.float16, variant="fp16").to( + self.device + ) + self.pipe.watermark = None + self.hack_unet_attn_layers(self.pipe.unet) + + # scheduler + self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config) + + # ID adapters + self.id_adapter = IDFormer().to(self.device) + + # preprocessors + # face align and parsing + self.face_helper = FaceRestoreHelper( + upscale_factor=1, + face_size=512, + crop_ratio=(1, 1), + det_model='retinaface_resnet50', + save_ext='png', + device=self.device, + ) + self.face_helper.face_parse = None + self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device) + # clip-vit backbone + model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True) + model = model.visual + self.clip_vision_model = model.to(self.device) + eva_transform_mean = getattr(self.clip_vision_model, 'image_mean', OPENAI_DATASET_MEAN) + eva_transform_std = getattr(self.clip_vision_model, 'image_std', OPENAI_DATASET_STD) + if not isinstance(eva_transform_mean, (list, tuple)): + eva_transform_mean = (eva_transform_mean,) * 3 + if not isinstance(eva_transform_std, (list, tuple)): + eva_transform_std = (eva_transform_std,) * 3 + self.eva_transform_mean = eva_transform_mean + 
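+        # cache the EVA-CLIP normalisation stats (broadcast to one value per
+        # RGB channel above) for the face-crop preprocessing in get_id_embedding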
self.eva_transform_std = eva_transform_std + # antelopev2 + snapshot_download('DIAMONIK7777/antelopev2', local_dir='models/antelopev2') + self.app = FaceAnalysis( + name='antelopev2', root='.', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] + ) + self.app.prepare(ctx_id=0, det_size=(640, 640)) + self.handler_ante = insightface.model_zoo.get_model('models/antelopev2/glintr100.onnx') + self.handler_ante.prepare(ctx_id=0) + + gc.collect() + torch.cuda.empty_cache() + + self.load_pretrain() + + # other configs + self.debug_img_list = [] + + # karras schedule related code, borrow from lllyasviel/Omost + linear_start = 0.00085 + linear_end = 0.012 + timesteps = 1000 + betas = torch.linspace(linear_start**0.5, linear_end**0.5, timesteps, dtype=torch.float64) ** 2 + alphas = 1.0 - betas + alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) + + self.sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 + self.log_sigmas = self.sigmas.log() + self.sigma_data = 1.0 + + if sampler == 'dpmpp_sde': + self.sampler = sample_dpmpp_sde + elif sampler == 'dpmpp_2m': + self.sampler = sample_dpmpp_2m + else: + raise NotImplementedError(f'sampler {sampler} not implemented') + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device) + + def get_sigmas_karras(self, n, rho=7.0): + ramp = torch.linspace(0, 1, n) + min_inv_rho = self.sigma_min ** (1 / rho) + max_inv_rho = self.sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return torch.cat([sigmas, sigmas.new_zeros([1])]) + + def hack_unet_attn_layers(self, unet): + id_adapter_attn_procs = {} + for name, _ in unet.attn_processors.items(): + cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(unet.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = unet.config.block_out_channels[block_id] + if cross_attention_dim is not None: + id_adapter_attn_procs[name] = IDAttnProcessor( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ).to(unet.device) + else: + id_adapter_attn_procs[name] = AttnProcessor() + unet.set_attn_processor(id_adapter_attn_procs) + self.id_adapter_attn_layers = nn.ModuleList(unet.attn_processors.values()) + + def load_pretrain(self): + hf_hub_download('guozinan/PuLID', 'pulid_v1.1.safetensors', local_dir='models') + ckpt_path = 'models/pulid_v1.1.safetensors' + state_dict = load_file(ckpt_path) + state_dict_dict = {} + for k, v in state_dict.items(): + module = k.split('.')[0] + state_dict_dict.setdefault(module, {}) + new_k = k[len(module) + 1 :] + state_dict_dict[module][new_k] = v + + for module in state_dict_dict: + print(f'loading from {module}') + getattr(self, module).load_state_dict(state_dict_dict[module], strict=True) + + def to_gray(self, img): + x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3] + x = x.repeat(1, 3, 1, 1) + return x + + def get_id_embedding(self, image_list): + """ + Args: + image in image_list: numpy rgb image, range 
[0, 255] + """ + id_cond_list = [] + id_vit_hidden_list = [] + for ii, image in enumerate(image_list): + self.face_helper.clean_all() + image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + # get antelopev2 embedding + face_info = self.app.get(image_bgr) + if len(face_info) > 0: + face_info = sorted( + face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]) + )[ + -1 + ] # only use the maximum face + id_ante_embedding = face_info['embedding'] + self.debug_img_list.append( + image[ + int(face_info['bbox'][1]) : int(face_info['bbox'][3]), + int(face_info['bbox'][0]) : int(face_info['bbox'][2]), + ] + ) + else: + id_ante_embedding = None + + # using facexlib to detect and align face + self.face_helper.read_image(image_bgr) + self.face_helper.get_face_landmarks_5(only_center_face=True) + self.face_helper.align_warp_face() + if len(self.face_helper.cropped_faces) == 0: + raise RuntimeError('facexlib align face fail') + align_face = self.face_helper.cropped_faces[0] + # incase insightface didn't detect face + if id_ante_embedding is None: + print('fail to detect face using insightface, extract embedding on align face') + id_ante_embedding = self.handler_ante.get_feat(align_face) + + id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device) + if id_ante_embedding.ndim == 1: + id_ante_embedding = id_ante_embedding.unsqueeze(0) + + # parsing + input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0 + input = input.to(self.device) + parsing_out = self.face_helper.face_parse(normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[ + 0 + ] + parsing_out = parsing_out.argmax(dim=1, keepdim=True) + bg_label = [0, 16, 18, 7, 8, 9, 14, 15] + bg = sum(parsing_out == i for i in bg_label).bool() + white_image = torch.ones_like(input) + # only keep the face features + face_features_image = torch.where(bg, white_image, self.to_gray(input)) + self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False)) + + # transform img before sending to eva-clip-vit + face_features_image = resize( + face_features_image, self.clip_vision_model.image_size, InterpolationMode.BICUBIC + ) + face_features_image = normalize(face_features_image, self.eva_transform_mean, self.eva_transform_std) + id_cond_vit, id_vit_hidden = self.clip_vision_model( + face_features_image, return_all_features=False, return_hidden=True, shuffle=False + ) + id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True) + id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm) + + id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1) + + id_cond_list.append(id_cond) + id_vit_hidden_list.append(id_vit_hidden) + + id_uncond = torch.zeros_like(id_cond_list[0]) + id_vit_hidden_uncond = [] + for layer_idx in range(0, len(id_vit_hidden_list[0])): + id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden_list[0][layer_idx])) + + id_cond = torch.stack(id_cond_list, dim=1) + id_vit_hidden = id_vit_hidden_list[0] + for i in range(1, len(image_list)): + for j, x in enumerate(id_vit_hidden_list[i]): + id_vit_hidden[j] = torch.cat([id_vit_hidden[j], x], dim=1) + id_embedding = self.id_adapter(id_cond, id_vit_hidden) + uncond_id_embedding = self.id_adapter(id_uncond, id_vit_hidden_uncond) + + # return id_embedding + return uncond_id_embedding, id_embedding + + def __call__(self, x, sigma, **extra_args): + x_ddim_space = x / (sigma[:, None, None, None] ** 2 + self.sigma_data**2) ** 0.5 + t = self.timestep(sigma) + cfg_scale = extra_args['cfg_scale'] + eps_positive = 
self.pipe.unet(x_ddim_space, t, return_dict=False, **extra_args['positive'])[0] + eps_negative = self.pipe.unet(x_ddim_space, t, return_dict=False, **extra_args['negative'])[0] + noise_pred = eps_negative + cfg_scale * (eps_positive - eps_negative) + return x - noise_pred * sigma[:, None, None, None] + + def inference( + self, + prompt, + size, + prompt_n='', + id_embedding=None, + uncond_id_embedding=None, + id_scale=1.0, + guidance_scale=1.2, + steps=4, + seed=-1, + ): + + # sigmas + sigmas = self.get_sigmas_karras(steps).to(self.device) + + # latents + noise = torch.randn((size[0], 4, size[1] // 8, size[2] // 8), device="cpu", generator=torch.manual_seed(seed)) + noise = noise.to(dtype=self.pipe.unet.dtype, device=self.device) + latents = noise * sigmas[0].to(noise) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + prompt=prompt, + negative_prompt=prompt_n, + ) + + add_time_ids = list((size[1], size[2]) + (0, 0) + (size[1], size[2])) + add_time_ids = torch.tensor([add_time_ids], dtype=self.pipe.unet.dtype, device=self.device) + add_neg_time_ids = add_time_ids.clone() + + sampler_kwargs = dict( + cfg_scale=guidance_scale, + positive=dict( + encoder_hidden_states=prompt_embeds, + added_cond_kwargs={"text_embeds": pooled_prompt_embeds, "time_ids": add_time_ids}, + cross_attention_kwargs={'id_embedding': id_embedding, 'id_scale': id_scale}, + ), + negative=dict( + encoder_hidden_states=negative_prompt_embeds, + added_cond_kwargs={"text_embeds": negative_pooled_prompt_embeds, "time_ids": add_neg_time_ids}, + cross_attention_kwargs={'id_embedding': uncond_id_embedding, 'id_scale': id_scale}, + ), + ) + + latents = self.sampler(self, latents, sigmas, extra_args=sampler_kwargs, disable=False) + latents = latents.to(dtype=self.pipe.vae.dtype, device=self.device) / self.pipe.vae.config.scaling_factor + images = self.pipe.vae.decode(latents).sample + images = self.pipe.image_processor.postprocess(images, output_type='pil') + + return images diff --git a/pulid/utils.py b/pulid/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..14b509979894785433abc12321f49d87faaa7311 --- /dev/null +++ b/pulid/utils.py @@ -0,0 +1,337 @@ +import importlib +import math +import os +import random + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +import torchsde +from torchvision.utils import make_grid +from tqdm.auto import trange +from transformers import PretrainedConfig + + +def seed_everything(seed): + os.environ["PL_GLOBAL_SEED"] = str(seed) + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def is_torch2_available(): + return hasattr(F, "scaled_dot_product_attention") + + +def instantiate_from_config(config): + if "target" not in config: + if config == '__is_first_stage__' or config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", {})) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +def drop_seq_token(seq, drop_rate=0.5): + idx = torch.randperm(seq.size(1)) + num_keep_tokens = int(len(idx) * (1 - drop_rate)) + idx = idx[:num_keep_tokens] + seq = seq[:, idx] + return seq + + +def 
import_model_class_from_model_name_or_path( + pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" +): + text_encoder_config = PretrainedConfig.from_pretrained( + pretrained_model_name_or_path, subfolder=subfolder, revision=revision + ) + model_class = text_encoder_config.architectures[0] + + if model_class == "CLIPTextModel": + from transformers import CLIPTextModel + + return CLIPTextModel + elif model_class == "CLIPTextModelWithProjection": # noqa RET505 + from transformers import CLIPTextModelWithProjection + + return CLIPTextModelWithProjection + else: + raise ValueError(f"{model_class} is not supported.") + + +def resize_numpy_image_long(image, resize_long_edge=768): + h, w = image.shape[:2] + if max(h, w) <= resize_long_edge: + return image + k = resize_long_edge / max(h, w) + h = int(h * k) + w = int(w * k) + image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4) + return image + + +# from basicsr +def img2tensor(imgs, bgr2rgb=True, float32=True): + """Numpy array to tensor. + + Args: + imgs (list[ndarray] | ndarray): Input images. + bgr2rgb (bool): Whether to change bgr to rgb. + float32 (bool): Whether to change to float32. + + Returns: + list[tensor] | tensor: Tensor images. If returned results only have + one element, just return tensor. + """ + + def _totensor(img, bgr2rgb, float32): + if img.shape[2] == 3 and bgr2rgb: + if img.dtype == 'float64': + img = img.astype('float32') + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = torch.from_numpy(img.transpose(2, 0, 1)) + if float32: + img = img.float() + return img + + if isinstance(imgs, list): + return [_totensor(img, bgr2rgb, float32) for img in imgs] + return _totensor(imgs, bgr2rgb, float32) + + +def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)): + """Convert torch Tensors into image numpy arrays. + + After clamping to [min, max], values will be normalized to [0, 1]. + + Args: + tensor (Tensor or list[Tensor]): Accept shapes: + 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); + 2) 3D Tensor of shape (3/1 x H x W); + 3) 2D Tensor of shape (H x W). + Tensor channel should be in RGB order. + rgb2bgr (bool): Whether to change rgb to bgr. + out_type (numpy type): output types. If ``np.uint8``, transform outputs + to uint8 type with range [0, 255]; otherwise, float type with + range [0, 1]. Default: ``np.uint8``. + min_max (tuple[int]): min and max values for clamp. + + Returns: + (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of + shape (H x W). The channel order is BGR. 
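+
+    Example (illustrative):
+
+        img = tensor2img(torch.rand(1, 3, 64, 64))  # uint8 (64, 64, 3) BGR array
+        cv2.imwrite('debug.png', img)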
+ """ + if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') + + if torch.is_tensor(tensor): + tensor = [tensor] + result = [] + for _tensor in tensor: + _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max) + _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0]) + + n_dim = _tensor.dim() + if n_dim == 4: + img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy() + img_np = img_np.transpose(1, 2, 0) + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 3: + img_np = _tensor.numpy() + img_np = img_np.transpose(1, 2, 0) + if img_np.shape[2] == 1: # gray image + img_np = np.squeeze(img_np, axis=2) + else: + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 2: + img_np = _tensor.numpy() + else: + raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}') + if out_type == np.uint8: + # Unlike MATLAB, numpy.unit8() WILL NOT round by default. + img_np = (img_np * 255.0).round() + img_np = img_np.astype(out_type) + result.append(img_np) + if len(result) == 1: + result = result[0] + return result + + +# We didn't find a correct configuration to make the diffusers scheduler align with dpm++2m (karras) in ComfyUI, +# so we copied the ComfyUI code directly. + + +def append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + if dims_to_append < 0: + raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') + expanded = x[(...,) + (None,) * dims_to_append] + # MPS will get inf values if it tries to index into the new axes, but detaching fixes this. 
+ # https://github.com/pytorch/pytorch/issues/84364 + return expanded.detach().clone() if expanded.device.type == 'mps' else expanded + + +def to_d(x, sigma, denoised): + """Converts a denoiser output to a Karras ODE derivative.""" + return (x - denoised) / append_dims(sigma, x.ndim) + + +def get_ancestral_step(sigma_from, sigma_to, eta=1.0): + """Calculates the noise level (sigma_down) to step down to and the amount + of noise to add (sigma_up) when doing an ancestral sampling step.""" + if not eta: + return sigma_to, 0.0 + sigma_up = min(sigma_to, eta * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5) + sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5 + return sigma_down, sigma_up + + +class BatchedBrownianTree: + """A wrapper around torchsde.BrownianTree that enables batches of entropy.""" + + def __init__(self, x, t0, t1, seed=None, **kwargs): + self.cpu_tree = True + if "cpu" in kwargs: + self.cpu_tree = kwargs.pop("cpu") + t0, t1, self.sign = self.sort(t0, t1) + w0 = kwargs.get('w0', torch.zeros_like(x)) + if seed is None: + seed = torch.randint(0, 2**63 - 1, []).item() + self.batched = True + try: + assert len(seed) == x.shape[0] + w0 = w0[0] + except TypeError: + seed = [seed] + self.batched = False + if self.cpu_tree: + self.trees = [torchsde.BrownianTree(t0.cpu(), w0.cpu(), t1.cpu(), entropy=s, **kwargs) for s in seed] + else: + self.trees = [torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed] + + @staticmethod + def sort(a, b): + return (a, b, 1) if a < b else (b, a, -1) + + def __call__(self, t0, t1): + t0, t1, sign = self.sort(t0, t1) + if self.cpu_tree: + w = torch.stack( + [tree(t0.cpu().float(), t1.cpu().float()).to(t0.dtype).to(t0.device) for tree in self.trees] + ) * (self.sign * sign) + else: + w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign) + + return w if self.batched else w[0] + + +class BrownianTreeNoiseSampler: + """A noise sampler backed by a torchsde.BrownianTree. + + Args: + x (Tensor): The tensor whose shape, device and dtype to use to generate + random samples. + sigma_min (float): The low end of the valid interval. + sigma_max (float): The high end of the valid interval. + seed (int or List[int]): The random seed. If a list of seeds is + supplied instead of a single integer, then the noise sampler will + use one BrownianTree per batch item, each with its own seed. + transform (callable): A function that maps sigma to the sampler's + internal timestep. 
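+
+    Example (illustrative sketch; `x` is the latent tensor and the sigmas come
+    from the sampling schedule, mirroring the usage in sample_dpmpp_sde below):
+
+        noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=42)
+        noise = noise_sampler(sigmas[i], sigmas[i + 1])  # same shape/device as x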
+ """ + + def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x, cpu=False): + self.transform = transform + t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max)) + self.tree = BatchedBrownianTree(x, t0, t1, seed, cpu=cpu) + + def __call__(self, sigma, sigma_next): + t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next)) + return self.tree(t0, t1) / (t1 - t0).abs().sqrt() + + +@torch.no_grad() +def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=None): + """DPM-Solver++(2M).""" + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + sigma_fn = lambda t: t.neg().exp() + t_fn = lambda sigma: sigma.log().neg() + old_denoised = None + + for i in trange(len(sigmas) - 1, disable=disable): + denoised = model(x, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1]) + h = t_next - t + if old_denoised is None or sigmas[i + 1] == 0: + x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised + else: + h_last = t - t_fn(sigmas[i - 1]) + r = h_last / h + denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised + x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d + old_denoised = denoised + return x + + +@torch.no_grad() +def sample_dpmpp_sde( + model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=1 / 2 +): + """DPM-Solver++ (stochastic).""" + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + seed = extra_args.get("seed", None) + noise_sampler = ( + BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=False) + if noise_sampler is None + else noise_sampler + ) + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + sigma_fn = lambda t: t.neg().exp() + t_fn = lambda sigma: sigma.log().neg() + + for i in trange(len(sigmas) - 1, disable=disable): + denoised = model(x, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + if sigmas[i + 1] == 0: + # Euler method + d = to_d(x, sigmas[i], denoised) + dt = sigmas[i + 1] - sigmas[i] + x = x + d * dt + else: + # DPM-Solver++ + t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1]) + h = t_next - t + s = t + h * r + fac = 1 / (2 * r) + + # Step 1 + sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta) + s_ = t_fn(sd) + x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised + x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su + denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args) + + # Step 2 + sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta) + t_next_ = t_fn(sd) + denoised_d = (1 - fac) * denoised + fac * denoised_2 + x = (sigma_fn(t_next_) / sigma_fn(t)) * x - (t - t_next_).expm1() * denoised_d + x = x + noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * su + return x diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6259d03fa8b691d24927a46f0da828c572dc7dff --- /dev/null +++ b/requirements.txt @@ -0,0 +1,34 @@ +torch==2.5.1 +timm==1.0.11 +deepspeed==0.14.4 +accelerate==0.34.2 +optimum-quanto +datasets +omegaconf +diffusers +transformers==4.43.3 +sentencepiece +opencv-python 
+matplotlib
+onnxruntime-gpu==1.17
+ftfy
+xformers==0.0.28.post3
+peft
+open_clip_torch
+tensorboard
+einops
+jupyter
+ipykernel
+ipywidgets
+controlnet_aux
+insightface
+facexlib
+torchsde
+numpy<2
+jsonlines
+wandb
+gradio
+spaces
+# git+https://github.com/openai/CLIP.git
\ No newline at end of file
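
Usage sketch for the modules added above (illustrative only; assumes a CUDA GPU, that the `pulid` package is importable, and that `face.jpg` / `result.png` are hypothetical local paths):

    import numpy as np
    from PIL import Image

    from pulid.pipeline_v1_1 import PuLIDPipeline
    from pulid.utils import resize_numpy_image_long, seed_everything

    # downloads the SDXL-Lightning base model and pulid_v1.1.safetensors
    pipe = PuLIDPipeline()
    seed_everything(42)

    image = np.array(Image.open('face.jpg').convert('RGB'))
    image = resize_numpy_image_long(image, 1024)

    # get_id_embedding returns (uncond_id_embedding, id_embedding)
    uncond_id, id_emb = pipe.get_id_embedding([image])
    images = pipe.inference(
        'portrait, cinematic lighting, best quality',
        (1, 1024, 1024),                 # (num_images, height, width)
        id_embedding=id_emb,
        uncond_id_embedding=uncond_id,
        id_scale=0.8,
        guidance_scale=1.2,
        steps=4,
        seed=42,
    )
    images[0].save('result.png')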