AisingioroHao0 committed
Commit 323d67d · 1 Parent(s): a09fea3

support pulid

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. app.py +223 -0
  2. {ip_adapter_art → artistic_portrait}/__init__.py +0 -0
  3. artistic_portrait/pipeline.py +886 -0
  4. artistic_portrait/pulid_encoder.py +207 -0
  5. artistic_portrait_gen.ipynb +130 -0
  6. ip_adapter_art/utils/csd_clip.py → csd_clip/__init__.py +28 -1
  7. datasets/test/id_dataset/hinton.jpg +3 -0
  8. datasets/test/id_dataset/lecun.jpg +3 -0
  9. datasets/test/id_dataset/lifeifei.jpg +3 -0
  10. datasets/test/id_dataset/liuyifei.jpg +3 -0
  11. datasets/test/id_dataset/rihanna.jpg +3 -0
  12. datasets/test/pose.jpg +3 -0
  13. datasets/test/style_dataset/Abstract D'Oyley.jpg +3 -0
  14. datasets/test/style_dataset/Adam Zyglis.jpg +3 -0
  15. README.assets/example.jpg → datasets/test/style_dataset/Amigurumi.jpg +0 -0
  16. datasets/test/style_dataset/Diffused lighting.jpg +3 -0
  17. datasets/test/style_dataset/Shirley Hughes.jpg +3 -0
  18. datasets/test/style_dataset/Winter.jpg +3 -0
  19. eva_clip/__init__.py +11 -0
  20. eva_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  21. eva_clip/constants.py +2 -0
  22. eva_clip/eva_vit_model.py +548 -0
  23. eva_clip/factory.py +517 -0
  24. eva_clip/hf_configs.py +57 -0
  25. eva_clip/hf_model.py +248 -0
  26. eva_clip/loss.py +138 -0
  27. eva_clip/model.py +439 -0
  28. eva_clip/model_configs/EVA01-CLIP-B-16.json +19 -0
  29. eva_clip/model_configs/EVA01-CLIP-g-14-plus.json +24 -0
  30. eva_clip/model_configs/EVA01-CLIP-g-14.json +24 -0
  31. eva_clip/model_configs/EVA02-CLIP-B-16.json +29 -0
  32. eva_clip/model_configs/EVA02-CLIP-L-14-336.json +29 -0
  33. eva_clip/model_configs/EVA02-CLIP-L-14.json +29 -0
  34. eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json +25 -0
  35. eva_clip/model_configs/EVA02-CLIP-bigE-14.json +25 -0
  36. eva_clip/modified_resnet.py +181 -0
  37. eva_clip/openai.py +144 -0
  38. eva_clip/pretrained.py +332 -0
  39. eva_clip/rope.py +137 -0
  40. eva_clip/timm_model.py +122 -0
  41. eva_clip/tokenizer.py +201 -0
  42. eva_clip/transform.py +103 -0
  43. eva_clip/transformer.py +737 -0
  44. eva_clip/utils.py +326 -0
  45. ip_adapter_art/utils/ip_adapter.py +0 -72
  46. {ip_adapter_art/utils → ip_adapter_diffusers}/__init__.py +0 -0
  47. ip_adapter_diffusers/custom_cross_attention_processor.py +297 -0
  48. ip_adapter_diffusers/custom_ip_adapter.py +58 -0
  49. ip_adapter_diffusers/ip_adapter.py +821 -0
  50. ip_adapter_diffusers/ip_adapter_extra_attn.py +250 -0
app.py ADDED
@@ -0,0 +1,223 @@
+ import os
+
+ import torch
+ import spaces
+
+ import gradio as gr
+ import torch
+ from artistic_portrait.pipeline import ArtisticPortraitXLPipeline
+ from diffusers import ControlNetModel, DPMSolverMultistepScheduler
+ from ip_adapter_diffusers.ip_adapter import *
+
+ from huggingface_hub import hf_hub_download
+
+ style_adapter_path = "models/ip_adapter_art_sdxl_512.pth"
+ id_adapter_path = "models/pulid_adapter_diffusers_1.1.pth"
+ if not os.path.exists("models/csd_clip.pth"):
+     hf_hub_download(
+         repo_id="AisingioroHao0/IP-Adapter-Art",
+         filename="csd_clip.pth",
+         local_dir="models",
+     )
+ if not os.path.exists(style_adapter_path):
+     hf_hub_download(
+         repo_id="AisingioroHao0/IP-Adapter-Art",
+         filename="ip_adapter_art_sdxl_512.pth",
+         local_dir="models",
+     )
+ if not os.path.exists(id_adapter_path):
+     hf_hub_download(
+         repo_id="AisingioroHao0/IP-Adapter-Art",
+         filename="pulid_adapter_diffusers_1.1.pth",
+         local_dir="models",
+     )
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ sdxl_repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
+
+ torch_dtype = torch.float16 if str(device).__contains__("cuda") else torch.float32
+
+ # Load pretrained models.
+ print("Initializing pipeline...")
+ controlnet = ControlNetModel.from_pretrained(
+     "xinsir/controlnet-openpose-sdxl-1.0",
+     torch_dtype=torch_dtype,
+ ).to(device)
+ pipe = ArtisticPortraitXLPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0",
+     controlnet=controlnet,
+     safety_checker=None,
+     torch_dtype=torch_dtype,
+     style_adapter_path=style_adapter_path,
+     id_adapter_path=id_adapter_path,
+     device=device,
+ ).to(device)
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+     pipe.scheduler.config, timestep_spacing="trailing"
+ )
+ load_ip_adapter(
+     pipe.controlnet,
+     "models/ip_adapter_art_sdxl_512.pth",
+ )
+
+ example_inputs = [
+     [
+         "datasets/test/style_dataset/Abstract D'Oyley.jpg",
+         "datasets/test/id_dataset/lifeifei.jpg",
+     ],
+     [
+         "datasets/test/style_dataset/Adam Zyglis.jpg",
+         "datasets/test/id_dataset/lecun.jpg",
+     ],
+     [
+         "datasets/test/style_dataset/Diffused lighting.jpg",
+         "datasets/test/id_dataset/liuyifei.jpg",
+     ],
+     [
+         "datasets/test/style_dataset/Shirley Hughes.jpg",
+         "datasets/test/id_dataset/rihanna.jpg",
+     ],
+     [
+         "datasets/test/style_dataset/Winter.jpg",
+         "datasets/test/id_dataset/hinton.jpg",
+     ],
+ ]
+
+
+ @spaces.GPU(enable_queue=True)
+ def generation(
+     style_image=None,
+     id_image=None,
+     pose_image=None,
+     prompt="portrait, solo, looking at viewer, best quality, masterpiece",
+     negative_prompt="flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality,artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed",
+     num_inference_steps=20,
+     guidance_scale=7.0,
+     style_scale=1.0,
+     id_scale=1.0,
+     controlnet_scale=0.9,
+     seed=42,
+     height=1024,
+     width=1024,
+     artify_contorlnet_scale=0.0,
+ ):
+     set_ip_adapter_scale(pipe.controlnet, artify_contorlnet_scale)
+     result = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         control_image=pose_image,
+         controlnet_conditioning_scale=controlnet_scale,
+         width=width,
+         height=height,
+         num_inference_steps=num_inference_steps,
+         guidance_scale=guidance_scale,
+         style_image=style_image,
+         id_image=id_image,
+         generator=torch.Generator(device).manual_seed(seed),
+         id_scale=id_scale,
+         style_scale=style_scale,
+     ).images[0]
+
+     return result
+
+
+ with gr.Blocks(delete_cache=(3600, 3600)) as demo:
+     gr.Markdown(
+         """
+ # Artistic Portrait Gen 0.9: Generate Customized Artistic Portrait through Style Reference Images
+
+ **Implementation based on [Art-Adapter](https://github.com/aihao2000/IP-Adapter-Art), [PuLID-Adapter](https://github.com/ToTheBeginning/PuLID), and [Instant Style](https://github.com/instantX-research/InstantStyle).**
+
+ ## Basic usage:
+ - Stylized Portrait Generation: Upload the style reference image and ID reference image, and click "Generation" to generate the artistic portrait directly.
+ - Text-guided Stylization Generation: Set ID Scale to 0, modify prompt, and then try text-guided stylized image generation through **Art-Adapter**. **(Note that ID image cannot be empty in the current version.)**
+
+ _If the style similarity is low, try increasing the Artify ControlNet Scale, or set the Controlnet Scale to 0._
+
+ ## News
+
+ - 2025.3.24: We released Artistic Portrait Gen 0.9.
+ """
+     )
+     with gr.Row():
+         with gr.Column():
+
+             with gr.Row():
+                 style_image = gr.Image(
+                     label="Style Reference Image",
+                     type="pil",
+                 )
+                 id_image = gr.Image(
+                     label="ID Reference Image",
+                     type="pil",
+                 )
+                 pose_image = gr.Image(
+                     label="Pose Reference Image",
+                     type="pil",
+                     value="datasets/test/pose.jpg",
+                 )
+             with gr.Row():
+                 clear_btn = gr.ClearButton()
+                 generation_btn = gr.Button("Generation")
+             with gr.Row():
+                 id_scale = gr.Number(label="ID Scale", value=1.0, step=0.1)
+                 style_scale = gr.Number(label="Style Scale", value=1.0, step=0.1)
+                 controlnet_scale = gr.Number(
+                     label="ControlNet Scale", value=0.9, step=0.1
+                 )
+                 artify_contorlnet_scale = gr.Number(
+                     label="Artify ControlNet Scale", value=0.0, step=0.1
+                 )
+                 guidance_scale = gr.Number(label="CFG Scale", value=7.0, step=0.1)
+             with gr.Row():
+                 height = gr.Number(label="Height", step=1, maximum=1024, value=1024)
+                 width = gr.Number(label="Width", step=1, maximum=1024, value=1024)
+                 seed = gr.Number(label="Seed", value=42, step=1)
+                 num_inference_steps = gr.Number(label="Steps", value=20, step=1)
+             prompt = gr.Textbox(
+                 label="Prompt",
+                 value="portrait, solo, looking at viewer, best quality, masterpiece",
+             )
+             negative_prompt = gr.Textbox(
+                 label="Negative Prompt",
+                 value="flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality,artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed",
+             )
+
+         with gr.Column():
+             output = gr.Image(label="Result", type="pil")
+             with gr.Row():
+                 examples = gr.Examples(
+                     examples=example_inputs,
+                     inputs=[style_image, id_image],
+                     outputs=[
+                         output,
+                     ],
+                     fn=lambda x, y: None,
+                     cache_examples=False,
+                 )
+
+     clear_btn.add([style_image, id_image, pose_image, output])
+
+     generation_btn.click(
+         generation,
+         inputs=[
+             style_image,
+             id_image,
+             pose_image,
+             prompt,
+             negative_prompt,
+             num_inference_steps,
+             guidance_scale,
+             style_scale,
+             id_scale,
+             controlnet_scale,
+             seed,
+             height,
+             width,
+             artify_contorlnet_scale,
+         ],
+         outputs=[output],
+         api_name="artistic_portrait_gen",
+     )
+
+ demo.queue().launch(share=True)
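
Note: because the click handler above registers `api_name="artistic_portrait_gen"`, the demo can also be driven programmatically. The snippet below is a minimal sketch, not part of this commit; it assumes a recent `gradio_client` release, the Space id is a placeholder, the argument order mirrors the `inputs` list passed to `generation_btn.click`, and the return value is typically a local path to the generated image.

# Sketch only: call the endpoint registered above via gradio_client.
# "your-username/artistic-portrait-gen" is a hypothetical Space id.
from gradio_client import Client, handle_file

client = Client("your-username/artistic-portrait-gen")
result = client.predict(
    handle_file("datasets/test/style_dataset/Winter.jpg"),  # style_image
    handle_file("datasets/test/id_dataset/hinton.jpg"),     # id_image
    handle_file("datasets/test/pose.jpg"),                  # pose_image
    "portrait, solo, looking at viewer, best quality, masterpiece",  # prompt
    "lowres, worst quality, watermark",                     # negative_prompt
    20,    # num_inference_steps
    7.0,   # guidance_scale
    1.0,   # style_scale
    1.0,   # id_scale
    0.9,   # controlnet_scale
    42,    # seed
    1024,  # height
    1024,  # width
    0.0,   # artify_contorlnet_scale
    api_name="/artistic_portrait_gen",
)
print(result)  # usually a path to the downloaded result image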
{ip_adapter_art → artistic_portrait}/__init__.py RENAMED
File without changes
artistic_portrait/pipeline.py ADDED
@@ -0,0 +1,886 @@
1
+ from diffusers import StableDiffusionXLControlNetPipeline
2
+ from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl import *
3
+ from .pulid_encoder import PuLIDEncoder
4
+ from csd_clip import create_model_and_transforms as create_csd_clip_model_and_transforms
5
+ from csd_clip import CSD_CLIP
6
+ from ip_adapter_diffusers.ip_adapter import *
7
+ from transformers import CLIPVisionModelWithProjection
8
+
9
+
10
+ class ArtisticPortraitXLPipeline(StableDiffusionXLControlNetPipeline):
11
+ def __init__(
12
+ self,
13
+ vae: AutoencoderKL,
14
+ text_encoder: CLIPTextModel,
15
+ text_encoder_2: CLIPTextModelWithProjection,
16
+ tokenizer: CLIPTokenizer,
17
+ tokenizer_2: CLIPTokenizer,
18
+ unet: UNet2DConditionModel,
19
+ controlnet: Union[
20
+ ControlNetModel,
21
+ List[ControlNetModel],
22
+ Tuple[ControlNetModel],
23
+ MultiControlNetModel,
24
+ ],
25
+ scheduler: KarrasDiffusionSchedulers,
26
+ force_zeros_for_empty_prompt: bool = True,
27
+ add_watermarker: Optional[bool] = None,
28
+ feature_extractor: CLIPImageProcessor = None,
29
+ image_encoder: CLIPVisionModelWithProjection = None,
30
+ style_adapter_path=None,
31
+ id_adapter_path=None,
32
+ style_image_encoder_path="models/h94/IP-Adapter/sdxl_models/image_encoder",
33
+ device=None,
34
+ ):
35
+ super().__init__(
36
+ vae=vae,
37
+ text_encoder=text_encoder,
38
+ text_encoder_2=text_encoder_2,
39
+ tokenizer=tokenizer,
40
+ tokenizer_2=tokenizer_2,
41
+ unet=unet,
42
+ controlnet=controlnet,
43
+ scheduler=scheduler,
44
+ force_zeros_for_empty_prompt=force_zeros_for_empty_prompt,
45
+ add_watermarker=add_watermarker,
46
+ feature_extractor=feature_extractor,
47
+ image_encoder=image_encoder,
48
+ )
49
+ self.id_image_encoder = PuLIDEncoder(device=device)
50
+ if "art" in style_adapter_path:
51
+ self.style_image_encoder = create_csd_clip_model_and_transforms()[0]
52
+ else:
53
+ self.style_image_encoder = CLIPVisionModelWithProjection.from_pretrained(
54
+ style_image_encoder_path
55
+ )
56
+
57
+ self.style_image_processor = CLIPImageProcessor()
58
+ load_multi_ip_adapter(
59
+ self.unet,
60
+ paths=[style_adapter_path, id_adapter_path],
61
+ )
62
+ self.style_image_projection_layer = (
63
+ self.unet.encoder_hid_proj.image_projection_layers[0]
64
+ )
65
+ self.id_image_projection_layer = (
66
+ self.unet.encoder_hid_proj.image_projection_layers[1]
67
+ )
68
+
69
+ def load_style_adapter_to_controlnet(self, style_adapter_path):
70
+ load_ip_adapter(self.controlnet, style_adapter_path)
71
+
72
+ def get_id_hidden_states(self, image):
73
+ if not isinstance(image, list):
74
+ image = [image]
75
+ image = [
76
+ (
77
+ single_image
78
+ if isinstance(single_image, np.ndarray)
79
+ else np.array(single_image)
80
+ )
81
+ for single_image in image
82
+ ]
83
+ id_cond, id_vit_hidden, id_uncond, id_vit_hidden_uncond = (
84
+ self.id_image_encoder.get_id_embedding(image)
85
+ )
86
+
87
+ id_vit_hidden = [x.to(dtype=self.unet.dtype) for x in id_vit_hidden]
88
+ id_vit_hidden_uncond = [
89
+ x.to(dtype=self.unet.dtype) for x in id_vit_hidden_uncond
90
+ ]
91
+ uncond_id_embedding = self.id_image_projection_layer(
92
+ id_uncond.to(self.unet.device, self.unet.dtype),
93
+ id_vit_hidden_uncond,
94
+ )
95
+ id_embedding = self.id_image_projection_layer(
96
+ id_cond.to(self.unet.device, self.unet.dtype), id_vit_hidden
97
+ )
98
+ id_hidden_states = torch.concat([uncond_id_embedding, id_embedding], dim=0)
99
+ torch.cuda.empty_cache()
100
+ return id_hidden_states
101
+
102
+ def get_style_hidden_states(self, image):
103
+ if isinstance(self.style_image_encoder, CSD_CLIP):
104
+ self.style_image_encoder = self.style_image_encoder.to(
105
+ self._execution_device, dtype=torch.float32
106
+ )
107
+ style_pixel_values = self.style_image_processor.preprocess(
108
+ image, return_tensors="pt"
109
+ ).pixel_values
110
+ _, __, style_image_embeds = self.style_image_encoder(
111
+ style_pixel_values.to(self._execution_device, torch.float32)
112
+ )
113
+ style_image_embeds = torch.stack(
114
+ [
115
+ torch.zeros_like(style_image_embeds).to(self._execution_device),
116
+ style_image_embeds,
117
+ ]
118
+ ).to(self._execution_device, torch.float16)
119
+ style_ip_adapter_hidden_states = self.style_image_projection_layer(
120
+ style_image_embeds
121
+ )
122
+
123
+ elif isinstance(self.style_image_encoder, CLIPVisionModelWithProjection):
124
+ self.style_image_encoder = self.style_image_encoder.to(
125
+ self._execution_device, dtype=torch.float16
126
+ )
127
+ style_pixel_values = self.style_image_processor.preprocess(
128
+ image, return_tensors="pt"
129
+ ).pixel_values
130
+ style_image_embeds = self.style_image_encoder(
131
+ style_pixel_values.to(self._execution_device, torch.float16)
132
+ ).image_embeds
133
+ style_image_embeds = torch.stack(
134
+ [
135
+ torch.zeros_like(style_image_embeds).to(self._execution_device),
136
+ style_image_embeds,
137
+ ]
138
+ ).to(self._execution_device, torch.float16)
139
+ style_ip_adapter_hidden_states = self.style_image_projection_layer(
140
+ style_image_embeds
141
+ )
142
+
143
+ torch.cuda.empty_cache()
144
+ self.style_image_encoder = self.style_image_encoder.to("cpu")
145
+
146
+ return style_ip_adapter_hidden_states
147
+
148
+ def set_style_adapter_scale(self, style_adapter_scale):
149
+ for name, processor in self.unet.attn_processors.items():
150
+ if (
151
+ isinstance(processor, torch.nn.Module)
152
+ and "up_blocks.0.attentions.1" in name
153
+ ):
154
+ processor.scale = [style_adapter_scale, 0.0]
155
+
156
+ def set_id_adapter_scale(self, id_adapter_scale):
157
+ for name, processor in self.unet.attn_processors.items():
158
+ if (
159
+ isinstance(processor, torch.nn.Module)
160
+ and "up_blocks.0.attentions.1" not in name
161
+ ):
162
+ processor.scale = [0.0, id_adapter_scale]
163
+
164
+ @torch.no_grad()
165
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
166
+ def __call__(
167
+ self,
168
+ prompt: Union[str, List[str]] = None,
169
+ prompt_2: Optional[Union[str, List[str]]] = None,
170
+ control_image: PipelineImageInput = None,
171
+ style_image: PipelineImageInput = None,
172
+ id_image: PipelineImageInput = None,
173
+ height: Optional[int] = None,
174
+ width: Optional[int] = None,
175
+ num_inference_steps: int = 50,
176
+ timesteps: List[int] = None,
177
+ sigmas: List[float] = None,
178
+ denoising_end: Optional[float] = None,
179
+ guidance_scale: float = 5.0,
180
+ id_adapter_scale=1.0,
181
+ style_adapter_scale=1.0,
182
+ negative_prompt: Optional[Union[str, List[str]]] = None,
183
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
184
+ num_images_per_prompt: Optional[int] = 1,
185
+ eta: float = 0.0,
186
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
187
+ latents: Optional[torch.Tensor] = None,
188
+ prompt_embeds: Optional[torch.Tensor] = None,
189
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
190
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
191
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
192
+ ip_adapter_image: Optional[PipelineImageInput] = None,
193
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
194
+ output_type: Optional[str] = "pil",
195
+ return_dict: bool = True,
196
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
197
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
198
+ guess_mode: bool = False,
199
+ control_guidance_start: Union[float, List[float]] = 0.0,
200
+ control_guidance_end: Union[float, List[float]] = 1.0,
201
+ style_guidance_start=0.0,
202
+ style_guidance_end=1.0,
203
+ id_guidance_start=0.0,
204
+ id_guidance_end=1.0,
205
+ original_size: Tuple[int, int] = None,
206
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
207
+ target_size: Tuple[int, int] = None,
208
+ negative_original_size: Optional[Tuple[int, int]] = None,
209
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
210
+ negative_target_size: Optional[Tuple[int, int]] = None,
211
+ clip_skip: Optional[int] = None,
212
+ callback_on_step_end: Optional[
213
+ Union[
214
+ Callable[[int, int, Dict], None],
215
+ PipelineCallback,
216
+ MultiPipelineCallbacks,
217
+ ]
218
+ ] = None,
219
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
220
+ **kwargs,
221
+ ):
222
+ r"""
223
+ The call function to the pipeline for generation.
224
+
225
+ Args:
226
+ prompt (`str` or `List[str]`, *optional*):
227
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
228
+ prompt_2 (`str` or `List[str]`, *optional*):
229
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
230
+ used in both text-encoders.
231
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
232
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
233
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
234
+ specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
235
+ as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or
236
+ width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
237
+ images must be passed as a list such that each element of the list can be correctly batched for input
238
+ to a single ControlNet.
239
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
240
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
241
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
242
+ and checkpoints that are not specifically fine-tuned on low resolutions.
243
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
244
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
245
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
246
+ and checkpoints that are not specifically fine-tuned on low resolutions.
247
+ num_inference_steps (`int`, *optional*, defaults to 50):
248
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
249
+ expense of slower inference.
250
+ timesteps (`List[int]`, *optional*):
251
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
252
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
253
+ passed will be used. Must be in descending order.
254
+ sigmas (`List[float]`, *optional*):
255
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
256
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
257
+ will be used.
258
+ denoising_end (`float`, *optional*):
259
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
260
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
261
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
262
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
263
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
264
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
265
+ guidance_scale (`float`, *optional*, defaults to 5.0):
266
+ A higher guidance scale value encourages the model to generate images closely linked to the text
267
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
268
+ negative_prompt (`str` or `List[str]`, *optional*):
269
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
270
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
271
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
272
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
273
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
274
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
275
+ The number of images to generate per prompt.
276
+ eta (`float`, *optional*, defaults to 0.0):
277
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
278
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
279
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
280
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
281
+ generation deterministic.
282
+ latents (`torch.Tensor`, *optional*):
283
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
284
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
285
+ tensor is generated by sampling using the supplied random `generator`.
286
+ prompt_embeds (`torch.Tensor`, *optional*):
287
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
288
+ provided, text embeddings are generated from the `prompt` input argument.
289
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
290
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
291
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
292
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
293
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
294
+ not provided, pooled text embeddings are generated from `prompt` input argument.
295
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
296
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
297
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
298
+ argument.
299
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
300
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
301
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
302
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
303
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
304
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
305
+ output_type (`str`, *optional*, defaults to `"pil"`):
306
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
307
+ return_dict (`bool`, *optional*, defaults to `True`):
308
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
309
+ plain tuple.
310
+ cross_attention_kwargs (`dict`, *optional*):
311
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
312
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
313
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
314
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
315
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
316
+ the corresponding scale as a list.
317
+ guess_mode (`bool`, *optional*, defaults to `False`):
318
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
319
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
320
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
321
+ The percentage of total steps at which the ControlNet starts applying.
322
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
323
+ The percentage of total steps at which the ControlNet stops applying.
324
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
325
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
326
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
327
+ explained in section 2.2 of
328
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
329
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
330
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
331
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
332
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
333
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
334
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
335
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
336
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
337
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
338
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
339
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
340
+ micro-conditioning as explained in section 2.2 of
341
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
342
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
343
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
344
+ To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
345
+ micro-conditioning as explained in section 2.2 of
346
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
347
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
348
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
349
+ To negatively condition the generation process based on a target image resolution. It should be as same
350
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
351
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
352
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
353
+ clip_skip (`int`, *optional*):
354
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
355
+ the output of the pre-final layer will be used for computing the prompt embeddings.
356
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
357
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
358
+ each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
359
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
360
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
361
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
362
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
363
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
364
+ `._callback_tensor_inputs` attribute of your pipeline class.
365
+
366
+ Examples:
367
+
368
+ Returns:
369
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
370
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
371
+ otherwise a `tuple` is returned containing the output images.
372
+ """
373
+
374
+ callback = kwargs.pop("callback", None)
375
+ callback_steps = kwargs.pop("callback_steps", None)
376
+
377
+ if callback is not None:
378
+ deprecate(
379
+ "callback",
380
+ "1.0.0",
381
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
382
+ )
383
+ if callback_steps is not None:
384
+ deprecate(
385
+ "callback_steps",
386
+ "1.0.0",
387
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
388
+ )
389
+
390
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
391
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
392
+
393
+ controlnet = (
394
+ self.controlnet._orig_mod
395
+ if is_compiled_module(self.controlnet)
396
+ else self.controlnet
397
+ )
398
+
399
+ # align format for control guidance
400
+ if not isinstance(control_guidance_start, list) and isinstance(
401
+ control_guidance_end, list
402
+ ):
403
+ control_guidance_start = len(control_guidance_end) * [
404
+ control_guidance_start
405
+ ]
406
+ elif not isinstance(control_guidance_end, list) and isinstance(
407
+ control_guidance_start, list
408
+ ):
409
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
410
+ elif not isinstance(control_guidance_start, list) and not isinstance(
411
+ control_guidance_end, list
412
+ ):
413
+ mult = (
414
+ len(controlnet.nets)
415
+ if isinstance(controlnet, MultiControlNetModel)
416
+ else 1
417
+ )
418
+ control_guidance_start, control_guidance_end = (
419
+ mult * [control_guidance_start],
420
+ mult * [control_guidance_end],
421
+ )
422
+
423
+ # 1. Check inputs. Raise error if not correct
424
+ # self.check_inputs(
425
+ # prompt,
426
+ # prompt_2,
427
+ # control_image,
428
+ # callback_steps,
429
+ # negative_prompt,
430
+ # negative_prompt_2,
431
+ # prompt_embeds,
432
+ # negative_prompt_embeds,
433
+ # pooled_prompt_embeds,
434
+ # ip_adapter_image,
435
+ # ip_adapter_image_embeds,
436
+ # negative_pooled_prompt_embeds,
437
+ # controlnet_conditioning_scale,
438
+ # control_guidance_start,
439
+ # control_guidance_end,
440
+ # callback_on_step_end_tensor_inputs,
441
+ # )
442
+
443
+ self._guidance_scale = guidance_scale
444
+ self._clip_skip = clip_skip
445
+ self._cross_attention_kwargs = cross_attention_kwargs
446
+ self._denoising_end = denoising_end
447
+ self._interrupt = False
448
+
449
+ # 2. Define call parameters
450
+ if prompt is not None and isinstance(prompt, str):
451
+ batch_size = 1
452
+ elif prompt is not None and isinstance(prompt, list):
453
+ batch_size = len(prompt)
454
+ else:
455
+ batch_size = prompt_embeds.shape[0]
456
+
457
+ device = self._execution_device
458
+
459
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(
460
+ controlnet_conditioning_scale, float
461
+ ):
462
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(
463
+ controlnet.nets
464
+ )
465
+
466
+ global_pool_conditions = (
467
+ controlnet.config.global_pool_conditions
468
+ if isinstance(controlnet, ControlNetModel)
469
+ else controlnet.nets[0].config.global_pool_conditions
470
+ )
471
+ guess_mode = guess_mode or global_pool_conditions
472
+
473
+ # 3.1 Encode input prompt
474
+ text_encoder_lora_scale = (
475
+ self.cross_attention_kwargs.get("scale", None)
476
+ if self.cross_attention_kwargs is not None
477
+ else None
478
+ )
479
+ (
480
+ prompt_embeds,
481
+ negative_prompt_embeds,
482
+ pooled_prompt_embeds,
483
+ negative_pooled_prompt_embeds,
484
+ ) = self.encode_prompt(
485
+ prompt,
486
+ prompt_2,
487
+ device,
488
+ num_images_per_prompt,
489
+ self.do_classifier_free_guidance,
490
+ negative_prompt,
491
+ negative_prompt_2,
492
+ prompt_embeds=prompt_embeds,
493
+ negative_prompt_embeds=negative_prompt_embeds,
494
+ pooled_prompt_embeds=pooled_prompt_embeds,
495
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
496
+ lora_scale=text_encoder_lora_scale,
497
+ clip_skip=self.clip_skip,
498
+ )
499
+
500
+ # 3.2 Encode ip_adapter_image
501
+ style_hidden_states = self.get_style_hidden_states(style_image)
502
+ id_hidden_states = self.get_id_hidden_states(id_image)
503
+ set_multi_ip_hidden_states(
504
+ self.unet,
505
+ [
506
+ style_hidden_states,
507
+ id_hidden_states,
508
+ ],
509
+ )
510
+ set_ip_hidden_states(self.controlnet, style_hidden_states)
511
+ self.set_id_adapter_scale(id_adapter_scale)
512
+ self.set_style_adapter_scale(style_adapter_scale)
513
+ # 4. Prepare image
514
+ if isinstance(controlnet, ControlNetModel) and control_image is not None:
515
+ control_image = self.prepare_image(
516
+ image=control_image,
517
+ width=width,
518
+ height=height,
519
+ batch_size=batch_size * num_images_per_prompt,
520
+ num_images_per_prompt=num_images_per_prompt,
521
+ device=device,
522
+ dtype=controlnet.dtype,
523
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
524
+ guess_mode=guess_mode,
525
+ )
526
+ height, width = control_image.shape[-2:]
527
+ elif isinstance(controlnet, MultiControlNetModel) and control_image is not None:
528
+ images = []
529
+
530
+ for image_ in control_image:
531
+ image_ = self.prepare_image(
532
+ image=image_,
533
+ width=width,
534
+ height=height,
535
+ batch_size=batch_size * num_images_per_prompt,
536
+ num_images_per_prompt=num_images_per_prompt,
537
+ device=device,
538
+ dtype=controlnet.dtype,
539
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
540
+ guess_mode=guess_mode,
541
+ )
542
+
543
+ images.append(image_)
544
+
545
+ control_image = images
546
+ height, width = control_image[0].shape[-2:]
547
+
548
+ # 5. Prepare timesteps
549
+ timesteps, num_inference_steps = retrieve_timesteps(
550
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
551
+ )
552
+ self._num_timesteps = len(timesteps)
553
+
554
+ # 6. Prepare latent variables
555
+ num_channels_latents = self.unet.config.in_channels
556
+ latents = self.prepare_latents(
557
+ batch_size * num_images_per_prompt,
558
+ num_channels_latents,
559
+ height,
560
+ width,
561
+ prompt_embeds.dtype,
562
+ device,
563
+ generator,
564
+ latents,
565
+ )
566
+
567
+ # 6.5 Optionally get Guidance Scale Embedding
568
+ timestep_cond = None
569
+ if self.unet.config.time_cond_proj_dim is not None:
570
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(
571
+ batch_size * num_images_per_prompt
572
+ )
573
+ timestep_cond = self.get_guidance_scale_embedding(
574
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
575
+ ).to(device=device, dtype=latents.dtype)
576
+
577
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
578
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
579
+
580
+ # 7.1 Create tensor stating which controlnets to keep
581
+ controlnet_keep = []
582
+ for i in range(len(timesteps)):
583
+ keeps = [
584
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
585
+ for s, e in zip(control_guidance_start, control_guidance_end)
586
+ ]
587
+ controlnet_keep.append(
588
+ keeps[0] if isinstance(controlnet, ControlNetModel) else keeps
589
+ )
590
+
591
+ # 7.2 Prepare added time ids & embeddings
592
+ if control_image is None:
593
+ original_size = original_size
594
+ original_size = original_size or (height, width)
595
+ target_size = target_size or (height, width)
596
+ else:
597
+ if isinstance(control_image, list):
598
+ original_size = original_size or control_image[0].shape[-2:]
599
+ else:
600
+ original_size = original_size or control_image.shape[-2:]
601
+ target_size = target_size or (height, width)
602
+
603
+ add_text_embeds = pooled_prompt_embeds
604
+ if self.text_encoder_2 is None:
605
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
606
+ else:
607
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
608
+
609
+ add_time_ids = self._get_add_time_ids(
610
+ original_size,
611
+ crops_coords_top_left,
612
+ target_size,
613
+ dtype=prompt_embeds.dtype,
614
+ text_encoder_projection_dim=text_encoder_projection_dim,
615
+ )
616
+
617
+ if negative_original_size is not None and negative_target_size is not None:
618
+ negative_add_time_ids = self._get_add_time_ids(
619
+ negative_original_size,
620
+ negative_crops_coords_top_left,
621
+ negative_target_size,
622
+ dtype=prompt_embeds.dtype,
623
+ text_encoder_projection_dim=text_encoder_projection_dim,
624
+ )
625
+ else:
626
+ negative_add_time_ids = add_time_ids
627
+
628
+ if self.do_classifier_free_guidance:
629
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
630
+ add_text_embeds = torch.cat(
631
+ [negative_pooled_prompt_embeds, add_text_embeds], dim=0
632
+ )
633
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
634
+
635
+ prompt_embeds = prompt_embeds.to(device)
636
+ add_text_embeds = add_text_embeds.to(device)
637
+ add_time_ids = add_time_ids.to(device).repeat(
638
+ batch_size * num_images_per_prompt, 1
639
+ )
640
+
641
+ # 8. Denoising loop
642
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
643
+
644
+ # 8.1 Apply denoising_end
645
+ if (
646
+ self.denoising_end is not None
647
+ and isinstance(self.denoising_end, float)
648
+ and self.denoising_end > 0
649
+ and self.denoising_end < 1
650
+ ):
651
+ discrete_timestep_cutoff = int(
652
+ round(
653
+ self.scheduler.config.num_train_timesteps
654
+ - (self.denoising_end * self.scheduler.config.num_train_timesteps)
655
+ )
656
+ )
657
+ num_inference_steps = len(
658
+ list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))
659
+ )
660
+ timesteps = timesteps[:num_inference_steps]
661
+
662
+ is_unet_compiled = is_compiled_module(self.unet)
663
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
664
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
665
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
666
+ for i, t in enumerate(timesteps):
667
+ if self.interrupt:
668
+ continue
669
+
670
+ # Relevant thread:
671
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
672
+ if (
673
+ is_unet_compiled and is_controlnet_compiled
674
+ ) and is_torch_higher_equal_2_1:
675
+ torch._inductor.cudagraph_mark_step_begin()
676
+ # expand the latents if we are doing classifier free guidance
677
+ latent_model_input = (
678
+ torch.cat([latents] * 2)
679
+ if self.do_classifier_free_guidance
680
+ else latents
681
+ )
682
+ latent_model_input = self.scheduler.scale_model_input(
683
+ latent_model_input, t
684
+ )
685
+
686
+ added_cond_kwargs = {
687
+ "text_embeds": add_text_embeds,
688
+ "time_ids": add_time_ids,
689
+ }
690
+
691
+ # controlnet(s) inference
692
+ if guess_mode and self.do_classifier_free_guidance:
693
+ # Infer ControlNet only for the conditional batch.
694
+ control_model_input = latents
695
+ control_model_input = self.scheduler.scale_model_input(
696
+ control_model_input, t
697
+ )
698
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
699
+ controlnet_added_cond_kwargs = {
700
+ "text_embeds": add_text_embeds.chunk(2)[1],
701
+ "time_ids": add_time_ids.chunk(2)[1],
702
+ }
703
+ else:
704
+ control_model_input = latent_model_input
705
+ controlnet_prompt_embeds = prompt_embeds
706
+ controlnet_added_cond_kwargs = added_cond_kwargs
707
+
708
+ if isinstance(controlnet_keep[i], list):
709
+ cond_scale = [
710
+ c * s
711
+ for c, s in zip(
712
+ controlnet_conditioning_scale, controlnet_keep[i]
713
+ )
714
+ ]
715
+ else:
716
+ controlnet_cond_scale = controlnet_conditioning_scale
717
+ if isinstance(controlnet_cond_scale, list):
718
+ controlnet_cond_scale = controlnet_cond_scale[0]
719
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
720
+
721
+ if control_image is not None and controlnet_conditioning_scale != 0.0:
722
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
723
+ control_model_input,
724
+ t,
725
+ encoder_hidden_states=controlnet_prompt_embeds,
726
+ controlnet_cond=control_image,
727
+ conditioning_scale=cond_scale,
728
+ guess_mode=guess_mode,
729
+ added_cond_kwargs=controlnet_added_cond_kwargs,
730
+ return_dict=False,
731
+ )
732
+ else:
733
+ down_block_res_samples = None
734
+ mid_block_res_sample = None
735
+
736
+ if (
737
+ guess_mode
738
+ and self.do_classifier_free_guidance
739
+ and control_image is not None
740
+ ):
741
+ # Inferred ControlNet only for the conditional batch.
742
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
743
+ # add 0 to the unconditional batch to keep it unchanged.
744
+ down_block_res_samples = [
745
+ torch.cat([torch.zeros_like(d), d])
746
+ for d in down_block_res_samples
747
+ ]
748
+ mid_block_res_sample = torch.cat(
749
+ [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
750
+ )
751
+
752
+ # if (
753
+ # i / num_inference_steps >= style_guidance_start
754
+ # and i / num_inference_steps <= style_guidance_end
755
+ # ):
756
+ # self.set_style_adapter_scale(style_adapter_scale)
757
+ # else:
758
+ # self.set_style_adapter_scale(0.0)
759
+
760
+ # if (
761
+ # i / num_inference_steps >= id_guidance_start
762
+ # and i / num_inference_steps <= id_guidance_end
763
+ # ):
764
+ # self.set_id_adapter_scale(id_adapter_scale)
765
+ # else:
766
+ # self.set_id_adapter_scale(0.0)
767
+ # predict the noise residual
768
+ noise_pred = self.unet(
769
+ latent_model_input,
770
+ t,
771
+ encoder_hidden_states=prompt_embeds,
772
+ timestep_cond=timestep_cond,
773
+ cross_attention_kwargs=self.cross_attention_kwargs,
774
+ down_block_additional_residuals=down_block_res_samples,
775
+ mid_block_additional_residual=mid_block_res_sample,
776
+ added_cond_kwargs=added_cond_kwargs,
777
+ return_dict=False,
778
+ )[0]
779
+
780
+ # perform guidance
781
+ if self.do_classifier_free_guidance:
782
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
783
+ noise_pred = noise_pred_uncond + guidance_scale * (
784
+ noise_pred_text - noise_pred_uncond
785
+ )
786
+
787
+ # compute the previous noisy sample x_t -> x_t-1
788
+ latents = self.scheduler.step(
789
+ noise_pred, t, latents, **extra_step_kwargs, return_dict=False
790
+ )[0]
791
+
792
+ if callback_on_step_end is not None:
793
+ callback_kwargs = {}
794
+ for k in callback_on_step_end_tensor_inputs:
795
+ callback_kwargs[k] = locals()[k]
796
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
797
+
798
+ latents = callback_outputs.pop("latents", latents)
799
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
800
+ negative_prompt_embeds = callback_outputs.pop(
801
+ "negative_prompt_embeds", negative_prompt_embeds
802
+ )
803
+ add_text_embeds = callback_outputs.pop(
804
+ "add_text_embeds", add_text_embeds
805
+ )
806
+ negative_pooled_prompt_embeds = callback_outputs.pop(
807
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
808
+ )
809
+ add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
810
+ negative_add_time_ids = callback_outputs.pop(
811
+ "negative_add_time_ids", negative_add_time_ids
812
+ )
813
+
814
+ # call the callback, if provided
815
+ if i == len(timesteps) - 1 or (
816
+ (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
817
+ ):
818
+ progress_bar.update()
819
+ if callback is not None and i % callback_steps == 0:
820
+ step_idx = i // getattr(self.scheduler, "order", 1)
821
+ callback(step_idx, t, latents)
822
+
823
+ if not output_type == "latent":
824
+ # make sure the VAE is in float32 mode, as it overflows in float16
825
+ needs_upcasting = (
826
+ self.vae.dtype == torch.float16 and self.vae.config.force_upcast
827
+ )
828
+
829
+ if needs_upcasting:
830
+ self.upcast_vae()
831
+ latents = latents.to(
832
+ next(iter(self.vae.post_quant_conv.parameters())).dtype
833
+ )
834
+
835
+ # unscale/denormalize the latents
836
+ # denormalize with the mean and std if available and not None
837
+ has_latents_mean = (
838
+ hasattr(self.vae.config, "latents_mean")
839
+ and self.vae.config.latents_mean is not None
840
+ )
841
+ has_latents_std = (
842
+ hasattr(self.vae.config, "latents_std")
843
+ and self.vae.config.latents_std is not None
844
+ )
845
+ if has_latents_mean and has_latents_std:
846
+ latents_mean = (
847
+ torch.tensor(self.vae.config.latents_mean)
848
+ .view(1, 4, 1, 1)
849
+ .to(latents.device, latents.dtype)
850
+ )
851
+ latents_std = (
852
+ torch.tensor(self.vae.config.latents_std)
853
+ .view(1, 4, 1, 1)
854
+ .to(latents.device, latents.dtype)
855
+ )
856
+ latents = (
857
+ latents * latents_std / self.vae.config.scaling_factor
858
+ + latents_mean
859
+ )
860
+ else:
861
+ latents = latents / self.vae.config.scaling_factor
862
+
863
+ control_image = self.vae.decode(latents, return_dict=False)[0]
864
+
865
+ # cast back to fp16 if needed
866
+ if needs_upcasting:
867
+ self.vae.to(dtype=torch.float16)
868
+ else:
869
+ control_image = latents
870
+
871
+ if not output_type == "latent":
872
+ # apply watermark if available
873
+ if self.watermark is not None:
874
+ control_image = self.watermark.apply_watermark(control_image)
875
+
876
+ control_image = self.image_processor.postprocess(
877
+ control_image, output_type=output_type
878
+ )
879
+
880
+ # Offload all models
881
+ self.maybe_free_model_hooks()
882
+
883
+ if not return_dict:
884
+ return (control_image,)
885
+
886
+ return StableDiffusionXLPipelineOutput(images=control_image)
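
For reference, a minimal sketch of driving this pipeline directly, outside the Gradio app; it is not part of the commit and assumes the checkpoints referenced in app.py are already under `models/` and a CUDA device is available. Note that `__call__` exposes the adapter strengths as `style_adapter_scale` and `id_adapter_scale`.

# Sketch only, assuming the adapter weights from app.py are downloaded.
import torch
from PIL import Image
from diffusers import ControlNetModel, DPMSolverMultistepScheduler
from artistic_portrait.pipeline import ArtisticPortraitXLPipeline

device = "cuda"
controlnet = ControlNetModel.from_pretrained(
    "xinsir/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16
).to(device)
pipe = ArtisticPortraitXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    style_adapter_path="models/ip_adapter_art_sdxl_512.pth",
    id_adapter_path="models/pulid_adapter_diffusers_1.1.pth",
    device=device,
).to(device)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)

image = pipe(
    prompt="portrait, solo, looking at viewer, best quality, masterpiece",
    control_image=Image.open("datasets/test/pose.jpg"),
    style_image=Image.open("datasets/test/style_dataset/Winter.jpg"),
    id_image=Image.open("datasets/test/id_dataset/hinton.jpg"),
    num_inference_steps=20,
    guidance_scale=7.0,
    style_adapter_scale=1.0,  # keyword names as defined on __call__ above
    id_adapter_scale=1.0,
    height=1024,
    width=1024,
    generator=torch.Generator(device).manual_seed(42),
).images[0]
image.save("artistic_portrait.png")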
artistic_portrait/pulid_encoder.py ADDED
@@ -0,0 +1,207 @@
1
+ import gc
2
+
3
+ import cv2
4
+ import insightface
5
+ import numpy as np
6
+ import torch
7
+ import torch.nn as nn
8
+ from pulid.utils import img2tensor, tensor2img
9
+ from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline
10
+ from facexlib.parsing import init_parsing_model
11
+ from facexlib.utils.face_restoration_helper import FaceRestoreHelper
12
+
13
+ from huggingface_hub import hf_hub_download, snapshot_download
14
+ from insightface.app import FaceAnalysis
15
+ from safetensors.torch import load_file
16
+ from torchvision.transforms import InterpolationMode
17
+ from torchvision.transforms.functional import normalize, resize
18
+
19
+ from eva_clip import create_model_and_transforms
20
+ from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
21
+ from pulid.encoders_transformer import IDFormer
22
+ from pulid.utils import is_torch2_available, sample_dpmpp_2m, sample_dpmpp_sde
23
+
24
+ if is_torch2_available():
25
+ from pulid.attention_processor import AttnProcessor2_0 as AttnProcessor
26
+ from pulid.attention_processor import IDAttnProcessor2_0 as IDAttnProcessor
27
+ else:
28
+ from pulid.attention_processor import AttnProcessor, IDAttnProcessor
29
+
30
+ class PuLIDEncoder:
31
+ def __init__(
32
+ self,
33
+ device
34
+ ):
35
+ super().__init__()
36
+ self.device = device
37
+
38
+ # scheduler
39
+ # self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
40
+ # self.pipe.scheduler.config
41
+ # )
42
+
43
+ # ID adapters
44
+ # self.id_adapter = IDFormer().to(self.device)
45
+
46
+ # preprocessors
47
+ # face align and parsing
48
+ self.face_helper = FaceRestoreHelper(
49
+ upscale_factor=1,
50
+ face_size=512,
51
+ crop_ratio=(1, 1),
52
+ det_model="retinaface_resnet50",
53
+ save_ext="png",
54
+ device=self.device,
55
+ )
56
+ self.face_helper.face_parse = None
57
+ self.face_helper.face_parse = init_parsing_model(
58
+ model_name="bisenet", device=self.device
59
+ )
60
+ # clip-vit backbone
61
+ model, _, _ = create_model_and_transforms(
62
+ "EVA02-CLIP-L-14-336", "eva_clip", force_custom_clip=True
63
+ )
64
+ model = model.visual
65
+ self.clip_vision_model = model.to(self.device)
66
+ eva_transform_mean = getattr(
67
+ self.clip_vision_model, "image_mean", OPENAI_DATASET_MEAN
68
+ )
69
+ eva_transform_std = getattr(
70
+ self.clip_vision_model, "image_std", OPENAI_DATASET_STD
71
+ )
72
+ if not isinstance(eva_transform_mean, (list, tuple)):
73
+ eva_transform_mean = (eva_transform_mean,) * 3
74
+ if not isinstance(eva_transform_std, (list, tuple)):
75
+ eva_transform_std = (eva_transform_std,) * 3
76
+ self.eva_transform_mean = eva_transform_mean
77
+ self.eva_transform_std = eva_transform_std
78
+ # antelopev2
79
+ snapshot_download("DIAMONIK7777/antelopev2", local_dir="models/antelopev2")
80
+ self.app = FaceAnalysis(
81
+ name="antelopev2",
82
+ root=".",
83
+ providers=["CPUExecutionProvider"],
84
+ )
85
+ self.app.prepare(ctx_id=0, det_size=(640, 640))
86
+ self.handler_ante = insightface.model_zoo.get_model(
87
+ "models/antelopev2/glintr100.onnx"
88
+ )
89
+ self.handler_ante.prepare(ctx_id=0)
90
+
91
+ gc.collect()
92
+ torch.cuda.empty_cache()
93
+
94
+ # self.load_pretrain()
95
+
96
+ # other configs
97
+ self.debug_img_list = []
98
+
99
+
100
+ def to_gray(self, img):
101
+ x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3]
102
+ x = x.repeat(1, 3, 1, 1)
103
+ return x
104
+
105
+ def get_id_embedding(self, image_list):
106
+ """
107
+ Args:
108
+ image in image_list: numpy rgb image, range [0, 255]
109
+ """
110
+ id_cond_list = []
111
+ id_vit_hidden_list = []
112
+ for ii, image in enumerate(image_list):
113
+ self.face_helper.clean_all()
114
+ image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
115
+ # get antelopev2 embedding
116
+ face_info = self.app.get(image_bgr)
117
+ if len(face_info) > 0:
118
+ face_info = sorted(
119
+ face_info,
120
+ key=lambda x: (x["bbox"][2] - x["bbox"][0])
121
+ * (x["bbox"][3] - x["bbox"][1]),
122
+ )[
123
+ -1
124
+ ] # only use the maximum face
125
+ id_ante_embedding = face_info["embedding"]
126
+ self.debug_img_list.append(
127
+ image[
128
+ int(face_info["bbox"][1]) : int(face_info["bbox"][3]),
129
+ int(face_info["bbox"][0]) : int(face_info["bbox"][2]),
130
+ ]
131
+ )
132
+ else:
133
+ id_ante_embedding = None
134
+
135
+ # using facexlib to detect and align face
136
+ self.face_helper.read_image(image_bgr)
137
+ self.face_helper.get_face_landmarks_5(only_center_face=True)
138
+ self.face_helper.align_warp_face()
139
+ if len(self.face_helper.cropped_faces) == 0:
140
+ raise RuntimeError("facexlib align face fail")
141
+ align_face = self.face_helper.cropped_faces[0]
142
+ # in case insightface didn't detect a face
143
+ if id_ante_embedding is None:
144
+ print(
145
+ "fail to detect face using insightface, extract embedding on align face"
146
+ )
147
+ id_ante_embedding = self.handler_ante.get_feat(align_face)
148
+
149
+ id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device)
150
+ if id_ante_embedding.ndim == 1:
151
+ id_ante_embedding = id_ante_embedding.unsqueeze(0)
152
+
153
+ # parsing
154
+ input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0
155
+ input = input.to(self.device)
156
+ parsing_out = self.face_helper.face_parse(
157
+ normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
158
+ )[0]
159
+ parsing_out = parsing_out.argmax(dim=1, keepdim=True)
160
+ bg_label = [0, 16, 18, 7, 8, 9, 14, 15]
161
+ bg = sum(parsing_out == i for i in bg_label).bool()
162
+ white_image = torch.ones_like(input)
163
+ # only keep the face features
164
+ face_features_image = torch.where(bg, white_image, self.to_gray(input))
165
+ self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False))
166
+
167
+ # transform img before sending to eva-clip-vit
168
+ face_features_image = resize(
169
+ face_features_image,
170
+ self.clip_vision_model.image_size,
171
+ InterpolationMode.BICUBIC,
172
+ )
173
+ face_features_image = normalize(
174
+ face_features_image, self.eva_transform_mean, self.eva_transform_std
175
+ )
176
+ id_cond_vit, id_vit_hidden = self.clip_vision_model(
177
+ face_features_image,
178
+ return_all_features=False,
179
+ return_hidden=True,
180
+ shuffle=False,
181
+ )
182
+ id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True)
183
+ id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm)
184
+
185
+ id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1)
186
+
187
+ id_cond_list.append(id_cond)
188
+ id_vit_hidden_list.append(id_vit_hidden)
189
+
190
+ id_uncond = torch.zeros_like(id_cond_list[0])
191
+ id_vit_hidden_uncond = []
192
+ for layer_idx in range(0, len(id_vit_hidden_list[0])):
193
+ id_vit_hidden_uncond.append(
194
+ torch.zeros_like(id_vit_hidden_list[0][layer_idx])
195
+ )
196
+
197
+ id_cond = torch.stack(id_cond_list, dim=1)
198
+ id_vit_hidden = id_vit_hidden_list[0]
199
+ for i in range(1, len(image_list)):
200
+ for j, x in enumerate(id_vit_hidden_list[i]):
201
+ id_vit_hidden[j] = torch.cat([id_vit_hidden[j], x], dim=1)
202
+
203
+ # id_embedding = self.id_adapter(id_cond, id_vit_hidden)
204
+ # uncond_id_embedding = self.id_adapter(id_uncond, id_vit_hidden_uncond)
205
+
206
+ # return id_embedding
207
+ return id_cond, id_vit_hidden, id_uncond, id_vit_hidden_uncond
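The commented-out calls above show the intended consumers of these tensors: a PuLID-style ID adapter projects `(id_cond, id_vit_hidden)` into identity embeddings, and the zeroed counterparts feed classifier-free guidance. A minimal, hedged usage sketch follows; `encoder` and `id_adapter` are stand-ins for an instance of the ID-encoder class above and a PuLID-style adapter module, so the snippet is illustrative rather than runnable on its own.

```python
import numpy as np
from PIL import Image

# Assumptions: `encoder` is an instance of the ID-encoder class above;
# `id_adapter` is a PuLID-style adapter taking (id_cond, id_vit_hidden).
face = np.array(Image.open("datasets/test/id_dataset/hinton.jpg").convert("RGB"))  # RGB uint8, [0, 255]
id_cond, id_vit_hidden, id_uncond, id_vit_hidden_uncond = encoder.get_id_embedding([face])

id_embedding = id_adapter(id_cond, id_vit_hidden)                  # conditional identity tokens
uncond_id_embedding = id_adapter(id_uncond, id_vit_hidden_uncond)  # unconditional branch for CFG
```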
artistic_portrait_gen.ipynb ADDED
@@ -0,0 +1,130 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "## Artisitc Portrait Gen"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": null,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "import torch\n",
17
+ "from artistic_portrait.pipeline import ArtisticPortraitXLPipeline\n",
18
+ "from diffusers import ControlNetModel\n",
19
+ "from PIL import Image\n",
20
+ "from ip_adapter_diffusers.ip_adapter import *\n",
21
+ "from diffusers import DPMSolverMultistepScheduler"
22
+ ]
23
+ },
24
+ {
25
+ "cell_type": "code",
26
+ "execution_count": null,
27
+ "metadata": {},
28
+ "outputs": [],
29
+ "source": [
30
+ "device = \"cuda\"\n",
31
+ "dtype = torch.float16\n",
32
+ "style_adapter_path = \"models/ip_adapter_art_sdxl_512.pth\"\n",
33
+ "id_adapter_path = \"models/pulid_adapter_diffusers_1.1.pth\""
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": null,
39
+ "metadata": {},
40
+ "outputs": [],
41
+ "source": [
42
+ "controlnet = ControlNetModel.from_pretrained(\n",
43
+ " \"xinsir/controlnet-openpose-sdxl-1.0\",\n",
44
+ " torch_dtype=dtype,\n",
45
+ ").to(device)\n",
46
+ "pipe = ArtisticPortraitXLPipeline.from_pretrained(\n",
47
+ " \"stabilityai/stable-diffusion-xl-base-1.0\",\n",
48
+ " controlnet=controlnet,\n",
49
+ " safety_checker=None,\n",
50
+ " torch_dtype=torch.float16,\n",
51
+ " style_adapter_path=style_adapter_path,\n",
52
+ " id_adapter_path=id_adapter_path,\n",
53
+ " device=device,\n",
54
+ ").to(device)\n",
55
+ "pipe.scheduler = DPMSolverMultistepScheduler.from_config(\n",
56
+ " pipe.scheduler.config, timestep_spacing=\"trailing\"\n",
57
+ ")"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": null,
63
+ "metadata": {},
64
+ "outputs": [],
65
+ "source": [
66
+ "height = 1024\n",
67
+ "width = 1024\n",
68
+ "artify_controlnet_scale = 0.0\n",
69
+ "style_scale = 1.0\n",
70
+ "id_scale = 1.0\n",
71
+ "controlnet_scale = 0.9\n",
72
+ "\n",
73
+ "if artify_controlnet_scale > 0:\n",
74
+ " pipe.load_style_adapter_to_controlnet(style_adapter_path)\n",
75
+ " set_ip_adapter_scale(pipe.controlnet, artify_controlnet_scale)\n",
76
+ "\n",
77
+ "style_image = Image.open(\"datasets/test/style_dataset/Abstract D'Oyley.jpg\")\n",
78
+ "id_image = Image.open(\"datasets/test/id_dataset/hinton.jpg\")\n",
79
+ "pose_image = Image.open(\"datasets/test/pose.jpg\")"
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "code",
84
+ "execution_count": null,
85
+ "metadata": {},
86
+ "outputs": [],
87
+ "source": [
88
+ "result = pipe(\n",
89
+ " f\"portrait, solo, looking at viewer, best quality, masterpiece\",\n",
90
+ " negative_prompt=\"flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality,artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed\",\n",
91
+ " control_image=pose_image,\n",
92
+ " controlnet_conditioning_scale=controlnet_scale,\n",
93
+ " width=width,\n",
94
+ " height=height,\n",
95
+ " num_inference_steps=20,\n",
96
+ " guidance_scale=7,\n",
97
+ " style_image=style_image,\n",
98
+ " id_image=id_image,\n",
99
+ " generator=torch.Generator(\"cuda\").manual_seed(42),\n",
100
+ " id_scale=1.0,\n",
101
+ " style_scale=1.0,\n",
102
+ " # num_zero=[None, 16],\n",
103
+ " # ortho=[None, 'ortho_v2'],\n",
104
+ ").images[0]\n",
105
+ "result"
106
+ ]
107
+ }
108
+ ],
109
+ "metadata": {
110
+ "kernelspec": {
111
+ "display_name": "Python 3",
112
+ "language": "python",
113
+ "name": "python3"
114
+ },
115
+ "language_info": {
116
+ "codemirror_mode": {
117
+ "name": "ipython",
118
+ "version": 3
119
+ },
120
+ "file_extension": ".py",
121
+ "mimetype": "text/x-python",
122
+ "name": "python",
123
+ "nbconvert_exporter": "python",
124
+ "pygments_lexer": "ipython3",
125
+ "version": "3.10.16"
126
+ }
127
+ },
128
+ "nbformat": 4,
129
+ "nbformat_minor": 2
130
+ }
ip_adapter_art/utils/csd_clip.py → csd_clip/__init__.py RENAMED
@@ -5,6 +5,7 @@ import copy
5
  from torch.autograd import Function
6
 
7
  from collections import OrderedDict
 
8
 
9
 
10
  def convert_state_dict(state_dict):
@@ -94,7 +95,7 @@ class CSD_CLIP(nn.Module):
94
  self.content_proj_head = content_proj_head
95
  if name == "vit_large":
96
  if model_path is None:
97
- clipmodel, _ = clip.load("models/ViT-L-14.pt")
98
  else:
99
  clipmodel, _ = clip.load(model_path)
100
  self.backbone = clipmodel.visual
@@ -143,3 +144,29 @@ class CSD_CLIP(nn.Module):
143
  content_output = reverse_feature @ self.last_layer_content
144
  content_output = nn.functional.normalize(content_output, dim=1, p=2)
145
  return feature, content_output, style_output
5
  from torch.autograd import Function
6
 
7
  from collections import OrderedDict
8
+ from torchvision import transforms
9
 
10
 
11
  def convert_state_dict(state_dict):
 
95
  self.content_proj_head = content_proj_head
96
  if name == "vit_large":
97
  if model_path is None:
98
+ clipmodel, _ = clip.load("ViT-L/14")
99
  else:
100
  clipmodel, _ = clip.load(model_path)
101
  self.backbone = clipmodel.visual
 
144
  content_output = reverse_feature @ self.last_layer_content
145
  content_output = nn.functional.normalize(content_output, dim=1, p=2)
146
  return feature, content_output, style_output
147
+
148
+
149
+ def create_model_and_transforms(model_path="models/csd_clip.pth"):
150
+ # init model
151
+ model = CSD_CLIP("vit_large", "default")
152
+
153
+ # load model
154
+ checkpoint = torch.load(model_path, map_location="cpu")
155
+ state_dict = convert_state_dict(checkpoint["model_state_dict"])
156
+ model.load_state_dict(state_dict, strict=False)
157
+
158
+ # normalization
159
+ normalize = transforms.Normalize(
160
+ (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
161
+ )
162
+ preprocess = transforms.Compose(
163
+ [
164
+ transforms.Resize(
165
+ size=224, interpolation=transforms.functional.InterpolationMode.BICUBIC
166
+ ),
167
+ transforms.CenterCrop(224),
168
+ transforms.ToTensor(),
169
+ normalize,
170
+ ]
171
+ )
172
+ return model, preprocess, preprocess
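For reference, a hedged sketch of using this factory to extract a style embedding. It assumes `models/csd_clip.pth` is available locally and that `CSD_CLIP.forward` accepts a batch of preprocessed images (the forward signature is not shown in this hunk).

```python
import torch
from PIL import Image
from csd_clip import create_model_and_transforms

model, preprocess, _ = create_model_and_transforms("models/csd_clip.pth")
model = model.eval()

image = Image.open("datasets/test/style_dataset/Winter.jpg").convert("RGB")
pixels = preprocess(image).unsqueeze(0)  # (1, 3, 224, 224) after resize/center-crop/normalize
with torch.no_grad():
    feature, content_emb, style_emb = model(pixels)  # the three outputs returned above
```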
datasets/test/id_dataset/hinton.jpg ADDED

Git LFS Details

  • SHA256: 9f8e0c41abf2f3de47d284c90700c44a12b74d3787345c740a6919044165bbad
  • Pointer size: 130 Bytes
  • Size of remote file: 31.9 kB
datasets/test/id_dataset/lecun.jpg ADDED

Git LFS Details

  • SHA256: 2730103b6b9ebaf47b44ef9a9d7fbb722de7878a101af09f0b85f8dfadb4c8a4
  • Pointer size: 130 Bytes
  • Size of remote file: 30.6 kB
datasets/test/id_dataset/lifeifei.jpg ADDED

Git LFS Details

  • SHA256: c7b26e78b94ccd8c30a40efca3c52c8a04573188b41eb4b3d1fc517ec8577b35
  • Pointer size: 131 Bytes
  • Size of remote file: 203 kB
datasets/test/id_dataset/liuyifei.jpg ADDED

Git LFS Details

  • SHA256: c6a43925c86a360871aa69863436aa48b917d5a039b7b9a293426328ecb6c67c
  • Pointer size: 130 Bytes
  • Size of remote file: 37.7 kB
datasets/test/id_dataset/rihanna.jpg ADDED

Git LFS Details

  • SHA256: 8df5ffa49f22ca4e0ad3a69fd5648b7810e91d04aa54cfe77dd6f943416dd0ed
  • Pointer size: 131 Bytes
  • Size of remote file: 105 kB
datasets/test/pose.jpg ADDED

Git LFS Details

  • SHA256: ccc0df26d3c84b2640c0d5996a3e506e062aeeafb92b79e09ddf665103d7987d
  • Pointer size: 130 Bytes
  • Size of remote file: 24.2 kB
datasets/test/style_dataset/Abstract D'Oyley.jpg ADDED

Git LFS Details

  • SHA256: 275d059cb49d75a8184fc767b72e3d13d60be901106d52ee1511f908fd860a21
  • Pointer size: 130 Bytes
  • Size of remote file: 85.6 kB
datasets/test/style_dataset/Adam Zyglis.jpg ADDED

Git LFS Details

  • SHA256: 545d4dd0a093d606241e4da1b15fdbde6e6dcfe94eba69563d5c6143c61a9d77
  • Pointer size: 130 Bytes
  • Size of remote file: 67.5 kB
README.assets/example.jpg → datasets/test/style_dataset/Amigurumi.jpg RENAMED
File without changes
datasets/test/style_dataset/Diffused lighting.jpg ADDED

Git LFS Details

  • SHA256: 7cfd6d79f511e05d76cb5b65b062dc11b6716098e53581a7809e6b1180cc7a2e
  • Pointer size: 130 Bytes
  • Size of remote file: 15.1 kB
datasets/test/style_dataset/Shirley Hughes.jpg ADDED

Git LFS Details

  • SHA256: dc658ee320b1270636fd117c4abbeb9a42e521d27c0cc4aa8950191d538be4b6
  • Pointer size: 130 Bytes
  • Size of remote file: 75.2 kB
datasets/test/style_dataset/Winter.jpg ADDED

Git LFS Details

  • SHA256: ffb20f8d32e18534f37e9a93f36fb28fe4841cbd45c95f443835a8b05e9d8c91
  • Pointer size: 130 Bytes
  • Size of remote file: 47.4 kB
eva_clip/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
2
+ from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_transforms
3
+ from .factory import list_models, add_model_config, get_model_config, load_checkpoint
4
+ from .loss import ClipLoss
5
+ from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg,\
6
+ convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
7
+ from .openai import load_openai_model, list_openai_models
8
+ from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\
9
+ get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
10
+ from .tokenizer import SimpleTokenizer, tokenize
11
+ from .transform import image_transform
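These exports are what the ID encoder earlier in this commit builds on; a minimal sketch of the same vision-tower load (the `"eva_clip"` pretrained tag is assumed to be resolvable by `eva_clip.pretrained` and may trigger a weight download):

```python
from eva_clip import create_model_and_transforms

model, _, _ = create_model_and_transforms(
    "EVA02-CLIP-L-14-336", "eva_clip", force_custom_clip=True
)
vision_tower = model.visual  # the EVAVisionTransformer used for face features
```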
eva_clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
3
+ size 1356917
eva_clip/constants.py ADDED
@@ -0,0 +1,2 @@
1
+ OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
2
+ OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
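These are the standard OpenAI CLIP normalization statistics; the encoder above falls back to them when the vision tower carries no `image_mean`/`image_std` of its own. A tiny illustration:

```python
import torch
from torchvision.transforms.functional import normalize
from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD

face = torch.rand(3, 336, 336)  # stand-in for a face crop scaled to [0, 1]
face = normalize(face, OPENAI_DATASET_MEAN, OPENAI_DATASET_STD)
```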
eva_clip/eva_vit_model.py ADDED
@@ -0,0 +1,548 @@
1
+ # --------------------------------------------------------
2
+ # Adapted from https://github.com/microsoft/unilm/tree/master/beit
3
+ # --------------------------------------------------------
4
+ import math
5
+ import os
6
+ from functools import partial
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ try:
11
+ from timm.models.layers import drop_path, to_2tuple, trunc_normal_
12
+ except:
13
+ from timm.layers import drop_path, to_2tuple, trunc_normal_
14
+
15
+ from .transformer import PatchDropout
16
+ from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
17
+
18
+ if os.getenv('ENV_TYPE') == 'deepspeed':
19
+ try:
20
+ from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
21
+ except:
22
+ from torch.utils.checkpoint import checkpoint
23
+ else:
24
+ from torch.utils.checkpoint import checkpoint
25
+
26
+ try:
27
+ import xformers
28
+ import xformers.ops as xops
29
+ XFORMERS_IS_AVAILBLE = True
30
+ except:
31
+ XFORMERS_IS_AVAILBLE = False
32
+
33
+ class DropPath(nn.Module):
34
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
35
+ """
36
+ def __init__(self, drop_prob=None):
37
+ super(DropPath, self).__init__()
38
+ self.drop_prob = drop_prob
39
+
40
+ def forward(self, x):
41
+ return drop_path(x, self.drop_prob, self.training)
42
+
43
+ def extra_repr(self) -> str:
44
+ return 'p={}'.format(self.drop_prob)
45
+
46
+
47
+ class Mlp(nn.Module):
48
+ def __init__(
49
+ self,
50
+ in_features,
51
+ hidden_features=None,
52
+ out_features=None,
53
+ act_layer=nn.GELU,
54
+ norm_layer=nn.LayerNorm,
55
+ drop=0.,
56
+ subln=False,
57
+
58
+ ):
59
+ super().__init__()
60
+ out_features = out_features or in_features
61
+ hidden_features = hidden_features or in_features
62
+ self.fc1 = nn.Linear(in_features, hidden_features)
63
+ self.act = act_layer()
64
+
65
+ self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
66
+
67
+ self.fc2 = nn.Linear(hidden_features, out_features)
68
+ self.drop = nn.Dropout(drop)
69
+
70
+ def forward(self, x):
71
+ x = self.fc1(x)
72
+ x = self.act(x)
73
+ # x = self.drop(x)
74
+ # commented out to follow the original BERT implementation
75
+ x = self.ffn_ln(x)
76
+
77
+ x = self.fc2(x)
78
+ x = self.drop(x)
79
+ return x
80
+
81
+ class SwiGLU(nn.Module):
82
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.,
83
+ norm_layer=nn.LayerNorm, subln=False):
84
+ super().__init__()
85
+ out_features = out_features or in_features
86
+ hidden_features = hidden_features or in_features
87
+
88
+ self.w1 = nn.Linear(in_features, hidden_features)
89
+ self.w2 = nn.Linear(in_features, hidden_features)
90
+
91
+ self.act = act_layer()
92
+ self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
93
+ self.w3 = nn.Linear(hidden_features, out_features)
94
+
95
+ self.drop = nn.Dropout(drop)
96
+
97
+ def forward(self, x):
98
+ x1 = self.w1(x)
99
+ x2 = self.w2(x)
100
+ hidden = self.act(x1) * x2
101
+ x = self.ffn_ln(hidden)
102
+ x = self.w3(x)
103
+ x = self.drop(x)
104
+ return x
105
+
106
+ class Attention(nn.Module):
107
+ def __init__(
108
+ self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
109
+ proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm):
110
+ super().__init__()
111
+ self.num_heads = num_heads
112
+ head_dim = dim // num_heads
113
+ if attn_head_dim is not None:
114
+ head_dim = attn_head_dim
115
+ all_head_dim = head_dim * self.num_heads
116
+ self.scale = qk_scale or head_dim ** -0.5
117
+
118
+ self.subln = subln
119
+ if self.subln:
120
+ self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
121
+ self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
122
+ self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
123
+ else:
124
+ self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
125
+
126
+ if qkv_bias:
127
+ self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
128
+ self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
129
+ else:
130
+ self.q_bias = None
131
+ self.v_bias = None
132
+
133
+ if window_size:
134
+ self.window_size = window_size
135
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
136
+ self.relative_position_bias_table = nn.Parameter(
137
+ torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
138
+ # cls to token & token 2 cls & cls to cls
139
+
140
+ # get pair-wise relative position index for each token inside the window
141
+ coords_h = torch.arange(window_size[0])
142
+ coords_w = torch.arange(window_size[1])
143
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
144
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
145
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
146
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
147
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
148
+ relative_coords[:, :, 1] += window_size[1] - 1
149
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
150
+ relative_position_index = \
151
+ torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
152
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
153
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
154
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
155
+ relative_position_index[0, 0] = self.num_relative_distance - 1
156
+
157
+ self.register_buffer("relative_position_index", relative_position_index)
158
+ else:
159
+ self.window_size = None
160
+ self.relative_position_bias_table = None
161
+ self.relative_position_index = None
162
+
163
+ self.attn_drop = nn.Dropout(attn_drop)
164
+ self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()
165
+ # self.proj = nn.Linear(all_head_dim, all_head_dim)
166
+ self.proj = nn.Linear(all_head_dim, dim)
167
+ self.proj_drop = nn.Dropout(proj_drop)
168
+ self.xattn = xattn
169
+ self.xattn_drop = attn_drop
170
+
171
+ self.rope = rope
172
+
173
+ def forward(self, x, rel_pos_bias=None, attn_mask=None):
174
+ B, N, C = x.shape
175
+ if self.subln:
176
+ q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
177
+ k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
178
+ v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)
179
+
180
+ q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) # B, num_heads, N, C
181
+ k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
182
+ v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
183
+ else:
184
+
185
+ qkv_bias = None
186
+ if self.q_bias is not None:
187
+ qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
188
+
189
+ qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
190
+ qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # 3, B, num_heads, N, C
191
+ q, k, v = qkv[0], qkv[1], qkv[2]
192
+
193
+ if self.rope:
194
+ # slightly fast impl
195
+ q_t = q[:, :, 1:, :]
196
+ ro_q_t = self.rope(q_t)
197
+ q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)
198
+
199
+ k_t = k[:, :, 1:, :]
200
+ ro_k_t = self.rope(k_t)
201
+ k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)
202
+
203
+ if self.xattn:
204
+ q = q.permute(0, 2, 1, 3) # B, num_heads, N, C -> B, N, num_heads, C
205
+ k = k.permute(0, 2, 1, 3)
206
+ v = v.permute(0, 2, 1, 3)
207
+
208
+ x = xops.memory_efficient_attention(
209
+ q, k, v,
210
+ p=self.xattn_drop,
211
+ scale=self.scale,
212
+ )
213
+ x = x.reshape(B, N, -1)
214
+ x = self.inner_attn_ln(x)
215
+ x = self.proj(x)
216
+ x = self.proj_drop(x)
217
+ else:
218
+ q = q * self.scale
219
+ attn = (q @ k.transpose(-2, -1))
220
+
221
+ if self.relative_position_bias_table is not None:
222
+ relative_position_bias = \
223
+ self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
224
+ self.window_size[0] * self.window_size[1] + 1,
225
+ self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
226
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
227
+ attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)
228
+
229
+ if rel_pos_bias is not None:
230
+ attn = attn + rel_pos_bias.type_as(attn)
231
+
232
+ if attn_mask is not None:
233
+ attn_mask = attn_mask.bool()
234
+ attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))
235
+
236
+ attn = attn.softmax(dim=-1)
237
+ attn = self.attn_drop(attn)
238
+
239
+ x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
240
+ x = self.inner_attn_ln(x)
241
+ x = self.proj(x)
242
+ x = self.proj_drop(x)
243
+ return x
244
+
245
+
246
+ class Block(nn.Module):
247
+
248
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
249
+ drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
250
+ window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False,
251
+ subln=False, naiveswiglu=False):
252
+ super().__init__()
253
+ self.norm1 = norm_layer(dim)
254
+ self.attn = Attention(
255
+ dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
256
+ attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim,
257
+ xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer)
258
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
259
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
260
+ self.norm2 = norm_layer(dim)
261
+ mlp_hidden_dim = int(dim * mlp_ratio)
262
+
263
+ if naiveswiglu:
264
+ self.mlp = SwiGLU(
265
+ in_features=dim,
266
+ hidden_features=mlp_hidden_dim,
267
+ subln=subln,
268
+ norm_layer=norm_layer,
269
+ )
270
+ else:
271
+ self.mlp = Mlp(
272
+ in_features=dim,
273
+ hidden_features=mlp_hidden_dim,
274
+ act_layer=act_layer,
275
+ subln=subln,
276
+ drop=drop
277
+ )
278
+
279
+ if init_values is not None and init_values > 0:
280
+ self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
281
+ self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
282
+ else:
283
+ self.gamma_1, self.gamma_2 = None, None
284
+
285
+ self.postnorm = postnorm
286
+
287
+ def forward(self, x, rel_pos_bias=None, attn_mask=None):
288
+ if self.gamma_1 is None:
289
+ if self.postnorm:
290
+ x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
291
+ x = x + self.drop_path(self.norm2(self.mlp(x)))
292
+ else:
293
+ x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
294
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
295
+ else:
296
+ if self.postnorm:
297
+ x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
298
+ x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
299
+ else:
300
+ x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
301
+ x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
302
+ return x
303
+
304
+
305
+ class PatchEmbed(nn.Module):
306
+ """ Image to Patch Embedding
307
+ """
308
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
309
+ super().__init__()
310
+ img_size = to_2tuple(img_size)
311
+ patch_size = to_2tuple(patch_size)
312
+ num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
313
+ self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
314
+ self.img_size = img_size
315
+ self.patch_size = patch_size
316
+ self.num_patches = num_patches
317
+
318
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
319
+
320
+ def forward(self, x, **kwargs):
321
+ B, C, H, W = x.shape
322
+ # FIXME look at relaxing size constraints
323
+ assert H == self.img_size[0] and W == self.img_size[1], \
324
+ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
325
+ x = self.proj(x).flatten(2).transpose(1, 2)
326
+ return x
327
+
328
+
329
+ class RelativePositionBias(nn.Module):
330
+
331
+ def __init__(self, window_size, num_heads):
332
+ super().__init__()
333
+ self.window_size = window_size
334
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
335
+ self.relative_position_bias_table = nn.Parameter(
336
+ torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
337
+ # cls to token & token 2 cls & cls to cls
338
+
339
+ # get pair-wise relative position index for each token inside the window
340
+ coords_h = torch.arange(window_size[0])
341
+ coords_w = torch.arange(window_size[1])
342
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
343
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
344
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
345
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
346
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
347
+ relative_coords[:, :, 1] += window_size[1] - 1
348
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
349
+ relative_position_index = \
350
+ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
351
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
352
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
353
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
354
+ relative_position_index[0, 0] = self.num_relative_distance - 1
355
+
356
+ self.register_buffer("relative_position_index", relative_position_index)
357
+
358
+ def forward(self):
359
+ relative_position_bias = \
360
+ self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
361
+ self.window_size[0] * self.window_size[1] + 1,
362
+ self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
363
+ return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
364
+
365
+
366
+ class EVAVisionTransformer(nn.Module):
367
+ """ Vision Transformer with support for patch or hybrid CNN input stage
368
+ """
369
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
370
+ num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
371
+ drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
372
+ use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
373
+ use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
374
+ pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):
375
+ super().__init__()
376
+
377
+ if not XFORMERS_IS_AVAILBLE:
378
+ xattn = False
379
+
380
+ self.image_size = img_size
381
+ self.num_classes = num_classes
382
+ self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
383
+
384
+ self.patch_embed = PatchEmbed(
385
+ img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
386
+ num_patches = self.patch_embed.num_patches
387
+
388
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
389
+ # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
390
+ if use_abs_pos_emb:
391
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
392
+ else:
393
+ self.pos_embed = None
394
+ self.pos_drop = nn.Dropout(p=drop_rate)
395
+
396
+ if use_shared_rel_pos_bias:
397
+ self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
398
+ else:
399
+ self.rel_pos_bias = None
400
+
401
+ if rope:
402
+ half_head_dim = embed_dim // num_heads // 2
403
+ hw_seq_len = img_size // patch_size
404
+ self.rope = VisionRotaryEmbeddingFast(
405
+ dim=half_head_dim,
406
+ pt_seq_len=pt_hw_seq_len,
407
+ ft_seq_len=hw_seq_len if intp_freq else None,
408
+ # patch_dropout=patch_dropout
409
+ )
410
+ else:
411
+ self.rope = None
412
+
413
+ self.naiveswiglu = naiveswiglu
414
+
415
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
416
+ self.use_rel_pos_bias = use_rel_pos_bias
417
+ self.blocks = nn.ModuleList([
418
+ Block(
419
+ dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
420
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
421
+ init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
422
+ xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu)
423
+ for i in range(depth)])
424
+ self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
425
+ self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
426
+ self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
427
+
428
+ if self.pos_embed is not None:
429
+ trunc_normal_(self.pos_embed, std=.02)
430
+
431
+ trunc_normal_(self.cls_token, std=.02)
432
+ # trunc_normal_(self.mask_token, std=.02)
433
+
434
+ self.apply(self._init_weights)
435
+ self.fix_init_weight()
436
+
437
+ if isinstance(self.head, nn.Linear):
438
+ trunc_normal_(self.head.weight, std=.02)
439
+ self.head.weight.data.mul_(init_scale)
440
+ self.head.bias.data.mul_(init_scale)
441
+
442
+ # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
443
+ self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
444
+
445
+ self.grad_checkpointing = grad_checkpointing
446
+
447
+ def fix_init_weight(self):
448
+ def rescale(param, layer_id):
449
+ param.div_(math.sqrt(2.0 * layer_id))
450
+
451
+ for layer_id, layer in enumerate(self.blocks):
452
+ rescale(layer.attn.proj.weight.data, layer_id + 1)
453
+ if self.naiveswiglu:
454
+ rescale(layer.mlp.w3.weight.data, layer_id + 1)
455
+ else:
456
+ rescale(layer.mlp.fc2.weight.data, layer_id + 1)
457
+
458
+ def get_cast_dtype(self) -> torch.dtype:
459
+ return self.blocks[0].mlp.fc2.weight.dtype
460
+
461
+ def _init_weights(self, m):
462
+ if isinstance(m, nn.Linear):
463
+ trunc_normal_(m.weight, std=.02)
464
+ if m.bias is not None:
465
+ nn.init.constant_(m.bias, 0)
466
+ elif isinstance(m, nn.LayerNorm):
467
+ nn.init.constant_(m.bias, 0)
468
+ nn.init.constant_(m.weight, 1.0)
469
+
470
+ def get_num_layers(self):
471
+ return len(self.blocks)
472
+
473
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
474
+ assert unlocked_groups == 0, 'partial locking not currently supported for this model'
475
+ for param in self.parameters():
476
+ param.requires_grad = False
477
+
478
+ @torch.jit.ignore
479
+ def set_grad_checkpointing(self, enable=True):
480
+ self.grad_checkpointing = enable
481
+
482
+ @torch.jit.ignore
483
+ def no_weight_decay(self):
484
+ return {'pos_embed', 'cls_token'}
485
+
486
+ def get_classifier(self):
487
+ return self.head
488
+
489
+ def reset_classifier(self, num_classes, global_pool=''):
490
+ self.num_classes = num_classes
491
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
492
+
493
+ def forward_features(self, x, return_all_features=False, return_hidden=False, shuffle=False):
494
+
495
+ x = self.patch_embed(x)
496
+ batch_size, seq_len, _ = x.size()
497
+
498
+ if shuffle:
499
+ idx = torch.randperm(x.shape[1]) + 1
500
+ zero = torch.LongTensor([0, ])
501
+ idx = torch.cat([zero, idx])
502
+ pos_embed = self.pos_embed[:, idx]
503
+
504
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
505
+ x = torch.cat((cls_tokens, x), dim=1)
506
+ if shuffle:
507
+ x = x + pos_embed
508
+ elif self.pos_embed is not None:
509
+ x = x + self.pos_embed
510
+ x = self.pos_drop(x)
511
+
512
+ # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
513
+ if os.getenv('RoPE') == '1':
514
+ if self.training and not isinstance(self.patch_dropout, nn.Identity):
515
+ x, patch_indices_keep = self.patch_dropout(x)
516
+ self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)
517
+ else:
518
+ self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)
519
+ x = self.patch_dropout(x)
520
+ else:
521
+ x = self.patch_dropout(x)
522
+
523
+ rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
524
+ hidden_states = []
525
+ for idx, blk in enumerate(self.blocks):
526
+ if (0 < idx <= 20) and (idx % 4 == 0) and return_hidden:
527
+ hidden_states.append(x)
528
+ if self.grad_checkpointing:
529
+ x = checkpoint(blk, x, (rel_pos_bias,))
530
+ else:
531
+ x = blk(x, rel_pos_bias=rel_pos_bias)
532
+
533
+ if not return_all_features:
534
+ x = self.norm(x)
535
+ if self.fc_norm is not None:
536
+ return self.fc_norm(x.mean(1)), hidden_states
537
+ else:
538
+ return x[:, 0], hidden_states
539
+ return x
540
+
541
+ def forward(self, x, return_all_features=False, return_hidden=False, shuffle=False):
542
+ if return_all_features:
543
+ return self.forward_features(x, return_all_features, return_hidden, shuffle)
544
+ x, hidden_states = self.forward_features(x, return_all_features, return_hidden, shuffle)
545
+ x = self.head(x)
546
+ if return_hidden:
547
+ return x, hidden_states
548
+ return x
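A hedged sketch of the `return_hidden` path used by the ID encoder: with `return_all_features=False` and `return_hidden=True`, the model returns the pooled feature plus the activations entering blocks 4, 8, 12, 16 and 20. The configuration below is deliberately tiny and purely illustrative; the real EVA02-L tower is far larger.

```python
import torch
from eva_clip.eva_vit_model import EVAVisionTransformer

vit = EVAVisionTransformer(
    img_size=224, patch_size=16, num_classes=0,  # num_classes=0 keeps the head as Identity
    embed_dim=64, depth=24, num_heads=4,
)
x = torch.randn(1, 3, 224, 224)
feat, hidden_states = vit(x, return_all_features=False, return_hidden=True)
print(feat.shape, len(hidden_states))  # torch.Size([1, 64]) 5
```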
eva_clip/factory.py ADDED
@@ -0,0 +1,517 @@
1
+ import json
2
+ import logging
3
+ import os
4
+ import pathlib
5
+ import re
6
+ from copy import deepcopy
7
+ from pathlib import Path
8
+ from typing import Optional, Tuple, Union, Dict, Any
9
+ import torch
10
+
11
+ from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
12
+ from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
13
+ get_cast_dtype
14
+ from .openai import load_openai_model
15
+ from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
16
+ from .transform import image_transform
17
+ from .tokenizer import HFTokenizer, tokenize
18
+ from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed
19
+
20
+
21
+ _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
22
+ _MODEL_CONFIGS = {} # dictionary (model_name: config) of model architecture configs
23
+
24
+
25
+ def _natural_key(string_):
26
+ return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
27
+
28
+
29
+ def _rescan_model_configs():
30
+ global _MODEL_CONFIGS
31
+
32
+ config_ext = ('.json',)
33
+ config_files = []
34
+ for config_path in _MODEL_CONFIG_PATHS:
35
+ if config_path.is_file() and config_path.suffix in config_ext:
36
+ config_files.append(config_path)
37
+ elif config_path.is_dir():
38
+ for ext in config_ext:
39
+ config_files.extend(config_path.glob(f'*{ext}'))
40
+
41
+ for cf in config_files:
42
+ with open(cf, "r", encoding="utf8") as f:
43
+ model_cfg = json.load(f)
44
+ if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
45
+ _MODEL_CONFIGS[cf.stem] = model_cfg
46
+
47
+ _MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])))
48
+
49
+
50
+ _rescan_model_configs() # initial populate of model config registry
51
+
52
+
53
+ def list_models():
54
+ """ enumerate available model architectures based on config files """
55
+ return list(_MODEL_CONFIGS.keys())
56
+
57
+
58
+ def add_model_config(path):
59
+ """ add model config path or file and update registry """
60
+ if not isinstance(path, Path):
61
+ path = Path(path)
62
+ _MODEL_CONFIG_PATHS.append(path)
63
+ _rescan_model_configs()
64
+
65
+
66
+ def get_model_config(model_name):
67
+ if model_name in _MODEL_CONFIGS:
68
+ return deepcopy(_MODEL_CONFIGS[model_name])
69
+ else:
70
+ return None
71
+
72
+
73
+ def get_tokenizer(model_name):
74
+ config = get_model_config(model_name)
75
+ tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
76
+ return tokenizer
77
+
78
+
79
+ # loading openai CLIP weights when is_openai=True for training
80
+ def load_state_dict(checkpoint_path: str, map_location: str='cpu', model_key: str='model|module|state_dict', is_openai: bool=False, skip_list: list=[]):
81
+ if is_openai:
82
+ model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
83
+ state_dict = model.state_dict()
84
+ for key in ["input_resolution", "context_length", "vocab_size"]:
85
+ state_dict.pop(key, None)
86
+ else:
87
+ checkpoint = torch.load(checkpoint_path, map_location=map_location)
88
+ for mk in model_key.split('|'):
89
+ if isinstance(checkpoint, dict) and mk in checkpoint:
90
+ state_dict = checkpoint[mk]
91
+ break
92
+ else:
93
+ state_dict = checkpoint
94
+ if next(iter(state_dict.items()))[0].startswith('module'):
95
+ state_dict = {k[7:]: v for k, v in state_dict.items()}
96
+
97
+ for k in skip_list:
98
+ if k in list(state_dict.keys()):
99
+ logging.info(f"Removing key {k} from pretrained checkpoint")
100
+ del state_dict[k]
101
+
102
+ if os.getenv('RoPE') == '1':
103
+ for k in list(state_dict.keys()):
104
+ if 'freqs_cos' in k or 'freqs_sin' in k:
105
+ del state_dict[k]
106
+ return state_dict
107
+
108
+
109
+
110
+ def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True):
111
+ state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
112
+ # detect old format and make compatible with new format
113
+ if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
114
+ state_dict = convert_to_custom_text_state_dict(state_dict)
115
+ if 'text.logit_scale' in state_dict and hasattr(model, 'logit_scale'):
116
+ state_dict['logit_scale'] = state_dict['text.logit_scale']
117
+ del state_dict['text.logit_scale']
118
+
119
+ # resize_clip_pos_embed for CLIP and open CLIP
120
+ if 'visual.positional_embedding' in state_dict:
121
+ resize_clip_pos_embed(state_dict, model)
122
+ # specified to eva_vit_model
123
+ elif 'visual.pos_embed' in state_dict:
124
+ resize_evaclip_pos_embed(state_dict, model)
125
+
126
+ # resize_clip_pos_embed(state_dict, model)
127
+ incompatible_keys = model.load_state_dict(state_dict, strict=strict)
128
+ logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}")
129
+ return incompatible_keys
130
+
131
+ def load_clip_visual_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
132
+ state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
133
+
134
+ for k in list(state_dict.keys()):
135
+ if not k.startswith('visual.'):
136
+ del state_dict[k]
137
+ for k in list(state_dict.keys()):
138
+ if k.startswith('visual.'):
139
+ new_k = k[7:]
140
+ state_dict[new_k] = state_dict[k]
141
+ del state_dict[k]
142
+ return state_dict
143
+
144
+ def load_clip_text_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
145
+ state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
146
+
147
+ for k in list(state_dict.keys()):
148
+ if k.startswith('visual.'):
149
+ del state_dict[k]
150
+ return state_dict
151
+
152
+ def get_pretrained_tag(pretrained_model):
153
+ pretrained_model = pretrained_model.lower()
154
+ if "laion" in pretrained_model or "open_clip" in pretrained_model:
155
+ return "open_clip"
156
+ elif "openai" in pretrained_model:
157
+ return "clip"
158
+ elif "eva" in pretrained_model and "clip" in pretrained_model:
159
+ return "eva_clip"
160
+ else:
161
+ return "other"
162
+
163
+ def load_pretrained_checkpoint(
164
+ model,
165
+ visual_checkpoint_path,
166
+ text_checkpoint_path,
167
+ strict=True,
168
+ visual_model=None,
169
+ text_model=None,
170
+ model_key="model|module|state_dict",
171
+ skip_list=[]):
172
+ visual_tag = get_pretrained_tag(visual_model)
173
+ text_tag = get_pretrained_tag(text_model)
174
+
175
+ logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}")
176
+ visual_incompatible_keys, text_incompatible_keys = None, None
177
+ if visual_checkpoint_path:
178
+ if visual_tag == "eva_clip" or visual_tag == "open_clip":
179
+ visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list)
180
+ elif visual_tag == "clip":
181
+ visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list)
182
+ else:
183
+ visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
184
+
185
+ # resize_clip_pos_embed for CLIP and open CLIP
186
+ if 'positional_embedding' in visual_state_dict:
187
+ resize_visual_pos_embed(visual_state_dict, model)
188
+ # specified to EVA model
189
+ elif 'pos_embed' in visual_state_dict:
190
+ resize_eva_pos_embed(visual_state_dict, model)
191
+
192
+ visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict)
193
+ logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}")
194
+ logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}")
195
+
196
+ if text_checkpoint_path:
197
+ if text_tag == "eva_clip" or text_tag == "open_clip":
198
+ text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list)
199
+ elif text_tag == "clip":
200
+ text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list)
201
+ else:
202
+ text_state_dict = load_state_dict(text_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
203
+
204
+ text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict)
205
+
206
+ logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}")
207
+ logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}")
208
+
209
+ return visual_incompatible_keys, text_incompatible_keys
210
+
211
+ def create_model(
212
+ model_name: str,
213
+ pretrained: Optional[str] = None,
214
+ precision: str = 'fp32',
215
+ device: Union[str, torch.device] = 'cpu',
216
+ jit: bool = False,
217
+ force_quick_gelu: bool = False,
218
+ force_custom_clip: bool = False,
219
+ force_patch_dropout: Optional[float] = None,
220
+ pretrained_image: str = '',
221
+ pretrained_text: str = '',
222
+ pretrained_hf: bool = True,
223
+ pretrained_visual_model: str = None,
224
+ pretrained_text_model: str = None,
225
+ cache_dir: Optional[str] = None,
226
+ skip_list: list = [],
227
+ ):
228
+ model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
229
+ if isinstance(device, str):
230
+ device = torch.device(device)
231
+
232
+ if pretrained and pretrained.lower() == 'openai':
233
+ logging.info(f'Loading pretrained {model_name} from OpenAI.')
234
+ model = load_openai_model(
235
+ model_name,
236
+ precision=precision,
237
+ device=device,
238
+ jit=jit,
239
+ cache_dir=cache_dir,
240
+ )
241
+ else:
242
+ model_cfg = get_model_config(model_name)
243
+ if model_cfg is not None:
244
+ logging.info(f'Loaded {model_name} model config.')
245
+ else:
246
+ logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
247
+ raise RuntimeError(f'Model config for {model_name} not found.')
248
+
249
+ if 'rope' in model_cfg.get('vision_cfg', {}):
250
+ if model_cfg['vision_cfg']['rope']:
251
+ os.environ['RoPE'] = "1"
252
+ else:
253
+ os.environ['RoPE'] = "0"
254
+
255
+ if force_quick_gelu:
256
+ # override for use of QuickGELU on non-OpenAI transformer models
257
+ model_cfg["quick_gelu"] = True
258
+
259
+ if force_patch_dropout is not None:
260
+ # override the default patch dropout value
261
+ model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout
262
+
263
+ cast_dtype = get_cast_dtype(precision)
264
+ custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg'])
265
+
266
+
267
+ if custom_clip:
268
+ if 'hf_model_name' in model_cfg.get('text_cfg', {}):
269
+ model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
270
+ model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
271
+ else:
272
+ model = CLIP(**model_cfg, cast_dtype=cast_dtype)
273
+
274
+ pretrained_cfg = {}
275
+ if pretrained:
276
+ checkpoint_path = ''
277
+ pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
278
+ if pretrained_cfg:
279
+ checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
280
+ elif os.path.exists(pretrained):
281
+ checkpoint_path = pretrained
282
+
283
+ if checkpoint_path:
284
+ logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
285
+ load_checkpoint(model,
286
+ checkpoint_path,
287
+ model_key="model|module|state_dict",
288
+ strict=False
289
+ )
290
+ else:
291
+ error_str = (
292
+ f'Pretrained weights ({pretrained}) not found for model {model_name}.'
293
+ f' Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
294
+ logging.warning(error_str)
295
+ raise RuntimeError(error_str)
296
+ else:
297
+ visual_checkpoint_path = ''
298
+ text_checkpoint_path = ''
299
+
300
+ if pretrained_image:
301
+ pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names
302
+ pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
303
+ if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
304
+ # pretrained weight loading for timm models set via vision_cfg
305
+ model_cfg['vision_cfg']['timm_model_pretrained'] = True
306
+ elif pretrained_image_cfg:
307
+ visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
308
+ elif os.path.exists(pretrained_image):
309
+ visual_checkpoint_path = pretrained_image
310
+ else:
311
+ logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
312
+ raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
313
+
314
+ if pretrained_text:
315
+ pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names
316
+ pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
317
+ if pretrained_text_cfg:
318
+ text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
319
+ elif os.path.exists(pretrained_text):
320
+ text_checkpoint_path = pretrained_text
321
+ else:
322
+ logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
323
+ raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
324
+
325
+ if visual_checkpoint_path:
326
+ logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
327
+ if text_checkpoint_path:
328
+ logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
329
+
330
+ if visual_checkpoint_path or text_checkpoint_path:
331
+ load_pretrained_checkpoint(
332
+ model,
333
+ visual_checkpoint_path,
334
+ text_checkpoint_path,
335
+ strict=False,
336
+ visual_model=pretrained_visual_model,
337
+ text_model=pretrained_text_model,
338
+ model_key="model|module|state_dict",
339
+ skip_list=skip_list
340
+ )
341
+
342
+ if "fp16" in precision or "bf16" in precision:
343
+ logging.info(f'convert precision to {precision}')
344
+ model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16)
345
+
346
+ model.to(device=device)
347
+
348
+ # set image / mean metadata from pretrained_cfg if available, or use default
349
+ model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
350
+ model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
351
+
352
+ if jit:
353
+ model = torch.jit.script(model)
354
+
355
+ return model
356
+
357
+
358
+ def create_model_and_transforms(
359
+ model_name: str,
360
+ pretrained: Optional[str] = None,
361
+ precision: str = 'fp32',
362
+ device: Union[str, torch.device] = 'cpu',
363
+ jit: bool = False,
364
+ force_quick_gelu: bool = False,
365
+ force_custom_clip: bool = False,
366
+ force_patch_dropout: Optional[float] = None,
367
+ pretrained_image: str = '',
368
+ pretrained_text: str = '',
369
+ pretrained_hf: bool = True,
370
+ pretrained_visual_model: str = None,
371
+ pretrained_text_model: str = None,
372
+ image_mean: Optional[Tuple[float, ...]] = None,
373
+ image_std: Optional[Tuple[float, ...]] = None,
374
+ cache_dir: Optional[str] = None,
375
+ skip_list: list = [],
376
+ ):
377
+ model = create_model(
378
+ model_name,
379
+ pretrained,
380
+ precision=precision,
381
+ device=device,
382
+ jit=jit,
383
+ force_quick_gelu=force_quick_gelu,
384
+ force_custom_clip=force_custom_clip,
385
+ force_patch_dropout=force_patch_dropout,
386
+ pretrained_image=pretrained_image,
387
+ pretrained_text=pretrained_text,
388
+ pretrained_hf=pretrained_hf,
389
+ pretrained_visual_model=pretrained_visual_model,
390
+ pretrained_text_model=pretrained_text_model,
391
+ cache_dir=cache_dir,
392
+ skip_list=skip_list,
393
+ )
394
+
395
+ image_mean = image_mean or getattr(model.visual, 'image_mean', None)
396
+ image_std = image_std or getattr(model.visual, 'image_std', None)
397
+ preprocess_train = image_transform(
398
+ model.visual.image_size,
399
+ is_train=True,
400
+ mean=image_mean,
401
+ std=image_std
402
+ )
403
+ preprocess_val = image_transform(
404
+ model.visual.image_size,
405
+ is_train=False,
406
+ mean=image_mean,
407
+ std=image_std
408
+ )
409
+
410
+ return model, preprocess_train, preprocess_val
411
+
412
+
413
+ def create_transforms(
414
+ model_name: str,
415
+ pretrained: Optional[str] = None,
416
+ precision: str = 'fp32',
417
+ device: Union[str, torch.device] = 'cpu',
418
+ jit: bool = False,
419
+ force_quick_gelu: bool = False,
420
+ force_custom_clip: bool = False,
421
+ force_patch_dropout: Optional[float] = None,
422
+ pretrained_image: str = '',
423
+ pretrained_text: str = '',
424
+ pretrained_hf: bool = True,
425
+ pretrained_visual_model: str = None,
426
+ pretrained_text_model: str = None,
427
+ image_mean: Optional[Tuple[float, ...]] = None,
428
+ image_std: Optional[Tuple[float, ...]] = None,
429
+ cache_dir: Optional[str] = None,
430
+ skip_list: list = [],
431
+ ):
432
+ model = create_model(
433
+ model_name,
434
+ pretrained,
435
+ precision=precision,
436
+ device=device,
437
+ jit=jit,
438
+ force_quick_gelu=force_quick_gelu,
439
+ force_custom_clip=force_custom_clip,
440
+ force_patch_dropout=force_patch_dropout,
441
+ pretrained_image=pretrained_image,
442
+ pretrained_text=pretrained_text,
443
+ pretrained_hf=pretrained_hf,
444
+ pretrained_visual_model=pretrained_visual_model,
445
+ pretrained_text_model=pretrained_text_model,
446
+ cache_dir=cache_dir,
447
+ skip_list=skip_list,
448
+ )
449
+
450
+
451
+ image_mean = image_mean or getattr(model.visual, 'image_mean', None)
452
+ image_std = image_std or getattr(model.visual, 'image_std', None)
453
+ preprocess_train = image_transform(
454
+ model.visual.image_size,
455
+ is_train=True,
456
+ mean=image_mean,
457
+ std=image_std
458
+ )
459
+ preprocess_val = image_transform(
460
+ model.visual.image_size,
461
+ is_train=False,
462
+ mean=image_mean,
463
+ std=image_std
464
+ )
465
+ del model
466
+
467
+ return preprocess_train, preprocess_val
468
+
469
+ def create_model_from_pretrained(
470
+ model_name: str,
471
+ pretrained: str,
472
+ precision: str = 'fp32',
473
+ device: Union[str, torch.device] = 'cpu',
474
+ jit: bool = False,
475
+ force_quick_gelu: bool = False,
476
+ force_custom_clip: bool = False,
477
+ force_patch_dropout: Optional[float] = None,
478
+ return_transform: bool = True,
479
+ image_mean: Optional[Tuple[float, ...]] = None,
480
+ image_std: Optional[Tuple[float, ...]] = None,
481
+ cache_dir: Optional[str] = None,
482
+ is_frozen: bool = False,
483
+ ):
484
+ if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained):
485
+ raise RuntimeError(
486
+ f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}.'
487
+ f' Use open_clip.list_pretrained() to find one.')
488
+
489
+ model = create_model(
490
+ model_name,
491
+ pretrained,
492
+ precision=precision,
493
+ device=device,
494
+ jit=jit,
495
+ force_quick_gelu=force_quick_gelu,
496
+ force_custom_clip=force_custom_clip,
497
+ force_patch_dropout=force_patch_dropout,
498
+ cache_dir=cache_dir,
499
+ )
500
+
501
+ if is_frozen:
502
+ for param in model.parameters():
503
+ param.requires_grad = False
504
+
505
+ if not return_transform:
506
+ return model
507
+
508
+ image_mean = image_mean or getattr(model.visual, 'image_mean', None)
509
+ image_std = image_std or getattr(model.visual, 'image_std', None)
510
+ preprocess = image_transform(
511
+ model.visual.image_size,
512
+ is_train=False,
513
+ mean=image_mean,
514
+ std=image_std
515
+ )
516
+
517
+ return model, preprocess
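The registry helpers near the top of this file (`list_models`, `add_model_config`, `get_model_config`) drive all of the loaders above. A short hedged illustration; `extra_configs/` is a hypothetical directory of additional `*.json` model configs:

```python
from eva_clip import list_models, add_model_config, get_model_config

add_model_config("extra_configs/")  # hypothetical path; silently adds nothing if it does not exist
print(list_models())                # includes the EVA01/EVA02 configs under model_configs/
cfg = get_model_config("EVA02-CLIP-L-14-336")
print(sorted(cfg))                  # at least 'embed_dim', 'text_cfg', 'vision_cfg'
```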
eva_clip/hf_configs.py ADDED
@@ -0,0 +1,57 @@
1
+ # HF architecture dict:
2
+ arch_dict = {
3
+ # https://huggingface.co/docs/transformers/model_doc/roberta#roberta
4
+ "roberta": {
5
+ "config_names": {
6
+ "context_length": "max_position_embeddings",
7
+ "vocab_size": "vocab_size",
8
+ "width": "hidden_size",
9
+ "heads": "num_attention_heads",
10
+ "layers": "num_hidden_layers",
11
+ "layer_attr": "layer",
12
+ "token_embeddings_attr": "embeddings"
13
+ },
14
+ "pooler": "mean_pooler",
15
+ },
16
+ # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
17
+ "xlm-roberta": {
18
+ "config_names": {
19
+ "context_length": "max_position_embeddings",
20
+ "vocab_size": "vocab_size",
21
+ "width": "hidden_size",
22
+ "heads": "num_attention_heads",
23
+ "layers": "num_hidden_layers",
24
+ "layer_attr": "layer",
25
+ "token_embeddings_attr": "embeddings"
26
+ },
27
+ "pooler": "mean_pooler",
28
+ },
29
+ # https://huggingface.co/docs/transformers/model_doc/mt5#mt5
30
+ "mt5": {
31
+ "config_names": {
32
+ # unlimited seqlen
33
+ # https://github.com/google-research/text-to-text-transfer-transformer/issues/273
34
+ # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
35
+ "context_length": "",
36
+ "vocab_size": "vocab_size",
37
+ "width": "d_model",
38
+ "heads": "num_heads",
39
+ "layers": "num_layers",
40
+ "layer_attr": "block",
41
+ "token_embeddings_attr": "embed_tokens"
42
+ },
43
+ "pooler": "mean_pooler",
44
+ },
45
+ "bert": {
46
+ "config_names": {
47
+ "context_length": "max_position_embeddings",
48
+ "vocab_size": "vocab_size",
49
+ "width": "hidden_size",
50
+ "heads": "num_attention_heads",
51
+ "layers": "num_hidden_layers",
52
+ "layer_attr": "layer",
53
+ "token_embeddings_attr": "embeddings"
54
+ },
55
+ "pooler": "mean_pooler",
56
+ }
57
+ }
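A small sketch of how this mapping is consumed (mirroring hf_model.py below); the "roberta-base" checkpoint name is only illustrative and requires the transformers hub cache.

from transformers import AutoConfig
from eva_clip.hf_configs import arch_dict

config = AutoConfig.from_pretrained("roberta-base")
names = arch_dict[config.model_type]["config_names"]
width = getattr(config, names["width"])          # hidden_size
layers = getattr(config, names["layers"])        # num_hidden_layers
pooler = arch_dict[config.model_type]["pooler"]  # "mean_pooler"
print(width, layers, pooler)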
eva_clip/hf_model.py ADDED
@@ -0,0 +1,248 @@
1
+ """ huggingface model adapter
2
+
3
+ Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model.
4
+ """
5
+
6
+ import re
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn import functional as F
11
+ from torch import TensorType
12
+ try:
13
+ import transformers
14
+ from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig
15
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
16
+ BaseModelOutputWithPoolingAndCrossAttentions
17
+ except ImportError as e:
18
+ transformers = None
19
+
20
+
21
+ class BaseModelOutput:
22
+ pass
23
+
24
+
25
+ class PretrainedConfig:
26
+ pass
27
+
28
+ from .hf_configs import arch_dict
29
+
30
+ # utils
31
+ def _camel2snake(s):
32
+ return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
33
+
34
+ # TODO: ?last - for gpt-like models
35
+ _POOLERS = {}
36
+
37
+ def register_pooler(cls):
38
+ """Decorator registering pooler class"""
39
+ _POOLERS[_camel2snake(cls.__name__)] = cls
40
+ return cls
41
+
42
+
43
+ @register_pooler
44
+ class MeanPooler(nn.Module):
45
+ """Mean pooling"""
46
+ def forward(self, x:BaseModelOutput, attention_mask:TensorType):
47
+ masked_output = x.last_hidden_state * attention_mask.unsqueeze(-1)
48
+ return masked_output.sum(dim=1) / attention_mask.sum(-1, keepdim=True)
49
+
50
+ @register_pooler
51
+ class MaxPooler(nn.Module):
52
+ """Max pooling"""
53
+ def forward(self, x:BaseModelOutput, attention_mask:TensorType):
54
+ masked_output = x.last_hidden_state.masked_fill(attention_mask.unsqueeze(-1), -torch.inf)
55
+ return masked_output.max(1).values
56
+
57
+ @register_pooler
58
+ class ClsPooler(nn.Module):
59
+ """CLS token pooling"""
60
+ def __init__(self, use_pooler_output=True):
61
+ super().__init__()
62
+ self.cls_token_position = 0
63
+ self.use_pooler_output = use_pooler_output
64
+
65
+ def forward(self, x:BaseModelOutput, attention_mask:TensorType):
66
+
67
+ if (self.use_pooler_output and
68
+ isinstance(x, (BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions)) and
69
+ (x.pooler_output is not None)
70
+ ):
71
+ return x.pooler_output
72
+
73
+ return x.last_hidden_state[:, self.cls_token_position, :]
74
+
75
+ class HFTextEncoder(nn.Module):
76
+ """HuggingFace model adapter"""
77
+ def __init__(
78
+ self,
79
+ model_name_or_path: str,
80
+ output_dim: int,
81
+ tokenizer_name: str = None,
82
+ config: PretrainedConfig = None,
83
+ pooler_type: str = None,
84
+ proj: str = None,
85
+ pretrained: bool = True,
86
+ masked_language_modeling: bool = False):
87
+ super().__init__()
88
+
89
+ self.output_dim = output_dim
90
+
91
+ # TODO: find better way to get this information
92
+ uses_transformer_pooler = (pooler_type == "cls_pooler")
93
+
94
+ if transformers is None:
95
+ raise RuntimeError("Please `pip install transformers` to use pre-trained HuggingFace models")
96
+ if config is None:
97
+ self.config = AutoConfig.from_pretrained(model_name_or_path)
98
+ if masked_language_modeling:
99
+ create_func, model_args = (AutoModelForMaskedLM.from_pretrained, model_name_or_path) if pretrained else (
100
+ AutoModelForMaskedLM.from_config, self.config)
101
+ else:
102
+ create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (
103
+ AutoModel.from_config, self.config)
104
+ # TODO: do all model configs have this attribute? PretrainedConfig does so yes??
105
+ if hasattr(self.config, "is_encoder_decoder") and self.config.is_encoder_decoder:
106
+ self.transformer = create_func(model_args)
107
+ self.transformer = self.transformer.encoder
108
+ else:
109
+ self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)
110
+ else:
111
+ self.config = config
112
+ if masked_language_modeling:
113
+ self.transformer = AutoModelForMaskedLM.from_config(config)
114
+ else:
115
+ self.transformer = AutoModel.from_config(config)
116
+
117
+ if pooler_type is None: # get default arch pooler
118
+ self.pooler = _POOLERS[(arch_dict[self.config.model_type]["pooler"])]()
119
+ else:
120
+ self.pooler = _POOLERS[pooler_type]()
121
+
122
+ d_model = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["width"])
123
+ if (d_model == output_dim) and (proj is None): # do we always need a proj?
124
+ self.proj = nn.Identity()
125
+ elif proj == 'linear':
126
+ self.proj = nn.Linear(d_model, output_dim, bias=False)
127
+ elif proj == 'mlp':
128
+ hidden_size = (d_model + output_dim) // 2
129
+ self.proj = nn.Sequential(
130
+ nn.Linear(d_model, hidden_size, bias=False),
131
+ nn.GELU(),
132
+ nn.Linear(hidden_size, output_dim, bias=False),
133
+ )
134
+
135
+ # self.itm_proj = nn.Linear(d_model, 2, bias=False)
136
+ # self.mlm_proj = nn.Linear(d_model, self.config.vocab_size), bias=False)
137
+ self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
138
+
139
+ # def forward_itm(self, x:TensorType, image_embeds:TensorType) -> TensorType:
140
+ # image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(x.device)
141
+ # attn_mask = (x != self.config.pad_token_id).long()
142
+ # out = self.transformer(
143
+ # input_ids=x,
144
+ # attention_mask=attn_mask,
145
+ # encoder_hidden_states = image_embeds,
146
+ # encoder_attention_mask = image_atts,
147
+ # )
148
+ # pooled_out = self.pooler(out, attn_mask)
149
+
150
+ # return self.itm_proj(pooled_out)
151
+
152
+ def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
153
+ if masked_indices is None:
154
+ masked_indices = torch.bernoulli(probability_matrix).bool()
155
+
156
+ masked_indices[input_ids == self.tokenizer.pad_token_id] = False
157
+ masked_indices[input_ids == self.tokenizer.cls_token_id] = False
158
+
159
+ if targets is not None:
160
+ targets[~masked_indices] = -100 # We only compute loss on masked tokens
161
+
162
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
163
+ indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
164
+ input_ids[indices_replaced] = self.tokenizer.mask_token_id
165
+
166
+ # 10% of the time, we replace masked input tokens with random word
167
+ indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
168
+ random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
169
+ input_ids[indices_random] = random_words[indices_random]
170
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
171
+
172
+ if targets is not None:
173
+ return input_ids, targets
174
+ else:
175
+ return input_ids
176
+
177
+ def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25):
178
+ labels = input_ids.clone()
179
+ attn_mask = (input_ids != self.config.pad_token_id).long()
180
+ image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(input_ids.device)
181
+ vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"])
182
+ probability_matrix = torch.full(labels.shape, mlm_probability)
183
+ input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels,
184
+ probability_matrix = probability_matrix)
185
+ mlm_output = self.transformer(input_ids,
186
+ attention_mask = attn_mask,
187
+ encoder_hidden_states = image_embeds,
188
+ encoder_attention_mask = image_atts,
189
+ return_dict = True,
190
+ labels = labels,
191
+ )
192
+ return mlm_output.loss
193
+ # mlm_output = self.transformer(input_ids,
194
+ # attention_mask = attn_mask,
195
+ # encoder_hidden_states = image_embeds,
196
+ # encoder_attention_mask = image_atts,
197
+ # return_dict = True,
198
+ # ).last_hidden_state
199
+ # logits = self.mlm_proj(mlm_output)
200
+
201
+ # # logits = logits[:, :-1, :].contiguous().view(-1, vocab_size)
202
+ # logits = logits[:, 1:, :].contiguous().view(-1, vocab_size)
203
+ # labels = labels[:, 1:].contiguous().view(-1)
204
+
205
+ # mlm_loss = F.cross_entropy(
206
+ # logits,
207
+ # labels,
208
+ # # label_smoothing=0.1,
209
+ # )
210
+ # return mlm_loss
211
+
212
+
213
+ def forward(self, x:TensorType) -> TensorType:
214
+ attn_mask = (x != self.config.pad_token_id).long()
215
+ out = self.transformer(input_ids=x, attention_mask=attn_mask)
216
+ pooled_out = self.pooler(out, attn_mask)
217
+
218
+ return self.proj(pooled_out)
219
+
220
+ def lock(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
221
+ if not unlocked_layers: # full freezing
222
+ for n, p in self.transformer.named_parameters():
223
+ p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
224
+ return
225
+
226
+ encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
227
+ layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
228
+ print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
229
+ embeddings = getattr(
230
+ self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
231
+ modules = [embeddings, *layer_list][:-unlocked_layers]
232
+ # freeze layers
233
+ for module in modules:
234
+ for n, p in module.named_parameters():
235
+ p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
236
+
237
+
238
+ @torch.jit.ignore
239
+ def set_grad_checkpointing(self, enable=True):
240
+ self.transformer.gradient_checkpointing_enable()
241
+
242
+ def get_num_layers(self):
243
+ encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
244
+ layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
245
+ return len(layer_list)
246
+
247
+ def init_parameters(self):
248
+ pass
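A hedged sketch of building the HF text tower directly, the way _build_text_tower does in model.py; the "xlm-roberta-base" name, 512-dim projection and prompt are illustrative, and the config/tokenizer are fetched from the HuggingFace hub.

import torch
from eva_clip.hf_model import HFTextEncoder

text_tower = HFTextEncoder(
    "xlm-roberta-base",
    output_dim=512,
    tokenizer_name="xlm-roberta-base",
    pooler_type="mean_pooler",
    proj="mlp",
    pretrained=False,  # random init, so no weight download in this sketch
)
tokens = text_tower.tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
with torch.no_grad():
    emb = text_tower(tokens["input_ids"])  # (1, 512) pooled, projected text features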
eva_clip/loss.py ADDED
@@ -0,0 +1,138 @@
1
+ import math
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.nn import functional as F
5
+
6
+ try:
7
+ import torch.distributed.nn
8
+ from torch import distributed as dist
9
+ has_distributed = True
10
+ except ImportError:
11
+ has_distributed = False
12
+
13
+ try:
14
+ import horovod.torch as hvd
15
+ except ImportError:
16
+ hvd = None
17
+
18
+ from timm.loss import LabelSmoothingCrossEntropy
19
+
20
+
21
+ def gather_features(
22
+ image_features,
23
+ text_features,
24
+ local_loss=False,
25
+ gather_with_grad=False,
26
+ rank=0,
27
+ world_size=1,
28
+ use_horovod=False
29
+ ):
30
+ assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
31
+ if use_horovod:
32
+ assert hvd is not None, 'Please install horovod'
33
+ if gather_with_grad:
34
+ all_image_features = hvd.allgather(image_features)
35
+ all_text_features = hvd.allgather(text_features)
36
+ else:
37
+ with torch.no_grad():
38
+ all_image_features = hvd.allgather(image_features)
39
+ all_text_features = hvd.allgather(text_features)
40
+ if not local_loss:
41
+ # ensure grads for local rank when all_* features don't have a gradient
42
+ gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
43
+ gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
44
+ gathered_image_features[rank] = image_features
45
+ gathered_text_features[rank] = text_features
46
+ all_image_features = torch.cat(gathered_image_features, dim=0)
47
+ all_text_features = torch.cat(gathered_text_features, dim=0)
48
+ else:
49
+ # We gather tensors from all gpus
50
+ if gather_with_grad:
51
+ all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
52
+ all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
53
+ # all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features, async_op=True), dim=0)
54
+ # all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features, async_op=True), dim=0)
55
+ else:
56
+ gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
57
+ gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
58
+ dist.all_gather(gathered_image_features, image_features)
59
+ dist.all_gather(gathered_text_features, text_features)
60
+ if not local_loss:
61
+ # ensure grads for local rank when all_* features don't have a gradient
62
+ gathered_image_features[rank] = image_features
63
+ gathered_text_features[rank] = text_features
64
+ all_image_features = torch.cat(gathered_image_features, dim=0)
65
+ all_text_features = torch.cat(gathered_text_features, dim=0)
66
+
67
+ return all_image_features, all_text_features
68
+
69
+
70
+ class ClipLoss(nn.Module):
71
+
72
+ def __init__(
73
+ self,
74
+ local_loss=False,
75
+ gather_with_grad=False,
76
+ cache_labels=False,
77
+ rank=0,
78
+ world_size=1,
79
+ use_horovod=False,
80
+ smoothing=0.,
81
+ ):
82
+ super().__init__()
83
+ self.local_loss = local_loss
84
+ self.gather_with_grad = gather_with_grad
85
+ self.cache_labels = cache_labels
86
+ self.rank = rank
87
+ self.world_size = world_size
88
+ self.use_horovod = use_horovod
89
+ self.label_smoothing_cross_entropy = LabelSmoothingCrossEntropy(smoothing=smoothing) if smoothing > 0 else None
90
+
91
+ # cache state
92
+ self.prev_num_logits = 0
93
+ self.labels = {}
94
+
95
+ def forward(self, image_features, text_features, logit_scale=1.):
96
+ device = image_features.device
97
+ if self.world_size > 1:
98
+ all_image_features, all_text_features = gather_features(
99
+ image_features, text_features,
100
+ self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
101
+
102
+ if self.local_loss:
103
+ logits_per_image = logit_scale * image_features @ all_text_features.T
104
+ logits_per_text = logit_scale * text_features @ all_image_features.T
105
+ else:
106
+ logits_per_image = logit_scale * all_image_features @ all_text_features.T
107
+ logits_per_text = logits_per_image.T
108
+ else:
109
+ logits_per_image = logit_scale * image_features @ text_features.T
110
+ logits_per_text = logit_scale * text_features @ image_features.T
111
+ # calculated ground-truth and cache if enabled
112
+ num_logits = logits_per_image.shape[0]
113
+ if self.prev_num_logits != num_logits or device not in self.labels:
114
+ labels = torch.arange(num_logits, device=device, dtype=torch.long)
115
+ if self.world_size > 1 and self.local_loss:
116
+ labels = labels + num_logits * self.rank
117
+ if self.cache_labels:
118
+ self.labels[device] = labels
119
+ self.prev_num_logits = num_logits
120
+ else:
121
+ labels = self.labels[device]
122
+
123
+ if self.label_smoothing_cross_entropy:
124
+ total_loss = (
125
+ self.label_smoothing_cross_entropy(logits_per_image, labels) +
126
+ self.label_smoothing_cross_entropy(logits_per_text, labels)
127
+ ) / 2
128
+ else:
129
+ total_loss = (
130
+ F.cross_entropy(logits_per_image, labels) +
131
+ F.cross_entropy(logits_per_text, labels)
132
+ ) / 2
133
+
134
+ acc = None
135
+ i2t_acc = (logits_per_image.argmax(-1) == labels).sum() / len(logits_per_image)
136
+ t2i_acc = (logits_per_text.argmax(-1) == labels).sum() / len(logits_per_text)
137
+ acc = {"i2t": i2t_acc, "t2i": t2i_acc}
138
+ return total_loss, acc
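A self-contained smoke test of ClipLoss in the single-process case (world_size=1), assuming timm is installed since loss.py imports LabelSmoothingCrossEntropy at module level.

import torch
import torch.nn.functional as F
from eva_clip.loss import ClipLoss

loss_fn = ClipLoss(cache_labels=True)
image_features = F.normalize(torch.randn(8, 512), dim=-1)
text_features = F.normalize(torch.randn(8, 512), dim=-1)
loss, acc = loss_fn(image_features, text_features, logit_scale=100.0)
print(loss.item(), acc["i2t"].item(), acc["t2i"].item())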
eva_clip/model.py ADDED
@@ -0,0 +1,439 @@
1
+ """ CLIP Model
2
+
3
+ Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
+ """
5
+ import os
6
+ from dataclasses import dataclass
7
+ from typing import Optional, Tuple, Union
8
+ from functools import partial
9
+
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from torch import nn
14
+
15
+ try:
16
+ from .hf_model import HFTextEncoder
17
+ except:
18
+ HFTextEncoder = None
19
+ from .modified_resnet import ModifiedResNet
20
+ from .timm_model import TimmModel
21
+ from .eva_vit_model import EVAVisionTransformer
22
+ from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
23
+
24
+ try:
25
+ from apex.normalization import FusedLayerNorm
26
+ except:
27
+ FusedLayerNorm = LayerNorm
28
+ print("Please 'pip install apex'")
29
+
30
+ try:
31
+ import xformers.ops as xops
32
+ except ImportError:
33
+ xops = None
34
+ print("Please 'pip install xformers'")
35
+
36
+ @dataclass
37
+ class CLIPVisionCfg:
38
+ layers: Union[Tuple[int, int, int, int], int] = 12
39
+ width: int = 768
40
+ head_width: int = 64
41
+ mlp_ratio: float = 4.0
42
+ patch_size: int = 16
43
+ image_size: Union[Tuple[int, int], int] = 224
44
+ ls_init_value: Optional[float] = None # layer scale initial value
45
+ patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
46
+ global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
47
+ drop_path_rate: Optional[float] = None # drop path rate
48
+ timm_model_name: str = None # a valid model name overrides layers, width, patch_size
49
+ timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
50
+ timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
51
+ timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
52
+ timm_proj_bias: bool = False # enable bias final projection
53
+ eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size
54
+ qkv_bias: bool = True
55
+ fusedLN: bool = False
56
+ xattn: bool = False
57
+ postnorm: bool = False
58
+ rope: bool = False
59
+ pt_hw_seq_len: int = 16 # 224/14
60
+ intp_freq: bool = False
61
+ naiveswiglu: bool = False
62
+ subln: bool = False
63
+
64
+
65
+ @dataclass
66
+ class CLIPTextCfg:
67
+ context_length: int = 77
68
+ vocab_size: int = 49408
69
+ width: int = 512
70
+ heads: int = 8
71
+ layers: int = 12
72
+ ls_init_value: Optional[float] = None # layer scale initial value
73
+ hf_model_name: str = None
74
+ hf_tokenizer_name: str = None
75
+ hf_model_pretrained: bool = True
76
+ proj: str = 'mlp'
77
+ pooler_type: str = 'mean_pooler'
78
+ masked_language_modeling: bool = False
79
+ fusedLN: bool = False
80
+ xattn: bool = False
81
+ attn_mask: bool = True
82
+
83
+ def get_cast_dtype(precision: str):
84
+ cast_dtype = None
85
+ if precision == 'bf16':
86
+ cast_dtype = torch.bfloat16
87
+ elif precision == 'fp16':
88
+ cast_dtype = torch.float16
89
+ return cast_dtype
90
+
91
+
92
+ def _build_vision_tower(
93
+ embed_dim: int,
94
+ vision_cfg: CLIPVisionCfg,
95
+ quick_gelu: bool = False,
96
+ cast_dtype: Optional[torch.dtype] = None
97
+ ):
98
+ if isinstance(vision_cfg, dict):
99
+ vision_cfg = CLIPVisionCfg(**vision_cfg)
100
+
101
+ # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
102
+ # memory efficient in recent PyTorch releases (>= 1.10).
103
+ # NOTE: timm models always use native GELU regardless of quick_gelu flag.
104
+ act_layer = QuickGELU if quick_gelu else nn.GELU
105
+
106
+ if vision_cfg.eva_model_name:
107
+ vision_heads = vision_cfg.width // vision_cfg.head_width
108
+ norm_layer = LayerNorm
109
+
110
+ visual = EVAVisionTransformer(
111
+ img_size=vision_cfg.image_size,
112
+ patch_size=vision_cfg.patch_size,
113
+ num_classes=embed_dim,
114
+ use_mean_pooling=vision_cfg.global_average_pool, #False
115
+ init_values=vision_cfg.ls_init_value,
116
+ patch_dropout=vision_cfg.patch_dropout,
117
+ embed_dim=vision_cfg.width,
118
+ depth=vision_cfg.layers,
119
+ num_heads=vision_heads,
120
+ mlp_ratio=vision_cfg.mlp_ratio,
121
+ qkv_bias=vision_cfg.qkv_bias,
122
+ drop_path_rate=vision_cfg.drop_path_rate,
123
+ norm_layer= partial(FusedLayerNorm, eps=1e-6) if vision_cfg.fusedLN else partial(norm_layer, eps=1e-6),
124
+ xattn=vision_cfg.xattn,
125
+ rope=vision_cfg.rope,
126
+ postnorm=vision_cfg.postnorm,
127
+ pt_hw_seq_len= vision_cfg.pt_hw_seq_len, # 224/14
128
+ intp_freq= vision_cfg.intp_freq,
129
+ naiveswiglu= vision_cfg.naiveswiglu,
130
+ subln= vision_cfg.subln
131
+ )
132
+ elif vision_cfg.timm_model_name:
133
+ visual = TimmModel(
134
+ vision_cfg.timm_model_name,
135
+ pretrained=vision_cfg.timm_model_pretrained,
136
+ pool=vision_cfg.timm_pool,
137
+ proj=vision_cfg.timm_proj,
138
+ proj_bias=vision_cfg.timm_proj_bias,
139
+ embed_dim=embed_dim,
140
+ image_size=vision_cfg.image_size
141
+ )
142
+ act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
143
+ elif isinstance(vision_cfg.layers, (tuple, list)):
144
+ vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
145
+ visual = ModifiedResNet(
146
+ layers=vision_cfg.layers,
147
+ output_dim=embed_dim,
148
+ heads=vision_heads,
149
+ image_size=vision_cfg.image_size,
150
+ width=vision_cfg.width
151
+ )
152
+ else:
153
+ vision_heads = vision_cfg.width // vision_cfg.head_width
154
+ norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
155
+ visual = VisionTransformer(
156
+ image_size=vision_cfg.image_size,
157
+ patch_size=vision_cfg.patch_size,
158
+ width=vision_cfg.width,
159
+ layers=vision_cfg.layers,
160
+ heads=vision_heads,
161
+ mlp_ratio=vision_cfg.mlp_ratio,
162
+ ls_init_value=vision_cfg.ls_init_value,
163
+ patch_dropout=vision_cfg.patch_dropout,
164
+ global_average_pool=vision_cfg.global_average_pool,
165
+ output_dim=embed_dim,
166
+ act_layer=act_layer,
167
+ norm_layer=norm_layer,
168
+ )
169
+
170
+ return visual
171
+
172
+
173
+ def _build_text_tower(
174
+ embed_dim: int,
175
+ text_cfg: CLIPTextCfg,
176
+ quick_gelu: bool = False,
177
+ cast_dtype: Optional[torch.dtype] = None,
178
+ ):
179
+ if isinstance(text_cfg, dict):
180
+ text_cfg = CLIPTextCfg(**text_cfg)
181
+
182
+ if text_cfg.hf_model_name:
183
+ text = HFTextEncoder(
184
+ text_cfg.hf_model_name,
185
+ output_dim=embed_dim,
186
+ tokenizer_name=text_cfg.hf_tokenizer_name,
187
+ proj=text_cfg.proj,
188
+ pooler_type=text_cfg.pooler_type,
189
+ masked_language_modeling=text_cfg.masked_language_modeling
190
+ )
191
+ else:
192
+ act_layer = QuickGELU if quick_gelu else nn.GELU
193
+ norm_layer = LayerNorm
194
+
195
+ text = TextTransformer(
196
+ context_length=text_cfg.context_length,
197
+ vocab_size=text_cfg.vocab_size,
198
+ width=text_cfg.width,
199
+ heads=text_cfg.heads,
200
+ layers=text_cfg.layers,
201
+ ls_init_value=text_cfg.ls_init_value,
202
+ output_dim=embed_dim,
203
+ act_layer=act_layer,
204
+ norm_layer= FusedLayerNorm if text_cfg.fusedLN else norm_layer,
205
+ xattn=text_cfg.xattn,
206
+ attn_mask=text_cfg.attn_mask,
207
+ )
208
+ return text
209
+
210
+ class CLIP(nn.Module):
211
+ def __init__(
212
+ self,
213
+ embed_dim: int,
214
+ vision_cfg: CLIPVisionCfg,
215
+ text_cfg: CLIPTextCfg,
216
+ quick_gelu: bool = False,
217
+ cast_dtype: Optional[torch.dtype] = None,
218
+ ):
219
+ super().__init__()
220
+ self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
221
+
222
+ text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
223
+ self.transformer = text.transformer
224
+ self.vocab_size = text.vocab_size
225
+ self.token_embedding = text.token_embedding
226
+ self.positional_embedding = text.positional_embedding
227
+ self.ln_final = text.ln_final
228
+ self.text_projection = text.text_projection
229
+ self.register_buffer('attn_mask', text.attn_mask, persistent=False)
230
+
231
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
232
+
233
+ def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
234
+ # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
235
+ self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
236
+
237
+ @torch.jit.ignore
238
+ def set_grad_checkpointing(self, enable=True):
239
+ self.visual.set_grad_checkpointing(enable)
240
+ self.transformer.grad_checkpointing = enable
241
+
242
+ @torch.jit.ignore
243
+ def no_weight_decay(self):
244
+ return {'logit_scale'}
245
+
246
+ def encode_image(self, image, normalize: bool = False):
247
+ features = self.visual(image)
248
+ return F.normalize(features, dim=-1) if normalize else features
249
+
250
+ def encode_text(self, text, normalize: bool = False):
251
+ cast_dtype = self.transformer.get_cast_dtype()
252
+
253
+ x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
254
+
255
+ x = x + self.positional_embedding.to(cast_dtype)
256
+ x = x.permute(1, 0, 2) # NLD -> LND
257
+ x = self.transformer(x, attn_mask=self.attn_mask)
258
+ x = x.permute(1, 0, 2) # LND -> NLD
259
+ x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]
260
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
261
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
262
+ return F.normalize(x, dim=-1) if normalize else x
263
+
264
+ def forward(self, image, text):
265
+ image_features = self.encode_image(image, normalize=True)
266
+ text_features = self.encode_text(text, normalize=True)
267
+ return image_features, text_features, self.logit_scale.exp()
268
+
269
+
270
+ class CustomCLIP(nn.Module):
271
+ def __init__(
272
+ self,
273
+ embed_dim: int,
274
+ vision_cfg: CLIPVisionCfg,
275
+ text_cfg: CLIPTextCfg,
276
+ quick_gelu: bool = False,
277
+ cast_dtype: Optional[torch.dtype] = None,
278
+ itm_task: bool = False,
279
+ ):
280
+ super().__init__()
281
+ self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
282
+ self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
283
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
284
+
285
+ def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
286
+ # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
287
+ self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
288
+
289
+ def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
290
+ self.text.lock(unlocked_layers, freeze_layer_norm)
291
+
292
+ @torch.jit.ignore
293
+ def set_grad_checkpointing(self, enable=True):
294
+ self.visual.set_grad_checkpointing(enable)
295
+ self.text.set_grad_checkpointing(enable)
296
+
297
+ @torch.jit.ignore
298
+ def no_weight_decay(self):
299
+ return {'logit_scale'}
300
+
301
+ def encode_image(self, image, normalize: bool = False):
302
+ features = self.visual(image)
303
+ return F.normalize(features, dim=-1) if normalize else features
304
+
305
+ def encode_text(self, text, normalize: bool = False):
306
+ features = self.text(text)
307
+ return F.normalize(features, dim=-1) if normalize else features
308
+
309
+ def forward(self, image, text):
310
+ image_features = self.encode_image(image, normalize=True)
311
+ text_features = self.encode_text(text, normalize=True)
312
+ return image_features, text_features, self.logit_scale.exp()
313
+
314
+
315
+ def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
316
+ """Convert applicable model parameters to low-precision (bf16 or fp16)"""
317
+
318
+ def _convert_weights(l):
319
+
320
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
321
+ l.weight.data = l.weight.data.to(dtype)
322
+ if l.bias is not None:
323
+ l.bias.data = l.bias.data.to(dtype)
324
+
325
+ if isinstance(l, (nn.MultiheadAttention, Attention)):
326
+ for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
327
+ tensor = getattr(l, attr, None)
328
+ if tensor is not None:
329
+ tensor.data = tensor.data.to(dtype)
330
+
331
+ if isinstance(l, nn.Parameter):
332
+ l.data = l.data.to(dtype)
333
+
334
+ for name in ["text_projection", "proj"]:
335
+ if hasattr(l, name) and isinstance(l, nn.Parameter):
336
+ attr = getattr(l, name, None)
337
+ if attr is not None:
338
+ attr.data = attr.data.to(dtype)
339
+
340
+ model.apply(_convert_weights)
341
+
342
+
343
+ convert_weights_to_fp16 = convert_weights_to_lp # backwards compat
344
+
345
+
346
+ # used to maintain checkpoint compatibility
347
+ def convert_to_custom_text_state_dict(state_dict: dict):
348
+ if 'text_projection' in state_dict:
349
+ # old format state_dict, move text tower -> .text
350
+ new_state_dict = {}
351
+ for k, v in state_dict.items():
352
+ if any(k.startswith(p) for p in (
353
+ 'text_projection',
354
+ 'positional_embedding',
355
+ 'token_embedding',
356
+ 'transformer',
357
+ 'ln_final',
358
+ 'logit_scale'
359
+ )):
360
+ k = 'text.' + k
361
+ new_state_dict[k] = v
362
+ return new_state_dict
363
+ return state_dict
364
+
365
+
366
+ def build_model_from_openai_state_dict(
367
+ state_dict: dict,
368
+ quick_gelu=True,
369
+ cast_dtype=torch.float16,
370
+ ):
371
+ vit = "visual.proj" in state_dict
372
+
373
+ if vit:
374
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
375
+ vision_layers = len(
376
+ [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
377
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
378
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
379
+ image_size = vision_patch_size * grid_size
380
+ else:
381
+ counts: list = [
382
+ len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
383
+ vision_layers = tuple(counts)
384
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
385
+ output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
386
+ vision_patch_size = None
387
+ assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
388
+ image_size = output_width * 32
389
+
390
+ embed_dim = state_dict["text_projection"].shape[1]
391
+ context_length = state_dict["positional_embedding"].shape[0]
392
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
393
+ transformer_width = state_dict["ln_final.weight"].shape[0]
394
+ transformer_heads = transformer_width // 64
395
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
396
+
397
+ vision_cfg = CLIPVisionCfg(
398
+ layers=vision_layers,
399
+ width=vision_width,
400
+ patch_size=vision_patch_size,
401
+ image_size=image_size,
402
+ )
403
+ text_cfg = CLIPTextCfg(
404
+ context_length=context_length,
405
+ vocab_size=vocab_size,
406
+ width=transformer_width,
407
+ heads=transformer_heads,
408
+ layers=transformer_layers
409
+ )
410
+ model = CLIP(
411
+ embed_dim,
412
+ vision_cfg=vision_cfg,
413
+ text_cfg=text_cfg,
414
+ quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU
415
+ cast_dtype=cast_dtype,
416
+ )
417
+
418
+ for key in ["input_resolution", "context_length", "vocab_size"]:
419
+ state_dict.pop(key, None)
420
+
421
+ convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16
422
+ model.load_state_dict(state_dict)
423
+ return model.eval()
424
+
425
+
426
+ def trace_model(model, batch_size=256, device=torch.device('cpu')):
427
+ model.eval()
428
+ image_size = model.visual.image_size
429
+ example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
430
+ example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
431
+ model = torch.jit.trace_module(
432
+ model,
433
+ inputs=dict(
434
+ forward=(example_images, example_text),
435
+ encode_text=(example_text,),
436
+ encode_image=(example_images,)
437
+ ))
438
+ model.visual.image_size = image_size
439
+ return model
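A short sketch of the precision helpers defined above (the Linear layer is just a stand-in module), assuming eva_clip's optional dependencies resolve so that eva_clip.model imports cleanly.

import torch.nn as nn
from eva_clip.model import get_cast_dtype, convert_weights_to_lp

cast_dtype = get_cast_dtype("fp16")       # torch.float16 (None for fp32)
layer = nn.Linear(8, 4)
convert_weights_to_lp(layer, dtype=cast_dtype)
print(layer.weight.dtype)                 # torch.float16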
eva_clip/model_configs/EVA01-CLIP-B-16.json ADDED
@@ -0,0 +1,19 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 12,
6
+ "width": 768,
7
+ "patch_size": 16,
8
+ "eva_model_name": "eva-clip-b-16",
9
+ "ls_init_value": 0.1,
10
+ "drop_path_rate": 0.0
11
+ },
12
+ "text_cfg": {
13
+ "context_length": 77,
14
+ "vocab_size": 49408,
15
+ "width": 512,
16
+ "heads": 8,
17
+ "layers": 12
18
+ }
19
+ }
eva_clip/model_configs/EVA01-CLIP-g-14-plus.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 40,
6
+ "width": 1408,
7
+ "head_width": 88,
8
+ "mlp_ratio": 4.3637,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-g-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "fusedLN": true
14
+ },
15
+ "text_cfg": {
16
+ "context_length": 77,
17
+ "vocab_size": 49408,
18
+ "width": 1024,
19
+ "heads": 16,
20
+ "layers": 24,
21
+ "xattn": false,
22
+ "fusedLN": true
23
+ }
24
+ }
eva_clip/model_configs/EVA01-CLIP-g-14.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 40,
6
+ "width": 1408,
7
+ "head_width": 88,
8
+ "mlp_ratio": 4.3637,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-g-14-x",
11
+ "drop_path_rate": 0.4,
12
+ "xattn": true,
13
+ "fusedLN": true
14
+ },
15
+ "text_cfg": {
16
+ "context_length": 77,
17
+ "vocab_size": 49408,
18
+ "width": 768,
19
+ "heads": 12,
20
+ "layers": 12,
21
+ "xattn": false,
22
+ "fusedLN": true
23
+ }
24
+ }
eva_clip/model_configs/EVA02-CLIP-B-16.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 12,
6
+ "width": 768,
7
+ "head_width": 64,
8
+ "patch_size": 16,
9
+ "mlp_ratio": 2.6667,
10
+ "eva_model_name": "eva-clip-b-16-X",
11
+ "drop_path_rate": 0.0,
12
+ "xattn": true,
13
+ "fusedLN": true,
14
+ "rope": true,
15
+ "pt_hw_seq_len": 16,
16
+ "intp_freq": true,
17
+ "naiveswiglu": true,
18
+ "subln": true
19
+ },
20
+ "text_cfg": {
21
+ "context_length": 77,
22
+ "vocab_size": 49408,
23
+ "width": 512,
24
+ "heads": 8,
25
+ "layers": 12,
26
+ "xattn": true,
27
+ "fusedLN": true
28
+ }
29
+ }
eva_clip/model_configs/EVA02-CLIP-L-14-336.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "embed_dim": 768,
3
+ "vision_cfg": {
4
+ "image_size": 336,
5
+ "layers": 24,
6
+ "width": 1024,
7
+ "drop_path_rate": 0,
8
+ "head_width": 64,
9
+ "mlp_ratio": 2.6667,
10
+ "patch_size": 14,
11
+ "eva_model_name": "eva-clip-l-14-336",
12
+ "xattn": true,
13
+ "fusedLN": true,
14
+ "rope": true,
15
+ "pt_hw_seq_len": 16,
16
+ "intp_freq": true,
17
+ "naiveswiglu": true,
18
+ "subln": true
19
+ },
20
+ "text_cfg": {
21
+ "context_length": 77,
22
+ "vocab_size": 49408,
23
+ "width": 768,
24
+ "heads": 12,
25
+ "layers": 12,
26
+ "xattn": false,
27
+ "fusedLN": true
28
+ }
29
+ }
eva_clip/model_configs/EVA02-CLIP-L-14.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "embed_dim": 768,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 24,
6
+ "width": 1024,
7
+ "drop_path_rate": 0,
8
+ "head_width": 64,
9
+ "mlp_ratio": 2.6667,
10
+ "patch_size": 14,
11
+ "eva_model_name": "eva-clip-l-14",
12
+ "xattn": true,
13
+ "fusedLN": true,
14
+ "rope": true,
15
+ "pt_hw_seq_len": 16,
16
+ "intp_freq": true,
17
+ "naiveswiglu": true,
18
+ "subln": true
19
+ },
20
+ "text_cfg": {
21
+ "context_length": 77,
22
+ "vocab_size": 49408,
23
+ "width": 768,
24
+ "heads": 12,
25
+ "layers": 12,
26
+ "xattn": false,
27
+ "fusedLN": true
28
+ }
29
+ }
eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 64,
6
+ "width": 1792,
7
+ "head_width": 112,
8
+ "mlp_ratio": 8.571428571428571,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-4b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": true,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1280,
20
+ "heads": 20,
21
+ "layers": 32,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
eva_clip/model_configs/EVA02-CLIP-bigE-14.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 64,
6
+ "width": 1792,
7
+ "head_width": 112,
8
+ "mlp_ratio": 8.571428571428571,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-4b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": true,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1024,
20
+ "heads": 16,
21
+ "layers": 24,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
eva_clip/modified_resnet.py ADDED
@@ -0,0 +1,181 @@
1
+ from collections import OrderedDict
2
+
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from eva_clip.utils import freeze_batch_norm_2d
8
+
9
+
10
+ class Bottleneck(nn.Module):
11
+ expansion = 4
12
+
13
+ def __init__(self, inplanes, planes, stride=1):
14
+ super().__init__()
15
+
16
+ # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
17
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
18
+ self.bn1 = nn.BatchNorm2d(planes)
19
+ self.act1 = nn.ReLU(inplace=True)
20
+
21
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
22
+ self.bn2 = nn.BatchNorm2d(planes)
23
+ self.act2 = nn.ReLU(inplace=True)
24
+
25
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
26
+
27
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
28
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
29
+ self.act3 = nn.ReLU(inplace=True)
30
+
31
+ self.downsample = None
32
+ self.stride = stride
33
+
34
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
35
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
36
+ self.downsample = nn.Sequential(OrderedDict([
37
+ ("-1", nn.AvgPool2d(stride)),
38
+ ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
39
+ ("1", nn.BatchNorm2d(planes * self.expansion))
40
+ ]))
41
+
42
+ def forward(self, x: torch.Tensor):
43
+ identity = x
44
+
45
+ out = self.act1(self.bn1(self.conv1(x)))
46
+ out = self.act2(self.bn2(self.conv2(out)))
47
+ out = self.avgpool(out)
48
+ out = self.bn3(self.conv3(out))
49
+
50
+ if self.downsample is not None:
51
+ identity = self.downsample(x)
52
+
53
+ out += identity
54
+ out = self.act3(out)
55
+ return out
56
+
57
+
58
+ class AttentionPool2d(nn.Module):
59
+ def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
60
+ super().__init__()
61
+ self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
62
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
63
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
64
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
65
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
66
+ self.num_heads = num_heads
67
+
68
+ def forward(self, x):
69
+ x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
70
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
71
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
72
+ x, _ = F.multi_head_attention_forward(
73
+ query=x, key=x, value=x,
74
+ embed_dim_to_check=x.shape[-1],
75
+ num_heads=self.num_heads,
76
+ q_proj_weight=self.q_proj.weight,
77
+ k_proj_weight=self.k_proj.weight,
78
+ v_proj_weight=self.v_proj.weight,
79
+ in_proj_weight=None,
80
+ in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
81
+ bias_k=None,
82
+ bias_v=None,
83
+ add_zero_attn=False,
84
+ dropout_p=0.,
85
+ out_proj_weight=self.c_proj.weight,
86
+ out_proj_bias=self.c_proj.bias,
87
+ use_separate_proj_weight=True,
88
+ training=self.training,
89
+ need_weights=False
90
+ )
91
+
92
+ return x[0]
93
+
94
+
95
+ class ModifiedResNet(nn.Module):
96
+ """
97
+ A ResNet class that is similar to torchvision's but contains the following changes:
98
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
99
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
100
+ - The final pooling layer is a QKV attention instead of an average pool
101
+ """
102
+
103
+ def __init__(self, layers, output_dim, heads, image_size=224, width=64):
104
+ super().__init__()
105
+ self.output_dim = output_dim
106
+ self.image_size = image_size
107
+
108
+ # the 3-layer stem
109
+ self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
110
+ self.bn1 = nn.BatchNorm2d(width // 2)
111
+ self.act1 = nn.ReLU(inplace=True)
112
+ self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
113
+ self.bn2 = nn.BatchNorm2d(width // 2)
114
+ self.act2 = nn.ReLU(inplace=True)
115
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
116
+ self.bn3 = nn.BatchNorm2d(width)
117
+ self.act3 = nn.ReLU(inplace=True)
118
+ self.avgpool = nn.AvgPool2d(2)
119
+
120
+ # residual layers
121
+ self._inplanes = width # this is a *mutable* variable used during construction
122
+ self.layer1 = self._make_layer(width, layers[0])
123
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
124
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
125
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
126
+
127
+ embed_dim = width * 32 # the ResNet feature dimension
128
+ self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
129
+
130
+ self.init_parameters()
131
+
132
+ def _make_layer(self, planes, blocks, stride=1):
133
+ layers = [Bottleneck(self._inplanes, planes, stride)]
134
+
135
+ self._inplanes = planes * Bottleneck.expansion
136
+ for _ in range(1, blocks):
137
+ layers.append(Bottleneck(self._inplanes, planes))
138
+
139
+ return nn.Sequential(*layers)
140
+
141
+ def init_parameters(self):
142
+ if self.attnpool is not None:
143
+ std = self.attnpool.c_proj.in_features ** -0.5
144
+ nn.init.normal_(self.attnpool.q_proj.weight, std=std)
145
+ nn.init.normal_(self.attnpool.k_proj.weight, std=std)
146
+ nn.init.normal_(self.attnpool.v_proj.weight, std=std)
147
+ nn.init.normal_(self.attnpool.c_proj.weight, std=std)
148
+
149
+ for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
150
+ for name, param in resnet_block.named_parameters():
151
+ if name.endswith("bn3.weight"):
152
+ nn.init.zeros_(param)
153
+
154
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
155
+ assert unlocked_groups == 0, 'partial locking not currently supported for this model'
156
+ for param in self.parameters():
157
+ param.requires_grad = False
158
+ if freeze_bn_stats:
159
+ freeze_batch_norm_2d(self)
160
+
161
+ @torch.jit.ignore
162
+ def set_grad_checkpointing(self, enable=True):
163
+ # FIXME support for non-transformer
164
+ pass
165
+
166
+ def stem(self, x):
167
+ x = self.act1(self.bn1(self.conv1(x)))
168
+ x = self.act2(self.bn2(self.conv2(x)))
169
+ x = self.act3(self.bn3(self.conv3(x)))
170
+ x = self.avgpool(x)
171
+ return x
172
+
173
+ def forward(self, x):
174
+ x = self.stem(x)
175
+ x = self.layer1(x)
176
+ x = self.layer2(x)
177
+ x = self.layer3(x)
178
+ x = self.layer4(x)
179
+ x = self.attnpool(x)
180
+
181
+ return x
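A quick shape check for ModifiedResNet with RN50-style layer counts (dummy data on CPU; the sizes follow the CLIP RN50 convention of width=64, heads=width*32//64).

import torch
from eva_clip.modified_resnet import ModifiedResNet

resnet = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32, image_size=224, width=64)
with torch.no_grad():
    feats = resnet(torch.randn(2, 3, 224, 224))
print(feats.shape)  # torch.Size([2, 1024])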
eva_clip/openai.py ADDED
@@ -0,0 +1,144 @@
1
+ """ OpenAI pretrained model functions
2
+
3
+ Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
+ """
5
+
6
+ import os
7
+ import warnings
8
+ from typing import List, Optional, Union
9
+
10
+ import torch
11
+
12
+ from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
13
+ from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
14
+
15
+ __all__ = ["list_openai_models", "load_openai_model"]
16
+
17
+
18
+ def list_openai_models() -> List[str]:
19
+ """Returns the names of available CLIP models"""
20
+ return list_pretrained_models_by_tag('openai')
21
+
22
+
23
+ def load_openai_model(
24
+ name: str,
25
+ precision: Optional[str] = None,
26
+ device: Optional[Union[str, torch.device]] = None,
27
+ jit: bool = True,
28
+ cache_dir: Optional[str] = None,
29
+ ):
30
+ """Load a CLIP model
31
+
32
+ Parameters
33
+ ----------
34
+ name : str
35
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
36
+ precision: str
37
+ Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
38
+ device : Union[str, torch.device]
39
+ The device to put the loaded model
40
+ jit : bool
41
+ Whether to load the optimized JIT model (default) or more hackable non-JIT model.
42
+ cache_dir : Optional[str]
43
+ The directory to cache the downloaded model weights
44
+
45
+ Returns
46
+ -------
47
+ model : torch.nn.Module
48
+ The CLIP model
49
+ preprocess : Callable[[PIL.Image], torch.Tensor]
50
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
51
+ """
52
+ if device is None:
53
+ device = "cuda" if torch.cuda.is_available() else "cpu"
54
+ if precision is None:
55
+ precision = 'fp32' if device == 'cpu' else 'fp16'
56
+
57
+ if get_pretrained_url(name, 'openai'):
58
+ model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
59
+ elif os.path.isfile(name):
60
+ model_path = name
61
+ else:
62
+ raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
63
+
64
+ try:
65
+ # loading JIT archive
66
+ model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
67
+ state_dict = None
68
+ except RuntimeError:
69
+ # loading saved state dict
70
+ if jit:
71
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
72
+ jit = False
73
+ state_dict = torch.load(model_path, map_location="cpu")
74
+
75
+ if not jit:
76
+ # Build a non-jit model from the OpenAI jitted model state dict
77
+ cast_dtype = get_cast_dtype(precision)
78
+ try:
79
+ model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
80
+ except KeyError:
81
+ sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
82
+ model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
83
+
84
+ # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
85
+ model = model.to(device)
86
+ if precision.startswith('amp') or precision == 'fp32':
87
+ model.float()
88
+ elif precision == 'bf16':
89
+ convert_weights_to_lp(model, dtype=torch.bfloat16)
90
+
91
+ return model
92
+
93
+ # patch the device names
94
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
95
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
96
+
97
+ def patch_device(module):
98
+ try:
99
+ graphs = [module.graph] if hasattr(module, "graph") else []
100
+ except RuntimeError:
101
+ graphs = []
102
+
103
+ if hasattr(module, "forward1"):
104
+ graphs.append(module.forward1.graph)
105
+
106
+ for graph in graphs:
107
+ for node in graph.findAllNodes("prim::Constant"):
108
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
109
+ node.copyAttributes(device_node)
110
+
111
+ model.apply(patch_device)
112
+ patch_device(model.encode_image)
113
+ patch_device(model.encode_text)
114
+
115
+ # patch dtype to float32 (typically for CPU)
116
+ if precision == 'fp32':
117
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
118
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
119
+ float_node = float_input.node()
120
+
121
+ def patch_float(module):
122
+ try:
123
+ graphs = [module.graph] if hasattr(module, "graph") else []
124
+ except RuntimeError:
125
+ graphs = []
126
+
127
+ if hasattr(module, "forward1"):
128
+ graphs.append(module.forward1.graph)
129
+
130
+ for graph in graphs:
131
+ for node in graph.findAllNodes("aten::to"):
132
+ inputs = list(node.inputs())
133
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
134
+ if inputs[i].node()["value"] == 5:
135
+ inputs[i].node().copyAttributes(float_node)
136
+
137
+ model.apply(patch_float)
138
+ patch_float(model.encode_image)
139
+ patch_float(model.encode_text)
140
+ model.float()
141
+
142
+ # ensure image_size attr available at consistent location for both jit and non-jit
143
+ model.visual.image_size = model.input_resolution.item()
144
+ return model
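A hedged usage sketch: in this repo the OpenAI entries are registered under the renamed keys in eva_clip/pretrained.py (e.g. "OpenaiCLIP-B-32"), and the checkpoint is downloaded on first call.

from eva_clip.openai import list_openai_models, load_openai_model

print(list_openai_models())
model = load_openai_model("OpenaiCLIP-B-32", precision="fp32", device="cpu", jit=False)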
eva_clip/pretrained.py ADDED
@@ -0,0 +1,332 @@
1
+ import hashlib
2
+ import os
3
+ import urllib
4
+ import warnings
5
+ from functools import partial
6
+ from typing import Dict, Union
7
+
8
+ from tqdm import tqdm
9
+
10
+ try:
11
+ from huggingface_hub import hf_hub_download
12
+ _has_hf_hub = True
13
+ except ImportError:
14
+ hf_hub_download = None
15
+ _has_hf_hub = False
16
+
17
+
18
+ def _pcfg(url='', hf_hub='', filename='', mean=None, std=None):
19
+ return dict(
20
+ url=url,
21
+ hf_hub=hf_hub,
22
+ mean=mean,
23
+ std=std,
24
+ )
25
+
26
+ _VITB32 = dict(
27
+ openai=_pcfg(
28
+ "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
29
+ laion400m_e31=_pcfg(
30
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
31
+ laion400m_e32=_pcfg(
32
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
33
+ laion2b_e16=_pcfg(
34
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
35
+ laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/')
36
+ )
37
+
38
+ _VITB32_quickgelu = dict(
39
+ openai=_pcfg(
40
+ "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
41
+ laion400m_e31=_pcfg(
42
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
43
+ laion400m_e32=_pcfg(
44
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
45
+ )
46
+
47
+ _VITB16 = dict(
48
+ openai=_pcfg(
49
+ "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
50
+ laion400m_e31=_pcfg(
51
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
52
+ laion400m_e32=_pcfg(
53
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
54
+ laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'),
55
+ )
56
+
57
+ _EVAB16 = dict(
58
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
59
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
60
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
61
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
62
+ )
63
+
64
+ _VITB16_PLUS_240 = dict(
65
+ laion400m_e31=_pcfg(
66
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
67
+ laion400m_e32=_pcfg(
68
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
69
+ )
70
+
71
+ _VITL14 = dict(
72
+ openai=_pcfg(
73
+ "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
74
+ laion400m_e31=_pcfg(
75
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
76
+ laion400m_e32=_pcfg(
77
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
78
+ laion2b_s32b_b82k=_pcfg(
79
+ hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/',
80
+ mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
81
+ )
82
+
83
+ _EVAL14 = dict(
84
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
85
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
86
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
87
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
88
+ )
89
+
90
+ _VITL14_336 = dict(
91
+ openai=_pcfg(
92
+ "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
93
+ )
94
+
95
+ _EVAL14_336 = dict(
96
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
97
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
98
+ eva_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
99
+ eva02_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
100
+ )
101
+
102
+ _VITH14 = dict(
103
+ laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'),
104
+ )
105
+
106
+ _VITg14 = dict(
107
+ laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'),
108
+ laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'),
109
+ )
110
+
111
+ _EVAg14 = dict(
112
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
113
+ eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
114
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
115
+ eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
116
+ )
117
+
118
+ _EVAg14_PLUS = dict(
119
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
120
+ eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
121
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
122
+ eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
123
+ )
124
+
125
+ _VITbigG14 = dict(
126
+ laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'),
127
+ )
128
+
129
+ _EVAbigE14 = dict(
130
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
131
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
132
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
133
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
134
+ )
135
+
136
+ _EVAbigE14_PLUS = dict(
137
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
138
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
139
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
140
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
141
+ )
142
+
143
+
144
+ _PRETRAINED = {
145
+ # "ViT-B-32": _VITB32,
146
+ "OpenaiCLIP-B-32": _VITB32,
147
+ "OpenCLIP-B-32": _VITB32,
148
+
149
+ # "ViT-B-32-quickgelu": _VITB32_quickgelu,
150
+ "OpenaiCLIP-B-32-quickgelu": _VITB32_quickgelu,
151
+ "OpenCLIP-B-32-quickgelu": _VITB32_quickgelu,
152
+
153
+ # "ViT-B-16": _VITB16,
154
+ "OpenaiCLIP-B-16": _VITB16,
155
+ "OpenCLIP-B-16": _VITB16,
156
+
157
+ "EVA02-B-16": _EVAB16,
158
+ "EVA02-CLIP-B-16": _EVAB16,
159
+
160
+ # "ViT-B-16-plus-240": _VITB16_PLUS_240,
161
+ "OpenCLIP-B-16-plus-240": _VITB16_PLUS_240,
162
+
163
+ # "ViT-L-14": _VITL14,
164
+ "OpenaiCLIP-L-14": _VITL14,
165
+ "OpenCLIP-L-14": _VITL14,
166
+
167
+ "EVA02-L-14": _EVAL14,
168
+ "EVA02-CLIP-L-14": _EVAL14,
169
+
170
+ # "ViT-L-14-336": _VITL14_336,
171
+ "OpenaiCLIP-L-14-336": _VITL14_336,
172
+
173
+ "EVA02-CLIP-L-14-336": _EVAL14_336,
174
+
175
+ # "ViT-H-14": _VITH14,
176
+ # "ViT-g-14": _VITg14,
177
+ "OpenCLIP-H-14": _VITH14,
178
+ "OpenCLIP-g-14": _VITg14,
179
+
180
+ "EVA01-CLIP-g-14": _EVAg14,
181
+ "EVA01-CLIP-g-14-plus": _EVAg14_PLUS,
182
+
183
+ # "ViT-bigG-14": _VITbigG14,
184
+ "OpenCLIP-bigG-14": _VITbigG14,
185
+
186
+ "EVA02-CLIP-bigE-14": _EVAbigE14,
187
+ "EVA02-CLIP-bigE-14-plus": _EVAbigE14_PLUS,
188
+ }
189
+
190
+
191
+ def _clean_tag(tag: str):
192
+ # normalize pretrained tags
193
+ return tag.lower().replace('-', '_')
194
+
195
+
196
+ def list_pretrained(as_str: bool = False):
197
+ """ returns list of pretrained models
198
+ Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
199
+ """
200
+ return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
201
+
202
+
203
+ def list_pretrained_models_by_tag(tag: str):
204
+ """ return all models having the specified pretrain tag """
205
+ models = []
206
+ tag = _clean_tag(tag)
207
+ for k in _PRETRAINED.keys():
208
+ if tag in _PRETRAINED[k]:
209
+ models.append(k)
210
+ return models
211
+
212
+
213
+ def list_pretrained_tags_by_model(model: str):
214
+ """ return all pretrain tags for the specified model architecture """
215
+ tags = []
216
+ if model in _PRETRAINED:
217
+ tags.extend(_PRETRAINED[model].keys())
218
+ return tags
219
+
220
+
221
+ def is_pretrained_cfg(model: str, tag: str):
222
+ if model not in _PRETRAINED:
223
+ return False
224
+ return _clean_tag(tag) in _PRETRAINED[model]
225
+
226
+
227
+ def get_pretrained_cfg(model: str, tag: str):
228
+ if model not in _PRETRAINED:
229
+ return {}
230
+ model_pretrained = _PRETRAINED[model]
231
+ return model_pretrained.get(_clean_tag(tag), {})
232
+
233
+
234
+ def get_pretrained_url(model: str, tag: str):
235
+ cfg = get_pretrained_cfg(model, _clean_tag(tag))
236
+ return cfg.get('url', '')
237
+
238
+
239
+ def download_pretrained_from_url(
240
+ url: str,
241
+ cache_dir: Union[str, None] = None,
242
+ ):
243
+ if not cache_dir:
244
+ cache_dir = os.path.expanduser("~/.cache/clip")
245
+ os.makedirs(cache_dir, exist_ok=True)
246
+ filename = os.path.basename(url)
247
+
248
+ if 'openaipublic' in url:
249
+ expected_sha256 = url.split("/")[-2]
250
+ elif 'mlfoundations' in url:
251
+ expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
252
+ else:
253
+ expected_sha256 = ''
254
+
255
+ download_target = os.path.join(cache_dir, filename)
256
+
257
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
258
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
259
+
260
+ if os.path.isfile(download_target):
261
+ if expected_sha256:
262
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
263
+ return download_target
264
+ else:
265
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
266
+ else:
267
+ return download_target
268
+
269
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
270
+ with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
271
+ while True:
272
+ buffer = source.read(8192)
273
+ if not buffer:
274
+ break
275
+
276
+ output.write(buffer)
277
+ loop.update(len(buffer))
278
+
279
+ if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
280
+ raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
281
+
282
+ return download_target
283
+
284
+
285
+ def has_hf_hub(necessary=False):
286
+ if not _has_hf_hub and necessary:
287
+ # if no HF Hub module installed, and it is necessary to continue, raise error
288
+ raise RuntimeError(
289
+ 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
290
+ return _has_hf_hub
291
+
292
+
293
+ def download_pretrained_from_hf(
294
+ model_id: str,
295
+ filename: str = 'open_clip_pytorch_model.bin',
296
+ revision=None,
297
+ cache_dir: Union[str, None] = None,
298
+ ):
299
+ has_hf_hub(True)
300
+ cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
301
+ return cached_file
302
+
303
+
304
+ def download_pretrained(
305
+ cfg: Dict,
306
+ force_hf_hub: bool = False,
307
+ cache_dir: Union[str, None] = None,
308
+ ):
309
+ target = ''
310
+ if not cfg:
311
+ return target
312
+
313
+ download_url = cfg.get('url', '')
314
+ download_hf_hub = cfg.get('hf_hub', '')
315
+ if download_hf_hub and force_hf_hub:
316
+ # use HF hub even if url exists
317
+ download_url = ''
318
+
319
+ if download_url:
320
+ target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
321
+ elif download_hf_hub:
322
+ has_hf_hub(True)
323
+ # we assume the hf_hub entries in pretrained config combine model_id + filename in
324
+ # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
325
+ # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
326
+ model_id, filename = os.path.split(download_hf_hub)
327
+ if filename:
328
+ target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
329
+ else:
330
+ target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
331
+
332
+ return target
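Review note: the helpers above map a (model, tag) pair to a pretrained config and then to a local checkpoint, preferring a direct URL and falling back to an `hf_hub` entry. A minimal usage sketch, assuming the module is importable as `eva_clip.pretrained` (model name and tag are illustrative):

    from eva_clip.pretrained import list_pretrained_tags_by_model, get_pretrained_cfg, download_pretrained

    tags = list_pretrained_tags_by_model("EVA02-CLIP-L-14")   # ['eva', 'eva02', 'eva_clip', 'eva02_clip']
    cfg = get_pretrained_cfg("EVA02-CLIP-L-14", "eva_clip")   # config dict with 'hf_hub', 'mean', 'std', ...
    ckpt = download_pretrained(cfg)                           # local path to the resolved checkpoint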
eva_clip/rope.py ADDED
@@ -0,0 +1,137 @@
1
+ from math import pi
2
+ import torch
3
+ from torch import nn
4
+ from einops import rearrange, repeat
5
+ import logging
6
+
7
+ def broadcat(tensors, dim = -1):
8
+ num_tensors = len(tensors)
9
+ shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
10
+ assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
11
+ shape_len = list(shape_lens)[0]
12
+ dim = (dim + shape_len) if dim < 0 else dim
13
+ dims = list(zip(*map(lambda t: list(t.shape), tensors)))
14
+ expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
15
+ assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
16
+ max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
17
+ expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
18
+ expanded_dims.insert(dim, (dim, dims[dim]))
19
+ expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
20
+ tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
21
+ return torch.cat(tensors, dim = dim)
22
+
23
+ def rotate_half(x):
24
+ x = rearrange(x, '... (d r) -> ... d r', r = 2)
25
+ x1, x2 = x.unbind(dim = -1)
26
+ x = torch.stack((-x2, x1), dim = -1)
27
+ return rearrange(x, '... d r -> ... (d r)')
28
+
29
+
30
+ class VisionRotaryEmbedding(nn.Module):
31
+ def __init__(
32
+ self,
33
+ dim,
34
+ pt_seq_len,
35
+ ft_seq_len=None,
36
+ custom_freqs = None,
37
+ freqs_for = 'lang',
38
+ theta = 10000,
39
+ max_freq = 10,
40
+ num_freqs = 1,
41
+ ):
42
+ super().__init__()
43
+ if custom_freqs:
44
+ freqs = custom_freqs
45
+ elif freqs_for == 'lang':
46
+ freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
47
+ elif freqs_for == 'pixel':
48
+ freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
49
+ elif freqs_for == 'constant':
50
+ freqs = torch.ones(num_freqs).float()
51
+ else:
52
+ raise ValueError(f'unknown modality {freqs_for}')
53
+
54
+ if ft_seq_len is None: ft_seq_len = pt_seq_len
55
+ t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len
56
+
57
+ freqs_h = torch.einsum('..., f -> ... f', t, freqs)
58
+ freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)
59
+
60
+ freqs_w = torch.einsum('..., f -> ... f', t, freqs)
61
+ freqs_w = repeat(freqs_w, '... n -> ... (n r)', r = 2)
62
+
63
+ freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim = -1)
64
+
65
+ self.register_buffer("freqs_cos", freqs.cos())
66
+ self.register_buffer("freqs_sin", freqs.sin())
67
+
68
+ logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')
69
+
70
+ def forward(self, t, start_index = 0):
71
+ rot_dim = self.freqs_cos.shape[-1]
72
+ end_index = start_index + rot_dim
73
+ assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
74
+ t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
75
+ t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)
76
+
77
+ return torch.cat((t_left, t, t_right), dim = -1)
78
+
79
+ class VisionRotaryEmbeddingFast(nn.Module):
80
+ def __init__(
81
+ self,
82
+ dim,
83
+ pt_seq_len,
84
+ ft_seq_len=None,
85
+ custom_freqs = None,
86
+ freqs_for = 'lang',
87
+ theta = 10000,
88
+ max_freq = 10,
89
+ num_freqs = 1,
90
+ patch_dropout = 0.
91
+ ):
92
+ super().__init__()
93
+ if custom_freqs:
94
+ freqs = custom_freqs
95
+ elif freqs_for == 'lang':
96
+ freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
97
+ elif freqs_for == 'pixel':
98
+ freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
99
+ elif freqs_for == 'constant':
100
+ freqs = torch.ones(num_freqs).float()
101
+ else:
102
+ raise ValueError(f'unknown modality {freqs_for}')
103
+
104
+ if ft_seq_len is None: ft_seq_len = pt_seq_len
105
+ t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len
106
+
107
+ freqs = torch.einsum('..., f -> ... f', t, freqs)
108
+ freqs = repeat(freqs, '... n -> ... (n r)', r = 2)
109
+ freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)
110
+
111
+ freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
112
+ freqs_sin = freqs.sin().view(-1, freqs.shape[-1])
113
+
114
+ self.patch_dropout = patch_dropout
115
+
116
+ self.register_buffer("freqs_cos", freqs_cos)
117
+ self.register_buffer("freqs_sin", freqs_sin)
118
+
119
+ logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')
120
+
121
+ def forward(self, t, patch_indices_keep=None):
122
+ if patch_indices_keep is not None:
123
+ batch = t.size()[0]
124
+ batch_indices = torch.arange(batch)
125
+ batch_indices = batch_indices[..., None]
126
+
127
+ freqs_cos = repeat(self.freqs_cos, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
128
+ freqs_sin = repeat(self.freqs_sin, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
129
+
130
+ freqs_cos = freqs_cos[batch_indices, patch_indices_keep]
131
+ freqs_cos = rearrange(freqs_cos, 'n i m j -> n m i j')
132
+ freqs_sin = freqs_sin[batch_indices, patch_indices_keep]
133
+ freqs_sin = rearrange(freqs_sin, 'n i m j -> n m i j')
134
+
135
+ return t * freqs_cos + rotate_half(t) * freqs_sin
136
+
137
+ return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
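Review note: `VisionRotaryEmbeddingFast` precomputes a flat cos/sin table of shape (seq_len**2, 2*dim) and applies it by elementwise broadcast, so callers pass half of the per-head dimension. A shape sketch with assumed example sizes (16x16 patch grid, 64-dim heads):

    import torch

    rope = VisionRotaryEmbeddingFast(dim=32, pt_seq_len=16)   # table shape: (256, 64)
    tokens = torch.randn(2, 8, 256, 64)                       # (batch, heads, patches, head_dim), CLS excluded
    rotated = rope(tokens)                                    # broadcasts over batch and heads -> same shape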
eva_clip/timm_model.py ADDED
@@ -0,0 +1,122 @@
1
+ """ timm model adapter
2
+
3
+ Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
4
+ """
5
+ import logging
6
+ from collections import OrderedDict
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+ try:
12
+ import timm
13
+ from timm.models.layers import Mlp, to_2tuple
14
+ try:
15
+ # old timm imports < 0.8.1
16
+ from timm.models.layers.attention_pool2d import RotAttentionPool2d
17
+ from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
18
+ except ImportError:
19
+ # new timm imports >= 0.8.1
20
+ from timm.layers import RotAttentionPool2d
21
+ from timm.layers import AttentionPool2d as AbsAttentionPool2d
22
+ except ImportError:
23
+ timm = None
24
+
25
+ from .utils import freeze_batch_norm_2d
26
+
27
+
28
+ class TimmModel(nn.Module):
29
+ """ timm model adapter
30
+ # FIXME this adapter is a work in progress, may change in ways that break weight compat
31
+ """
32
+
33
+ def __init__(
34
+ self,
35
+ model_name,
36
+ embed_dim,
37
+ image_size=224,
38
+ pool='avg',
39
+ proj='linear',
40
+ proj_bias=False,
41
+ drop=0.,
42
+ pretrained=False):
43
+ super().__init__()
44
+ if timm is None:
45
+ raise RuntimeError("Please `pip install timm` to use timm models.")
46
+
47
+ self.image_size = to_2tuple(image_size)
48
+ self.trunk = timm.create_model(model_name, pretrained=pretrained)
49
+ feat_size = self.trunk.default_cfg.get('pool_size', None)
50
+ feature_ndim = 1 if not feat_size else 2
51
+ if pool in ('abs_attn', 'rot_attn'):
52
+ assert feature_ndim == 2
53
+ # if attn pooling used, remove both classifier and default pool
54
+ self.trunk.reset_classifier(0, global_pool='')
55
+ else:
56
+ # reset global pool if pool config set, otherwise leave as network default
57
+ reset_kwargs = dict(global_pool=pool) if pool else {}
58
+ self.trunk.reset_classifier(0, **reset_kwargs)
59
+ prev_chs = self.trunk.num_features
60
+
61
+ head_layers = OrderedDict()
62
+ if pool == 'abs_attn':
63
+ head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
64
+ prev_chs = embed_dim
65
+ elif pool == 'rot_attn':
66
+ head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
67
+ prev_chs = embed_dim
68
+ else:
69
+ assert proj, 'projection layer needed if non-attention pooling is used.'
70
+
71
+ # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
72
+ if proj == 'linear':
73
+ head_layers['drop'] = nn.Dropout(drop)
74
+ head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)
75
+ elif proj == 'mlp':
76
+ head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias))
77
+
78
+ self.head = nn.Sequential(head_layers)
79
+
80
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
81
+ """ lock modules
82
+ Args:
83
+ unlocked_groups (int): leave last n layer groups unlocked (default: 0)
84
+ """
85
+ if not unlocked_groups:
86
+ # lock full model
87
+ for param in self.trunk.parameters():
88
+ param.requires_grad = False
89
+ if freeze_bn_stats:
90
+ freeze_batch_norm_2d(self.trunk)
91
+ else:
92
+ # NOTE: partial freeze requires latest timm (master) branch and is subject to change
93
+ try:
94
+ # FIXME import here until API stable and in an official release
95
+ from timm.models.helpers import group_parameters, group_modules
96
+ except ImportError:
97
+ raise RuntimeError(
98
+ 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
99
+ matcher = self.trunk.group_matcher()
100
+ gparams = group_parameters(self.trunk, matcher)
101
+ max_layer_id = max(gparams.keys())
102
+ max_layer_id = max_layer_id - unlocked_groups
103
+ for group_idx in range(max_layer_id + 1):
104
+ group = gparams[group_idx]
105
+ for param in group:
106
+ self.trunk.get_parameter(param).requires_grad = False
107
+ if freeze_bn_stats:
108
+ gmodules = group_modules(self.trunk, matcher, reverse=True)
109
+ gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
110
+ freeze_batch_norm_2d(self.trunk, gmodules)
111
+
112
+ @torch.jit.ignore
113
+ def set_grad_checkpointing(self, enable=True):
114
+ try:
115
+ self.trunk.set_grad_checkpointing(enable)
116
+ except Exception as e:
117
+ logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')
118
+
119
+ def forward(self, x):
120
+ x = self.trunk(x)
121
+ x = self.head(x)
122
+ return x
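Review note: `TimmModel` wraps an arbitrary timm backbone as a CLIP vision tower by stripping its classifier and attaching a pooling/projection head. An illustrative construction (requires `pip install timm`; architecture name and sizes are examples):

    import torch

    tower = TimmModel(model_name="resnet50", embed_dim=512, image_size=224, pool="avg", proj="linear")
    feats = tower(torch.randn(1, 3, 224, 224))   # -> (1, 512)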
eva_clip/tokenizer.py ADDED
@@ -0,0 +1,201 @@
1
+ """ CLIP tokenizer
2
+
3
+ Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
+ """
5
+ import gzip
6
+ import html
7
+ import os
8
+ from functools import lru_cache
9
+ from typing import Union, List
10
+
11
+ import ftfy
12
+ import regex as re
13
+ import torch
14
+
15
+ # https://stackoverflow.com/q/62691279
16
+ import os
17
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
18
+
19
+
20
+ @lru_cache()
21
+ def default_bpe():
22
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
23
+
24
+
25
+ @lru_cache()
26
+ def bytes_to_unicode():
27
+ """
28
+ Returns list of utf-8 byte and a corresponding list of unicode strings.
29
+ The reversible bpe codes work on unicode strings.
30
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
31
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
32
+ This is a significant percentage of your normal, say, 32K bpe vocab.
33
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
34
+ And avoids mapping to whitespace/control characters the bpe code barfs on.
35
+ """
36
+ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
37
+ cs = bs[:]
38
+ n = 0
39
+ for b in range(2**8):
40
+ if b not in bs:
41
+ bs.append(b)
42
+ cs.append(2**8+n)
43
+ n += 1
44
+ cs = [chr(n) for n in cs]
45
+ return dict(zip(bs, cs))
46
+
47
+
48
+ def get_pairs(word):
49
+ """Return set of symbol pairs in a word.
50
+ Word is represented as tuple of symbols (symbols being variable-length strings).
51
+ """
52
+ pairs = set()
53
+ prev_char = word[0]
54
+ for char in word[1:]:
55
+ pairs.add((prev_char, char))
56
+ prev_char = char
57
+ return pairs
58
+
59
+
60
+ def basic_clean(text):
61
+ text = ftfy.fix_text(text)
62
+ text = html.unescape(html.unescape(text))
63
+ return text.strip()
64
+
65
+
66
+ def whitespace_clean(text):
67
+ text = re.sub(r'\s+', ' ', text)
68
+ text = text.strip()
69
+ return text
70
+
71
+
72
+ class SimpleTokenizer(object):
73
+ def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
74
+ self.byte_encoder = bytes_to_unicode()
75
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
76
+ merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
77
+ merges = merges[1:49152-256-2+1]
78
+ merges = [tuple(merge.split()) for merge in merges]
79
+ vocab = list(bytes_to_unicode().values())
80
+ vocab = vocab + [v+'</w>' for v in vocab]
81
+ for merge in merges:
82
+ vocab.append(''.join(merge))
83
+ if not special_tokens:
84
+ special_tokens = ['<start_of_text>', '<end_of_text>']
85
+ else:
86
+ special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
87
+ vocab.extend(special_tokens)
88
+ self.encoder = dict(zip(vocab, range(len(vocab))))
89
+ self.decoder = {v: k for k, v in self.encoder.items()}
90
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
91
+ self.cache = {t:t for t in special_tokens}
92
+ special = "|".join(special_tokens)
93
+ self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
94
+
95
+ self.vocab_size = len(self.encoder)
96
+ self.all_special_ids = [self.encoder[t] for t in special_tokens]
97
+
98
+ def bpe(self, token):
99
+ if token in self.cache:
100
+ return self.cache[token]
101
+ word = tuple(token[:-1]) + ( token[-1] + '</w>',)
102
+ pairs = get_pairs(word)
103
+
104
+ if not pairs:
105
+ return token+'</w>'
106
+
107
+ while True:
108
+ bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
109
+ if bigram not in self.bpe_ranks:
110
+ break
111
+ first, second = bigram
112
+ new_word = []
113
+ i = 0
114
+ while i < len(word):
115
+ try:
116
+ j = word.index(first, i)
117
+ new_word.extend(word[i:j])
118
+ i = j
119
+ except:
120
+ new_word.extend(word[i:])
121
+ break
122
+
123
+ if word[i] == first and i < len(word)-1 and word[i+1] == second:
124
+ new_word.append(first+second)
125
+ i += 2
126
+ else:
127
+ new_word.append(word[i])
128
+ i += 1
129
+ new_word = tuple(new_word)
130
+ word = new_word
131
+ if len(word) == 1:
132
+ break
133
+ else:
134
+ pairs = get_pairs(word)
135
+ word = ' '.join(word)
136
+ self.cache[token] = word
137
+ return word
138
+
139
+ def encode(self, text):
140
+ bpe_tokens = []
141
+ text = whitespace_clean(basic_clean(text)).lower()
142
+ for token in re.findall(self.pat, text):
143
+ token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
144
+ bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
145
+ return bpe_tokens
146
+
147
+ def decode(self, tokens):
148
+ text = ''.join([self.decoder[token] for token in tokens])
149
+ text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
150
+ return text
151
+
152
+
153
+ _tokenizer = SimpleTokenizer()
154
+
155
+
156
+ def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
157
+ """
158
+ Returns the tokenized representation of given input string(s)
159
+
160
+ Parameters
161
+ ----------
162
+ texts : Union[str, List[str]]
163
+ An input string or a list of input strings to tokenize
164
+ context_length : int
165
+ The context length to use; all CLIP models use 77 as the context length
166
+
167
+ Returns
168
+ -------
169
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
170
+ """
171
+ if isinstance(texts, str):
172
+ texts = [texts]
173
+
174
+ sot_token = _tokenizer.encoder["<start_of_text>"]
175
+ eot_token = _tokenizer.encoder["<end_of_text>"]
176
+ all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
177
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
178
+
179
+ for i, tokens in enumerate(all_tokens):
180
+ if len(tokens) > context_length:
181
+ tokens = tokens[:context_length] # Truncate
182
+ tokens[-1] = eot_token
183
+ result[i, :len(tokens)] = torch.tensor(tokens)
184
+
185
+ return result
186
+
187
+
188
+ class HFTokenizer:
189
+ "HuggingFace tokenizer wrapper"
190
+ def __init__(self, tokenizer_name:str):
191
+ from transformers import AutoTokenizer
192
+ self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
193
+
194
+ def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:
195
+ # same cleaning as for default tokenizer, except lowercasing
196
+ # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
197
+ if isinstance(texts, str):
198
+ texts = [texts]
199
+ texts = [whitespace_clean(basic_clean(text)) for text in texts]
200
+ input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids
201
+ return input_ids
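Review note: `tokenize` pads or truncates every caption to the 77-token CLIP context, always placing the start token first and forcing the end token when truncation occurs. A quick sanity check (example text only):

    tokens = tokenize(["a portrait in watercolor style"])      # LongTensor, shape (1, 77)
    sot = _tokenizer.encoder["<start_of_text>"]
    eot = _tokenizer.encoder["<end_of_text>"]
    assert tokens.shape == (1, 77) and tokens[0, 0].item() == sot and (tokens[0] == eot).any()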
eva_clip/transform.py ADDED
@@ -0,0 +1,103 @@
1
+ from typing import Optional, Sequence, Tuple
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torchvision.transforms.functional as F
6
+
7
+ from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
8
+ CenterCrop
9
+
10
+ from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
11
+
12
+
13
+ class ResizeMaxSize(nn.Module):
14
+
15
+ def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
16
+ super().__init__()
17
+ if not isinstance(max_size, int):
18
+ raise TypeError(f"Size should be int. Got {type(max_size)}")
19
+ self.max_size = max_size
20
+ self.interpolation = interpolation
21
+ self.fn = min if fn == 'min' else max
22
+ self.fill = fill
23
+
24
+ def forward(self, img):
25
+ if isinstance(img, torch.Tensor):
26
+ height, width = img.shape[:2]
27
+ else:
28
+ width, height = img.size
29
+ scale = self.max_size / float(max(height, width))
30
+ if scale != 1.0:
31
+ new_size = tuple(round(dim * scale) for dim in (height, width))
32
+ img = F.resize(img, new_size, self.interpolation)
33
+ pad_h = self.max_size - new_size[0]
34
+ pad_w = self.max_size - new_size[1]
35
+ img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)
36
+ return img
37
+
38
+
39
+ def _convert_to_rgb(image):
40
+ return image.convert('RGB')
41
+
42
+
43
+ # class CatGen(nn.Module):
44
+ # def __init__(self, num=4):
45
+ # self.num = num
46
+ # def mixgen_batch(image, text):
47
+ # batch_size = image.shape[0]
48
+ # index = np.random.permutation(batch_size)
49
+
50
+ # cat_images = []
51
+ # for i in range(batch_size):
52
+ # # image mixup
53
+ # image[i,:] = lam * image[i,:] + (1 - lam) * image[index[i],:]
54
+ # # text concat
55
+ # text[i] = tokenizer((str(text[i]) + " " + str(text[index[i]])))[0]
56
+ # text = torch.stack(text)
57
+ # return image, text
58
+
59
+
60
+ def image_transform(
61
+ image_size: int,
62
+ is_train: bool,
63
+ mean: Optional[Tuple[float, ...]] = None,
64
+ std: Optional[Tuple[float, ...]] = None,
65
+ resize_longest_max: bool = False,
66
+ fill_color: int = 0,
67
+ ):
68
+ mean = mean or OPENAI_DATASET_MEAN
69
+ if not isinstance(mean, (list, tuple)):
70
+ mean = (mean,) * 3
71
+
72
+ std = std or OPENAI_DATASET_STD
73
+ if not isinstance(std, (list, tuple)):
74
+ std = (std,) * 3
75
+
76
+ if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
77
+ # for square size, pass size as int so that Resize() uses aspect preserving shortest edge
78
+ image_size = image_size[0]
79
+
80
+ normalize = Normalize(mean=mean, std=std)
81
+ if is_train:
82
+ return Compose([
83
+ RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
84
+ _convert_to_rgb,
85
+ ToTensor(),
86
+ normalize,
87
+ ])
88
+ else:
89
+ if resize_longest_max:
90
+ transforms = [
91
+ ResizeMaxSize(image_size, fill=fill_color)
92
+ ]
93
+ else:
94
+ transforms = [
95
+ Resize(image_size, interpolation=InterpolationMode.BICUBIC),
96
+ CenterCrop(image_size),
97
+ ]
98
+ transforms.extend([
99
+ _convert_to_rgb,
100
+ ToTensor(),
101
+ normalize,
102
+ ])
103
+ return Compose(transforms)
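Review note: at inference time `image_transform` resizes (or letterboxes, when `resize_longest_max=True`), center-crops, converts to RGB, and normalizes with the OpenAI CLIP statistics by default. A minimal example (placeholder image):

    from PIL import Image

    preprocess = image_transform(image_size=224, is_train=False)
    x = preprocess(Image.new("RGB", (640, 480)))   # -> tensor of shape (3, 224, 224)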
eva_clip/transformer.py ADDED
@@ -0,0 +1,737 @@
1
+ import os
2
+ import logging
3
+ from collections import OrderedDict
4
+ import math
5
+ from typing import Callable, Optional, Sequence
6
+ import numpy as np
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ try:
12
+ from timm.models.layers import trunc_normal_
13
+ except:
14
+ from timm.layers import trunc_normal_
15
+
16
+ from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
17
+ from .utils import to_2tuple
18
+
19
+ if os.getenv('ENV_TYPE') == 'deepspeed':
20
+ try:
21
+ import deepspeed
22
+ from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
23
+ except:
24
+ print("Please 'pip install deepspeed'")
25
+ deepspeed = None
26
+ from torch.utils.checkpoint import checkpoint
27
+ else:
28
+ from torch.utils.checkpoint import checkpoint
29
+
30
+ try:
31
+ import xformers.ops as xops
32
+ except ImportError:
33
+ xops = None
34
+ print("Please 'pip install xformers'")
35
+
36
+ class LayerNormFp32(nn.LayerNorm):
37
+ """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
38
+ def __init__(self, *args, **kwargs):
39
+ super().__init__(*args, **kwargs)
40
+
41
+ def forward(self, x: torch.Tensor):
42
+ output = F.layer_norm(
43
+ x.float(),
44
+ self.normalized_shape,
45
+ self.weight.float() if self.weight is not None else None,
46
+ self.bias.float() if self.bias is not None else None,
47
+ self.eps,
48
+ )
49
+ return output.type_as(x)
50
+
51
+
52
+ class LayerNorm(nn.LayerNorm):
53
+ """Subclass torch's LayerNorm (with cast back to input dtype)."""
54
+
55
+ def forward(self, x: torch.Tensor):
56
+ orig_type = x.dtype
57
+ x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
58
+ return x.to(orig_type)
59
+
60
+ class QuickGELU(nn.Module):
61
+ # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
62
+ def forward(self, x: torch.Tensor):
63
+ return x * torch.sigmoid(1.702 * x)
64
+
65
+
66
+ class LayerScale(nn.Module):
67
+ def __init__(self, dim, init_values=1e-5, inplace=False):
68
+ super().__init__()
69
+ self.inplace = inplace
70
+ self.gamma = nn.Parameter(init_values * torch.ones(dim))
71
+
72
+ def forward(self, x):
73
+ return x.mul_(self.gamma) if self.inplace else x * self.gamma
74
+
75
+ class PatchDropout(nn.Module):
76
+ """
77
+ https://arxiv.org/abs/2212.00794
78
+ """
79
+
80
+ def __init__(self, prob, exclude_first_token=True):
81
+ super().__init__()
82
+ assert 0 <= prob < 1.
83
+ self.prob = prob
84
+ self.exclude_first_token = exclude_first_token # exclude CLS token
85
+ logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}")
86
+
87
+ def forward(self, x):
88
+ if not self.training or self.prob == 0.:
89
+ return x
90
+
91
+ if self.exclude_first_token:
92
+ cls_tokens, x = x[:, :1], x[:, 1:]
93
+ else:
94
+ cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
95
+
96
+ batch = x.size()[0]
97
+ num_tokens = x.size()[1]
98
+
99
+ batch_indices = torch.arange(batch)
100
+ batch_indices = batch_indices[..., None]
101
+
102
+ keep_prob = 1 - self.prob
103
+ num_patches_keep = max(1, int(num_tokens * keep_prob))
104
+
105
+ rand = torch.randn(batch, num_tokens)
106
+ patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
107
+
108
+ x = x[batch_indices, patch_indices_keep]
109
+
110
+ if self.exclude_first_token:
111
+ x = torch.cat((cls_tokens, x), dim=1)
112
+
113
+ if self.training and os.getenv('RoPE') == '1':
114
+ return x, patch_indices_keep
115
+
116
+ return x
117
+
118
+
119
+ def _in_projection_packed(
120
+ q: torch.Tensor,
121
+ k: torch.Tensor,
122
+ v: torch.Tensor,
123
+ w: torch.Tensor,
124
+ b: Optional[torch.Tensor] = None,
125
+ ):
126
+ """
127
+ https://github.com/pytorch/pytorch/blob/db2a237763eb8693a20788be94f8c192e762baa8/torch/nn/functional.py#L4726
128
+ """
129
+ E = q.size(-1)
130
+ if k is v:
131
+ if q is k:
132
+ # self-attention
133
+ return F.linear(q, w, b).chunk(3, dim=-1)
134
+ else:
135
+ # encoder-decoder attention
136
+ w_q, w_kv = w.split([E, E * 2])
137
+ if b is None:
138
+ b_q = b_kv = None
139
+ else:
140
+ b_q, b_kv = b.split([E, E * 2])
141
+ return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
142
+ else:
143
+ w_q, w_k, w_v = w.chunk(3)
144
+ if b is None:
145
+ b_q = b_k = b_v = None
146
+ else:
147
+ b_q, b_k, b_v = b.chunk(3)
148
+ return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)
149
+
150
+ class Attention(nn.Module):
151
+ def __init__(
152
+ self,
153
+ dim,
154
+ num_heads=8,
155
+ qkv_bias=True,
156
+ scaled_cosine=False,
157
+ scale_heads=False,
158
+ logit_scale_max=math.log(1. / 0.01),
159
+ attn_drop=0.,
160
+ proj_drop=0.,
161
+ xattn=False,
162
+ rope=False
163
+ ):
164
+ super().__init__()
165
+ self.scaled_cosine = scaled_cosine
166
+ self.scale_heads = scale_heads
167
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
168
+ self.num_heads = num_heads
169
+ self.head_dim = dim // num_heads
170
+ self.scale = self.head_dim ** -0.5
171
+ self.logit_scale_max = logit_scale_max
172
+
173
+ # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
174
+ self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
175
+ if qkv_bias:
176
+ self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
177
+ else:
178
+ self.in_proj_bias = None
179
+
180
+ if self.scaled_cosine:
181
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
182
+ else:
183
+ self.logit_scale = None
184
+ self.attn_drop = nn.Dropout(attn_drop)
185
+ if self.scale_heads:
186
+ self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
187
+ else:
188
+ self.head_scale = None
189
+ self.out_proj = nn.Linear(dim, dim)
190
+ self.out_drop = nn.Dropout(proj_drop)
191
+ self.xattn = xattn
192
+ self.xattn_drop = attn_drop
193
+ self.rope = rope
194
+
195
+ def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
196
+ L, N, C = x.shape
197
+ q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
198
+ if self.xattn:
199
+ q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
200
+ k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
201
+ v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
202
+
203
+ x = xops.memory_efficient_attention(
204
+ q, k, v,
205
+ p=self.xattn_drop,
206
+ scale=self.scale if self.logit_scale is None else None,
207
+ attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,
208
+ )
209
+ else:
210
+ q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
211
+ k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
212
+ v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
213
+
214
+ if self.logit_scale is not None:
215
+ attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
216
+ logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
217
+ attn = attn.view(N, self.num_heads, L, L) * logit_scale
218
+ attn = attn.view(-1, L, L)
219
+ else:
220
+ q = q * self.scale
221
+ attn = torch.bmm(q, k.transpose(-1, -2))
222
+
223
+ if attn_mask is not None:
224
+ if attn_mask.dtype == torch.bool:
225
+ new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
226
+ new_attn_mask.masked_fill_(attn_mask, float("-inf"))
227
+ attn_mask = new_attn_mask
228
+ attn += attn_mask
229
+
230
+ attn = attn.softmax(dim=-1)
231
+ attn = self.attn_drop(attn)
232
+
233
+ x = torch.bmm(attn, v)
234
+
235
+ if self.head_scale is not None:
236
+ x = x.view(N, self.num_heads, L, C) * self.head_scale
237
+ x = x.view(-1, L, C)
238
+ x = x.transpose(0, 1).reshape(L, N, C)
239
+ x = self.out_proj(x)
240
+ x = self.out_drop(x)
241
+ return x
242
+
243
+ class CustomAttention(nn.Module):
244
+ def __init__(
245
+ self,
246
+ dim,
247
+ num_heads=8,
248
+ qkv_bias=True,
249
+ scaled_cosine=True,
250
+ scale_heads=False,
251
+ logit_scale_max=math.log(1. / 0.01),
252
+ attn_drop=0.,
253
+ proj_drop=0.,
254
+ xattn=False
255
+ ):
256
+ super().__init__()
257
+ self.scaled_cosine = scaled_cosine
258
+ self.scale_heads = scale_heads
259
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
260
+ self.num_heads = num_heads
261
+ self.head_dim = dim // num_heads
262
+ self.scale = self.head_dim ** -0.5
263
+ self.logit_scale_max = logit_scale_max
264
+
265
+ # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
266
+ self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
267
+ if qkv_bias:
268
+ self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
269
+ else:
270
+ self.in_proj_bias = None
271
+
272
+ if self.scaled_cosine:
273
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
274
+ else:
275
+ self.logit_scale = None
276
+ self.attn_drop = nn.Dropout(attn_drop)
277
+ if self.scale_heads:
278
+ self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
279
+ else:
280
+ self.head_scale = None
281
+ self.out_proj = nn.Linear(dim, dim)
282
+ self.out_drop = nn.Dropout(proj_drop)
283
+ self.xattn = xattn
284
+ self.xattn_drop = attn_drop
285
+
286
+ def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
287
+ q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias)
288
+ N_q, B_q, C_q = q.shape
289
+ N_k, B_k, C_k = k.shape
290
+ N_v, B_v, C_v = v.shape
291
+ if self.xattn:
292
+ # B, N, C -> B, N, num_heads, C
293
+ q = q.permute(1, 0, 2).reshape(B_q, N_q, self.num_heads, -1)
294
+ k = k.permute(1, 0, 2).reshape(B_k, N_k, self.num_heads, -1)
295
+ v = v.permute(1, 0, 2).reshape(B_v, N_v, self.num_heads, -1)
296
+
297
+ x = xops.memory_efficient_attention(
298
+ q, k, v,
299
+ p=self.xattn_drop,
300
+ scale=self.scale if self.logit_scale is None else None,
301
+ attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None
302
+ )
303
+ else:
304
+ # B*H, L, C
305
+ q = q.contiguous().view(N_q, B_q * self.num_heads, -1).transpose(0, 1)
306
+ k = k.contiguous().view(N_k, B_k * self.num_heads, -1).transpose(0, 1)
307
+ v = v.contiguous().view(N_v, B_v * self.num_heads, -1).transpose(0, 1)
308
+
309
+ if self.logit_scale is not None:
310
+ # B*H, N_q, N_k
311
+ attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
312
+ logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
313
+ attn = attn.view(B_q, self.num_heads, N_q, N_k) * logit_scale
314
+ attn = attn.view(-1, N_q, N_k)
315
+ else:
316
+ q = q * self.scale
317
+ attn = torch.bmm(q, k.transpose(-1, -2))
318
+
319
+ if attn_mask is not None:
320
+ if attn_mask.dtype == torch.bool:
321
+ new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
322
+ new_attn_mask.masked_fill_(attn_mask, float("-inf"))
323
+ attn_mask = new_attn_mask
324
+ attn += attn_mask
325
+
326
+ attn = attn.softmax(dim=-1)
327
+ attn = self.attn_drop(attn)
328
+
329
+ x = torch.bmm(attn, v)
330
+
331
+ if self.head_scale is not None:
332
+ x = x.view(B_q, self.num_heads, N_q, C_q) * self.head_scale
333
+ x = x.view(-1, N_q, C_q)
334
+ x = x.transpose(0, 1).reshape(N_q, B_q, C_q)
335
+ x = self.out_proj(x)
336
+ x = self.out_drop(x)
337
+ return x
338
+
339
+ class CustomResidualAttentionBlock(nn.Module):
340
+ def __init__(
341
+ self,
342
+ d_model: int,
343
+ n_head: int,
344
+ mlp_ratio: float = 4.0,
345
+ ls_init_value: float = None,
346
+ act_layer: Callable = nn.GELU,
347
+ norm_layer: Callable = LayerNorm,
348
+ scale_cosine_attn: bool = False,
349
+ scale_heads: bool = False,
350
+ scale_attn: bool = False,
351
+ scale_fc: bool = False,
352
+ cross_attn: bool = False,
353
+ xattn: bool = False,
354
+ ):
355
+ super().__init__()
356
+
357
+ self.ln_1 = norm_layer(d_model)
358
+ self.ln_1_k = norm_layer(d_model) if cross_attn else self.ln_1
359
+ self.ln_1_v = norm_layer(d_model) if cross_attn else self.ln_1
360
+ self.attn = CustomAttention(
361
+ d_model, n_head,
362
+ qkv_bias=True,
363
+ attn_drop=0.,
364
+ proj_drop=0.,
365
+ scaled_cosine=scale_cosine_attn,
366
+ scale_heads=scale_heads,
367
+ xattn=xattn
368
+ )
369
+
370
+ self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
371
+ self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
372
+
373
+ self.ln_2 = norm_layer(d_model)
374
+ mlp_width = int(d_model * mlp_ratio)
375
+ self.mlp = nn.Sequential(OrderedDict([
376
+ ("c_fc", nn.Linear(d_model, mlp_width)),
377
+ ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
378
+ ("gelu", act_layer()),
379
+ ("c_proj", nn.Linear(mlp_width, d_model))
380
+ ]))
381
+
382
+ self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
383
+
384
+ def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
385
+ q = q + self.ls_1(self.ln_attn(self.attn(self.ln_1(q), self.ln_1_k(k), self.ln_1_v(v), attn_mask=attn_mask)))
386
+ q = q + self.ls_2(self.mlp(self.ln_2(q)))
387
+ return q
388
+
389
+ class CustomTransformer(nn.Module):
390
+ def __init__(
391
+ self,
392
+ width: int,
393
+ layers: int,
394
+ heads: int,
395
+ mlp_ratio: float = 4.0,
396
+ ls_init_value: float = None,
397
+ act_layer: Callable = nn.GELU,
398
+ norm_layer: Callable = LayerNorm,
399
+ scale_cosine_attn: bool = True,
400
+ scale_heads: bool = False,
401
+ scale_attn: bool = False,
402
+ scale_fc: bool = False,
403
+ cross_attn: bool = False,
404
+ xattn: bool = False,
405
+ ):
406
+ super().__init__()
407
+ self.width = width
408
+ self.layers = layers
409
+ self.grad_checkpointing = False
410
+ self.xattn = xattn
411
+
412
+ self.resblocks = nn.ModuleList([
413
+ CustomResidualAttentionBlock(
414
+ width,
415
+ heads,
416
+ mlp_ratio,
417
+ ls_init_value=ls_init_value,
418
+ act_layer=act_layer,
419
+ norm_layer=norm_layer,
420
+ scale_cosine_attn=scale_cosine_attn,
421
+ scale_heads=scale_heads,
422
+ scale_attn=scale_attn,
423
+ scale_fc=scale_fc,
424
+ cross_attn=cross_attn,
425
+ xattn=xattn)
426
+ for _ in range(layers)
427
+ ])
428
+
429
+ def get_cast_dtype(self) -> torch.dtype:
430
+ return self.resblocks[0].mlp.c_fc.weight.dtype
431
+
432
+ def forward(self, q: torch.Tensor, k: torch.Tensor = None, v: torch.Tensor = None, attn_mask: Optional[torch.Tensor] = None):
433
+ if k is None and v is None:
434
+ k = v = q
435
+ for r in self.resblocks:
436
+ if self.grad_checkpointing and not torch.jit.is_scripting():
437
+ q = checkpoint(r, q, k, v, attn_mask)
438
+ else:
439
+ q = r(q, k, v, attn_mask=attn_mask)
440
+ return q
441
+
442
+
443
+ class ResidualAttentionBlock(nn.Module):
444
+ def __init__(
445
+ self,
446
+ d_model: int,
447
+ n_head: int,
448
+ mlp_ratio: float = 4.0,
449
+ ls_init_value: float = None,
450
+ act_layer: Callable = nn.GELU,
451
+ norm_layer: Callable = LayerNorm,
452
+ xattn: bool = False,
453
+ ):
454
+ super().__init__()
455
+
456
+ self.ln_1 = norm_layer(d_model)
457
+ if xattn:
458
+ self.attn = Attention(d_model, n_head, xattn=True)
459
+ else:
460
+ self.attn = nn.MultiheadAttention(d_model, n_head)
461
+ self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
462
+
463
+ self.ln_2 = norm_layer(d_model)
464
+ mlp_width = int(d_model * mlp_ratio)
465
+ self.mlp = nn.Sequential(OrderedDict([
466
+ ("c_fc", nn.Linear(d_model, mlp_width)),
467
+ ("gelu", act_layer()),
468
+ ("c_proj", nn.Linear(mlp_width, d_model))
469
+ ]))
470
+
471
+ self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
472
+ self.xattn = xattn
473
+
474
+ def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
475
+ attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
476
+ if self.xattn:
477
+ return self.attn(x, attn_mask=attn_mask)
478
+ return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
479
+
480
+ def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
481
+ x = x + self.ls_1(self.attention(self.ln_1(x), attn_mask=attn_mask))
482
+ x = x + self.ls_2(self.mlp(self.ln_2(x)))
483
+ return x
484
+
485
+ class Transformer(nn.Module):
486
+ def __init__(
487
+ self,
488
+ width: int,
489
+ layers: int,
490
+ heads: int,
491
+ mlp_ratio: float = 4.0,
492
+ ls_init_value: float = None,
493
+ act_layer: Callable = nn.GELU,
494
+ norm_layer: Callable = LayerNorm,
495
+ xattn: bool = False,
496
+ ):
497
+ super().__init__()
498
+ self.width = width
499
+ self.layers = layers
500
+ self.grad_checkpointing = False
501
+
502
+ self.resblocks = nn.ModuleList([
503
+ ResidualAttentionBlock(
504
+ width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)
505
+ for _ in range(layers)
506
+ ])
507
+
508
+ def get_cast_dtype(self) -> torch.dtype:
509
+ return self.resblocks[0].mlp.c_fc.weight.dtype
510
+
511
+ def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
512
+ for r in self.resblocks:
513
+ if self.grad_checkpointing and not torch.jit.is_scripting():
514
+ x = checkpoint(r, x, attn_mask)
515
+ else:
516
+ x = r(x, attn_mask=attn_mask)
517
+ return x
518
+
519
+
520
+ class VisionTransformer(nn.Module):
521
+ def __init__(
522
+ self,
523
+ image_size: int,
524
+ patch_size: int,
525
+ width: int,
526
+ layers: int,
527
+ heads: int,
528
+ mlp_ratio: float,
529
+ ls_init_value: float = None,
530
+ patch_dropout: float = 0.,
531
+ global_average_pool: bool = False,
532
+ output_dim: int = 512,
533
+ act_layer: Callable = nn.GELU,
534
+ norm_layer: Callable = LayerNorm,
535
+ xattn: bool = False,
536
+ ):
537
+ super().__init__()
538
+ self.image_size = to_2tuple(image_size)
539
+ self.patch_size = to_2tuple(patch_size)
540
+ self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])
541
+ self.output_dim = output_dim
542
+ self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
543
+
544
+ scale = width ** -0.5
545
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
546
+ self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))
547
+
548
+ # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
549
+ self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
550
+ self.ln_pre = norm_layer(width)
551
+
552
+ self.transformer = Transformer(
553
+ width,
554
+ layers,
555
+ heads,
556
+ mlp_ratio,
557
+ ls_init_value=ls_init_value,
558
+ act_layer=act_layer,
559
+ norm_layer=norm_layer,
560
+ xattn=xattn
561
+ )
562
+
563
+ self.global_average_pool = global_average_pool
564
+ self.ln_post = norm_layer(width)
565
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
566
+
567
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
568
+ for param in self.parameters():
569
+ param.requires_grad = False
570
+
571
+ if unlocked_groups != 0:
572
+ groups = [
573
+ [
574
+ self.conv1,
575
+ self.class_embedding,
576
+ self.positional_embedding,
577
+ self.ln_pre,
578
+ ],
579
+ *self.transformer.resblocks[:-1],
580
+ [
581
+ self.transformer.resblocks[-1],
582
+ self.ln_post,
583
+ ],
584
+ self.proj,
585
+ ]
586
+
587
+ def _unlock(x):
588
+ if isinstance(x, Sequence):
589
+ for g in x:
590
+ _unlock(g)
591
+ else:
592
+ if isinstance(x, torch.nn.Parameter):
593
+ x.requires_grad = True
594
+ else:
595
+ for p in x.parameters():
596
+ p.requires_grad = True
597
+
598
+ _unlock(groups[-unlocked_groups:])
599
+
600
+ def get_num_layers(self):
601
+ return self.transformer.layers
602
+
603
+ @torch.jit.ignore
604
+ def set_grad_checkpointing(self, enable=True):
605
+ self.transformer.grad_checkpointing = enable
606
+
607
+ @torch.jit.ignore
608
+ def no_weight_decay(self):
609
+ return {'positional_embedding', 'class_embedding'}
610
+
611
+ def forward(self, x: torch.Tensor, return_all_features: bool=False):
612
+ x = self.conv1(x) # shape = [*, width, grid, grid]
613
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
614
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
615
+ x = torch.cat(
616
+ [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
617
+ x], dim=1) # shape = [*, grid ** 2 + 1, width]
618
+ x = x + self.positional_embedding.to(x.dtype)
619
+
620
+ # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
621
+ x = self.patch_dropout(x)
622
+ x = self.ln_pre(x)
623
+
624
+ x = x.permute(1, 0, 2) # NLD -> LND
625
+ x = self.transformer(x)
626
+ x = x.permute(1, 0, 2) # LND -> NLD
627
+
628
+ if not return_all_features:
629
+ if self.global_average_pool:
630
+ x = x.mean(dim=1) #x = x[:,1:,:].mean(dim=1)
631
+ else:
632
+ x = x[:, 0]
633
+
634
+ x = self.ln_post(x)
635
+
636
+ if self.proj is not None:
637
+ x = x @ self.proj
638
+
639
+ return x
640
+
641
+
642
+ class TextTransformer(nn.Module):
643
+ def __init__(
644
+ self,
645
+ context_length: int = 77,
646
+ vocab_size: int = 49408,
647
+ width: int = 512,
648
+ heads: int = 8,
649
+ layers: int = 12,
650
+ ls_init_value: float = None,
651
+ output_dim: int = 512,
652
+ act_layer: Callable = nn.GELU,
653
+ norm_layer: Callable = LayerNorm,
654
+ xattn: bool= False,
655
+ attn_mask: bool = True
656
+ ):
657
+ super().__init__()
658
+ self.context_length = context_length
659
+ self.vocab_size = vocab_size
660
+ self.width = width
661
+ self.output_dim = output_dim
662
+
663
+ self.token_embedding = nn.Embedding(vocab_size, width)
664
+ self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
665
+ self.transformer = Transformer(
666
+ width=width,
667
+ layers=layers,
668
+ heads=heads,
669
+ ls_init_value=ls_init_value,
670
+ act_layer=act_layer,
671
+ norm_layer=norm_layer,
672
+ xattn=xattn
673
+ )
674
+
675
+ self.xattn = xattn
676
+ self.ln_final = norm_layer(width)
677
+ self.text_projection = nn.Parameter(torch.empty(width, output_dim))
678
+
679
+ if attn_mask:
680
+ self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
681
+ else:
682
+ self.attn_mask = None
683
+
684
+ self.init_parameters()
685
+
686
+ def init_parameters(self):
687
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
688
+ nn.init.normal_(self.positional_embedding, std=0.01)
689
+
690
+ proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
691
+ attn_std = self.transformer.width ** -0.5
692
+ fc_std = (2 * self.transformer.width) ** -0.5
693
+ for block in self.transformer.resblocks:
694
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
695
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
696
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
697
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
698
+
699
+ if self.text_projection is not None:
700
+ nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
701
+
702
+ @torch.jit.ignore
703
+ def set_grad_checkpointing(self, enable=True):
704
+ self.transformer.grad_checkpointing = enable
705
+
706
+ @torch.jit.ignore
707
+ def no_weight_decay(self):
708
+ # return {'positional_embedding', 'token_embedding'}
709
+ return {'positional_embedding'}
710
+
711
+ def get_num_layers(self):
712
+ return self.transformer.layers
713
+
714
+ def build_attention_mask(self):
715
+ # lazily create causal attention mask, with full attention between the vision tokens
716
+ # pytorch uses additive attention mask; fill with -inf
717
+ mask = torch.empty(self.context_length, self.context_length)
718
+ mask.fill_(float("-inf"))
719
+ mask.triu_(1) # zero out the lower diagonal
720
+ return mask
721
+
722
+ def forward(self, text, return_all_features: bool=False):
723
+ cast_dtype = self.transformer.get_cast_dtype()
724
+ x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
725
+
726
+ x = x + self.positional_embedding.to(cast_dtype)
727
+ x = x.permute(1, 0, 2) # NLD -> LND
728
+ x = self.transformer(x, attn_mask=self.attn_mask)
729
+ # x = self.transformer(x) # no attention mask is applied
730
+ x = x.permute(1, 0, 2) # LND -> NLD
731
+ x = self.ln_final(x)
732
+
733
+ if not return_all_features:
734
+ # x.shape = [batch_size, n_ctx, transformer.width]
735
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
736
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
737
+ return x
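Review note: `TextTransformer` pools the sequence at the EOT position (the argmax token id) and projects to the joint embedding width; both towers in this file fall back to plain PyTorch attention when xformers is unavailable. A smoke-test sketch with an illustrative ViT-B-sized config:

    import torch

    text_tower = TextTransformer(context_length=77, vocab_size=49408, width=512, heads=8, layers=12, output_dim=512)
    ids = torch.randint(0, 49408, (2, 77))
    emb = text_tower(ids)   # pooled and projected -> (2, 512)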
eva_clip/utils.py ADDED
@@ -0,0 +1,326 @@
1
+ from itertools import repeat
2
+ import collections.abc
3
+ import logging
4
+ import math
5
+ import numpy as np
6
+
7
+ import torch
8
+ from torch import nn as nn
9
+ from torchvision.ops.misc import FrozenBatchNorm2d
10
+ import torch.nn.functional as F
11
+
12
+ # open CLIP
13
+ def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
14
+ # Rescale the grid of position embeddings when loading from state_dict
15
+ old_pos_embed = state_dict.get('visual.positional_embedding', None)
16
+ if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
17
+ return
18
+ grid_size = to_2tuple(model.visual.grid_size)
19
+ extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
20
+ new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
21
+ if new_seq_len == old_pos_embed.shape[0]:
22
+ return
23
+
24
+ if extra_tokens:
25
+ pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
26
+ else:
27
+ pos_emb_tok, pos_emb_img = None, old_pos_embed
28
+ old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
29
+
30
+ logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
31
+ pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
32
+ pos_emb_img = F.interpolate(
33
+ pos_emb_img,
34
+ size=grid_size,
35
+ mode=interpolation,
36
+ align_corners=True,
37
+ )
38
+ pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
39
+ if pos_emb_tok is not None:
40
+ new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
41
+ else:
42
+ new_pos_embed = pos_emb_img
43
+ state_dict['visual.positional_embedding'] = new_pos_embed
44
+
45
+
46
+ def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
47
+ # Rescale the grid of position embeddings when loading from state_dict
48
+ old_pos_embed = state_dict.get('positional_embedding', None)
49
+ if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
50
+ return
51
+ grid_size = to_2tuple(model.visual.grid_size)
52
+ extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
53
+ new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
54
+ if new_seq_len == old_pos_embed.shape[0]:
55
+ return
56
+
57
+ if extra_tokens:
58
+ pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
59
+ else:
60
+ pos_emb_tok, pos_emb_img = None, old_pos_embed
61
+ old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
62
+
63
+ logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
64
+ pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
65
+ pos_emb_img = F.interpolate(
66
+ pos_emb_img,
67
+ size=grid_size,
68
+ mode=interpolation,
69
+ align_corners=True,
70
+ )
71
+ pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
72
+ if pos_emb_tok is not None:
73
+ new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
74
+ else:
75
+ new_pos_embed = pos_emb_img
76
+ state_dict['positional_embedding'] = new_pos_embed
77
+
78
+ def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
79
+ all_keys = list(state_dict.keys())
80
+ # interpolate position embedding
81
+ if 'visual.pos_embed' in state_dict:
82
+ pos_embed_checkpoint = state_dict['visual.pos_embed']
83
+ embedding_size = pos_embed_checkpoint.shape[-1]
84
+ num_patches = model.visual.patch_embed.num_patches
85
+ num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
86
+ # height (== width) for the checkpoint position embedding
87
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
88
+ # height (== width) for the new position embedding
89
+ new_size = int(num_patches ** 0.5)
90
+ # class_token and dist_token are kept unchanged
91
+ if orig_size != new_size:
92
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
93
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
94
+ # only the position tokens are interpolated
95
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
96
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
97
+ pos_tokens = torch.nn.functional.interpolate(
98
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
99
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
100
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
101
+ state_dict['visual.pos_embed'] = new_pos_embed
102
+
103
+ patch_embed_proj = state_dict['visual.patch_embed.proj.weight']
104
+ patch_size = model.visual.patch_embed.patch_size
105
+ state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate(
106
+ patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
107
+
108
+
109
+ def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
110
+ all_keys = list(state_dict.keys())
111
+ # interpolate position embedding
112
+ if 'pos_embed' in state_dict:
113
+ pos_embed_checkpoint = state_dict['pos_embed']
114
+ embedding_size = pos_embed_checkpoint.shape[-1]
115
+ num_patches = model.visual.patch_embed.num_patches
116
+ num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
117
+ # height (== width) for the checkpoint position embedding
118
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
119
+ # height (== width) for the new position embedding
120
+ new_size = int(num_patches ** 0.5)
121
+ # class_token and dist_token are kept unchanged
122
+ if orig_size != new_size:
123
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
124
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
125
+ # only the position tokens are interpolated
126
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
127
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
128
+ pos_tokens = torch.nn.functional.interpolate(
129
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
130
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
131
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
132
+ state_dict['pos_embed'] = new_pos_embed
133
+
134
+ patch_embed_proj = state_dict['patch_embed.proj.weight']
135
+ patch_size = model.visual.patch_embed.patch_size
136
+ state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
137
+ patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
138
+
139
+
140
+ def resize_rel_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
141
+ all_keys = list(state_dict.keys())
142
+ for key in all_keys:
143
+ if "relative_position_index" in key:
144
+ state_dict.pop(key)
145
+
146
+ if "relative_position_bias_table" in key:
147
+ rel_pos_bias = state_dict[key]
148
+ src_num_pos, num_attn_heads = rel_pos_bias.size()
149
+ dst_num_pos, _ = model.visual.state_dict()[key].size()
150
+ dst_patch_shape = model.visual.patch_embed.patch_shape
151
+ if dst_patch_shape[0] != dst_patch_shape[1]:
152
+ raise NotImplementedError()
153
+ num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
154
+ src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
155
+ dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
156
+ if src_size != dst_size:
157
+ print("Position interpolate for %s from %dx%d to %dx%d" % (
158
+ key, src_size, src_size, dst_size, dst_size))
159
+ extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
160
+ rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
161
+
162
+ def geometric_progression(a, r, n):
163
+ return a * (1.0 - r ** n) / (1.0 - r)
164
+
165
+ left, right = 1.01, 1.5
166
+ while right - left > 1e-6:
167
+ q = (left + right) / 2.0
168
+ gp = geometric_progression(1, q, src_size // 2)
169
+ if gp > dst_size // 2:
170
+ right = q
171
+ else:
172
+ left = q
173
+
174
+ # if q > 1.090307:
175
+ # q = 1.090307
176
+
177
+ dis = []
178
+ cur = 1
179
+ for i in range(src_size // 2):
180
+ dis.append(cur)
181
+ cur += q ** (i + 1)
182
+
183
+ r_ids = [-_ for _ in reversed(dis)]
184
+
185
+ x = r_ids + [0] + dis
186
+ y = r_ids + [0] + dis
187
+
188
+ t = dst_size // 2.0
189
+ dx = np.arange(-t, t + 0.1, 1.0)
190
+ dy = np.arange(-t, t + 0.1, 1.0)
191
+
192
+ print("Original positions = %s" % str(x))
193
+ print("Target positions = %s" % str(dx))
194
+
195
+ all_rel_pos_bias = []
196
+
197
+ for i in range(num_attn_heads):
198
+ z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
199
+ from scipy import interpolate  # F.interpolate has no interp2d; use scipy's (available in SciPy < 1.14)
+ f = interpolate.interp2d(x, y, z, kind='cubic')
200
+ all_rel_pos_bias.append(
201
+ torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
202
+
203
+ rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
204
+
205
+ new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
206
+ state_dict[key] = new_rel_pos_bias
207
+
208
+ # interpolate position embedding
209
+ if 'pos_embed' in state_dict:
210
+ pos_embed_checkpoint = state_dict['pos_embed']
211
+ embedding_size = pos_embed_checkpoint.shape[-1]
212
+ num_patches = model.visual.patch_embed.num_patches
213
+ num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
214
+ # height (== width) for the checkpoint position embedding
215
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
216
+ # height (== width) for the new position embedding
217
+ new_size = int(num_patches ** 0.5)
218
+ # class_token and dist_token are kept unchanged
219
+ if orig_size != new_size:
220
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
221
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
222
+ # only the position tokens are interpolated
223
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
224
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
225
+ pos_tokens = torch.nn.functional.interpolate(
226
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
227
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
228
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
229
+ state_dict['pos_embed'] = new_pos_embed
230
+
231
+ patch_embed_proj = state_dict['patch_embed.proj.weight']
232
+ patch_size = model.visual.patch_embed.patch_size
233
+ state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
234
+ patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
235
+
236
+
237
+ def freeze_batch_norm_2d(module, module_match={}, name=''):
238
+ """
239
+ Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
240
+ itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
241
+ returned. Otherwise, the module is walked recursively and submodules are converted in place.
242
+
243
+ Args:
244
+ module (torch.nn.Module): Any PyTorch module.
245
+ module_match (dict): Dictionary of full module names to freeze (all if empty)
246
+ name (str): Full module name (prefix)
247
+
248
+ Returns:
249
+ torch.nn.Module: Resulting module
250
+
251
+ Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
252
+ """
253
+ res = module
254
+ is_match = True
255
+ if module_match:
256
+ is_match = name in module_match
257
+ if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
258
+ res = FrozenBatchNorm2d(module.num_features)
259
+ res.num_features = module.num_features
260
+ res.affine = module.affine
261
+ if module.affine:
262
+ res.weight.data = module.weight.data.clone().detach()
263
+ res.bias.data = module.bias.data.clone().detach()
264
+ res.running_mean.data = module.running_mean.data
265
+ res.running_var.data = module.running_var.data
266
+ res.eps = module.eps
267
+ else:
268
+ for child_name, child in module.named_children():
269
+ full_child_name = '.'.join([name, child_name]) if name else child_name
270
+ new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
271
+ if new_child is not child:
272
+ res.add_module(child_name, new_child)
273
+ return res
274
+
275
+
276
+ # From PyTorch internals
277
+ def _ntuple(n):
278
+ def parse(x):
279
+ if isinstance(x, collections.abc.Iterable):
280
+ return x
281
+ return tuple(repeat(x, n))
282
+ return parse
283
+
284
+
285
+ to_1tuple = _ntuple(1)
286
+ to_2tuple = _ntuple(2)
287
+ to_3tuple = _ntuple(3)
288
+ to_4tuple = _ntuple(4)
289
+ to_ntuple = lambda n, x: _ntuple(n)(x)
290
+
291
+
292
+ def is_logging(args):
293
+ def is_global_master(args):
294
+ return args.rank == 0
295
+
296
+ def is_local_master(args):
297
+ return args.local_rank == 0
298
+
299
+ def is_master(args, local=False):
300
+ return is_local_master(args) if local else is_global_master(args)
301
+ return is_master
302
+
303
+
304
+ class AllGather(torch.autograd.Function):
305
+ """An autograd function that performs allgather on a tensor.
306
+ Performs all_gather operation on the provided tensors.
307
+ *** Warning ***: torch.distributed.all_gather has no gradient.
308
+ """
309
+
310
+ @staticmethod
311
+ def forward(ctx, tensor, rank, world_size):
312
+ tensors_gather = [torch.empty_like(tensor) for _ in range(world_size)]
313
+ torch.distributed.all_gather(tensors_gather, tensor)
314
+ ctx.rank = rank
315
+ ctx.batch_size = tensor.shape[0]
316
+ return torch.cat(tensors_gather, 0)
317
+
318
+ @staticmethod
319
+ def backward(ctx, grad_output):
320
+ return (
321
+ grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)],
322
+ None,
323
+ None
324
+ )
325
+
326
+ allgather = AllGather.apply
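As a quick illustration of the `freeze_batch_norm_2d` helper defined above (a self-contained sketch; the tiny backbone is just a stand-in):

    # Hypothetical usage sketch, not part of this commit.
    import torch.nn as nn
    from eva_clip.utils import freeze_batch_norm_2d

    backbone = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        nn.BatchNorm2d(16),
        nn.ReLU(),
    )
    frozen = freeze_batch_norm_2d(backbone)
    print(type(frozen[1]).__name__)  # FrozenBatchNorm2d, stats and affine params copied in place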
ip_adapter_art/utils/ip_adapter.py DELETED
@@ -1,72 +0,0 @@
1
- from diffusers.models.attention_processor import IPAdapterAttnProcessor2_0, Attention
2
- from diffusers.models.embeddings import (
3
- ImageProjection,
4
- MultiIPAdapterImageProjection,
5
- IPAdapterPlusImageProjection,
6
- )
7
- import torch
8
-
9
-
10
- def save_ip_adapter(unet, path):
11
- state_dict = {}
12
- if (
13
- hasattr(unet, "encoder_hid_proj")
14
- and unet.encoder_hid_proj is not None
15
- and isinstance(unet.encoder_hid_proj, torch.nn.Module)
16
- ):
17
- state_dict["encoder_hid_proj"] = unet.encoder_hid_proj.state_dict()
18
-
19
- for name, module in unet.attn_processors.items():
20
- if isinstance(module, torch.nn.Module):
21
- state_dict[name] = module.state_dict()
22
- torch.save(state_dict, path)
23
-
24
-
25
- def load_ip_adapter(
26
- unet,
27
- path,
28
- ):
29
- state_dict = torch.load(path, map_location="cpu")
30
-
31
- if "encoder_hid_proj" in state_dict.keys():
32
- num_image_text_embeds = 4
33
- clip_embeddings_dim = state_dict["encoder_hid_proj"][
34
- "image_projection_layers.0.image_embeds.weight"
35
- ].shape[-1]
36
- cross_attention_dim = (
37
- state_dict["encoder_hid_proj"][
38
- "image_projection_layers.0.image_embeds.weight"
39
- ].shape[0]
40
- // 4
41
- )
42
- if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None:
43
- unet.encoder_hid_proj = MultiIPAdapterImageProjection(
44
- [
45
- ImageProjection(
46
- cross_attention_dim=cross_attention_dim,
47
- image_embed_dim=clip_embeddings_dim,
48
- num_image_text_embeds=num_image_text_embeds,
49
- )
50
- ]
51
- ).to(unet.device, unet.dtype)
52
- unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"])
53
- else:
54
- unet.encoder_hid_proj = lambda x: x
55
- cross_attention_dim = state_dict[
56
- "down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor"
57
- ]["to_k_ip.0.weight"].shape[-1]
58
-
59
- unet.config.encoder_hid_dim_type = "ip_image_proj"
60
-
61
- for name, module in unet.named_modules():
62
- if "attn2" in name and isinstance(module, Attention):
63
- if not isinstance(module.processor, IPAdapterAttnProcessor2_0):
64
- module.set_processor(
65
- IPAdapterAttnProcessor2_0(
66
- hidden_size=module.query_dim,
67
- cross_attention_dim=cross_attention_dim,
68
- ).to(unet.device, unet.dtype)
69
- )
70
- module.processor.load_state_dict(
71
- state_dict[f"{name}.processor"], strict=False
72
- )
{ip_adapter_art/utils → ip_adapter_diffusers}/__init__.py RENAMED
File without changes
ip_adapter_diffusers/custom_cross_attention_processor.py ADDED
@@ -0,0 +1,297 @@
1
+ import torch
2
+ from diffusers.models.attention_processor import IPAdapterAttnProcessor2_0, Attention
3
+ import torch.nn.functional as F
4
+ import torch.nn as nn
5
+
6
+
7
+ class ExtraCrossAttnProcessor2_0(torch.nn.Module):
8
+ r"""
9
+ Attention processor for IP-Adapter for PyTorch 2.0.
10
+
11
+ Args:
12
+ hidden_size (`int`):
13
+ The hidden size of the attention layer.
14
+ cross_attention_dim (`int`):
15
+ The number of channels in the `encoder_hidden_states`.
16
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
17
+ The context length of the image features.
18
+ scale (`float` or `List[float]`, defaults to 1.0):
19
+ the weight scale of image prompt.
20
+ """
21
+
22
+ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0):
23
+ super().__init__()
24
+
25
+ if not hasattr(F, "scaled_dot_product_attention"):
26
+ raise ImportError(
27
+ f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
28
+ )
29
+
30
+ self.hidden_size = hidden_size
31
+ self.cross_attention_dim = cross_attention_dim
32
+
33
+ self.scale = scale
34
+ self.to_q_ip = nn.Linear(hidden_size, hidden_size, bias=False)
35
+ self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
36
+ self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
37
+
38
+ def __call__(
39
+ self,
40
+ attn,
41
+ hidden_states,
42
+ encoder_hidden_states=None,
43
+ attention_mask=None,
44
+ temb=None,
45
+ *args,
46
+ **kwargs,
47
+ ):
48
+ residual = hidden_states
49
+
50
+ if attn.spatial_norm is not None:
51
+ hidden_states = attn.spatial_norm(hidden_states, temb)
52
+
53
+ input_ndim = hidden_states.ndim
54
+
55
+ if input_ndim == 4:
56
+ batch_size, channel, height, width = hidden_states.shape
57
+ hidden_states = hidden_states.view(
58
+ batch_size, channel, height * width
59
+ ).transpose(1, 2)
60
+
61
+ if encoder_hidden_states is not None:
62
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states
63
+ if isinstance(ip_hidden_states, list):
64
+ ip_hidden_states = ip_hidden_states[0]
65
+
66
+ batch_size, sequence_length, _ = (
67
+ hidden_states.shape
68
+ if encoder_hidden_states is None
69
+ else encoder_hidden_states.shape
70
+ )
71
+
72
+ if attention_mask is not None:
73
+ attention_mask = attn.prepare_attention_mask(
74
+ attention_mask, sequence_length, batch_size
75
+ )
76
+ # scaled_dot_product_attention expects attention_mask shape to be
77
+ # (batch, heads, source_length, target_length)
78
+ attention_mask = attention_mask.view(
79
+ batch_size, attn.heads, -1, attention_mask.shape[-1]
80
+ )
81
+
82
+ if attn.group_norm is not None:
83
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
84
+ 1, 2
85
+ )
86
+
87
+ query = attn.to_q(hidden_states)
88
+
89
+ key = attn.to_k(encoder_hidden_states)
90
+ value = attn.to_v(encoder_hidden_states)
91
+
92
+ inner_dim = key.shape[-1]
93
+ head_dim = inner_dim // attn.heads
94
+
95
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
96
+
97
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
98
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
99
+
100
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
101
+ # TODO: add support for attn.scale when we move to Torch 2.1
102
+ hidden_states = F.scaled_dot_product_attention(
103
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
104
+ )
105
+
106
+ hidden_states = hidden_states.transpose(1, 2).reshape(
107
+ batch_size, -1, attn.heads * head_dim
108
+ )
109
+ hidden_states = hidden_states.to(query.dtype)
110
+
111
+ ip_query = self.to_q_ip(hidden_states)
112
+
113
+ # for ip-adapter
114
+ ip_key = self.to_k_ip(ip_hidden_states)
115
+ ip_value = self.to_v_ip(ip_hidden_states)
116
+
117
+ ip_query = ip_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
118
+
119
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
120
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
121
+
122
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
123
+ # TODO: add support for attn.scale when we move to Torch 2.1
124
+ ip_hidden_states = F.scaled_dot_product_attention(
125
+ ip_query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
126
+ )
127
+ # with torch.no_grad():
128
+ # self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1)
129
+ # print(self.attn_map.shape)
130
+
131
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(
132
+ batch_size, -1, attn.heads * head_dim
133
+ )
134
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
135
+
136
+ hidden_states = hidden_states + self.scale * ip_hidden_states
137
+
138
+ # linear proj
139
+ hidden_states = attn.to_out[0](hidden_states)
140
+ # dropout
141
+ hidden_states = attn.to_out[1](hidden_states)
142
+
143
+ if input_ndim == 4:
144
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
145
+ batch_size, channel, height, width
146
+ )
147
+
148
+ if attn.residual_connection:
149
+ hidden_states = hidden_states + residual
150
+
151
+ hidden_states = hidden_states / attn.rescale_output_factor
152
+
153
+ return hidden_states
154
+
155
+
156
+ class DecoupledCrossAttnProcessor2_0(torch.nn.Module):
157
+ r"""
158
+ Attention processor for IP-Adapter for PyTorch 2.0.
159
+
160
+ Args:
161
+ hidden_size (`int`):
162
+ The hidden size of the attention layer.
163
+ cross_attention_dim (`int`):
164
+ The number of channels in the `encoder_hidden_states`.
165
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
166
+ The context length of the image features.
167
+ scale (`float` or `List[float]`, defaults to 1.0):
168
+ the weight scale of image prompt.
169
+ """
170
+
171
+ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0):
172
+ super().__init__()
173
+
174
+ if not hasattr(F, "scaled_dot_product_attention"):
175
+ raise ImportError(
176
+ f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
177
+ )
178
+
179
+ self.hidden_size = hidden_size
180
+ self.cross_attention_dim = cross_attention_dim
181
+
182
+ self.scale = scale
183
+ self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
184
+ self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
185
+
186
+ def __call__(
187
+ self,
188
+ attn,
189
+ hidden_states,
190
+ encoder_hidden_states=None,
191
+ attention_mask=None,
192
+ temb=None,
193
+ *args,
194
+ **kwargs,
195
+ ):
196
+ residual = hidden_states
197
+
198
+ if attn.spatial_norm is not None:
199
+ hidden_states = attn.spatial_norm(hidden_states, temb)
200
+
201
+ input_ndim = hidden_states.ndim
202
+
203
+ if input_ndim == 4:
204
+ batch_size, channel, height, width = hidden_states.shape
205
+ hidden_states = hidden_states.view(
206
+ batch_size, channel, height * width
207
+ ).transpose(1, 2)
208
+
209
+ if encoder_hidden_states is not None:
210
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states
211
+ if isinstance(ip_hidden_states, list):
212
+ ip_hidden_states = ip_hidden_states[0]
213
+
214
+ batch_size, sequence_length, _ = (
215
+ hidden_states.shape
216
+ if encoder_hidden_states is None
217
+ else encoder_hidden_states.shape
218
+ )
219
+
220
+ if attention_mask is not None:
221
+ attention_mask = attn.prepare_attention_mask(
222
+ attention_mask, sequence_length, batch_size
223
+ )
224
+ # scaled_dot_product_attention expects attention_mask shape to be
225
+ # (batch, heads, source_length, target_length)
226
+ attention_mask = attention_mask.view(
227
+ batch_size, attn.heads, -1, attention_mask.shape[-1]
228
+ )
229
+
230
+ if attn.group_norm is not None:
231
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
232
+ 1, 2
233
+ )
234
+
235
+ query = attn.to_q(hidden_states)
236
+
237
+ key = attn.to_k(encoder_hidden_states)
238
+ value = attn.to_v(encoder_hidden_states)
239
+
240
+ inner_dim = key.shape[-1]
241
+ head_dim = inner_dim // attn.heads
242
+
243
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
244
+
245
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
246
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
247
+
248
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
249
+ # TODO: add support for attn.scale when we move to Torch 2.1
250
+ hidden_states = F.scaled_dot_product_attention(
251
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
252
+ )
253
+
254
+ hidden_states = hidden_states.transpose(1, 2).reshape(
255
+ batch_size, -1, attn.heads * head_dim
256
+ )
257
+ hidden_states = hidden_states.to(query.dtype)
258
+
259
+ # for ip-adapter
260
+ ip_key = self.to_k_ip(ip_hidden_states)
261
+ ip_value = self.to_v_ip(ip_hidden_states)
262
+
263
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
264
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
265
+
266
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
267
+ # TODO: add support for attn.scale when we move to Torch 2.1
268
+ ip_hidden_states = F.scaled_dot_product_attention(
269
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
270
+ )
271
+ # with torch.no_grad():
272
+ # self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1)
273
+ # print(self.attn_map.shape)
274
+
275
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(
276
+ batch_size, -1, attn.heads * head_dim
277
+ )
278
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
279
+
280
+ hidden_states = hidden_states + self.scale * ip_hidden_states
281
+
282
+ # linear proj
283
+ hidden_states = attn.to_out[0](hidden_states)
284
+ # dropout
285
+ hidden_states = attn.to_out[1](hidden_states)
286
+
287
+ if input_ndim == 4:
288
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
289
+ batch_size, channel, height, width
290
+ )
291
+
292
+ if attn.residual_connection:
293
+ hidden_states = hidden_states + residual
294
+
295
+ hidden_states = hidden_states / attn.rescale_output_factor
296
+
297
+ return hidden_states
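A short sketch of the calling convention these processors expect: `encoder_hidden_states` arrives as a (text, image) pair and the image branch is blended back in with `scale`. The shapes and dimensions below are illustrative only:

    # Hypothetical usage sketch, not part of this commit.
    import torch
    from diffusers.models.attention_processor import Attention
    from ip_adapter_diffusers.custom_cross_attention_processor import DecoupledCrossAttnProcessor2_0

    attn = Attention(query_dim=640, cross_attention_dim=2048, heads=10, dim_head=64)
    proc = DecoupledCrossAttnProcessor2_0(hidden_size=640, cross_attention_dim=2048, scale=0.8)

    latents = torch.randn(1, 4096, 640)   # spatial tokens
    text_ctx = torch.randn(1, 77, 2048)   # prompt embeddings
    ip_ctx = torch.randn(1, 16, 2048)     # image-prompt embeddings
    out = proc(attn, latents, encoder_hidden_states=(text_ctx, ip_ctx))
    print(out.shape)                       # torch.Size([1, 4096, 640])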
ip_adapter_diffusers/custom_ip_adapter.py ADDED
@@ -0,0 +1,58 @@
1
+ from .custom_cross_attention_processor import DecoupledCrossAttnProcessor2_0
2
+ import torch
3
+ from diffusers.models.attention_processor import IPAdapterAttnProcessor2_0, Attention
4
+
5
+
6
+ def load_custom_ip_adapter(
7
+ unet,
8
+ path=None,
9
+ blocks="full",
10
+ Custom_Attn_Type=DecoupledCrossAttnProcessor2_0,
11
+ cross_attention_dim=2048,
12
+ Image_Proj_Type=None,
13
+ ):
14
+ if path is None:
15
+ state_dict = None
16
+ else:
17
+ state_dict = torch.load(path, map_location="cpu")
18
+
19
+ # unet.config.encoder_hid_dim_type = "ip_image_proj"
20
+ # if Image_Proj_Type is None:
21
+ # unet.encoder_hid_proj = torch.nn.Identity()
22
+ # unet.encoder_hid_proj.image_projection_layers = torch.nn.ModuleList(
23
+ # [torch.nn.Identity()]
24
+ # )
25
+
26
+ for name, module in unet.named_modules():
27
+ if "attn2" in name and isinstance(module, Attention):
28
+ if blocks == "midup" and "mid" not in name and "up" not in name:
29
+ continue
30
+ if not isinstance(module.processor, torch.nn.Module):
31
+ module.set_processor(
32
+ Custom_Attn_Type(
33
+ hidden_size=module.query_dim,
34
+ cross_attention_dim=cross_attention_dim,
35
+ ).to(unet.device, unet.dtype)
36
+ )
37
+ if state_dict is not None:
38
+ module.processor.load_state_dict(state_dict[f"{name}.processor"])
39
+ else:
40
+ if hasattr(module.processor, "to_q_ip"):
41
+ torch.nn.init.kaiming_normal_(module.processor.to_q_ip.weight)
42
+ torch.nn.init.kaiming_normal_(module.processor.to_k_ip.weight)
43
+ torch.nn.init.kaiming_normal_(module.processor.to_v_ip.weight)
44
+
45
+
46
+ def save_custom_ip_adapter(unet, path):
47
+ state_dict = {}
48
+ for name, module in unet.attn_processors.items():
49
+ if isinstance(module, torch.nn.Module):
50
+ state_dict[name] = module.state_dict()
51
+
52
+ torch.save(state_dict, path)
53
+
54
+
55
+ def set_scale(unet, scale):
56
+ for name, module in unet.attn_processors.items():
57
+ if isinstance(module, torch.nn.Module):
58
+ module.scale = scale
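Roughly how these helpers are intended to be used on an SDXL UNet (a sketch; the model id and output path are placeholders):

    # Hypothetical usage sketch, not part of this commit.
    from diffusers import UNet2DConditionModel
    from ip_adapter_diffusers.custom_ip_adapter import (
        load_custom_ip_adapter, save_custom_ip_adapter, set_scale,
    )

    unet = UNet2DConditionModel.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
    )
    load_custom_ip_adapter(unet, path=None, blocks="full", cross_attention_dim=2048)  # fresh adapters
    set_scale(unet, 0.6)                                   # weight of the image branch
    save_custom_ip_adapter(unet, "custom_ip_adapter.pth")  # placeholder output path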
ip_adapter_diffusers/ip_adapter.py ADDED
@@ -0,0 +1,821 @@
1
+ from diffusers.models.attention_processor import Attention
2
+ from diffusers.models.embeddings import ImageProjection, MultiIPAdapterImageProjection
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import copy
7
+ from .resampler import Resampler
8
+ from typing import Optional
9
+ from diffusers.image_processor import IPAdapterMaskProcessor
10
+ import math
11
+ import warnings
12
+ from pulid.encoders_transformer import IDFormer
13
+
14
+
15
+ def save_ip_adapter(unet, path):
16
+ state_dict = {}
17
+ if (
18
+ hasattr(unet, "encoder_hid_proj")
19
+ and unet.encoder_hid_proj is not None
20
+ and isinstance(unet.encoder_hid_proj, torch.nn.Module)
21
+ ):
22
+ state_dict["encoder_hid_proj"] = unet.encoder_hid_proj.state_dict()
23
+
24
+ for name, module in unet.attn_processors.items():
25
+ if isinstance(module, torch.nn.Module):
26
+ state_dict[name] = module.state_dict()
27
+
28
+ torch.save(state_dict, path)
29
+
30
+
31
+ def load_ip_adapter(
32
+ unet,
33
+ path=None,
34
+ clip_embeddings_dim=1280,
35
+ cross_attention_dim=2048,
36
+ num_image_text_embeds=4,
37
+ attn_blocks=["down", "mid", "up"],
38
+ ):
39
+ if path is None:
40
+ state_dict = None
41
+ else:
42
+ state_dict = torch.load(path, map_location="cpu")
43
+ clip_embeddings_dim = state_dict["encoder_hid_proj"][
44
+ "image_embeds.weight"
45
+ ].shape[-1]
46
+ num_image_text_embeds = (
47
+ state_dict["encoder_hid_proj"]["image_embeds.weight"].shape[0]
48
+ // cross_attention_dim
49
+ )
50
+
51
+ if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None:
52
+ unet.encoder_hid_proj = ImageProjection(
53
+ cross_attention_dim=cross_attention_dim,
54
+ image_embed_dim=clip_embeddings_dim,
55
+ num_image_text_embeds=num_image_text_embeds,
56
+ ).to(unet.device, unet.dtype)
57
+ if state_dict is not None:
58
+ unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"])
59
+
60
+ for name, module in unet.named_modules():
61
+ if (
62
+ "attn2" in name
63
+ and isinstance(module, Attention)
64
+ and any([attn in name for attn in attn_blocks])
65
+ ):
66
+ if not isinstance(module.processor, IPAttnProcessor2_0):
67
+ module.set_processor(
68
+ IPAttnProcessor2_0(
69
+ hidden_size=module.query_dim,
70
+ cross_attention_dim=cross_attention_dim,
71
+ ).to(unet.device, unet.dtype)
72
+ )
73
+ if state_dict is not None:
74
+ module.processor.load_state_dict(state_dict[f"{name}.processor"])
75
+ else:
76
+ module.processor.to_k_ip.load_state_dict(module.to_k.state_dict())
77
+ module.processor.to_v_ip.load_state_dict(module.to_v.state_dict())
78
+
79
+
80
+ def parse_clip_embeddings_dim(
81
+ path,
82
+ state_dict,
83
+ ):
84
+ if "pulid" in path:
85
+ return None
86
+ else:
87
+ return state_dict["encoder_hid_proj"]["image_embeds.weight"].shape[-1]
88
+
89
+
90
+ def parse_num_image_text_embeds(path, state_dict, cross_attention_dim=2048):
91
+ if "pulid" in path:
92
+ return None
93
+ else:
94
+ return (
95
+ state_dict["encoder_hid_proj"]["image_embeds.weight"].shape[0]
96
+ // cross_attention_dim
97
+ )
98
+
99
+
100
+ def parse_encoder_hid_proj_module(
101
+ path=None,
102
+ cross_attention_dim=2048,
103
+ image_embed_dim=None,
104
+ num_image_text_embeds=None,
105
+ ):
106
+ if "pulid" in path:
107
+ return IDFormer()
108
+ else:
109
+ return ImageProjection(
110
+ cross_attention_dim=cross_attention_dim,
111
+ image_embed_dim=image_embed_dim,
112
+ num_image_text_embeds=num_image_text_embeds,
113
+ )
114
+
115
+
116
+ def load_multi_ip_adapter(
117
+ unet,
118
+ paths=None,
119
+ clip_embeddings_dim=[1280],
120
+ cross_attention_dim=2048,
121
+ num_image_text_embeds=[4],
122
+ ):
123
+ if paths is None:
124
+ state_dict = None
125
+ else:
126
+ state_dict = [torch.load(path, map_location="cpu") for path in paths]
127
+ clip_embeddings_dim = [
128
+ parse_clip_embeddings_dim(path=single_path, state_dict=single_state_dict)
129
+ for single_path, single_state_dict in zip(paths, state_dict)
130
+ ]
131
+ num_image_text_embeds = [
132
+ parse_num_image_text_embeds(
133
+ path=single_path,
134
+ state_dict=single_state_dict,
135
+ cross_attention_dim=unet.config.cross_attention_dim,
136
+ )
137
+ for single_path, single_state_dict in zip(paths, state_dict)
138
+ ]
139
+
140
+ if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None:
141
+ unet.encoder_hid_proj = MultiIPAdapterImageProjection(
142
+ [
143
+ parse_encoder_hid_proj_module(
144
+ path=single_path,
145
+ cross_attention_dim=unet.config.cross_attention_dim,
146
+ image_embed_dim=single_clip_embeddings_dim,
147
+ num_image_text_embeds=single_num_image_text_embeds,
148
+ ).to(unet.device, unet.dtype)
149
+ for single_path, single_clip_embeddings_dim, single_num_image_text_embeds in zip(
150
+ paths, clip_embeddings_dim, num_image_text_embeds
151
+ )
152
+ ]
153
+ ).to(unet.device, unet.dtype)
154
+
155
+ if state_dict is not None:
156
+ for single_encoder_hid_proj, single_state_dict in zip(
157
+ unet.encoder_hid_proj.image_projection_layers, state_dict
158
+ ):
159
+ single_encoder_hid_proj.load_state_dict(
160
+ single_state_dict["encoder_hid_proj"]
161
+ )
162
+
163
+ for name, module in unet.named_modules():
164
+ if "attn2" in name and isinstance(module, Attention):
165
+ if not isinstance(module.processor, MultiIPAttnProcessor2_0):
166
+ module.set_processor(
167
+ MultiIPAttnProcessor2_0(
168
+ hidden_size=module.query_dim,
169
+ cross_attention_dim=unet.config.cross_attention_dim,
170
+ num_tokens=num_image_text_embeds,
171
+ ).to(unet.device, unet.dtype)
172
+ )
173
+ if state_dict is not None:
174
+ for (
175
+ to_k_ip,
176
+ to_v_ip,
177
+ single_state_dict,
178
+ ) in zip(
179
+ module.processor.to_k_ip,
180
+ module.processor.to_v_ip,
181
+ state_dict,
182
+ ):
183
+ if f"{name}.processor" in single_state_dict.keys():
184
+ to_k_ip.weight = nn.Parameter(
185
+ single_state_dict[f"{name}.processor"]["to_k_ip.weight"]
186
+ )
187
+ to_v_ip.weight = nn.Parameter(
188
+ single_state_dict[f"{name}.processor"]["to_v_ip.weight"]
189
+ )
190
+ module.processor = module.processor.to(unet.device, unet.dtype)
191
+
192
+
193
+ def load_ip_adapter_plus(
194
+ unet,
195
+ path=None,
196
+ embed_dims=1664,
197
+ depth=4,
198
+ dim_head=64,
199
+ heads=12,
200
+ num_queries=32,
201
+ ff_mult=4,
202
+ attn_blocks=["down", "mid", "up"],
203
+ ):
204
+ if path is not None:
205
+ state_dict = torch.load(path)
206
+ else:
207
+ state_dict = None
208
+ if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None:
209
+ unet.encoder_hid_proj = Resampler(
210
+ dim=unet.config.cross_attention_dim,
211
+ depth=depth,
212
+ dim_head=dim_head,
213
+ heads=heads,
214
+ num_queries=num_queries,
215
+ embedding_dim=embed_dims,
216
+ output_dim=unet.config.cross_attention_dim,
217
+ ff_mult=ff_mult,
218
+ ).to(unet.device, unet.dtype)
219
+ if state_dict is not None:
220
+ unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"])
221
+
222
+ for name, module in unet.named_modules():
223
+ if (
224
+ "attn2" in name
225
+ and isinstance(module, Attention)
226
+ and any([attn in name for attn in attn_blocks])
227
+ ):
228
+ if not isinstance(module.processor, IPAttnProcessor2_0):
229
+ module.set_processor(
230
+ IPAttnProcessor2_0(
231
+ hidden_size=module.query_dim,
232
+ cross_attention_dim=unet.config.cross_attention_dim,
233
+ ).to(unet.device, unet.dtype)
234
+ )
235
+ if state_dict is not None and f"{name}.processor" in state_dict.keys():
236
+ module.processor.load_state_dict(state_dict[f"{name}.processor"])
237
+ else:
238
+ module.processor.to_k_ip.load_state_dict(module.to_k.state_dict())
239
+ module.processor.to_v_ip.load_state_dict(module.to_v.state_dict())
240
+
241
+
242
+ def set_ip_hidden_states(unet, image_embeds):
243
+ for name, module in unet.attn_processors.items():
244
+ if isinstance(module, IPAttnProcessor2_0) or isinstance(
245
+ module, MultiIPAttnProcessor2_0
246
+ ):
247
+ module.ip_hidden_states = image_embeds.clone()
248
+
249
+
250
+ def set_multi_ip_hidden_states(unet, image_embeds):
251
+ for name, module in unet.attn_processors.items():
252
+ if isinstance(module, IPAttnProcessor2_0) or isinstance(
253
+ module, MultiIPAttnProcessor2_0
254
+ ):
255
+ module.ip_hidden_states = image_embeds
256
+
257
+
258
+ def set_multi_ip_attn_masks(unet, attn_masks):
259
+ for name, module in unet.attn_processors.items():
260
+ if isinstance(module, IPAttnProcessor2_0) or isinstance(
261
+ module, MultiIPAttnProcessor2_0
262
+ ):
263
+ module.ip_hidden_states = attn_masks
264
+
265
+
266
+ def clear_ip_hidden_states(model):
267
+ for name, module in model.named_modules():
268
+ if isinstance(module, IPAttnProcessor2_0):
269
+ module.ip_hidden_states = None
270
+
271
+
272
+ def set_ip_adapter_scale(unet, scale=1.0, attn_blocks=["down", "mid", "up"]):
273
+ for name, module in unet.named_modules():
274
+ if isinstance(module, IPAttnProcessor2_0) and any(
275
+ target_module in name for target_module in attn_blocks
276
+ ):
277
+ module.scale = scale
278
+
279
+
280
+ def downsample(
281
+ mask: torch.Tensor, batch_size: int, num_queries: int, value_embed_dim: int
282
+ ):
283
+ """
284
+ Downsamples the provided mask tensor to match the expected dimensions for scaled dot-product attention. If the
285
+ aspect ratio of the mask does not match the aspect ratio of the output image, a warning is issued.
286
+
287
+ Args:
288
+ mask (`torch.Tensor`):
289
+ The input mask tensor generated with `IPAdapterMaskProcessor.preprocess()`.
290
+ batch_size (`int`):
291
+ The batch size.
292
+ num_queries (`int`):
293
+ The number of queries.
294
+ value_embed_dim (`int`):
295
+ The dimensionality of the value embeddings.
296
+
297
+ Returns:
298
+ `torch.Tensor`:
299
+ The downsampled mask tensor.
300
+
301
+ """
302
+ o_h = mask.shape[2]
303
+ o_w = mask.shape[3]
304
+ ratio = o_w / o_h
305
+ mask_h = int(math.sqrt(num_queries / ratio))
306
+ mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0)
307
+ mask_w = num_queries // mask_h
308
+
309
+ mask_downsample = F.interpolate(mask, size=(mask_h, mask_w), mode="bicubic")
310
+
311
+ # Repeat batch_size times
312
+ if mask_downsample.shape[0] < batch_size:
313
+ mask_downsample = mask_downsample.repeat(batch_size, 1, 1, 1)
314
+
315
+ mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1)
316
+
317
+ downsampled_area = mask_h * mask_w
318
+ # If the output image and the mask do not have the same aspect ratio, tensor shapes will not match
319
+ # Pad tensor if downsampled_mask.shape[1] is smaller than num_queries
320
+ if downsampled_area < num_queries:
321
+ warnings.warn(
322
+ "The aspect ratio of the mask does not match the aspect ratio of the output image. "
323
+ "Please update your masks or adjust the output size for optimal performance.",
324
+ UserWarning,
325
+ )
326
+ mask_downsample = F.pad(
327
+ mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0
328
+ )
329
+ # Discard last embeddings if downsampled_mask.shape[1] is bigger than num_queries
330
+ if downsampled_area > num_queries:
331
+ warnings.warn(
332
+ "The aspect ratio of the mask does not match the aspect ratio of the output image. "
333
+ "Please update your masks or adjust the output size for optimal performance.",
334
+ UserWarning,
335
+ )
336
+ mask_downsample = mask_downsample[:, :num_queries]
337
+
338
+ # Repeat last dimension to match SDPA output shape
339
+ mask_downsample = mask_downsample.view(
340
+ mask_downsample.shape[0], mask_downsample.shape[1], 1
341
+ ).repeat(1, 1, value_embed_dim)
342
+
343
+ return mask_downsample
344
+
345
+
346
+ class IPAttnProcessor2_0(torch.nn.Module):
347
+ r"""
348
+ Attention processor for IP-Adapter for PyTorch 2.0.
349
+ Args:
350
+ hidden_size (`int`):
351
+ The hidden size of the attention layer.
352
+ cross_attention_dim (`int`):
353
+ The number of channels in the `encoder_hidden_states`.
354
+ scale (`float`, defaults to 1.0):
355
+ the weight scale of image prompt.
356
+ num_tokens (`int`, defaults to 4; for IP-Adapter Plus it should be 16):
357
+ The context length of the image features.
358
+ """
359
+
360
+ def __init__(
361
+ self,
362
+ hidden_size,
363
+ cross_attention_dim=None,
364
+ scale=1.0,
365
+ num_tokens=4,
366
+ use_align_sem_and_layout_loss=False,
367
+ ):
368
+ super().__init__()
369
+
370
+ if not hasattr(F, "scaled_dot_product_attention"):
371
+ raise ImportError(
372
+ "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
373
+ )
374
+
375
+ self.hidden_size = hidden_size
376
+ self.cross_attention_dim = cross_attention_dim
377
+ self.scale = scale
378
+ self.num_tokens = num_tokens
379
+
380
+ self.to_k_ip = nn.Linear(
381
+ cross_attention_dim or hidden_size, hidden_size, bias=False
382
+ )
383
+ self.to_v_ip = nn.Linear(
384
+ cross_attention_dim or hidden_size, hidden_size, bias=False
385
+ )
386
+ self.ip_hidden_states = None
387
+
388
+ self.use_align_sem_and_layout_loss = use_align_sem_and_layout_loss
389
+ if self.use_align_sem_and_layout_loss:
390
+ self.align_sem_loss = None
391
+ self.align_layout_loss = None
392
+ self.cache_query = None
393
+ self.cache_attn_weights = None
394
+
395
+ def __call__(
396
+ self,
397
+ attn,
398
+ hidden_states,
399
+ encoder_hidden_states=None,
400
+ attention_mask=None,
401
+ temb=None,
402
+ ip_adapter_masks: Optional[torch.FloatTensor] = None,
403
+ *args,
404
+ **kwargs,
405
+ ):
406
+ residual = hidden_states
407
+
408
+ if attn.spatial_norm is not None:
409
+ hidden_states = attn.spatial_norm(hidden_states, temb)
410
+
411
+ input_ndim = hidden_states.ndim
412
+
413
+ if input_ndim == 4:
414
+ batch_size, channel, height, width = hidden_states.shape
415
+ hidden_states = hidden_states.view(
416
+ batch_size, channel, height * width
417
+ ).transpose(1, 2)
418
+
419
+ batch_size, sequence_length, _ = (
420
+ hidden_states.shape
421
+ if encoder_hidden_states is None
422
+ else encoder_hidden_states.shape
423
+ )
424
+
425
+ if attention_mask is not None:
426
+ attention_mask = attn.prepare_attention_mask(
427
+ attention_mask, sequence_length, batch_size
428
+ )
429
+ # scaled_dot_product_attention expects attention_mask shape to be
430
+ # (batch, heads, source_length, target_length)
431
+ attention_mask = attention_mask.view(
432
+ batch_size, attn.heads, -1, attention_mask.shape[-1]
433
+ )
434
+
435
+ if attn.group_norm is not None:
436
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
437
+ 1, 2
438
+ )
439
+
440
+ query = attn.to_q(hidden_states)
441
+
442
+ if attn.norm_cross:
443
+ encoder_hidden_states = attn.norm_encoder_hidden_states(
444
+ encoder_hidden_states
445
+ )
446
+
447
+ key = attn.to_k(encoder_hidden_states)
448
+ value = attn.to_v(encoder_hidden_states)
449
+
450
+ inner_dim = key.shape[-1]
451
+ head_dim = inner_dim // attn.heads
452
+
453
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
454
+
455
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
456
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
457
+
458
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
459
+ # TODO: add support for attn.scale when we move to Torch 2.1
460
+ hidden_states = F.scaled_dot_product_attention(
461
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
462
+ )
463
+ if self.use_align_sem_and_layout_loss:
464
+ if self.cache_query is None:
465
+ self.cache_query = query.clone().detach()
466
+ self.cache_attn_weights = (key @ query.transpose(-2, -1)) / math.sqrt(
467
+ query.size(-1)
468
+ )
469
+ self.cache_attn_weights = torch.softmax(self.cache_attn_weights, dim=-1)
470
+ else:
471
+ self.attn_weights = (key @ query.transpose(-2, -1)) / math.sqrt(
472
+ query.size(-1)
473
+ )
474
+ self.query = query
475
+ self.attn_weights = torch.softmax(self.attn_weights, dim=-1)
476
+
477
+ hidden_states = hidden_states.transpose(1, 2).reshape(
478
+ batch_size, -1, attn.heads * head_dim
479
+ )
480
+ hidden_states = hidden_states.to(query.dtype)
481
+
482
+ if self.scale != 0.0:
483
+ # for ip-adapter
484
+ ip_key = self.to_k_ip(self.ip_hidden_states).to(dtype=query.dtype)
485
+ ip_value = self.to_v_ip(self.ip_hidden_states).to(dtype=query.dtype)
486
+
487
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
488
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(
489
+ 1, 2
490
+ )
491
+
492
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
493
+ # TODO: add support for attn.scale when we move to Torch 2.1
494
+ ip_hidden_states = F.scaled_dot_product_attention(
495
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
496
+ )
497
+ # with torch.no_grad():
498
+ # self.attn_map = query @ ip_key.transpose(-2, -1).softmax(dim=-1)
499
+ # print(self.attn_map.shape)
500
+
501
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(
502
+ batch_size, -1, attn.heads * head_dim
503
+ )
504
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
505
+
506
+ if ip_adapter_masks is not None:
507
+ mask_downsample = downsample(
508
+ ip_adapter_masks,
509
+ batch_size,
510
+ ip_hidden_states.shape[1],
511
+ ip_hidden_states.shape[2],
512
+ )
513
+
514
+ mask_downsample = mask_downsample.to(
515
+ dtype=query.dtype, device=query.device
516
+ )
517
+
518
+ ip_hidden_states = ip_hidden_states * mask_downsample
519
+
520
+ hidden_states = hidden_states + self.scale * ip_hidden_states
521
+
522
+ # linear proj
523
+ hidden_states = attn.to_out[0](hidden_states)
524
+ # dropout
525
+ hidden_states = attn.to_out[1](hidden_states)
526
+
527
+ if input_ndim == 4:
528
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
529
+ batch_size, channel, height, width
530
+ )
531
+
532
+ if attn.residual_connection:
533
+ hidden_states = hidden_states + residual
534
+
535
+ hidden_states = hidden_states / attn.rescale_output_factor
536
+
537
+ return hidden_states
538
+
539
+
540
+ def set_ortho(unet, ortho):
541
+ for name, module in unet.attn_processors.items():
542
+ if isinstance(module, IPAttnProcessor2_0) or isinstance(
543
+ module, MultiIPAttnProcessor2_0
544
+ ):
545
+ module.ortho = ortho
546
+
547
+
548
+ def set_num_zero(unet, num_zero):
549
+ for name, module in unet.attn_processors.items():
550
+ if isinstance(module, IPAttnProcessor2_0) or isinstance(
551
+ module, MultiIPAttnProcessor2_0
552
+ ):
553
+ module.num_zero = num_zero
554
+
555
+
556
+ class MultiIPAttnProcessor2_0(torch.nn.Module):
557
+ r"""
558
+ Attention processor for IP-Adapter for PyTorch 2.0.
559
+
560
+ Args:
561
+ hidden_size (`int`):
562
+ The hidden size of the attention layer.
563
+ cross_attention_dim (`int`):
564
+ The number of channels in the `encoder_hidden_states`.
565
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
566
+ The context length of the image features.
567
+ scale (`float` or `List[float]`, defaults to 1.0):
568
+ the weight scale of image prompt.
569
+ """
570
+
571
+ def __init__(
572
+ self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0
573
+ ):
574
+ super().__init__()
575
+
576
+ if not hasattr(F, "scaled_dot_product_attention"):
577
+ raise ImportError(
578
+ f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
579
+ )
580
+
581
+ self.hidden_size = hidden_size
582
+ self.cross_attention_dim = cross_attention_dim
583
+
584
+ if not isinstance(num_tokens, (tuple, list)):
585
+ num_tokens = [num_tokens]
586
+ self.num_tokens = num_tokens
587
+
588
+ if not isinstance(scale, list):
589
+ scale = [scale] * len(num_tokens)
590
+ if len(scale) != len(num_tokens):
591
+ raise ValueError(
592
+ "`scale` should be a list of integers with the same length as `num_tokens`."
593
+ )
594
+ self.scale = scale
595
+
596
+ self.to_k_ip = nn.ModuleList(
597
+ [
598
+ nn.Linear(cross_attention_dim, hidden_size, bias=False)
599
+ for _ in range(len(num_tokens))
600
+ ]
601
+ )
602
+ self.to_v_ip = nn.ModuleList(
603
+ [
604
+ nn.Linear(cross_attention_dim, hidden_size, bias=False)
605
+ for _ in range(len(num_tokens))
606
+ ]
607
+ )
608
+ self.ip_hidden_states = None
609
+ self.num_zero = [None] * (len(num_tokens))
610
+ self.ortho = [None] * len(num_tokens)
611
+
612
+ def __call__(
613
+ self,
614
+ attn: Attention,
615
+ hidden_states: torch.FloatTensor,
616
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
617
+ attention_mask: Optional[torch.FloatTensor] = None,
618
+ temb: Optional[torch.FloatTensor] = None,
619
+ scale: float = 1.0,
620
+ ip_adapter_masks: Optional[torch.FloatTensor] = None,
621
+ ):
622
+ residual = hidden_states
623
+
624
+ ip_hidden_states = self.ip_hidden_states
625
+
626
+ if attn.spatial_norm is not None:
627
+ hidden_states = attn.spatial_norm(hidden_states, temb)
628
+
629
+ input_ndim = hidden_states.ndim
630
+
631
+ if input_ndim == 4:
632
+ batch_size, channel, height, width = hidden_states.shape
633
+ hidden_states = hidden_states.view(
634
+ batch_size, channel, height * width
635
+ ).transpose(1, 2)
636
+
637
+ batch_size, sequence_length, _ = (
638
+ hidden_states.shape
639
+ if encoder_hidden_states is None
640
+ else encoder_hidden_states.shape
641
+ )
642
+
643
+ if attention_mask is not None:
644
+ attention_mask = attn.prepare_attention_mask(
645
+ attention_mask, sequence_length, batch_size
646
+ )
647
+ # scaled_dot_product_attention expects attention_mask shape to be
648
+ # (batch, heads, source_length, target_length)
649
+ attention_mask = attention_mask.view(
650
+ batch_size, attn.heads, -1, attention_mask.shape[-1]
651
+ )
652
+
653
+ if attn.group_norm is not None:
654
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
655
+ 1, 2
656
+ )
657
+
658
+ query = attn.to_q(hidden_states)
659
+
660
+ if encoder_hidden_states is None:
661
+ encoder_hidden_states = hidden_states
662
+ elif attn.norm_cross:
663
+             encoder_hidden_states = attn.norm_encoder_hidden_states(
+                 encoder_hidden_states
+             )
+
+         key = attn.to_k(encoder_hidden_states)
+         value = attn.to_v(encoder_hidden_states)
+
+         inner_dim = key.shape[-1]
+         head_dim = inner_dim // attn.heads
+
+         query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+         # the output of sdp = (batch, num_heads, seq_len, head_dim)
+         # TODO: add support for attn.scale when we move to Torch 2.1
+         hidden_states = F.scaled_dot_product_attention(
+             query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+         )
+
+         hidden_states = hidden_states.transpose(1, 2).reshape(
+             batch_size, -1, attn.heads * head_dim
+         )
+         hidden_states = hidden_states.to(query.dtype)
+
+         if ip_adapter_masks is not None:
+             if (
+                 not isinstance(ip_adapter_masks, torch.Tensor)
+                 or ip_adapter_masks.ndim != 4
+             ):
+                 raise ValueError(
+                     " ip_adapter_mask should be a tensor with shape [num_ip_adapter, 1, height, width]."
+                     " Please use `IPAdapterMaskProcessor` to preprocess your mask"
+                 )
+             if len(ip_adapter_masks) != len(self.scale):
+                 raise ValueError(
+                     f"Number of ip_adapter_masks ({len(ip_adapter_masks)}) must match number of IP-Adapters ({len(self.scale)})"
+                 )
+         else:
+             ip_adapter_masks = [None] * len(self.scale)
+
+         # for ip-adapter
+         for (
+             current_ip_hidden_states,
+             scale,
+             to_k_ip,
+             to_v_ip,
+             mask,
+             num_zero,
+             ortho,
+         ) in zip(
+             ip_hidden_states,
+             self.scale,
+             self.to_k_ip,
+             self.to_v_ip,
+             ip_adapter_masks,
+             self.num_zero,
+             self.ortho,
+         ):
+             if scale == 0:
+                 continue
+             if num_zero is not None:
+                 zero_tensor = torch.zeros(
+                     (
+                         current_ip_hidden_states.size(0),
+                         num_zero,
+                         current_ip_hidden_states.size(-1),
+                     ),
+                     dtype=current_ip_hidden_states.dtype,
+                     device=current_ip_hidden_states.device,
+                 )
+                 current_ip_hidden_states = torch.concat(
+                     [current_ip_hidden_states, zero_tensor], dim=1
+                 )
+             ip_key = to_k_ip(current_ip_hidden_states)
+             ip_value = to_v_ip(current_ip_hidden_states)
+
+             ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+             ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(
+                 1, 2
+             )
+
+             # the output of sdp = (batch, num_heads, seq_len, head_dim)
+             # TODO: add support for attn.scale when we move to Torch 2.1
+             current_ip_hidden_states = F.scaled_dot_product_attention(
+                 query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
+             )
+
+             current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(
+                 batch_size, -1, attn.heads * head_dim
+             )
+             current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
+
+             if mask is not None:
+                 mask_downsample = IPAdapterMaskProcessor.downsample(
+                     mask,
+                     batch_size,
+                     current_ip_hidden_states.shape[1],
+                     current_ip_hidden_states.shape[2],
+                 )
+
+                 mask_downsample = mask_downsample.to(
+                     dtype=query.dtype, device=query.device
+                 )
+
+                 current_ip_hidden_states = current_ip_hidden_states * mask_downsample
+             if ortho is None:
+                 hidden_states = hidden_states + scale * current_ip_hidden_states
+             elif ortho == "ortho":
+                 orig_dtype = hidden_states.dtype
+                 hidden_states = hidden_states.to(torch.float32)
+                 current_ip_hidden_states = current_ip_hidden_states.to(torch.float32)
+                 projection = (
+                     torch.sum(
+                         (hidden_states * current_ip_hidden_states), dim=-2, keepdim=True
+                     )
+                     / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True)
+                     * hidden_states
+                 )
+                 orthogonal = current_ip_hidden_states - projection
+                 hidden_states = hidden_states + scale * orthogonal
+                 hidden_states = hidden_states.to(orig_dtype)
+             elif ortho == "ortho_v2":
+                 orig_dtype = hidden_states.dtype
+                 hidden_states = hidden_states.to(torch.float32)
+                 current_ip_hidden_states = current_ip_hidden_states.to(torch.float32)
+                 attn_map = query @ ip_key.transpose(-2, -1)
+                 attn_mean = attn_map.softmax(dim=-1).mean(dim=1)
+                 attn_mean = attn_mean[:, :, :5].sum(dim=-1, keepdim=True)
+                 projection = (
+                     torch.sum(
+                         (hidden_states * current_ip_hidden_states), dim=-2, keepdim=True
+                     )
+                     / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True)
+                     * hidden_states
+                 )
+                 orthogonal = current_ip_hidden_states + (attn_mean - 1) * projection
+                 hidden_states = hidden_states + scale * orthogonal
+                 hidden_states = hidden_states.to(orig_dtype)
+             else:
+                 raise ValueError(f"{ortho} not supported")
+
+         # linear proj
+         hidden_states = attn.to_out[0](hidden_states)
+         # dropout
+         hidden_states = attn.to_out[1](hidden_states)
+
+         if input_ndim == 4:
+             hidden_states = hidden_states.transpose(-1, -2).reshape(
+                 batch_size, channel, height, width
+             )
+
+         if attn.residual_connection:
+             hidden_states = hidden_states + residual
+
+         hidden_states = hidden_states / attn.rescale_output_factor
+
+         return hidden_states
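The `ortho` and `ortho_v2` branches above implement PuLID-style feature merging: rather than adding the raw identity-adapter output to the attention output, the component parallel to the existing hidden states is projected out, and only the (scaled) orthogonal remainder is blended in. A minimal self-contained sketch of that decomposition on toy tensors (shapes, values, and variable names here are illustrative only, not taken from the repository):

import torch

# Toy tensors shaped (batch, sequence_length, channels), for illustration only.
hidden_states = torch.randn(1, 64, 128)
ip_hidden_states = torch.randn(1, 64, 128)
scale = 1.0

# Project the ID features onto the base features along the sequence dimension,
# then keep only the orthogonal remainder before blending (the "ortho" branch).
projection = (
    torch.sum(hidden_states * ip_hidden_states, dim=-2, keepdim=True)
    / torch.sum(hidden_states * hidden_states, dim=-2, keepdim=True)
    * hidden_states
)
orthogonal = ip_hidden_states - projection
merged = hidden_states + scale * orthogonal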
ip_adapter_diffusers/ip_adapter_extra_attn.py ADDED
@@ -0,0 +1,250 @@
+ from typing import Callable, List, Optional, Tuple, Union
+ from diffusers.models.attention_processor import Attention
+ from diffusers.models.embeddings import (
+     ImageProjection,
+     MultiIPAdapterImageProjection,  # used by load_ip_adapter below
+     Resampler,
+ )
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import copy
+
+
+ class IPAdapterAttnProcessor2_0(torch.nn.Module):
+     r"""
+     Attention processor for IP-Adapter for PyTorch 2.0.
+
+     Args:
+         hidden_size (`int`):
+             The hidden size of the attention layer.
+         cross_attention_dim (`int`):
+             The number of channels in the `encoder_hidden_states`.
+         num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
+             The context length of the image features.
+         scale (`float` or `List[float]`, defaults to 1.0):
+             the weight scale of image prompt.
+     """
+
+     def __init__(
+         self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0
+     ):
+         super().__init__()
+
+         if not hasattr(F, "scaled_dot_product_attention"):
+             raise ImportError(
+                 f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
+             )
+
+         self.hidden_size = hidden_size
+         self.cross_attention_dim = cross_attention_dim
+
+         if not isinstance(num_tokens, (tuple, list)):
+             num_tokens = [num_tokens]
+         self.num_tokens = num_tokens
+
+         if not isinstance(scale, list):
+             scale = [scale] * len(num_tokens)
+         if len(scale) != len(num_tokens):
+             raise ValueError(
+                 "`scale` should be a list of integers with the same length as `num_tokens`."
+             )
+         self.scale = scale
+
+         self.to_q_ip = nn.Linear(hidden_size, hidden_size, bias=False)
+         self.to_k_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
+         self.to_v_ip = nn.Linear(cross_attention_dim, hidden_size, bias=False)
+
+     def __call__(
+         self,
+         attn: Attention,
+         hidden_states: torch.Tensor,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         temb: Optional[torch.Tensor] = None,
+         scale: float = 1.0,
+         ip_adapter_masks: Optional[torch.Tensor] = None,
+     ):
+         residual = hidden_states
+
+         # separate ip_hidden_states from encoder_hidden_states
+         if encoder_hidden_states is not None:
+             if isinstance(encoder_hidden_states, tuple):
+                 encoder_hidden_states, ip_hidden_states = encoder_hidden_states
+                 ip_hidden_states = ip_hidden_states[0]
+
+         if attn.spatial_norm is not None:
+             hidden_states = attn.spatial_norm(hidden_states, temb)
+
+         input_ndim = hidden_states.ndim
+
+         if input_ndim == 4:
+             batch_size, channel, height, width = hidden_states.shape
+             hidden_states = hidden_states.view(
+                 batch_size, channel, height * width
+             ).transpose(1, 2)
+
+         batch_size, sequence_length, _ = (
+             hidden_states.shape
+             if encoder_hidden_states is None
+             else encoder_hidden_states.shape
+         )
+
+         if attention_mask is not None:
+             attention_mask = attn.prepare_attention_mask(
+                 attention_mask, sequence_length, batch_size
+             )
+             # scaled_dot_product_attention expects attention_mask shape to be
+             # (batch, heads, source_length, target_length)
+             attention_mask = attention_mask.view(
+                 batch_size, attn.heads, -1, attention_mask.shape[-1]
+             )
+
+         if attn.group_norm is not None:
+             hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
+                 1, 2
+             )
+
+         query = attn.to_q(hidden_states)
+
+         if encoder_hidden_states is None:
+             encoder_hidden_states = hidden_states
+         elif attn.norm_cross:
+             encoder_hidden_states = attn.norm_encoder_hidden_states(
+                 encoder_hidden_states
+             )
+
+         key = attn.to_k(encoder_hidden_states)
+         value = attn.to_v(encoder_hidden_states)
+
+         inner_dim = key.shape[-1]
+         head_dim = inner_dim // attn.heads
+
+         query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+         # the output of sdp = (batch, num_heads, seq_len, head_dim)
+         # TODO: add support for attn.scale when we move to Torch 2.1
+         hidden_states = F.scaled_dot_product_attention(
+             query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+         )
+
+         hidden_states = hidden_states.transpose(1, 2).reshape(
+             batch_size, -1, attn.heads * head_dim
+         )
+         hidden_states = hidden_states.to(query.dtype)
+
+         ip_query = self.to_q_ip(hidden_states)
+         ip_key = self.to_k_ip(ip_hidden_states)
+         ip_value = self.to_v_ip(ip_hidden_states)
+
+         ip_query = ip_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+         ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+         ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+         # the output of sdp = (batch, num_heads, seq_len, head_dim)
+         # TODO: add support for attn.scale when we move to Torch 2.1
+         current_ip_hidden_states = F.scaled_dot_product_attention(
+             ip_query,
+             ip_key,
+             ip_value,
+             attn_mask=None,
+             dropout_p=0.0,
+             is_causal=False,
+         )
+
+         current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(
+             batch_size, -1, attn.heads * head_dim
+         )
+         current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
+
+         hidden_states = hidden_states + scale * current_ip_hidden_states
+
+         # linear proj
+         hidden_states = attn.to_out[0](hidden_states)
+         # dropout
+         hidden_states = attn.to_out[1](hidden_states)
+
+         if input_ndim == 4:
+             hidden_states = hidden_states.transpose(-1, -2).reshape(
+                 batch_size, channel, height, width
+             )
+
+         if attn.residual_connection:
+             hidden_states = hidden_states + residual
+
+         hidden_states = hidden_states / attn.rescale_output_factor
+
+         return hidden_states
+
+
+ def save_ip_adapter(unet, path):
+     state_dict = {}
+     if (
+         hasattr(unet, "encoder_hid_proj")
+         and unet.encoder_hid_proj is not None
+         and isinstance(unet.encoder_hid_proj, torch.nn.Module)
+     ):
+         state_dict["encoder_hid_proj"] = unet.encoder_hid_proj.state_dict()
+
+     for name, module in unet.attn_processors.items():
+         if isinstance(module, torch.nn.Module):
+             state_dict[name] = module.state_dict()
+
+     torch.save(state_dict, path)
+
+
+ def load_ip_adapter(
+     unet,
+     path=None,
+     clip_embeddings_dim=1280,
+     cross_attention_dim=2048,
+     num_image_text_embeds=4,
+ ):
+     if path is None:
+         state_dict = None
+     else:
+         state_dict = torch.load(path, map_location="cpu")
+         clip_embeddings_dim = state_dict["encoder_hid_proj"][
+             "image_projection_layers.0.image_embeds.weight"
+         ].shape[-1]
+         num_image_text_embeds = (
+             state_dict["encoder_hid_proj"][
+                 "image_projection_layers.0.image_embeds.weight"
+             ].shape[0]
+             // cross_attention_dim
+         )
+
+     if not hasattr(unet, "encoder_hid_proj") or unet.encoder_hid_proj is None:
+         unet.encoder_hid_proj = MultiIPAdapterImageProjection(
+             [
+                 ImageProjection(
+                     cross_attention_dim=cross_attention_dim,
+                     image_embed_dim=clip_embeddings_dim,
+                     num_image_text_embeds=num_image_text_embeds,
+                 )
+             ]
+         ).to(unet.device, unet.dtype)
+         if state_dict is not None:
+             unet.encoder_hid_proj.load_state_dict(state_dict["encoder_hid_proj"])
+
+     unet.config.encoder_hid_dim_type = "ip_image_proj"
+     for name, module in unet.named_modules():
+         if "attn2" in name and isinstance(module, Attention):
+             if not isinstance(module.processor, IPAdapterAttnProcessor2_0):
+                 module.set_processor(
+                     IPAdapterAttnProcessor2_0(
+                         hidden_size=module.query_dim,
+                         cross_attention_dim=cross_attention_dim,
+                         scale=1.0,
+                     ).to(unet.device, unet.dtype)
+                 )
+             if state_dict is not None:
+                 module.processor.load_state_dict(state_dict[f"{name}.processor"])
+
+
+ def set_ip_adapter_scale(unet, scale=1.0):
+     for name, module in unet.named_modules():
+         if isinstance(module, IPAdapterAttnProcessor2_0):
+             module.scale = scale
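Together, `load_ip_adapter`, `set_ip_adapter_scale`, and `save_ip_adapter` let a plain diffusers UNet host this extra-attention adapter. A rough usage sketch, assuming an SDXL base model and a locally available adapter checkpoint (the model id and checkpoint path below are placeholders, not files shipped with this commit):

import torch
from diffusers import StableDiffusionXLPipeline

from ip_adapter_diffusers.ip_adapter_extra_attn import (
    load_ip_adapter,
    set_ip_adapter_scale,
)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Attach the image-projection layers and swap in the extra-attention processors.
# The checkpoint path is a placeholder; any state dict produced by save_ip_adapter
# works, and path=None creates a fresh, untrained adapter instead.
load_ip_adapter(pipe.unet, path="models/extra_attn_adapter.pth")

# Adjust how strongly the image prompt contributes in every adapted layer.
set_ip_adapter_scale(pipe.unet, scale=0.8)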