huzey committed · Commit 3407de8 · 0 Parent(s):

Initial commit with Xet-tracked images

.gitattributes ADDED
@@ -0,0 +1,41 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,22 @@
+ lightning_logs/
+ experiments/
+ old/
+ .DS_Store
+ .venv/
+ .idea/
+ .vscode/
+ .pytest_cache/
+ downloads/
+ __pycache__/
+ *.pyc
+ *.pyo
+ *.pyd
+ *.pyw
+ images/sit.png
+ .trash
+ *.zip
+ images/cup_torus/
+ images/cup_torus_no_bg/
+ *.pt
+ models/
+ .gradio/
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: VibeSpace
+ emoji: 🚀
+ colorFrom: purple
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 5.24.0
+ app_file: app.py
+ pinned: false
+ ---
+
+
+ Step 1: install the dependencies with `pip install -r ./requirements.txt`
+
+ Step 2: run `python app.py` or one of the demo notebooks
app.py ADDED
@@ -0,0 +1,171 @@
+ import logging
+ import os
+ from typing import List, Union
+
+ import gradio as gr
+ from PIL import Image
+ import numpy as np
+
+ from vibe_blending import run_vibe_blend_safe, run_vibe_blend_not_safe
+ from ipadapter_model import create_image_grid
+
+ USE_HUGGINGFACE_ZEROGPU = os.getenv("USE_HUGGINGFACE_ZEROGPU", "true").lower() == "true"
+ DEFAULT_CONFIG_PATH = "./config.yaml"
+
+ if USE_HUGGINGFACE_ZEROGPU:
+     try:
+         import spaces
+     except ImportError:
+         USE_HUGGINGFACE_ZEROGPU = False
+         logging.warning("HuggingFace Spaces not available, running without GPU acceleration")
+
+ if USE_HUGGINGFACE_ZEROGPU:
+     run_vibe_blend_safe = spaces.GPU(duration=60)(run_vibe_blend_safe)
+     run_vibe_blend_not_safe = spaces.GPU(duration=60)(run_vibe_blend_not_safe)
+
+ try:
+     from download_models import download_ipadapter
+     download_ipadapter()
+ except ImportError:
+     logging.warning("Could not import download_models")
+
+
+ def load_gradio_images_helper(pil_images: Union[List, Image.Image, str]) -> List[Image.Image]:
+     """
+     Convert various image input formats to a list of PIL Images.
+     """
+     if pil_images is None:
+         return []
+
+     # Handle single image
+     if isinstance(pil_images, np.ndarray):
+         return Image.fromarray(pil_images).convert("RGB")
+     if isinstance(pil_images, Image.Image):
+         return pil_images.convert("RGB")
+     if isinstance(pil_images, str):
+         return Image.open(pil_images).convert("RGB")
+
+     # Handle list of images
+     processed_images = []
+     for image in pil_images:
+         if isinstance(image, tuple):  # Gradio gallery format
+             image = image[0]
+         if isinstance(image, str):
+             image = Image.open(image)
+         elif isinstance(image, Image.Image):
+             pass  # Already PIL Image
+         else:
+             continue
+         processed_images.append(image.convert("RGB"))
+
+     return processed_images
+
+
+ def create_gradio_interface():
+     theme = gr.themes.Base(
+         spacing_size='md',
+         text_size='lg',
+         primary_hue='blue',
+         neutral_hue='slate',
+         secondary_hue='pink'
+     )
+
+     demo = gr.Blocks(theme=theme)
+     with demo:
+         gr.Markdown("""
+ ## Vibe Blending Demo
+
+ This is the demo for the paper "*Vibe Spaces for Creatively Connecting and Expressing Visual Concepts*".
+
+ [Paper]() | [Code]() | [Website]()
+
+ Given a pair of images, vibe blending will generate a set of images that creatively connect the input images.
+
+ **[📝 Feedback Form](https://docs.google.com/forms/d/e/1FAIpQLSfS-2fdJ3eaG6JBUGNgHYD4zNRtoPUOc2OhF8J-uT-gyR3LyA/viewform?usp=dialog)** - Please submit your interesting images!
+
+         """)
+         with gr.Row():
+             with gr.Column():
+                 with gr.Group():
+                     with gr.Row():
+                         gr.Markdown("**Step 1:** Upload 2 images")
+                     with gr.Row():
+                         input1 = gr.Image(label="Input 1", show_label=True)
+                         input2 = gr.Image(label="Input 2", show_label=True)
+
+                 with gr.Accordion("Options", open=False):
+                     with gr.Group():
+                         with gr.Row():
+                             alpha_start = gr.Slider(minimum=0, maximum=2, step=0.1, value=0.0, label="Start α", info="interpolation weight")
+                             alpha_end = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="End α", info="use α>1 for extrapolation")
+                             # n_steps = gr.Slider(minimum=1, maximum=40, step=1, value=10, label="Number of Output Images")
+                             n_steps = gr.Number(value=12, label="Number of Output Images", interactive=True)
+                         with gr.Row():
+                             extra_images = gr.Gallery(label="Extra Images (optional)", show_label=True, columns=3, rows=2, height=150)
+                             negative_images = gr.Gallery(label="Negative Images (optional)", show_label=True, columns=3, rows=2, height=150)
+             with gr.Column():
+                 with gr.Group():
+                     # blending_results = gr.Gallery(label="Vibe Blending Results", columns=5, rows=4, height=600)
+                     gr.Markdown("**Step 2:** Run Vibe Blending")
+                     blending_results = gr.Image(label="Vibe Blending Results", show_label=True, height=400)
+                     blend_button = gr.Button("🔴 Run Vibe Blending", variant="primary")
+
+         # Click handler: run vibe blending on the uploaded images
+         def blend_button_click(input1, input2, extra_images, negative_images, alpha_start, alpha_end, n_steps):
+             input1 = load_gradio_images_helper(input1)
+             input2 = load_gradio_images_helper(input2)
+             extra_images = load_gradio_images_helper(extra_images)
+             negative_images = load_gradio_images_helper(negative_images)
+
+             if extra_images is None:
+                 extra_images = []
+             elif isinstance(extra_images, Image.Image):
+                 extra_images = [extra_images]
+
+             if negative_images is None:
+                 negative_images = []
+             elif isinstance(negative_images, Image.Image):
+                 negative_images = [negative_images]
+
+             alpha_weights = np.linspace(alpha_start, alpha_end, int(n_steps) + 2)[1:-1].tolist()
+             blended_images = run_vibe_blend_not_safe(input1, input2, extra_images, negative_images, DEFAULT_CONFIG_PATH, alpha_weights)
+             blended_images = create_image_grid(blended_images, rows=np.ceil(len(blended_images)/4).astype(int), cols=4)
+             return blended_images
+
+         blend_button.click(blend_button_click, inputs=[input1, input2, extra_images, negative_images, alpha_start, alpha_end, n_steps], outputs=[blending_results])
+
+         example_cases = [
+             [Image.open("./images/playviolin_hr.png"), Image.open("./images/playguitar_hr.png")],
+             [Image.open("./images/input_cat.png"), Image.open("./images/input_bread.png")],
+             [Image.open("./images/02140_left.jpg"), Image.open("./images/02140_right.jpg")],
+             #[Image.open("./images/02718_l.jpg"), Image.open("./images/02718_r.jpg")],
+             [Image.open("./images/03969_l.jpg"), Image.open("./images/03969_r.jpg")],
+             [Image.open("./images/04963_l.jpg"), Image.open("./images/04963_r.jpg")],
+             #[Image.open("./images/05358_l.jpg"), Image.open("./images/05358_r.jpg")],
+             [Image.open("./images/00436_l.jpg"), Image.open("./images/00436_r.jpg")],
+             [Image.open("./images/archi/input_A.jpg"), Image.open("./images/archi/input_B.jpg")],
+         ]
+         gr.Examples(examples=example_cases, label="Example Cases", inputs=[input1, input2], outputs=[blending_results])
+
+         extra_image_examples = [
+             [Image.open("./images/archi/input_A.jpg"), Image.open("./images/archi/input_B.jpg"), [Image.open("./images/archi/extra1.jpg"), Image.open("./images/archi/extra2.jpg"), Image.open("./images/archi/extra3.jpg")]],
+         ]
+         gr.Examples(examples=extra_image_examples, label="Extra Image Examples", inputs=[input1, input2, extra_images], outputs=[blending_results])
+
+         negative_image_examples = [
+             [Image.open("./images/pink_bear1.jpg"), Image.open("./images/black_bear2.jpg"), [Image.open("./images/pink_bear1.jpg"), Image.open("./images/black_bear1.jpg")]],
+         ]
+         gr.Examples(examples=negative_image_examples, label="Negative Image Examples", inputs=[input1, input2, negative_images], outputs=[blending_results])
+
+     return demo
+
+
+ if __name__ == "__main__":
+     logging.basicConfig(level=logging.INFO)
+
+     demo = create_gradio_interface()
+     demo.launch(
+         share=True,
+         server_name="0.0.0.0" if USE_HUGGINGFACE_ZEROGPU else None,
+         show_error=True
+     )
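
A minimal launch sketch (not part of the commit): app.py reads USE_HUGGINGFACE_ZEROGPU once at import time, so the environment variable must be set before the module is imported; everything else uses only names defined in app.py.

import os
os.environ["USE_HUGGINGFACE_ZEROGPU"] = "false"  # run locally without ZeroGPU wrapping

from app import create_gradio_interface  # note: importing app triggers the IP-Adapter download

demo = create_gradio_interface()
demo.launch(show_error=True)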
config.yaml ADDED
@@ -0,0 +1,26 @@
+ in_dim: 768 #1024 #768 #384
+ vibe_dim: -1
+ out_dim: 1280
+ latent_dim: 512
+ n_layer: 2
+
+ n_eig: 64
+ flag_encoder_loss: 1.0
+ flag_decoder_loss: 0.01
+ recon_loss: 1.0
+
+ negative_beta: 1.0
+ do_decoder_negative_flag: false
+
+ single_scale_flag: false
+
+ log_dir: /tmp/logs/
+ name: debug
+
+ ipadapter_version: sd15 # sdxl, sd15
+
+ steps: 1000
+ batch_size: 8
+ n_negative_sample: 100
+ n_sample_eigsolve: 2000
+ lr: 0.001
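
For reference, a minimal sketch (not part of the commit) of reading this file from Python; it assumes PyYAML is installed, since app.py only passes the path (DEFAULT_CONFIG_PATH) down to run_vibe_blend_not_safe.

import yaml  # assumption: PyYAML is available in the environment

with open("./config.yaml") as f:
    cfg = yaml.safe_load(f)

# Keys mirror the file above, e.g. cfg["in_dim"] == 768 and cfg["ipadapter_version"] == "sd15".
print(cfg["in_dim"], cfg["out_dim"], cfg["steps"])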
demo_vibe_blending.ipynb ADDED
@@ -0,0 +1,129 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "a4447a99",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "from pathlib import Path\n",
11
+ "from typing import Iterable, List\n",
12
+ "\n",
13
+ "import numpy as np\n",
14
+ "from PIL import Image\n",
15
+ "from IPython.display import display\n",
16
+ "\n",
17
+ "from vibe_blending import run_vibe_blend_not_safe\n",
18
+ "from ipadapter_model import create_image_grid"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "id": "2f9e85da",
25
+ "metadata": {},
26
+ "outputs": [],
27
+ "source": [
28
+ "CONFIG_PATH = Path('config.yaml')\n",
29
+ "IMAGES_DIR = Path('images')\n",
30
+ "\n",
31
+ "POSITIVE_IMAGE_PATHS = [\n",
32
+ " IMAGES_DIR / 'playviolin_hr.png',\n",
33
+ " IMAGES_DIR / 'playguitar_hr.png',\n",
34
+ "]\n",
35
+ "\n",
36
+ "EXTRA_IMAGE_PATHS = [\n",
37
+ " # ...\n",
38
+ "]\n",
39
+ "\n",
40
+ "NEGATIVE_IMAGE_PATHS = [\n",
41
+ " # ...\n",
42
+ "]\n",
43
+ "\n",
44
+ "def load_images(paths: Iterable[Path]) -> List[Image.Image]:\n",
45
+ " return [Image.open(path).convert('RGB') for path in paths]\n",
46
+ "\n",
47
+ "positive_images = load_images(POSITIVE_IMAGE_PATHS)\n",
48
+ "extra_images = load_images(EXTRA_IMAGE_PATHS)\n",
49
+ "negative_images = load_images(NEGATIVE_IMAGE_PATHS)\n",
50
+ "\n",
51
+ "print(f'Loaded {len(positive_images)} positive, {len(extra_images)} extra, {len(negative_images)} negative images')"
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": null,
57
+ "id": "4b079a6a",
58
+ "metadata": {},
59
+ "outputs": [],
60
+ "source": [
61
+ "GRID_PREVIEW_SIZE = (256, 256)\n",
62
+ "GRID_BLEND_SIZE = (512, 512)\n",
63
+ "\n",
64
+ "def resize_for_grid(images: List[Image.Image], size: tuple[int, int]) -> List[Image.Image]:\n",
65
+ " return [img.resize(size, Image.Resampling.LANCZOS) for img in images]\n",
66
+ "\n",
67
+ "def show_row(title: str, images: List[Image.Image]):\n",
68
+ " if not images:\n",
69
+ " return\n",
70
+ " print(f\"{title} ({len(images)} images)\")\n",
71
+ " thumbs = resize_for_grid(images, GRID_PREVIEW_SIZE)\n",
72
+ " grid = create_image_grid(thumbs, rows=1, cols=len(thumbs))\n",
73
+ " display(grid)\n",
74
+ "\n",
75
+ "show_row('Positives', positive_images)\n",
76
+ "show_row('Extra references', extra_images)\n",
77
+ "show_row('Negatives (attributes to suppress)', negative_images)"
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "execution_count": null,
83
+ "id": "9d396972",
84
+ "metadata": {},
85
+ "outputs": [],
86
+ "source": [
87
+ "alpha_weights = np.linspace(0, 1, 10).tolist()\n",
88
+ "print(f'α values: {alpha_weights}')\n",
89
+ "\n",
90
+ "blended_with_negatives = run_vibe_blend_not_safe(\n",
91
+ " image1=positive_images[0],\n",
92
+ " image2=positive_images[1],\n",
93
+ " extra_images=extra_images,\n",
94
+ " negative_images=negative_images,\n",
95
+ " config_path=str(CONFIG_PATH),\n",
96
+ " interpolation_weights=alpha_weights,\n",
97
+ " n_clusters=20,\n",
98
+ ")\n",
99
+ "\n",
100
+ "sequence_with_neg = [positive_images[0], *blended_with_negatives, positive_images[1]]\n",
101
+ "rows = int(np.ceil(len(sequence_with_neg) / 4))\n",
102
+ "normalized_sequence = resize_for_grid(sequence_with_neg, GRID_BLEND_SIZE)\n",
103
+ "neg_grid = create_image_grid(normalized_sequence, rows=rows, cols=4)\n",
104
+ "display(neg_grid)"
105
+ ]
106
+ }
107
+ ],
108
+ "metadata": {
109
+ "kernelspec": {
110
+ "display_name": "mspace",
111
+ "language": "python",
112
+ "name": "python3"
113
+ },
114
+ "language_info": {
115
+ "codemirror_mode": {
116
+ "name": "ipython",
117
+ "version": 3
118
+ },
119
+ "file_extension": ".py",
120
+ "mimetype": "text/x-python",
121
+ "name": "python",
122
+ "nbconvert_exporter": "python",
123
+ "pygments_lexer": "ipython3",
124
+ "version": "3.11.0"
125
+ }
126
+ },
127
+ "nbformat": 4,
128
+ "nbformat_minor": 5
129
+ }
dino_correspondence.py ADDED
@@ -0,0 +1,764 @@
1
+ """
2
+ DINO Correspondence Analysis Module
3
+
4
+ This module provides functions for analyzing visual correspondences between images
5
+ using DINO features, normalized cuts (NCut), and clustering techniques.
6
+ """
7
+
8
+ import numpy as np
9
+ import torch
10
+ from PIL import Image
11
+ from scipy.optimize import linear_sum_assignment
12
+ from einops import rearrange
13
+
14
+ from extract_features import image_inverse_transform
15
+ from ipadapter_model import image_grid
16
+ from ncut_pytorch import ncut_fn, kway_ncut, convert_to_lab_color
17
+ from ncut_pytorch.color import tsne_color
18
+ from ncut_pytorch.utils.gamma import find_gamma_by_degree
19
+
20
+
21
+ # ===== Core NCut and Clustering Functions =====
22
+
23
+ def ncut_tsne_multiple_images(image_embeds, n_eig=50, gamma=None, degree=0.5):
24
+ """
25
+ Apply NCut and t-SNE coloring to multiple image embeddings.
26
+
27
+ image_embeds is (batch, length, channels)
28
+ """
29
+ batch_size, length, channels = image_embeds.shape
30
+ flattened_input = image_embeds.flatten(end_dim=-2)
31
+
32
+ if gamma is None:
33
+ gamma = find_gamma_by_degree(flattened_input, degree)
34
+
35
+ eigenvectors, eigenvalues = ncut_fn(
36
+ flattened_input, n_eig=n_eig, gamma=gamma, device='cuda'
37
+ )
38
+
39
+ rgb_colors = tsne_color(eigenvectors, n_dim=3, device='cuda', perplexity=50)
40
+ rgb_colors = convert_to_lab_color(rgb_colors)
41
+
42
+ # Reshape back to original batch structure
43
+ rgb_colors = rearrange(rgb_colors, '(b l) c -> b l c', b=batch_size)
44
+ eigenvectors = rearrange(eigenvectors, '(b l) c -> b l c', b=batch_size)
45
+
46
+ return eigenvectors, rgb_colors
47
+
48
+
49
+ def _kway_cluster_single_image(image_embeds, n_clusters, gamma=None, degree=0.5):
50
+ length, channels = image_embeds.shape
51
+ flattened_input = image_embeds.flatten(end_dim=-2)
52
+
53
+ if gamma is None:
54
+ gamma = find_gamma_by_degree(flattened_input, degree)
55
+ else:
56
+ gamma = gamma * image_embeds.var(0).sum().item()
57
+
58
+ # Calculate number of eigenvectors needed
59
+ n_eig = min(n_clusters * 2 + 6, flattened_input.shape[0] // 2 - 1)
60
+
61
+ eigenvectors, _ = ncut_fn(
62
+ flattened_input, n_eig=n_eig, gamma=gamma, device='cuda'
63
+ )
64
+
65
+ continuous_clusters = kway_ncut(eigenvectors[:, :n_clusters])
66
+ return continuous_clusters
67
+
68
+
69
+ def kway_cluster_per_image(image_embeds, n_clusters, gamma=None, degree=0.5):
70
+ """
71
+ Perform k-way clustering on each image separately.
72
+
73
+ image_embeds is (batch, length, channels)
74
+ return (batch, length, clusters)
75
+ """
76
+ clustered_eigenvectors = []
77
+
78
+ for i in range(image_embeds.shape[0]):
79
+ eigenvector = _kway_cluster_single_image(
80
+ image_embeds[i], n_clusters, gamma, degree
81
+ )
82
+ clustered_eigenvectors.append(eigenvector)
83
+
84
+ return torch.stack(clustered_eigenvectors)
85
+
86
+
87
+ def kway_cluster_multiple_images(image_embeds, n_clusters, gamma=None, degree=0.5):
88
+ """
89
+ Perform k-way clustering on multiple images jointly.
90
+
91
+ image_embeds is (batch, length, channels)
92
+ return (batch, length, clusters)
93
+ """
94
+ batch_size, length, channels = image_embeds.shape
95
+ flattened_input = image_embeds.flatten(end_dim=-2)
96
+
97
+ if gamma is None:
98
+ gamma = find_gamma_by_degree(flattened_input, degree)
99
+
100
+ # Calculate number of eigenvectors needed
101
+ n_eig = min(n_clusters * 2 + 6, flattened_input.shape[0] // 2 - 1)
102
+
103
+ eigenvectors, _ = ncut_fn(
104
+ flattened_input, n_eig=n_eig, gamma=gamma, device='cuda'
105
+ )
106
+
107
+ continuous_clusters = kway_ncut(eigenvectors[:, :n_clusters])
108
+ continuous_clusters = rearrange(
109
+ continuous_clusters, '(b l) c -> b l c', b=batch_size
110
+ )
111
+
112
+ return continuous_clusters
113
+
114
+
115
+ # ===== Color and Visualization Functions =====
116
+
117
+ def get_discrete_colors_from_clusters(joint_colors, cluster_eigenvectors):
118
+
119
+ n_clusters = cluster_eigenvectors.shape[-1]
120
+ discrete_colors = np.zeros_like(joint_colors)
121
+
122
+ for img_idx in range(joint_colors.shape[0]):
123
+ colors = joint_colors[img_idx]
124
+ eigenvector = cluster_eigenvectors[img_idx].cpu().numpy()
125
+ cluster_labels = eigenvector.argmax(-1)
126
+ discrete_img_colors = np.zeros_like(colors)
127
+
128
+ for cluster_idx in range(n_clusters):
129
+ cluster_mask = cluster_labels == cluster_idx
130
+ if cluster_mask.sum() > 0:
131
+ # Use mean color for each cluster
132
+ discrete_img_colors[cluster_mask] = colors[cluster_mask].mean(0)
133
+
134
+ discrete_colors[img_idx] = discrete_img_colors
135
+
136
+ # Convert to uint8 format
137
+ discrete_colors = (discrete_colors * 255).astype(np.uint8)
138
+ return discrete_colors
139
+
140
+
141
+ # ===== Center Matching Functions =====
142
+
143
+ def get_cluster_center_features(image_embeds, cluster_labels, n_clusters):
144
+
145
+ center_features = torch.zeros((n_clusters, image_embeds.shape[-1]))
146
+
147
+ for cluster_idx in range(n_clusters):
148
+ cluster_mask = cluster_labels == cluster_idx
149
+
150
+ if cluster_mask.sum() > 0:
151
+ center_features[cluster_idx] = image_embeds[cluster_mask].mean(0)
152
+ else:
153
+ # Use a unique identifier for empty clusters
154
+ center_features[cluster_idx] = torch.ones_like(image_embeds[0]) * 114514
155
+
156
+ return center_features
157
+
158
+
159
+ def cosine_similarity(matrix_a, matrix_b):
160
+ normalized_a = matrix_a / matrix_a.norm(dim=-1, keepdim=True)
161
+ normalized_b = matrix_b / matrix_b.norm(dim=-1, keepdim=True)
162
+ return normalized_a @ normalized_b.T
163
+
164
+
165
+ def hungarian_match_centers(center_features1, center_features2):
166
+ distances = torch.cdist(center_features1, center_features2)
167
+ distances = distances.cpu().detach().numpy()
168
+ _, column_indices = linear_sum_assignment(distances)
169
+ return column_indices
170
+
171
+
172
+ def argmin_matching(center_features1, center_features2):
173
+ distances = torch.cdist(center_features1, center_features2)
174
+ distances = distances.cpu().detach().numpy()
175
+ return np.argmin(distances, axis=-1)
176
+
177
+
178
+ def match_cluster_centers(image_embed1, image_embed2, eigvec1, eigvec2,
179
+ match_method='hungarian'):
180
+ cluster_labels1 = eigvec1.argmax(-1).cpu().numpy()
181
+ cluster_labels2 = eigvec2.argmax(-1).cpu().numpy()
182
+
183
+ center_features1 = get_cluster_center_features(
184
+ image_embed1, cluster_labels1, eigvec1.shape[-1]
185
+ )
186
+ center_features2 = get_cluster_center_features(
187
+ image_embed2, cluster_labels2, eigvec2.shape[-1]
188
+ )
189
+
190
+ if match_method == 'hungarian':
191
+ mapping = hungarian_match_centers(center_features1, center_features2)
192
+ elif match_method == 'argmin':
193
+ mapping = argmin_matching(center_features1, center_features2)
194
+ else:
195
+ raise ValueError(f"Unknown match_method: {match_method}")
196
+
197
+ return mapping
198
+
199
+
200
+ def match_centers_three_images(image_embeds, eigenvectors, match_method='hungarian'):
201
+ """
202
+ Match cluster centers across three images (A2 -> A1 -> B1).
203
+
204
+ Args:
205
+ image_embeds (torch.Tensor): Embeddings for 3 images [A2, A1, B1]
206
+ eigenvectors (torch.Tensor): Eigenvectors for 3 images
207
+ match_method (str): Matching method
208
+
209
+ Returns:
210
+ tuple: (A2_to_A1_mapping, A1_to_B1_mapping)
211
+ """
212
+ a2_to_a1_mapping = match_cluster_centers(
213
+ image_embeds[0], image_embeds[1],
214
+ eigenvectors[0], eigenvectors[1],
215
+ match_method=match_method
216
+ )
217
+
218
+ a1_to_b1_mapping = match_cluster_centers(
219
+ image_embeds[1], image_embeds[2],
220
+ eigenvectors[1], eigenvectors[2],
221
+ match_method=match_method
222
+ )
223
+
224
+ return a2_to_a1_mapping, a1_to_b1_mapping
225
+
226
+
227
+ def match_centers_two_images(image_embed1, image_embed2, eigvec1, eigvec2,
228
+ match_method='hungarian'):
229
+ return match_cluster_centers(
230
+ image_embed1, image_embed2, eigvec1, eigvec2, match_method=match_method
231
+ )
232
+
233
+
234
+ # ===== Two-Step Clustering Functions =====
235
+
236
+ def kway_cluster_per_image_two_step(
237
+ image_embeds,
238
+ n_superclusters,
239
+ n_subclusters_per_supercluster,
240
+ supercluster_gamma=None,
241
+ subcluster_gamma=None,
242
+ degree=0.5
243
+ ):
244
+ """
245
+ Perform 2-step hierarchical clustering on each image separately.
246
+ First finds superclusters, then subdivides each supercluster into subclusters.
247
+
248
+ Args:
249
+ image_embeds: (batch, length, channels) - Image embeddings
250
+ n_superclusters: Number of coarse superclusters to find
251
+ n_subclusters_per_supercluster: Number of subclusters within each supercluster
252
+ supercluster_gamma: Gamma parameter for supercluster NCut (None = auto)
253
+ subcluster_gamma: Gamma parameter for subcluster NCut (None = auto)
254
+ degree: Degree parameter for gamma estimation
255
+
256
+ Returns:
257
+ tuple: (supercluster_eigenvectors, subcluster_eigenvectors, subcluster_to_supercluster_mapping)
258
+ - supercluster_eigenvectors: (batch, length, n_superclusters)
259
+ - subcluster_eigenvectors: (batch, length, total_subclusters)
260
+ - subcluster_to_supercluster_mapping: (batch, total_subclusters) mapping each subcluster to its supercluster
261
+ """
262
+ batch_size = image_embeds.shape[0]
263
+
264
+ # Step 1: Compute superclusters for each image
265
+ supercluster_eigenvectors = []
266
+ for i in range(batch_size):
267
+ eigenvector = _kway_cluster_single_image(
268
+ image_embeds[i], n_superclusters, supercluster_gamma, degree
269
+ )
270
+ supercluster_eigenvectors.append(eigenvector)
271
+ supercluster_eigenvectors = torch.stack(supercluster_eigenvectors)
272
+
273
+ # Step 2: For each supercluster in each image, compute subclusters
274
+ subcluster_eigenvectors = []
275
+ subcluster_to_supercluster_mapping = []
276
+
277
+ for img_idx in range(batch_size):
278
+ img_subclusters = []
279
+ img_mapping = []
280
+
281
+ supercluster_labels = supercluster_eigenvectors[img_idx].argmax(-1)
282
+
283
+ # For each supercluster, extract tokens and compute subclusters
284
+ for supercluster_idx in range(n_superclusters):
285
+ supercluster_mask = supercluster_labels == supercluster_idx
286
+
287
+ if supercluster_mask.sum() == 0:
288
+ # Empty supercluster - create dummy subclusters
289
+ for sub_idx in range(n_subclusters_per_supercluster):
290
+ img_mapping.append(supercluster_idx)
291
+ continue
292
+
293
+ # Extract features belonging to this supercluster
294
+ supercluster_features = image_embeds[img_idx][supercluster_mask]
295
+
296
+ # Perform clustering on this subset
297
+ if supercluster_features.shape[0] <= n_subclusters_per_supercluster:
298
+ # Too few tokens - each token becomes its own subcluster
299
+ n_actual_subclusters = supercluster_features.shape[0]
300
+ subcluster_labels = torch.arange(n_actual_subclusters).to(supercluster_features.device)
301
+ # Pad with dummy subclusters if needed
302
+ for sub_idx in range(n_subclusters_per_supercluster):
303
+ img_mapping.append(supercluster_idx)
304
+ else:
305
+ # Perform subclustering
306
+ subcluster_eigvecs = _kway_cluster_single_image(
307
+ supercluster_features,
308
+ n_subclusters_per_supercluster,
309
+ subcluster_gamma,
310
+ degree
311
+ )
312
+ subcluster_labels = subcluster_eigvecs.argmax(-1)
313
+
314
+ # Track which supercluster these subclusters belong to
315
+ for sub_idx in range(n_subclusters_per_supercluster):
316
+ img_mapping.append(supercluster_idx)
317
+
318
+ # Store subcluster assignments for this supercluster
319
+ for sub_idx in range(n_subclusters_per_supercluster):
320
+ img_subclusters.append((supercluster_mask, subcluster_labels == sub_idx if supercluster_features.shape[0] > n_subclusters_per_supercluster else None))
321
+
322
+ # Convert to full eigenvector representation
323
+ total_subclusters = n_superclusters * n_subclusters_per_supercluster
324
+ img_subcluster_eigvec = torch.zeros((image_embeds.shape[1], total_subclusters)).to(image_embeds.device)
325
+
326
+ for subcluster_global_idx, (supercluster_mask, subcluster_mask) in enumerate(img_subclusters):
327
+ if subcluster_mask is not None:
328
+ # Combine masks: belongs to supercluster AND subcluster
329
+ final_mask = torch.zeros(image_embeds.shape[1], dtype=torch.bool).to(image_embeds.device)
330
+ supercluster_indices = torch.where(supercluster_mask)[0]
331
+ subcluster_within_super = torch.where(subcluster_mask)[0]
332
+ if len(subcluster_within_super) > 0:
333
+ final_indices = supercluster_indices[subcluster_within_super]
334
+ final_mask[final_indices] = True
335
+ img_subcluster_eigvec[final_mask, subcluster_global_idx] = 1.0
336
+ # else: leave as zeros (empty subcluster)
337
+
338
+ subcluster_eigenvectors.append(img_subcluster_eigvec)
339
+ subcluster_to_supercluster_mapping.append(torch.tensor(img_mapping))
340
+
341
+ subcluster_eigenvectors = torch.stack(subcluster_eigenvectors)
342
+ subcluster_to_supercluster_mapping = torch.stack(subcluster_to_supercluster_mapping)
343
+
344
+ return supercluster_eigenvectors, subcluster_eigenvectors, subcluster_to_supercluster_mapping
345
+
346
+
347
+ def match_centers_two_step(
348
+ image_embed1,
349
+ image_embed2,
350
+ supercluster_eigvec1,
351
+ supercluster_eigvec2,
352
+ subcluster_eigvec1,
353
+ subcluster_eigvec2,
354
+ subcluster_to_supercluster_mapping1,
355
+ subcluster_to_supercluster_mapping2,
356
+ supercluster_match_method='hungarian',
357
+ subcluster_match_method='hungarian'
358
+ ):
359
+ """
360
+ Match clusters using 2-step hierarchical approach.
361
+ First matches superclusters, then matches subclusters only within matched superclusters.
362
+
363
+ Args:
364
+ image_embed1, image_embed2: Image embeddings (length, channels)
365
+ supercluster_eigvec1, supercluster_eigvec2: Supercluster eigenvectors (length, n_superclusters)
366
+ subcluster_eigvec1, subcluster_eigvec2: Subcluster eigenvectors (length, total_subclusters)
367
+ subcluster_to_supercluster_mapping1, subcluster_to_supercluster_mapping2: (total_subclusters,)
368
+ supercluster_match_method: Matching method for superclusters
369
+ subcluster_match_method: Matching method for subclusters
370
+
371
+ Returns:
372
+ np.ndarray: Mapping from image1 subclusters to image2 subclusters
373
+ """
374
+ n_superclusters = supercluster_eigvec1.shape[-1]
375
+ n_subclusters_total = subcluster_eigvec1.shape[-1]
376
+
377
+ # Step 1: Match superclusters
378
+ supercluster_mapping = match_cluster_centers(
379
+ image_embed1, image_embed2,
380
+ supercluster_eigvec1, supercluster_eigvec2,
381
+ match_method=supercluster_match_method
382
+ )
383
+
384
+ # Step 2: For each matched supercluster pair, match subclusters within them
385
+ subcluster_mapping = np.zeros(n_subclusters_total, dtype=np.int64)
386
+
387
+ for supercluster1_idx in range(n_superclusters):
388
+ # Find which supercluster in image2 this maps to
389
+ supercluster2_idx = supercluster_mapping[supercluster1_idx]
390
+
391
+ # Find all subclusters belonging to these superclusters
392
+ subclusters1_mask = (subcluster_to_supercluster_mapping1 == supercluster1_idx).cpu().numpy()
393
+ subclusters2_mask = (subcluster_to_supercluster_mapping2 == supercluster2_idx).cpu().numpy()
394
+
395
+ subclusters1_indices = np.where(subclusters1_mask)[0]
396
+ subclusters2_indices = np.where(subclusters2_mask)[0]
397
+
398
+ if len(subclusters1_indices) == 0 or len(subclusters2_indices) == 0:
399
+ # No subclusters in one or both superclusters - use identity mapping
400
+ for sub1_idx in subclusters1_indices:
401
+ if sub1_idx < len(subclusters2_indices):
402
+ subcluster_mapping[sub1_idx] = subclusters2_indices[sub1_idx]
403
+ else:
404
+ subcluster_mapping[sub1_idx] = subclusters2_indices[0] if len(subclusters2_indices) > 0 else 0
405
+ continue
406
+
407
+ # Extract subcluster eigenvectors for matching
408
+ sub_eigvec1 = subcluster_eigvec1[:, subclusters1_indices]
409
+ sub_eigvec2 = subcluster_eigvec2[:, subclusters2_indices]
410
+
411
+ # Compute cluster centers for these subclusters
412
+ cluster_labels1 = sub_eigvec1.argmax(-1).cpu()
413
+ cluster_labels2 = sub_eigvec2.argmax(-1).cpu()
414
+
415
+ center_features1 = get_cluster_center_features(
416
+ image_embed1, cluster_labels1, len(subclusters1_indices)
417
+ )
418
+ center_features2 = get_cluster_center_features(
419
+ image_embed2, cluster_labels2, len(subclusters2_indices)
420
+ )
421
+
422
+ # Match subclusters within this supercluster pair
423
+ if subcluster_match_method == 'hungarian':
424
+ local_mapping = hungarian_match_centers(center_features1, center_features2)
425
+ elif subcluster_match_method == 'argmin':
426
+ local_mapping = argmin_matching(center_features1, center_features2)
427
+ else:
428
+ raise ValueError(f"Unknown subcluster_match_method: {subcluster_match_method}")
429
+
430
+ # Convert local mapping to global subcluster indices
431
+ for local_idx, global_idx1 in enumerate(subclusters1_indices):
432
+ global_idx2 = subclusters2_indices[local_mapping[local_idx]]
433
+ subcluster_mapping[global_idx1] = global_idx2
434
+
435
+ return subcluster_mapping
436
+
437
+
438
+ def kway_cluster_per_image_two_step_fgbg(
439
+ image_embeds,
440
+ n_foreground_subclusters,
441
+ n_background_subclusters,
442
+ supercluster_gamma=None,
443
+ subcluster_gamma=None,
444
+ degree=0.5
445
+ ):
446
+ """
447
+ Perform 2-step hierarchical clustering with automatic foreground/background separation.
448
+ First separates foreground (FG) and background (BG) using 2 clusters, identifying FG
449
+ by the cluster with highest max eigenvector value. Then subdivides FG and BG separately.
450
+
451
+ Args:
452
+ image_embeds: (batch, length, channels) - Image embeddings
453
+ n_foreground_subclusters: Number of subclusters within foreground
454
+ n_background_subclusters: Number of subclusters within background
455
+ supercluster_gamma: Gamma parameter for FG/BG clustering (None = auto)
456
+ subcluster_gamma: Gamma parameter for subcluster NCut (None = auto)
457
+ degree: Degree parameter for gamma estimation
458
+
459
+ Returns:
460
+ tuple: (supercluster_eigenvectors, subcluster_eigenvectors, subcluster_to_supercluster_mapping, fg_indices)
461
+ - supercluster_eigenvectors: (batch, length, 2) - [BG, FG] clusters
462
+ - subcluster_eigenvectors: (batch, length, total_subclusters)
463
+ - subcluster_to_supercluster_mapping: (batch, total_subclusters) - 0=BG, 1=FG
464
+ - fg_indices: (batch,) - which supercluster index is foreground for each image
465
+ """
466
+ batch_size = image_embeds.shape[0]
467
+ n_superclusters = 2 # Always FG and BG
468
+
469
+ # Step 1: Compute FG/BG separation for each image
470
+ supercluster_eigenvectors = []
471
+ fg_indices = []
472
+
473
+ for i in range(batch_size):
474
+ eigenvector = _kway_cluster_single_image(
475
+ image_embeds[i], n_clusters=2, gamma=supercluster_gamma, degree=degree
476
+ )
477
+ supercluster_eigenvectors.append(eigenvector)
478
+
479
+ # Identify foreground: cluster with highest max eigenvector value
480
+ fg_idx = eigenvector.max(0).values.argmax().item()
481
+ fg_indices.append(fg_idx)
482
+
483
+ supercluster_eigenvectors = torch.stack(supercluster_eigenvectors)
484
+ fg_indices = torch.tensor(fg_indices)
485
+
486
+ # Step 2: For each image, compute subclusters within FG and BG
487
+ subcluster_eigenvectors = []
488
+ subcluster_to_supercluster_mapping = []
489
+
490
+ for img_idx in range(batch_size):
491
+ img_subclusters = []
492
+ img_mapping = []
493
+
494
+ supercluster_labels = supercluster_eigenvectors[img_idx].argmax(-1)
495
+ fg_idx = fg_indices[img_idx].item()
496
+ bg_idx = 1 - fg_idx
497
+
498
+ # Process BG and FG in order (BG first, then FG)
499
+ for is_foreground, n_subclusters in [(False, n_background_subclusters), (True, n_foreground_subclusters)]:
500
+ supercluster_idx = fg_idx if is_foreground else bg_idx
501
+ supercluster_mask = supercluster_labels == supercluster_idx
502
+
503
+ # Mark which supercluster type (0=BG, 1=FG)
504
+ supercluster_type = 1 if is_foreground else 0
505
+
506
+ if supercluster_mask.sum() == 0:
507
+ # Empty supercluster - create dummy subclusters
508
+ for sub_idx in range(n_subclusters):
509
+ img_mapping.append(supercluster_type)
510
+ img_subclusters.append((supercluster_mask, None))
511
+ continue
512
+
513
+ # Extract features belonging to this supercluster
514
+ supercluster_features = image_embeds[img_idx][supercluster_mask]
515
+
516
+ # Perform clustering on this subset
517
+ if supercluster_features.shape[0] <= n_subclusters:
518
+ # Too few tokens - each token becomes its own subcluster
519
+ n_actual_subclusters = supercluster_features.shape[0]
520
+ subcluster_labels = torch.arange(n_actual_subclusters).to(supercluster_features.device)
521
+ # Pad with dummy subclusters if needed
522
+ for sub_idx in range(n_subclusters):
523
+ img_mapping.append(supercluster_type)
524
+ if sub_idx < n_actual_subclusters:
525
+ img_subclusters.append((supercluster_mask, subcluster_labels == sub_idx))
526
+ else:
527
+ img_subclusters.append((supercluster_mask, None))
528
+ else:
529
+ # Perform subclustering
530
+ subcluster_eigvecs = _kway_cluster_single_image(
531
+ supercluster_features,
532
+ n_subclusters,
533
+ subcluster_gamma,
534
+ degree
535
+ )
536
+ subcluster_labels = subcluster_eigvecs.argmax(-1)
537
+
538
+ # Store subcluster assignments
539
+ for sub_idx in range(n_subclusters):
540
+ img_mapping.append(supercluster_type)
541
+ img_subclusters.append((supercluster_mask, subcluster_labels == sub_idx))
542
+
543
+ # Convert to full eigenvector representation
544
+ total_subclusters = n_background_subclusters + n_foreground_subclusters
545
+ img_subcluster_eigvec = torch.zeros((image_embeds.shape[1], total_subclusters)).to(image_embeds.device)
546
+
547
+ for subcluster_global_idx, (supercluster_mask, subcluster_mask) in enumerate(img_subclusters):
548
+ if subcluster_mask is not None:
549
+ # Combine masks: belongs to supercluster AND subcluster
550
+ final_mask = torch.zeros(image_embeds.shape[1], dtype=torch.bool).to(image_embeds.device)
551
+ supercluster_indices = torch.where(supercluster_mask)[0]
552
+ subcluster_within_super = torch.where(subcluster_mask)[0]
553
+ if len(subcluster_within_super) > 0:
554
+ final_indices = supercluster_indices[subcluster_within_super]
555
+ final_mask[final_indices] = True
556
+ img_subcluster_eigvec[final_mask, subcluster_global_idx] = 1.0
557
+ # else: leave as zeros (empty subcluster)
558
+
559
+ subcluster_eigenvectors.append(img_subcluster_eigvec)
560
+ subcluster_to_supercluster_mapping.append(torch.tensor(img_mapping))
561
+
562
+ subcluster_eigenvectors = torch.stack(subcluster_eigenvectors)
563
+ subcluster_to_supercluster_mapping = torch.stack(subcluster_to_supercluster_mapping)
564
+
565
+ return supercluster_eigenvectors, subcluster_eigenvectors, subcluster_to_supercluster_mapping, fg_indices
566
+
567
+
568
+ def match_centers_two_step_fgbg(
569
+ image_embed1,
570
+ image_embed2,
571
+ subcluster_eigvec1,
572
+ subcluster_eigvec2,
573
+ subcluster_to_supercluster_mapping1,
574
+ subcluster_to_supercluster_mapping2,
575
+ n_background_subclusters,
576
+ n_foreground_subclusters,
577
+ background_match_method='hungarian',
578
+ foreground_match_method='hungarian'
579
+ ):
580
+ """
581
+ Match clusters using 2-step FG/BG hierarchical approach.
582
+ FG and BG are automatically matched (no need for supercluster matching).
583
+ Subclusters are matched within their respective FG or BG groups.
584
+
585
+ Args:
586
+ image_embed1, image_embed2: Image embeddings (length, channels)
587
+ subcluster_eigvec1, subcluster_eigvec2: Subcluster eigenvectors (length, total_subclusters)
588
+ subcluster_to_supercluster_mapping1, subcluster_to_supercluster_mapping2: (total_subclusters,) - 0=BG, 1=FG
589
+ n_background_subclusters: Number of background subclusters
590
+ n_foreground_subclusters: Number of foreground subclusters
591
+ background_match_method: Matching method for background subclusters
592
+ foreground_match_method: Matching method for foreground subclusters
593
+
594
+ Returns:
595
+ np.ndarray: Mapping from image1 subclusters to image2 subclusters
596
+ """
597
+ total_subclusters = n_background_subclusters + n_foreground_subclusters
598
+ subcluster_mapping = np.zeros(total_subclusters, dtype=np.int64)
599
+
600
+ # Process BG (supercluster_type=0) and FG (supercluster_type=1) separately
601
+ for supercluster_type in [0, 1]: # 0=BG, 1=FG
602
+ # Find subclusters belonging to this supercluster type
603
+ subclusters1_mask = (subcluster_to_supercluster_mapping1 == supercluster_type).cpu().numpy()
604
+ subclusters2_mask = (subcluster_to_supercluster_mapping2 == supercluster_type).cpu().numpy()
605
+
606
+ subclusters1_indices = np.where(subclusters1_mask)[0]
607
+ subclusters2_indices = np.where(subclusters2_mask)[0]
608
+
609
+ if len(subclusters1_indices) == 0 or len(subclusters2_indices) == 0:
610
+ # No subclusters in one or both - use identity mapping
611
+ for sub1_idx in subclusters1_indices:
612
+ if sub1_idx < len(subclusters2_indices):
613
+ subcluster_mapping[sub1_idx] = subclusters2_indices[sub1_idx]
614
+ else:
615
+ subcluster_mapping[sub1_idx] = subclusters2_indices[0] if len(subclusters2_indices) > 0 else 0
616
+ continue
617
+
618
+ # Extract subcluster eigenvectors for matching
619
+ sub_eigvec1 = subcluster_eigvec1[:, subclusters1_indices]
620
+ sub_eigvec2 = subcluster_eigvec2[:, subclusters2_indices]
621
+
622
+ # Compute cluster centers for these subclusters
623
+ cluster_labels1 = sub_eigvec1.argmax(-1).cpu()
624
+ cluster_labels2 = sub_eigvec2.argmax(-1).cpu()
625
+
626
+ center_features1 = get_cluster_center_features(
627
+ image_embed1, cluster_labels1, len(subclusters1_indices)
628
+ )
629
+ center_features2 = get_cluster_center_features(
630
+ image_embed2, cluster_labels2, len(subclusters2_indices)
631
+ )
632
+
633
+ # Match subclusters within this FG/BG group
634
+ match_method = foreground_match_method if supercluster_type == 1 else background_match_method
635
+
636
+ if match_method == 'hungarian':
637
+ local_mapping = hungarian_match_centers(center_features1, center_features2)
638
+ elif match_method == 'argmin':
639
+ local_mapping = argmin_matching(center_features1, center_features2)
640
+ else:
641
+ raise ValueError(f"Unknown match_method: {match_method}")
642
+
643
+ # Convert local mapping to global subcluster indices
644
+ for local_idx, global_idx1 in enumerate(subclusters1_indices):
645
+ global_idx2 = subclusters2_indices[local_mapping[local_idx]]
646
+ subcluster_mapping[global_idx1] = global_idx2
647
+
648
+ return subcluster_mapping
649
+
650
+
651
+ # ===== Visualization Functions =====
652
+
653
+ def plot_cluster_masks(image, eigenvector, cluster_order, hw=16):
654
+ """
655
+ blend the image with the cluster masks
656
+ # image is (c, h, w)
657
+ # eigenvector is (h*w, n_eig)
658
+ # cluster_order is (n_eig), the order of the clusters
659
+ """
660
+ cluster_images = []
661
+ base_img = image_inverse_transform(image).resize(
662
+ (128, 128), resample=Image.Resampling.NEAREST
663
+ )
664
+
665
+ for cluster_idx in cluster_order:
666
+ # Create cluster mask
667
+ cluster_mask = eigenvector.argmax(-1) == cluster_idx
668
+ mask_array = cluster_mask.cpu().numpy()[1:].reshape(hw, hw)
669
+ mask_array = (mask_array * 255).astype(np.uint8)
670
+
671
+ # Resize mask to match image
672
+ mask_img = Image.fromarray(mask_array).resize(
673
+ (128, 128), resample=Image.Resampling.NEAREST
674
+ )
675
+
676
+ # Apply mask to image
677
+ mask_normalized = np.array(mask_img).astype(np.float32) / 255
678
+ img_array = np.array(base_img).astype(np.float32) / 255
679
+
680
+ # Create 3-channel mask and apply
681
+ mask_3ch = np.stack([mask_normalized] * 3, axis=-1)
682
+ mask_3ch[mask_3ch == 0] = 0.1 # Dim non-masked areas
683
+
684
+ masked_img = img_array * mask_3ch
685
+ masked_img = (masked_img * 255).astype(np.uint8)
686
+
687
+ cluster_images.append(Image.fromarray(masked_img))
688
+
689
+ return cluster_images
690
+
691
+
692
+ def create_image_grid_row(image, eigenvector, cluster_order, discrete_colors,
693
+ hw=16, n_cols=10):
694
+
695
+ cluster_images = plot_cluster_masks(image, eigenvector, cluster_order, hw)
696
+
697
+ # Prepare base images
698
+ base_img = image_inverse_transform(image).resize(
699
+ (128, 128), resample=Image.Resampling.NEAREST
700
+ )
701
+
702
+ ncut_visualization = discrete_colors[1:].reshape(hw, hw, 3)
703
+ ncut_img = Image.fromarray(ncut_visualization).resize(
704
+ (128, 128), resample=Image.Resampling.NEAREST
705
+ )
706
+
707
+ # Pad cluster images to fill grid
708
+ num_missing = n_cols - len(cluster_images) % n_cols
709
+ if num_missing != n_cols:
710
+ empty_img = Image.fromarray(np.zeros((128, 128, 3), dtype=np.uint8))
711
+ cluster_images.extend([empty_img] * num_missing)
712
+
713
+ # Create grid rows
714
+ prepend_images = [base_img, ncut_img]
715
+ n_rows = len(cluster_images) // n_cols
716
+ grid_rows = []
717
+
718
+ for row_idx in range(n_rows):
719
+ start_idx = row_idx * n_cols
720
+ end_idx = (row_idx + 1) * n_cols
721
+ row_images = prepend_images + cluster_images[start_idx:end_idx]
722
+ grid_rows.append(row_images)
723
+
724
+ return grid_rows
725
+
726
+
727
+ def create_multi_image_grid(images, eigenvectors, cluster_orders, discrete_colors,
728
+ hw=16, n_cols=10):
729
+ all_grid_rows = []
730
+
731
+ for image, eigvec, cluster_order, discrete_rgb in zip(
732
+ images, eigenvectors, cluster_orders, discrete_colors
733
+ ):
734
+ grid_rows = create_image_grid_row(
735
+ image, eigvec, cluster_order, discrete_rgb, hw, n_cols
736
+ )
737
+ all_grid_rows.append(grid_rows)
738
+
739
+ # Interleave rows from different images
740
+ interleaved_rows = []
741
+ for row_idx in range(len(all_grid_rows[0])):
742
+ for img_idx in range(len(all_grid_rows)):
743
+ interleaved_rows.append(all_grid_rows[img_idx][row_idx])
744
+
745
+ return interleaved_rows
746
+
747
+
748
+ def get_correspondence_plot(images, eigenvectors, cluster_orders, discrete_colors,
749
+ hw=16, n_cols=10):
750
+ n_clusters = eigenvectors.shape[-1]
751
+ n_cols = min(n_cols, n_clusters)
752
+
753
+ interleaved_rows = create_multi_image_grid(
754
+ images, eigenvectors, cluster_orders, discrete_colors, hw, n_cols
755
+ )
756
+
757
+ n_rows = len(interleaved_rows)
758
+ n_cols = len(interleaved_rows[0])
759
+
760
+ # Flatten all images and create final grid
761
+ all_images = sum(interleaved_rows, [])
762
+ final_grid = image_grid(all_images, n_rows, n_cols)
763
+
764
+ return final_grid
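
A minimal usage sketch for the clustering-and-matching path above (not part of the commit): it assumes a CUDA device, since ncut_fn is invoked with device='cuda', and uses random tensors as stand-ins for real token features.

import torch
from dino_correspondence import kway_cluster_per_image, match_cluster_centers

# Two images, 257 tokens each, 768-dim features (shapes follow the docstrings above).
image_embeds = torch.randn(2, 257, 768)

# Cluster each image separately into 6 segments: (2, 257, 6) soft assignments.
eigvecs = kway_cluster_per_image(image_embeds, n_clusters=6)

# Map the clusters of image 0 onto the clusters of image 1 via Hungarian matching.
mapping = match_cluster_centers(image_embeds[0], image_embeds[1], eigvecs[0], eigvecs[1], match_method='hungarian')
print(mapping)  # cluster i in image 0 corresponds to cluster mapping[i] in image 1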
download_models.py ADDED
@@ -0,0 +1,10 @@
+ def download_ipadapter():
+     from huggingface_hub import snapshot_download
+     # snapshot_download(repo_id="h94/IP-Adapter", ignore_patterns="sdxl_models/*", local_dir="./downloads/")
+     snapshot_download(repo_id="h94/IP-Adapter", local_dir="./downloads/")
+
+     from ipadapter_model import load_ipadapter
+     ip_model = load_ipadapter(device="cpu")
+
+ if __name__ == "__main__":
+     download_ipadapter()
extract_features.py ADDED
@@ -0,0 +1,135 @@
+ """
+ Feature Extraction Module
+
+ This module provides utilities for extracting features from images using various
+ pre-trained models including DINO, DINOv3, and CLIP. It handles model loading,
+ batch processing, and memory management for efficient feature extraction.
+ """
+
+ import gc
+ from typing import Tuple, Optional
+
+ import torch
+ import torch.nn as nn
+ from einops import rearrange
+ from torchvision import transforms
+
+ from ipadapter_model import extract_clip_embedding_tensor
+ from ipadapter_model import load_ipadapter
+
+
+ # Default hyperparameters
+ DEFAULT_BATCH_SIZE = 32
+
+
+ # ===== Image Transforms =====
+
+ # High-resolution transform for DINO models
+ dino_image_transform = transforms.Compose([
+     transforms.Resize((256 * 2, 256 * 2)),  # High resolution for detailed features
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+ ])
+
+ # Standard resolution transform for CLIP models
+ clip_image_transform = transforms.Compose([
+     transforms.Resize((224, 224)),  # Standard ImageNet resolution
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+ ])
+
+ # Inverse transform to convert normalized tensors back to PIL images
+ image_inverse_transform = transforms.Compose([
+     transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[1/0.229, 1/0.224, 1/0.225]),
+     transforms.Normalize(mean=[-0.485, -0.456, -0.406], std=[1.0, 1.0, 1.0]),
+     transforms.ToPILImage(),
+ ])
+
+
+ # ===== Memory Management =====
+
+ def clear_gpu_memory():
+     """Clear GPU cache and run garbage collection to free memory."""
+     torch.cuda.empty_cache()
+     gc.collect()
+
+
+ # ===== Feature Extraction Functions =====
+
+ @torch.no_grad()
+ def extract_dino_features(images: torch.Tensor, batch_size: int = DEFAULT_BATCH_SIZE) -> torch.Tensor:
+     """
+     Extract features using a DINO ViT-B/16 model.
+
+     Args:
+         images (torch.Tensor): Input images of shape (N, C, H, W)
+         batch_size (int): Batch size for processing
+
+     Returns:
+         torch.Tensor: DINO features of shape (N, L, D)
+     """
+     # Load DINO model
+     # dino_model = torch.hub.load('facebookresearch/dino:main', 'dino_vits16')
+     dino_model = torch.hub.load('facebookresearch/dino:main', 'dino_vitb16')
+     dino_model = dino_model.eval().cuda()
+
+     # Process images in batches
+     num_batches = (images.shape[0] + batch_size - 1) // batch_size
+     feature_batches = []
+
+     for batch_idx in range(num_batches):
+         start_idx = batch_idx * batch_size
+         end_idx = min((batch_idx + 1) * batch_size, images.shape[0])
+
+         batch_images = images[start_idx:end_idx].cuda()
+         batch_features = dino_model.get_intermediate_layers(batch_images)[-1]
+         feature_batches.append(batch_features.cpu())
+
+     # Concatenate all batches
+     all_features = torch.cat(feature_batches, dim=0)
+
+     # Clean up memory
+     del dino_model
+     clear_gpu_memory()
+
+     return all_features
+
+
+ @torch.no_grad()
+ def extract_clip_features(images: torch.Tensor, batch_size: int = DEFAULT_BATCH_SIZE, ipadapter_version: str = "sd15") -> torch.Tensor:
+     """
+     Extract features using the IP-Adapter CLIP vision encoder.
+
+     Args:
+         images (torch.Tensor): Input images of shape (N, C, H, W)
+         batch_size (int): Batch size for processing
+
+     Returns:
+         torch.Tensor: CLIP features of shape (N, L, D)
+     """
+     # Load IP-Adapter model (contains CLIP encoder)
+     ip_adapter_model = load_ipadapter(version=ipadapter_version)
+
+     # Process images in batches
+     num_batches = (images.shape[0] + batch_size - 1) // batch_size
+     feature_batches = []
+
+     for batch_idx in range(num_batches):
+         start_idx = batch_idx * batch_size
+         end_idx = min((batch_idx + 1) * batch_size, images.shape[0])
+
+         batch_images = images[start_idx:end_idx].cuda()
+         batch_features = extract_clip_embedding_tensor(
+             batch_images, ip_adapter_model, resize=False
+         )
+         feature_batches.append(batch_features.cpu())
+
+     # Concatenate all batches
+     all_features = torch.cat(feature_batches, dim=0)
+
+     # Clean up memory
+     del ip_adapter_model
+     clear_gpu_memory()
+
+     return all_features
+
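
A minimal sketch (not part of the commit) tying the DINO transform to the batched extractor; it assumes a CUDA device and network access for torch.hub, and uses two images that ship with this repo.

import torch
from PIL import Image
from extract_features import dino_image_transform, extract_dino_features

paths = ["./images/playviolin_hr.png", "./images/playguitar_hr.png"]
batch = torch.stack([dino_image_transform(Image.open(p).convert("RGB")) for p in paths])  # (2, 3, 512, 512)

features = extract_dino_features(batch, batch_size=2)  # (2, L, D) patch-token features, returned on CPU
print(features.shape)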
images/00436_l.jpg ADDED

Git LFS Details

  • SHA256: e713848859c0aa73bfbb7e6b5160a2f14712d095755e3e7df73a6a118caafaa3
  • Pointer size: 130 Bytes
  • Size of remote file: 14.4 kB
images/00436_r.jpg ADDED

Git LFS Details

  • SHA256: 72b620291fe4c56a86fcfbc5f57f2b17c7e0f5b2c7289f6fd96cf4545cc6aa05
  • Pointer size: 130 Bytes
  • Size of remote file: 19.6 kB
images/02140_left.jpg ADDED

Git LFS Details

  • SHA256: 3af50aab2ed37da1da939ddb5438f398b2d2a04b1dd726ad0c5317c45d1bc0e7
  • Pointer size: 130 Bytes
  • Size of remote file: 17.6 kB
images/02140_right.jpg ADDED

Git LFS Details

  • SHA256: e6cac915a2fae2f9c938e8def2df75a2799fc54a358317cc5372e1d4293d7ced
  • Pointer size: 130 Bytes
  • Size of remote file: 18.7 kB
images/02718_l.jpg ADDED

Git LFS Details

  • SHA256: 3d914835f514cfa9f0b74da5e1727e3439eedeb71fad9131e0ab2b7cbb7e3626
  • Pointer size: 130 Bytes
  • Size of remote file: 15.2 kB
images/02718_r.jpg ADDED

Git LFS Details

  • SHA256: 17b792ba2c5b263cf8c0c585337660f61b3a670eee31d167ef138baaa6cd96c7
  • Pointer size: 130 Bytes
  • Size of remote file: 11.7 kB
images/03969_l.jpg ADDED

Git LFS Details

  • SHA256: 931eb41d78a6100de980daccaaa85da8e69b45838bc4dcab79b44364c81ddcc6
  • Pointer size: 130 Bytes
  • Size of remote file: 18.8 kB
images/03969_r.jpg ADDED

Git LFS Details

  • SHA256: 6b8a6cdc4111bf584f32c1b1c5e758b820a74db39fb9d897c8c9604df42b8be4
  • Pointer size: 130 Bytes
  • Size of remote file: 26.4 kB
images/04963_l.jpg ADDED

Git LFS Details

  • SHA256: 06abccfcfff8bb4a442b248cba7f7833f44f77d7487a16a3b19a0b5c7fc36d81
  • Pointer size: 130 Bytes
  • Size of remote file: 20.6 kB
images/04963_r.jpg ADDED

Git LFS Details

  • SHA256: dcdb33ad83ed6cb3aafabd216045ef3bc4b25bacfc3d1441c75712510986c6c3
  • Pointer size: 130 Bytes
  • Size of remote file: 18.2 kB
images/05358_l.jpg ADDED

Git LFS Details

  • SHA256: cbf335ff4035af4178af36437add55ee38dbb027a6deb396e5cc97bee789c5b6
  • Pointer size: 129 Bytes
  • Size of remote file: 7.81 kB
images/05358_r.jpg ADDED

Git LFS Details

  • SHA256: 5be1f8333805791fdef574ec7b647516f26fd24d80d47f6b363052b640ecdb1b
  • Pointer size: 130 Bytes
  • Size of remote file: 14.2 kB
images/archi/extra1.jpg ADDED

Git LFS Details

  • SHA256: cc26697f7e923849687314a3b4616a5607edf86d73c13c307c5461cfcb4828d5
  • Pointer size: 131 Bytes
  • Size of remote file: 412 kB
images/archi/extra2.jpg ADDED

Git LFS Details

  • SHA256: b507b196c76ec5dd177489571622c5b90fe61a548d74890ad80a7c0b8a2d06ea
  • Pointer size: 131 Bytes
  • Size of remote file: 333 kB
images/archi/extra3.jpg ADDED

Git LFS Details

  • SHA256: c6c34186ee529673fe390070ab54a16e98d4b09ad3cdbc004663bd05c1b0e9e0
  • Pointer size: 131 Bytes
  • Size of remote file: 240 kB
images/archi/input_A.jpg ADDED

Git LFS Details

  • SHA256: 26c073498963ff2b94d6f777233439ec5b7de20e784062f02fc137d1ca81de25
  • Pointer size: 130 Bytes
  • Size of remote file: 68.9 kB
images/archi/input_B.jpg ADDED

Git LFS Details

  • SHA256: b1398a0bfbbdd8b6d6007d8bb64a84af1447a73cd6263725f323417749c63fa1
  • Pointer size: 131 Bytes
  • Size of remote file: 589 kB
images/black_bear1.jpg ADDED

Git LFS Details

  • SHA256: 7865d7e9362b70ffb1dfceb22b2199896df47d29c78dd35107504df6045fa700
  • Pointer size: 130 Bytes
  • Size of remote file: 22.7 kB
images/black_bear2.jpg ADDED

Git LFS Details

  • SHA256: 40a150b0dd6feada75ff631e795461f28fc5ca28e9234b6a5265271359498881
  • Pointer size: 130 Bytes
  • Size of remote file: 22.8 kB
images/input_bread.png ADDED

Git LFS Details

  • SHA256: f5def803bc945f869918f85d880da91597f3c823e59672246ca69db7ee4a896f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
images/input_cat.png ADDED

Git LFS Details

  • SHA256: 2a9700e51498ce61246dcb8c2b72053215ec00b4123c787e97e0c038f7723d4a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.12 MB
images/pink_bear1.jpg ADDED

Git LFS Details

  • SHA256: 5c55f63c86ec0fcf1ab0d3a7b3edad89ada24d050352b96f51c8c27776f1dc41
  • Pointer size: 130 Bytes
  • Size of remote file: 30.2 kB
images/playguitar_hr.png ADDED

Git LFS Details

  • SHA256: 104e7e90c29bfafa489a732c6d86bf5cbc8f81dd97bb5c0dbf46b14020132d7b
  • Pointer size: 131 Bytes
  • Size of remote file: 650 kB
images/playviolin_hr.png ADDED

Git LFS Details

  • SHA256: 9b0debb545aa513b711616619b6b4115b3ac5a5a4e02940bd97466349ae74894
  • Pointer size: 130 Bytes
  • Size of remote file: 90.4 kB
intrinsic_dim.py ADDED
@@ -0,0 +1,149 @@
1
+ """
2
+ Intrinsic Dimensionality Estimation Module
3
+
4
+ This module provides utilities for estimating the intrinsic dimensionality of
5
+ high-dimensional feature representations using Maximum Likelihood Estimation (MLE).
6
+ The intrinsic dimension represents the true underlying dimensionality of the data
7
+ manifold, which is often much lower than the ambient feature space dimension.
8
+ """
9
+
10
+ import logging
11
+ from typing import Union, Optional
12
+
13
+ import numpy as np
14
+ import torch
15
+ import skdim
16
+ from ncut_pytorch.utils.sample import farthest_point_sampling
17
+
18
+
19
+ # ===== Constants =====
20
+
21
+ DEFAULT_MAX_SAMPLES = 2000
22
+ MIN_SAMPLES_REQUIRED = 10
23
+
24
+
25
+ # ===== Intrinsic Dimensionality Estimation =====
26
+
27
+ def estimate_intrinsic_dimension(features: Union[torch.Tensor, np.ndarray],
28
+ max_samples: int = DEFAULT_MAX_SAMPLES,
29
+ use_global_estimation: bool = True) -> float:
30
+ """
31
+ Estimate the intrinsic dimensionality of feature representations.
32
+
33
+ This function uses Maximum Likelihood Estimation (MLE) to determine the intrinsic
34
+ dimensionality of high-dimensional features. If the dataset is large, it uses
35
+ farthest point sampling to select a representative subset for efficient computation.
36
+
37
+ Args:
38
+ features (Union[torch.Tensor, np.ndarray]): Input features of any shape.
39
+ Will be flattened to (N, D) format.
40
+ max_samples (int): Maximum number of samples to use for estimation.
41
+ Larger values give more accurate estimates but are slower.
42
+ use_global_estimation (bool): Whether to prefer global over local estimation.
43
+
44
+ Returns:
45
+ float: Estimated intrinsic dimensionality of the feature manifold.
46
+
47
+ Raises:
48
+ ValueError: If input features are empty or have insufficient samples.
49
+ RuntimeError: If dimensionality estimation fails completely.
50
+
51
+ Example:
52
+ >>> features = torch.randn(1000, 512) # 1000 samples, 512-dim features
53
+ >>> intrinsic_dim = estimate_intrinsic_dimension(features)
54
+ >>> print(f"Intrinsic dimension: {intrinsic_dim:.2f}")
55
+ """
56
+ # Input validation
57
+ if features is None:
58
+ raise ValueError("Features cannot be None")
59
+
60
+ # Convert to numpy if needed
61
+ if isinstance(features, torch.Tensor):
62
+ if features.numel() == 0:
63
+ raise ValueError("Input tensor is empty")
64
+ numpy_features = features.cpu().detach().numpy()
65
+ else:
66
+ numpy_features = np.asarray(features)
67
+ if numpy_features.size == 0:
68
+ raise ValueError("Input array is empty")
69
+
70
+ # Reshape to 2D format (N_samples, N_features)
71
+ original_shape = numpy_features.shape
72
+ flattened_features = numpy_features.reshape(-1, numpy_features.shape[-1])
73
+
74
+ n_samples, n_features = flattened_features.shape
75
+
76
+ # Validate minimum requirements
77
+ if n_samples < MIN_SAMPLES_REQUIRED:
78
+ raise ValueError(
79
+ f"Insufficient samples for dimensionality estimation. "
80
+ f"Need at least {MIN_SAMPLES_REQUIRED}, got {n_samples}"
81
+ )
82
+
83
+ if n_features < 2:
84
+ raise ValueError(
85
+ f"Feature dimension must be at least 2, got {n_features}"
86
+ )
87
+
88
+ # Apply farthest point sampling if dataset is too large
89
+ if n_samples > max_samples:
90
+ logging.info(
91
+ f"Dataset has {n_samples} samples, downsampling to {max_samples} "
92
+ f"using farthest point sampling for efficiency"
93
+ )
94
+
95
+ # Convert back to tensor for sampling
96
+ tensor_features = torch.tensor(flattened_features, dtype=torch.float32)
97
+ sample_indices = farthest_point_sampling(tensor_features, max_samples)
98
+ sampled_features = flattened_features[sample_indices]
99
+ else:
100
+ sampled_features = flattened_features
101
+
102
+ # Validate sampled data quality
103
+ if np.any(np.isnan(sampled_features)) or np.any(np.isinf(sampled_features)):
104
+ logging.warning("Input features contain NaN or infinite values, which may affect estimation")
105
+
106
+ # Estimate intrinsic dimensionality using MLE
107
+ try:
108
+ mle_estimator = skdim.id.MLE()
109
+ fitted_estimator = mle_estimator.fit(sampled_features)
110
+ estimated_dimension = fitted_estimator.dimension_
111
+
112
+ # Handle failed global estimation
113
+ if estimated_dimension <= 0 or not np.isfinite(estimated_dimension):
114
+ if hasattr(fitted_estimator, 'dimension_pw_') and fitted_estimator.dimension_pw_ is not None:
115
+ # Fallback to local (pairwise) dimension estimates
116
+ local_dimensions = fitted_estimator.dimension_pw_
117
+ valid_local_dims = local_dimensions[np.isfinite(local_dimensions) & (local_dimensions > 0)]
118
+
119
+ if len(valid_local_dims) > 0:
120
+ estimated_dimension = float(np.mean(valid_local_dims))
121
+ logging.warning(
122
+ f"Global intrinsic dimension estimation failed (got {fitted_estimator.dimension_}). "
123
+ f"Using mean of {len(valid_local_dims)} local estimates: {estimated_dimension:.2f}"
124
+ )
125
+ else:
126
+ raise RuntimeError("Both global and local dimensionality estimation failed")
127
+ else:
128
+ raise RuntimeError("Global dimensionality estimation failed and no local estimates available")
129
+
130
+ # Sanity check: intrinsic dimension should not exceed ambient dimension
131
+ if estimated_dimension > n_features:
132
+ logging.warning(
133
+ f"Estimated intrinsic dimension ({estimated_dimension:.2f}) exceeds "
134
+ f"ambient dimension ({n_features}). Capping to ambient dimension."
135
+ )
136
+ estimated_dimension = float(n_features)
137
+
138
+ # Log results
139
+ compression_ratio = n_features / estimated_dimension if estimated_dimension > 0 else np.inf
140
+ logging.info(
141
+ f"Intrinsic dimensionality estimation completed: "
142
+ f"{estimated_dimension:.2f} (compression ratio: {compression_ratio:.1f}x)"
143
+ )
144
+
145
+ return float(estimated_dimension)
146
+
147
+ except Exception as e:
148
+ raise RuntimeError(f"Intrinsic dimensionality estimation failed: {str(e)}") from e
149
+
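
A minimal usage sketch for the module above (illustrative only; the synthetic data and the expected value are assumptions, not part of this commit): points sampled from a 2-D plane embedded in 64 dimensions should give an estimate close to 2.

    import torch
    from intrinsic_dim import estimate_intrinsic_dimension

    # hypothetical example: 1000 points on a random 2-D plane in a 64-D ambient space
    basis = torch.randn(2, 64)
    coords = torch.randn(1000, 2)
    features = coords @ basis

    # with max_samples=500 the function downsamples internally via farthest point sampling
    dim = estimate_intrinsic_dimension(features, max_samples=500)
    print(f"estimated intrinsic dimension: {dim:.2f}")  # expected to be near 2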
ip_adapter/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ from .ip_adapter import IPAdapter, IPAdapterPlus, IPAdapterPlusXL, IPAdapterXL, IPAdapterFull
2
+
3
+ __all__ = [
4
+ "IPAdapter",
5
+ "IPAdapterPlus",
6
+ "IPAdapterPlusXL",
7
+ "IPAdapterXL",
8
+ "IPAdapterFull",
9
+ ]
ip_adapter/attention_processor.py ADDED
@@ -0,0 +1,568 @@
1
+ # modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+
7
+ class AttnProcessor(nn.Module):
8
+ r"""
9
+ Default processor for performing attention-related computations.
10
+ """
11
+
12
+ def __init__(
13
+ self,
14
+ hidden_size=None,
15
+ cross_attention_dim=None,
16
+ ):
17
+ super().__init__()
18
+
19
+ def __call__(
20
+ self,
21
+ attn,
22
+ hidden_states,
23
+ encoder_hidden_states=None,
24
+ attention_mask=None,
25
+ temb=None,
26
+ *args,
27
+ **kwargs,
28
+ ):
29
+ residual = hidden_states
30
+
31
+ if attn.spatial_norm is not None:
32
+ hidden_states = attn.spatial_norm(hidden_states, temb)
33
+
34
+ input_ndim = hidden_states.ndim
35
+
36
+ if input_ndim == 4:
37
+ batch_size, channel, height, width = hidden_states.shape
38
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
39
+
40
+ batch_size, sequence_length, _ = (
41
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
42
+ )
43
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
44
+
45
+ if attn.group_norm is not None:
46
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
47
+
48
+ query = attn.to_q(hidden_states)
49
+
50
+ if encoder_hidden_states is None:
51
+ encoder_hidden_states = hidden_states
52
+ elif attn.norm_cross:
53
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
54
+
55
+ key = attn.to_k(encoder_hidden_states)
56
+ value = attn.to_v(encoder_hidden_states)
57
+
58
+ query = attn.head_to_batch_dim(query)
59
+ key = attn.head_to_batch_dim(key)
60
+ value = attn.head_to_batch_dim(value)
61
+
62
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
63
+ hidden_states = torch.bmm(attention_probs, value)
64
+ hidden_states = attn.batch_to_head_dim(hidden_states)
65
+
66
+ # linear proj
67
+ hidden_states = attn.to_out[0](hidden_states)
68
+ # dropout
69
+ hidden_states = attn.to_out[1](hidden_states)
70
+
71
+ if input_ndim == 4:
72
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
73
+
74
+ if attn.residual_connection:
75
+ hidden_states = hidden_states + residual
76
+
77
+ hidden_states = hidden_states / attn.rescale_output_factor
78
+
79
+ return hidden_states
80
+
81
+
82
+ class IPAttnProcessor(nn.Module):
83
+ r"""
84
+ Attention processor for IP-Adapter.
85
+ Args:
86
+ hidden_size (`int`):
87
+ The hidden size of the attention layer.
88
+ cross_attention_dim (`int`):
89
+ The number of channels in the `encoder_hidden_states`.
90
+ scale (`float`, defaults to 1.0):
91
+ the weight scale of image prompt.
92
+ num_tokens (`int`, defaults to 4; for ip_adapter_plus it should be 16):
93
+ The context length of the image features.
94
+ """
95
+
96
+ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
97
+ super().__init__()
98
+
99
+ self.hidden_size = hidden_size
100
+ self.cross_attention_dim = cross_attention_dim
101
+ self.scale = scale
102
+ self.num_tokens = num_tokens
103
+
104
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
105
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
106
+
107
+ def __call__(
108
+ self,
109
+ attn,
110
+ hidden_states,
111
+ encoder_hidden_states=None,
112
+ attention_mask=None,
113
+ temb=None,
114
+ *args,
115
+ **kwargs,
116
+ ):
117
+ residual = hidden_states
118
+
119
+ if attn.spatial_norm is not None:
120
+ hidden_states = attn.spatial_norm(hidden_states, temb)
121
+
122
+ input_ndim = hidden_states.ndim
123
+
124
+ if input_ndim == 4:
125
+ batch_size, channel, height, width = hidden_states.shape
126
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
127
+
128
+ batch_size, sequence_length, _ = (
129
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
130
+ )
131
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
132
+
133
+ if attn.group_norm is not None:
134
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
135
+
136
+ query = attn.to_q(hidden_states)
137
+
138
+ if encoder_hidden_states is None:
139
+ encoder_hidden_states = hidden_states
140
+ else:
141
+ # get encoder_hidden_states, ip_hidden_states
142
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
143
+ encoder_hidden_states, ip_hidden_states = (
144
+ encoder_hidden_states[:, :end_pos, :],
145
+ encoder_hidden_states[:, end_pos:, :],
146
+ )
147
+ if attn.norm_cross:
148
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
149
+
150
+ key = attn.to_k(encoder_hidden_states)
151
+ value = attn.to_v(encoder_hidden_states)
152
+
153
+ query = attn.head_to_batch_dim(query)
154
+ key = attn.head_to_batch_dim(key)
155
+ value = attn.head_to_batch_dim(value)
156
+
157
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
158
+ hidden_states = torch.bmm(attention_probs, value)
159
+ hidden_states = attn.batch_to_head_dim(hidden_states)
160
+
161
+ # for ip-adapter
162
+ ip_key = self.to_k_ip(ip_hidden_states)
163
+ ip_value = self.to_v_ip(ip_hidden_states)
164
+
165
+ ip_key = attn.head_to_batch_dim(ip_key)
166
+ ip_value = attn.head_to_batch_dim(ip_value)
167
+
168
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
169
+ self.attn_map = ip_attention_probs
170
+ ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
171
+ ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
172
+
173
+ hidden_states = hidden_states + self.scale * ip_hidden_states
174
+
175
+ # linear proj
176
+ hidden_states = attn.to_out[0](hidden_states)
177
+ # dropout
178
+ hidden_states = attn.to_out[1](hidden_states)
179
+
180
+ if input_ndim == 4:
181
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
182
+
183
+ if attn.residual_connection:
184
+ hidden_states = hidden_states + residual
185
+
186
+ hidden_states = hidden_states / attn.rescale_output_factor
187
+
188
+ return hidden_states
189
+
190
+
191
+ class AttnProcessor2_0(torch.nn.Module):
192
+ r"""
193
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
194
+ """
195
+
196
+ def __init__(
197
+ self,
198
+ hidden_size=None,
199
+ cross_attention_dim=None,
200
+ ):
201
+ super().__init__()
202
+ if not hasattr(F, "scaled_dot_product_attention"):
203
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
204
+
205
+ def __call__(
206
+ self,
207
+ attn,
208
+ hidden_states,
209
+ encoder_hidden_states=None,
210
+ attention_mask=None,
211
+ temb=None,
212
+ *args,
213
+ **kwargs,
214
+ ):
215
+ residual = hidden_states
216
+
217
+ if attn.spatial_norm is not None:
218
+ hidden_states = attn.spatial_norm(hidden_states, temb)
219
+
220
+ input_ndim = hidden_states.ndim
221
+
222
+ if input_ndim == 4:
223
+ batch_size, channel, height, width = hidden_states.shape
224
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
225
+
226
+ batch_size, sequence_length, _ = (
227
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
228
+ )
229
+
230
+ if attention_mask is not None:
231
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
232
+ # scaled_dot_product_attention expects attention_mask shape to be
233
+ # (batch, heads, source_length, target_length)
234
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
235
+
236
+ if attn.group_norm is not None:
237
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
238
+
239
+ query = attn.to_q(hidden_states)
240
+
241
+ if encoder_hidden_states is None:
242
+ encoder_hidden_states = hidden_states
243
+ elif attn.norm_cross:
244
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
245
+
246
+ key = attn.to_k(encoder_hidden_states)
247
+ value = attn.to_v(encoder_hidden_states)
248
+
249
+ inner_dim = key.shape[-1]
250
+ head_dim = inner_dim // attn.heads
251
+
252
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
253
+
254
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
255
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
256
+
257
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
258
+ # TODO: add support for attn.scale when we move to Torch 2.1
259
+ hidden_states = F.scaled_dot_product_attention(
260
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
261
+ )
262
+
263
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
264
+ hidden_states = hidden_states.to(query.dtype)
265
+
266
+ # linear proj
267
+ hidden_states = attn.to_out[0](hidden_states)
268
+ # dropout
269
+ hidden_states = attn.to_out[1](hidden_states)
270
+
271
+ if input_ndim == 4:
272
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
273
+
274
+ if attn.residual_connection:
275
+ hidden_states = hidden_states + residual
276
+
277
+ hidden_states = hidden_states / attn.rescale_output_factor
278
+
279
+ return hidden_states
280
+
281
+
282
+ class IPAttnProcessor2_0(torch.nn.Module):
283
+ r"""
284
+ Attention processor for IP-Adapter for PyTorch 2.0.
285
+ Args:
286
+ hidden_size (`int`):
287
+ The hidden size of the attention layer.
288
+ cross_attention_dim (`int`):
289
+ The number of channels in the `encoder_hidden_states`.
290
+ scale (`float`, defaults to 1.0):
291
+ the weight scale of image prompt.
292
+ num_tokens (`int`, defaults to 4; for ip_adapter_plus it should be 16):
293
+ The context length of the image features.
294
+ """
295
+
296
+ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
297
+ super().__init__()
298
+
299
+ if not hasattr(F, "scaled_dot_product_attention"):
300
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
301
+
302
+ self.hidden_size = hidden_size
303
+ self.cross_attention_dim = cross_attention_dim
304
+ self.scale = scale
305
+ self.num_tokens = num_tokens
306
+
307
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
308
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
309
+
310
+ def __call__(
311
+ self,
312
+ attn,
313
+ hidden_states,
314
+ encoder_hidden_states=None,
315
+ attention_mask=None,
316
+ temb=None,
317
+ *args,
318
+ **kwargs,
319
+ ):
320
+ residual = hidden_states
321
+
322
+ if attn.spatial_norm is not None:
323
+ hidden_states = attn.spatial_norm(hidden_states, temb)
324
+
325
+ input_ndim = hidden_states.ndim
326
+
327
+ if input_ndim == 4:
328
+ batch_size, channel, height, width = hidden_states.shape
329
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
330
+
331
+ batch_size, sequence_length, _ = (
332
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
333
+ )
334
+
335
+ if attention_mask is not None:
336
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
337
+ # scaled_dot_product_attention expects attention_mask shape to be
338
+ # (batch, heads, source_length, target_length)
339
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
340
+
341
+ if attn.group_norm is not None:
342
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
343
+
344
+ query = attn.to_q(hidden_states)
345
+
346
+ if encoder_hidden_states is None:
347
+ encoder_hidden_states = hidden_states
348
+ else:
349
+ # get encoder_hidden_states, ip_hidden_states
350
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
351
+ encoder_hidden_states, ip_hidden_states = (
352
+ encoder_hidden_states[:, :end_pos, :],
353
+ encoder_hidden_states[:, end_pos:, :],
354
+ )
355
+ if attn.norm_cross:
356
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
357
+
358
+ key = attn.to_k(encoder_hidden_states)
359
+ value = attn.to_v(encoder_hidden_states)
360
+
361
+ inner_dim = key.shape[-1]
362
+ head_dim = inner_dim // attn.heads
363
+
364
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
365
+
366
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
367
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
368
+
369
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
370
+ # TODO: add support for attn.scale when we move to Torch 2.1
371
+ hidden_states = F.scaled_dot_product_attention(
372
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
373
+ )
374
+
375
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
376
+ hidden_states = hidden_states.to(query.dtype)
377
+
378
+ # for ip-adapter
379
+ ip_key = self.to_k_ip(ip_hidden_states)
380
+ ip_value = self.to_v_ip(ip_hidden_states)
381
+
382
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
383
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
384
+
385
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
386
+ # TODO: add support for attn.scale when we move to Torch 2.1
387
+ ip_hidden_states = F.scaled_dot_product_attention(
388
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
389
+ )
390
+ with torch.no_grad():
391
+ self.attn_map = (query @ ip_key.transpose(-2, -1)).softmax(dim=-1)  # softmax over attention scores, not over the transposed key
392
+ #print(self.attn_map.shape)
393
+
394
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
395
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
396
+
397
+ hidden_states = hidden_states + self.scale * ip_hidden_states
398
+
399
+ # linear proj
400
+ hidden_states = attn.to_out[0](hidden_states)
401
+ # dropout
402
+ hidden_states = attn.to_out[1](hidden_states)
403
+
404
+ if input_ndim == 4:
405
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
406
+
407
+ if attn.residual_connection:
408
+ hidden_states = hidden_states + residual
409
+
410
+ hidden_states = hidden_states / attn.rescale_output_factor
411
+
412
+ return hidden_states
413
+
414
+
415
+ ## for controlnet
416
+ class CNAttnProcessor:
417
+ r"""
418
+ Default processor for performing attention-related computations.
419
+ """
420
+
421
+ def __init__(self, num_tokens=4):
422
+ self.num_tokens = num_tokens
423
+
424
+ def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, *args, **kwargs,):
425
+ residual = hidden_states
426
+
427
+ if attn.spatial_norm is not None:
428
+ hidden_states = attn.spatial_norm(hidden_states, temb)
429
+
430
+ input_ndim = hidden_states.ndim
431
+
432
+ if input_ndim == 4:
433
+ batch_size, channel, height, width = hidden_states.shape
434
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
435
+
436
+ batch_size, sequence_length, _ = (
437
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
438
+ )
439
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
440
+
441
+ if attn.group_norm is not None:
442
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
443
+
444
+ query = attn.to_q(hidden_states)
445
+
446
+ if encoder_hidden_states is None:
447
+ encoder_hidden_states = hidden_states
448
+ else:
449
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
450
+ encoder_hidden_states = encoder_hidden_states[:, :end_pos] # only use text
451
+ if attn.norm_cross:
452
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
453
+
454
+ key = attn.to_k(encoder_hidden_states)
455
+ value = attn.to_v(encoder_hidden_states)
456
+
457
+ query = attn.head_to_batch_dim(query)
458
+ key = attn.head_to_batch_dim(key)
459
+ value = attn.head_to_batch_dim(value)
460
+
461
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
462
+ hidden_states = torch.bmm(attention_probs, value)
463
+ hidden_states = attn.batch_to_head_dim(hidden_states)
464
+
465
+ # linear proj
466
+ hidden_states = attn.to_out[0](hidden_states)
467
+ # dropout
468
+ hidden_states = attn.to_out[1](hidden_states)
469
+
470
+ if input_ndim == 4:
471
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
472
+
473
+ if attn.residual_connection:
474
+ hidden_states = hidden_states + residual
475
+
476
+ hidden_states = hidden_states / attn.rescale_output_factor
477
+
478
+ return hidden_states
479
+
480
+
481
+ class CNAttnProcessor2_0:
482
+ r"""
483
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
484
+ """
485
+
486
+ def __init__(self, num_tokens=4):
487
+ if not hasattr(F, "scaled_dot_product_attention"):
488
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
489
+ self.num_tokens = num_tokens
490
+
491
+ def __call__(
492
+ self,
493
+ attn,
494
+ hidden_states,
495
+ encoder_hidden_states=None,
496
+ attention_mask=None,
497
+ temb=None,
498
+ *args,
499
+ **kwargs,
500
+ ):
501
+ residual = hidden_states
502
+
503
+ if attn.spatial_norm is not None:
504
+ hidden_states = attn.spatial_norm(hidden_states, temb)
505
+
506
+ input_ndim = hidden_states.ndim
507
+
508
+ if input_ndim == 4:
509
+ batch_size, channel, height, width = hidden_states.shape
510
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
511
+
512
+ batch_size, sequence_length, _ = (
513
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
514
+ )
515
+
516
+ if attention_mask is not None:
517
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
518
+ # scaled_dot_product_attention expects attention_mask shape to be
519
+ # (batch, heads, source_length, target_length)
520
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
521
+
522
+ if attn.group_norm is not None:
523
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
524
+
525
+ query = attn.to_q(hidden_states)
526
+
527
+ if encoder_hidden_states is None:
528
+ encoder_hidden_states = hidden_states
529
+ else:
530
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
531
+ encoder_hidden_states = encoder_hidden_states[:, :end_pos] # only use text
532
+ if attn.norm_cross:
533
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
534
+
535
+ key = attn.to_k(encoder_hidden_states)
536
+ value = attn.to_v(encoder_hidden_states)
537
+
538
+ inner_dim = key.shape[-1]
539
+ head_dim = inner_dim // attn.heads
540
+
541
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
542
+
543
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
544
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
545
+
546
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
547
+ # TODO: add support for attn.scale when we move to Torch 2.1
548
+ hidden_states = F.scaled_dot_product_attention(
549
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
550
+ )
551
+
552
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
553
+ hidden_states = hidden_states.to(query.dtype)
554
+
555
+ # linear proj
556
+ hidden_states = attn.to_out[0](hidden_states)
557
+ # dropout
558
+ hidden_states = attn.to_out[1](hidden_states)
559
+
560
+ if input_ndim == 4:
561
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
562
+
563
+ if attn.residual_connection:
564
+ hidden_states = hidden_states + residual
565
+
566
+ hidden_states = hidden_states / attn.rescale_output_factor
567
+
568
+ return hidden_states
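
A small sketch of the token split that IPAttnProcessor and IPAttnProcessor2_0 above both rely on (the shapes below are illustrative assumptions): the last num_tokens entries of encoder_hidden_states are treated as image-prompt tokens and routed through to_k_ip/to_v_ip, the remaining text tokens go through the usual to_k/to_v, and the two attention outputs are blended as text branch plus scale times image branch.

    import torch

    num_tokens = 4                                                 # 16 for the "plus" variants
    encoder_hidden_states = torch.randn(2, 77 + num_tokens, 768)   # text tokens with image tokens appended

    end_pos = encoder_hidden_states.shape[1] - num_tokens
    text_states = encoder_hidden_states[:, :end_pos, :]    # fed to attn.to_k / attn.to_v
    image_states = encoder_hidden_states[:, end_pos:, :]   # fed to self.to_k_ip / self.to_v_ip

    # the processors then compute two attention results and combine them:
    #   hidden_states = attn_over(text_states) + self.scale * attn_over(image_states)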
ip_adapter/attention_processor_faceid.py ADDED
@@ -0,0 +1,433 @@
1
+ # modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from diffusers.models.lora import LoRALinearLayer
7
+
8
+
9
+ class LoRAAttnProcessor(nn.Module):
10
+ r"""
11
+ Default processor for performing attention-related computations.
12
+ """
13
+
14
+ def __init__(
15
+ self,
16
+ hidden_size=None,
17
+ cross_attention_dim=None,
18
+ rank=4,
19
+ network_alpha=None,
20
+ lora_scale=1.0,
21
+ ):
22
+ super().__init__()
23
+
24
+ self.rank = rank
25
+ self.lora_scale = lora_scale
26
+
27
+ self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
28
+ self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
29
+ self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
30
+ self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
31
+
32
+ def __call__(
33
+ self,
34
+ attn,
35
+ hidden_states,
36
+ encoder_hidden_states=None,
37
+ attention_mask=None,
38
+ temb=None,
39
+ *args,
40
+ **kwargs,
41
+ ):
42
+ residual = hidden_states
43
+
44
+ if attn.spatial_norm is not None:
45
+ hidden_states = attn.spatial_norm(hidden_states, temb)
46
+
47
+ input_ndim = hidden_states.ndim
48
+
49
+ if input_ndim == 4:
50
+ batch_size, channel, height, width = hidden_states.shape
51
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
52
+
53
+ batch_size, sequence_length, _ = (
54
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
55
+ )
56
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
57
+
58
+ if attn.group_norm is not None:
59
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
60
+
61
+ query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
62
+
63
+ if encoder_hidden_states is None:
64
+ encoder_hidden_states = hidden_states
65
+ elif attn.norm_cross:
66
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
67
+
68
+ key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
69
+ value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
70
+
71
+ query = attn.head_to_batch_dim(query)
72
+ key = attn.head_to_batch_dim(key)
73
+ value = attn.head_to_batch_dim(value)
74
+
75
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
76
+ hidden_states = torch.bmm(attention_probs, value)
77
+ hidden_states = attn.batch_to_head_dim(hidden_states)
78
+
79
+ # linear proj
80
+ hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
81
+ # dropout
82
+ hidden_states = attn.to_out[1](hidden_states)
83
+
84
+ if input_ndim == 4:
85
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
86
+
87
+ if attn.residual_connection:
88
+ hidden_states = hidden_states + residual
89
+
90
+ hidden_states = hidden_states / attn.rescale_output_factor
91
+
92
+ return hidden_states
93
+
94
+
95
+ class LoRAIPAttnProcessor(nn.Module):
96
+ r"""
97
+ Attention processor for IP-Adapter.
98
+ Args:
99
+ hidden_size (`int`):
100
+ The hidden size of the attention layer.
101
+ cross_attention_dim (`int`):
102
+ The number of channels in the `encoder_hidden_states`.
103
+ scale (`float`, defaults to 1.0):
104
+ the weight scale of image prompt.
105
+ num_tokens (`int`, defaults to 4; for ip_adapter_plus it should be 16):
106
+ The context length of the image features.
107
+ """
108
+
109
+ def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None, lora_scale=1.0, scale=1.0, num_tokens=4):
110
+ super().__init__()
111
+
112
+ self.rank = rank
113
+ self.lora_scale = lora_scale
114
+
115
+ self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
116
+ self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
117
+ self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
118
+ self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
119
+
120
+ self.hidden_size = hidden_size
121
+ self.cross_attention_dim = cross_attention_dim
122
+ self.scale = scale
123
+ self.num_tokens = num_tokens
124
+
125
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
126
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
127
+
128
+ def __call__(
129
+ self,
130
+ attn,
131
+ hidden_states,
132
+ encoder_hidden_states=None,
133
+ attention_mask=None,
134
+ temb=None,
135
+ *args,
136
+ **kwargs,
137
+ ):
138
+ residual = hidden_states
139
+
140
+ if attn.spatial_norm is not None:
141
+ hidden_states = attn.spatial_norm(hidden_states, temb)
142
+
143
+ input_ndim = hidden_states.ndim
144
+
145
+ if input_ndim == 4:
146
+ batch_size, channel, height, width = hidden_states.shape
147
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
148
+
149
+ batch_size, sequence_length, _ = (
150
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
151
+ )
152
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
153
+
154
+ if attn.group_norm is not None:
155
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
156
+
157
+ query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
158
+
159
+ if encoder_hidden_states is None:
160
+ encoder_hidden_states = hidden_states
161
+ else:
162
+ # get encoder_hidden_states, ip_hidden_states
163
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
164
+ encoder_hidden_states, ip_hidden_states = (
165
+ encoder_hidden_states[:, :end_pos, :],
166
+ encoder_hidden_states[:, end_pos:, :],
167
+ )
168
+ if attn.norm_cross:
169
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
170
+
171
+ key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
172
+ value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
173
+
174
+ query = attn.head_to_batch_dim(query)
175
+ key = attn.head_to_batch_dim(key)
176
+ value = attn.head_to_batch_dim(value)
177
+
178
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
179
+ hidden_states = torch.bmm(attention_probs, value)
180
+ hidden_states = attn.batch_to_head_dim(hidden_states)
181
+
182
+ # for ip-adapter
183
+ ip_key = self.to_k_ip(ip_hidden_states)
184
+ ip_value = self.to_v_ip(ip_hidden_states)
185
+
186
+ ip_key = attn.head_to_batch_dim(ip_key)
187
+ ip_value = attn.head_to_batch_dim(ip_value)
188
+
189
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
190
+ self.attn_map = ip_attention_probs
191
+ ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
192
+ ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
193
+
194
+ hidden_states = hidden_states + self.scale * ip_hidden_states
195
+
196
+ # linear proj
197
+ hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
198
+ # dropout
199
+ hidden_states = attn.to_out[1](hidden_states)
200
+
201
+ if input_ndim == 4:
202
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
203
+
204
+ if attn.residual_connection:
205
+ hidden_states = hidden_states + residual
206
+
207
+ hidden_states = hidden_states / attn.rescale_output_factor
208
+
209
+ return hidden_states
210
+
211
+
212
+ class LoRAAttnProcessor2_0(nn.Module):
213
+
214
+ r"""
215
+ Default processor for performing attention-related computations.
216
+ """
217
+
218
+ def __init__(
219
+ self,
220
+ hidden_size=None,
221
+ cross_attention_dim=None,
222
+ rank=4,
223
+ network_alpha=None,
224
+ lora_scale=1.0,
225
+ ):
226
+ super().__init__()
227
+
228
+ self.rank = rank
229
+ self.lora_scale = lora_scale
230
+
231
+ self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
232
+ self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
233
+ self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
234
+ self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
235
+
236
+ def __call__(
237
+ self,
238
+ attn,
239
+ hidden_states,
240
+ encoder_hidden_states=None,
241
+ attention_mask=None,
242
+ temb=None,
243
+ *args,
244
+ **kwargs,
245
+ ):
246
+ residual = hidden_states
247
+
248
+ if attn.spatial_norm is not None:
249
+ hidden_states = attn.spatial_norm(hidden_states, temb)
250
+
251
+ input_ndim = hidden_states.ndim
252
+
253
+ if input_ndim == 4:
254
+ batch_size, channel, height, width = hidden_states.shape
255
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
256
+
257
+ batch_size, sequence_length, _ = (
258
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
259
+ )
260
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
261
+
262
+ if attn.group_norm is not None:
263
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
264
+
265
+ query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
266
+
267
+ if encoder_hidden_states is None:
268
+ encoder_hidden_states = hidden_states
269
+ elif attn.norm_cross:
270
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
271
+
272
+ key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
273
+ value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
274
+
275
+ inner_dim = key.shape[-1]
276
+ head_dim = inner_dim // attn.heads
277
+
278
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
279
+
280
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
281
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
282
+
283
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
284
+ # TODO: add support for attn.scale when we move to Torch 2.1
285
+ hidden_states = F.scaled_dot_product_attention(
286
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
287
+ )
288
+
289
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
290
+ hidden_states = hidden_states.to(query.dtype)
291
+
292
+ # linear proj
293
+ hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
294
+ # dropout
295
+ hidden_states = attn.to_out[1](hidden_states)
296
+
297
+ if input_ndim == 4:
298
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
299
+
300
+ if attn.residual_connection:
301
+ hidden_states = hidden_states + residual
302
+
303
+ hidden_states = hidden_states / attn.rescale_output_factor
304
+
305
+ return hidden_states
306
+
307
+
308
+ class LoRAIPAttnProcessor2_0(nn.Module):
309
+ r"""
310
+ Processor for implementing the LoRA attention mechanism together with IP-Adapter image tokens (PyTorch 2.0).
311
+
312
+ Args:
313
+ hidden_size (`int`, *optional*):
314
+ The hidden size of the attention layer.
315
+ cross_attention_dim (`int`, *optional*):
316
+ The number of channels in the `encoder_hidden_states`.
317
+ rank (`int`, defaults to 4):
318
+ The dimension of the LoRA update matrices.
319
+ network_alpha (`int`, *optional*):
320
+ Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
321
+ """
322
+
323
+ def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None, lora_scale=1.0, scale=1.0, num_tokens=4):
324
+ super().__init__()
325
+
326
+ self.rank = rank
327
+ self.lora_scale = lora_scale
328
+ self.num_tokens = num_tokens
329
+
330
+ self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
331
+ self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
332
+ self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
333
+ self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
334
+
335
+
336
+ self.hidden_size = hidden_size
337
+ self.cross_attention_dim = cross_attention_dim
338
+ self.scale = scale
339
+
340
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
341
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
342
+
343
+ def __call__(
344
+ self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0, temb=None, *args, **kwargs,
345
+ ):
346
+ residual = hidden_states
347
+
348
+ if attn.spatial_norm is not None:
349
+ hidden_states = attn.spatial_norm(hidden_states, temb)
350
+
351
+ input_ndim = hidden_states.ndim
352
+
353
+ if input_ndim == 4:
354
+ batch_size, channel, height, width = hidden_states.shape
355
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
356
+
357
+ batch_size, sequence_length, _ = (
358
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
359
+ )
360
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
361
+
362
+ if attn.group_norm is not None:
363
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
364
+
365
+ query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
366
+ #query = attn.head_to_batch_dim(query)
367
+
368
+ if encoder_hidden_states is None:
369
+ encoder_hidden_states = hidden_states
370
+ else:
371
+ # get encoder_hidden_states, ip_hidden_states
372
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
373
+ encoder_hidden_states, ip_hidden_states = (
374
+ encoder_hidden_states[:, :end_pos, :],
375
+ encoder_hidden_states[:, end_pos:, :],
376
+ )
377
+ if attn.norm_cross:
378
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
379
+
380
+ # for text
381
+ key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
382
+ value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
383
+
384
+ inner_dim = key.shape[-1]
385
+ head_dim = inner_dim // attn.heads
386
+
387
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
388
+
389
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
390
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
391
+
392
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
393
+ # TODO: add support for attn.scale when we move to Torch 2.1
394
+ hidden_states = F.scaled_dot_product_attention(
395
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
396
+ )
397
+
398
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
399
+ hidden_states = hidden_states.to(query.dtype)
400
+
401
+ # for ip
402
+ ip_key = self.to_k_ip(ip_hidden_states)
403
+ ip_value = self.to_v_ip(ip_hidden_states)
404
+
405
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
406
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
407
+
408
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
409
+ # TODO: add support for attn.scale when we move to Torch 2.1
410
+ ip_hidden_states = F.scaled_dot_product_attention(
411
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
412
+ )
413
+
414
+
415
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
416
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
417
+
418
+ hidden_states = hidden_states + self.scale * ip_hidden_states
419
+
420
+ # linear proj
421
+ hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
422
+ # dropout
423
+ hidden_states = attn.to_out[1](hidden_states)
424
+
425
+ if input_ndim == 4:
426
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
427
+
428
+ if attn.residual_connection:
429
+ hidden_states = hidden_states + residual
430
+
431
+ hidden_states = hidden_states / attn.rescale_output_factor
432
+
433
+ return hidden_states
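
Every LoRA projection in the processors above follows the same residual pattern: the base layer output plus lora_scale times a low-rank update. A simplified stand-in for that update (this is not diffusers' LoRALinearLayer; sizes are assumptions for illustration):

    import torch
    import torch.nn as nn

    class TinyLoRA(nn.Module):
        # low-rank residual branch: project down to `rank`, then back up
        def __init__(self, in_features, out_features, rank=4):
            super().__init__()
            self.down = nn.Linear(in_features, rank, bias=False)
            self.up = nn.Linear(rank, out_features, bias=False)
            nn.init.zeros_(self.up.weight)  # start as a no-op, the usual LoRA initialization

        def forward(self, x):
            return self.up(self.down(x))

    hidden_size, lora_scale = 768, 1.0
    to_q = nn.Linear(hidden_size, hidden_size)
    to_q_lora = TinyLoRA(hidden_size, hidden_size, rank=4)

    hidden_states = torch.randn(2, 4096, hidden_size)
    query = to_q(hidden_states) + lora_scale * to_q_lora(hidden_states)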
ip_adapter/custom_pipelines.py ADDED
@@ -0,0 +1,394 @@
1
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
2
+
3
+ import torch
4
+ from diffusers import StableDiffusionXLPipeline
5
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
6
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import rescale_noise_cfg
7
+
8
+ from .utils import is_torch2_available
9
+
10
+ if is_torch2_available():
11
+ from .attention_processor import IPAttnProcessor2_0 as IPAttnProcessor
12
+ else:
13
+ from .attention_processor import IPAttnProcessor
14
+
15
+
16
+ class StableDiffusionXLCustomPipeline(StableDiffusionXLPipeline):
17
+ def set_scale(self, scale):
18
+ for attn_processor in self.unet.attn_processors.values():
19
+ if isinstance(attn_processor, IPAttnProcessor):
20
+ attn_processor.scale = scale
21
+
22
+ @torch.no_grad()
23
+ def __call__( # noqa: C901
24
+ self,
25
+ prompt: Optional[Union[str, List[str]]] = None,
26
+ prompt_2: Optional[Union[str, List[str]]] = None,
27
+ height: Optional[int] = None,
28
+ width: Optional[int] = None,
29
+ num_inference_steps: int = 50,
30
+ denoising_end: Optional[float] = None,
31
+ guidance_scale: float = 5.0,
32
+ negative_prompt: Optional[Union[str, List[str]]] = None,
33
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
34
+ num_images_per_prompt: Optional[int] = 1,
35
+ eta: float = 0.0,
36
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
37
+ latents: Optional[torch.FloatTensor] = None,
38
+ prompt_embeds: Optional[torch.FloatTensor] = None,
39
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
40
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
41
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
42
+ output_type: Optional[str] = "pil",
43
+ return_dict: bool = True,
44
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
45
+ callback_steps: int = 1,
46
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
47
+ guidance_rescale: float = 0.0,
48
+ original_size: Optional[Tuple[int, int]] = None,
49
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
50
+ target_size: Optional[Tuple[int, int]] = None,
51
+ negative_original_size: Optional[Tuple[int, int]] = None,
52
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
53
+ negative_target_size: Optional[Tuple[int, int]] = None,
54
+ control_guidance_start: float = 0.0,
55
+ control_guidance_end: float = 1.0,
56
+ ):
57
+ r"""
58
+ Function invoked when calling the pipeline for generation.
59
+
60
+ Args:
61
+ prompt (`str` or `List[str]`, *optional*):
62
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
63
+ instead.
64
+ prompt_2 (`str` or `List[str]`, *optional*):
65
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
66
+ used in both text-encoders
67
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
68
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
69
+ Anything below 512 pixels won't work well for
70
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
71
+ and checkpoints that are not specifically fine-tuned on low resolutions.
72
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
73
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
74
+ Anything below 512 pixels won't work well for
75
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
76
+ and checkpoints that are not specifically fine-tuned on low resolutions.
77
+ num_inference_steps (`int`, *optional*, defaults to 50):
78
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
79
+ expense of slower inference.
80
+ denoising_end (`float`, *optional*):
81
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
82
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
83
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
84
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
85
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
86
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
87
+ guidance_scale (`float`, *optional*, defaults to 5.0):
88
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
89
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
90
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
91
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
92
+ usually at the expense of lower image quality.
93
+ negative_prompt (`str` or `List[str]`, *optional*):
94
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
95
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
96
+ less than `1`).
97
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
98
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
99
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
100
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
101
+ The number of images to generate per prompt.
102
+ eta (`float`, *optional*, defaults to 0.0):
103
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
104
+ [`schedulers.DDIMScheduler`], will be ignored for others.
105
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
106
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
107
+ to make generation deterministic.
108
+ latents (`torch.FloatTensor`, *optional*):
109
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
110
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
111
+ tensor will ge generated by sampling using the supplied random `generator`.
112
+ prompt_embeds (`torch.FloatTensor`, *optional*):
113
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
114
+ provided, text embeddings will be generated from `prompt` input argument.
115
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
116
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
117
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
118
+ argument.
119
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
120
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
121
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
122
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
123
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
124
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
125
+ input argument.
126
+ output_type (`str`, *optional*, defaults to `"pil"`):
127
+ The output format of the generated image. Choose between
128
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
129
+ return_dict (`bool`, *optional*, defaults to `True`):
130
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
131
+ of a plain tuple.
132
+ callback (`Callable`, *optional*):
133
+ A function that will be called every `callback_steps` steps during inference. The function will be
134
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
135
+ callback_steps (`int`, *optional*, defaults to 1):
136
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
137
+ called at every step.
138
+ cross_attention_kwargs (`dict`, *optional*):
139
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
140
+ `self.processor` in
141
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
142
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
143
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
144
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
145
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
146
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
147
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
148
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
149
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
150
+ explained in section 2.2 of
151
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
152
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
153
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
154
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
155
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
156
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
157
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
158
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
159
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
160
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
161
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
162
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
163
+ micro-conditioning as explained in section 2.2 of
164
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
165
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
166
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
167
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
168
+ micro-conditioning as explained in section 2.2 of
169
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
170
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
171
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
172
+ To negatively condition the generation process based on a target image resolution. It should be the same
173
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
174
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
175
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
176
+ control_guidance_start (`float`, *optional*, defaults to 0.0):
177
+ The percentage of total steps at which the ControlNet starts applying.
178
+ control_guidance_end (`float`, *optional*, defaults to 1.0):
179
+ The percentage of total steps at which the ControlNet stops applying.
180
+
181
+ Examples:
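+ A minimal, illustrative sketch of the SDXL micro-conditioning arguments documented above,
+ shown with the stock `StableDiffusionXLPipeline` (model id and prompt are placeholders):
+
+ >>> import torch
+ >>> from diffusers import StableDiffusionXLPipeline
+ >>> pipe = StableDiffusionXLPipeline.from_pretrained(
+ ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+ ... ).to("cuda")
+ >>> image = pipe(
+ ...     prompt="a photo of a cat",
+ ...     original_size=(1024, 1024),
+ ...     target_size=(1024, 1024),
+ ...     crops_coords_top_left=(0, 0),
+ ...     num_inference_steps=30,
+ ... ).images[0]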
182
+
183
+ Returns:
184
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
185
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
186
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
187
+ """
188
+ # 0. Default height and width to unet
189
+ height = height or self.default_sample_size * self.vae_scale_factor
190
+ width = width or self.default_sample_size * self.vae_scale_factor
191
+
192
+ original_size = original_size or (height, width)
193
+ target_size = target_size or (height, width)
194
+
195
+ # 1. Check inputs. Raise error if not correct
196
+ self.check_inputs(
197
+ prompt,
198
+ prompt_2,
199
+ height,
200
+ width,
201
+ callback_steps,
202
+ negative_prompt,
203
+ negative_prompt_2,
204
+ prompt_embeds,
205
+ negative_prompt_embeds,
206
+ pooled_prompt_embeds,
207
+ negative_pooled_prompt_embeds,
208
+ )
209
+
210
+ # 2. Define call parameters
211
+ if prompt is not None and isinstance(prompt, str):
212
+ batch_size = 1
213
+ elif prompt is not None and isinstance(prompt, list):
214
+ batch_size = len(prompt)
215
+ else:
216
+ batch_size = prompt_embeds.shape[0]
217
+
218
+ device = self._execution_device
219
+
220
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
221
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
222
+ # corresponds to doing no classifier free guidance.
223
+ do_classifier_free_guidance = guidance_scale > 1.0
224
+
225
+ # 3. Encode input prompt
226
+ text_encoder_lora_scale = (
227
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
228
+ )
229
+ (
230
+ prompt_embeds,
231
+ negative_prompt_embeds,
232
+ pooled_prompt_embeds,
233
+ negative_pooled_prompt_embeds,
234
+ ) = self.encode_prompt(
235
+ prompt=prompt,
236
+ prompt_2=prompt_2,
237
+ device=device,
238
+ num_images_per_prompt=num_images_per_prompt,
239
+ do_classifier_free_guidance=do_classifier_free_guidance,
240
+ negative_prompt=negative_prompt,
241
+ negative_prompt_2=negative_prompt_2,
242
+ prompt_embeds=prompt_embeds,
243
+ negative_prompt_embeds=negative_prompt_embeds,
244
+ pooled_prompt_embeds=pooled_prompt_embeds,
245
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
246
+ lora_scale=text_encoder_lora_scale,
247
+ )
248
+
249
+ # 4. Prepare timesteps
250
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
251
+
252
+ timesteps = self.scheduler.timesteps
253
+
254
+ # 5. Prepare latent variables
255
+ num_channels_latents = self.unet.config.in_channels
256
+ latents = self.prepare_latents(
257
+ batch_size * num_images_per_prompt,
258
+ num_channels_latents,
259
+ height,
260
+ width,
261
+ prompt_embeds.dtype,
262
+ device,
263
+ generator,
264
+ latents,
265
+ )
266
+
267
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
268
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
269
+
270
+ # 7. Prepare added time ids & embeddings
271
+ add_text_embeds = pooled_prompt_embeds
272
+ if self.text_encoder_2 is None:
273
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
274
+ else:
275
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
276
+
277
+ add_time_ids = self._get_add_time_ids(
278
+ original_size,
279
+ crops_coords_top_left,
280
+ target_size,
281
+ dtype=prompt_embeds.dtype,
282
+ text_encoder_projection_dim=text_encoder_projection_dim,
283
+ )
284
+ if negative_original_size is not None and negative_target_size is not None:
285
+ negative_add_time_ids = self._get_add_time_ids(
286
+ negative_original_size,
287
+ negative_crops_coords_top_left,
288
+ negative_target_size,
289
+ dtype=prompt_embeds.dtype,
290
+ text_encoder_projection_dim=text_encoder_projection_dim,
291
+ )
292
+ else:
293
+ negative_add_time_ids = add_time_ids
294
+
295
+ if do_classifier_free_guidance:
296
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
297
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
298
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
299
+
300
+ prompt_embeds = prompt_embeds.to(device)
301
+ add_text_embeds = add_text_embeds.to(device)
302
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
303
+
304
+ # 8. Denoising loop
305
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
306
+
307
+ # 8.1 Apply denoising_end
308
+ if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
309
+ discrete_timestep_cutoff = int(
310
+ round(
311
+ self.scheduler.config.num_train_timesteps
312
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
313
+ )
314
+ )
315
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
316
+ timesteps = timesteps[:num_inference_steps]
317
+
318
+ # get init conditioning scale
319
+ for attn_processor in self.unet.attn_processors.values():
320
+ if isinstance(attn_processor, IPAttnProcessor):
321
+ conditioning_scale = attn_processor.scale
322
+ break
323
+
324
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
325
+ for i, t in enumerate(timesteps):
326
+ if (i / len(timesteps) < control_guidance_start) or ((i + 1) / len(timesteps) > control_guidance_end):
327
+ self.set_scale(0.0)
328
+ else:
329
+ self.set_scale(conditioning_scale)
330
+
331
+ # expand the latents if we are doing classifier free guidance
332
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
333
+
334
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
335
+
336
+ # predict the noise residual
337
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
338
+ noise_pred = self.unet(
339
+ latent_model_input,
340
+ t,
341
+ encoder_hidden_states=prompt_embeds,
342
+ cross_attention_kwargs=cross_attention_kwargs,
343
+ added_cond_kwargs=added_cond_kwargs,
344
+ return_dict=False,
345
+ )[0]
346
+
347
+ # perform guidance
348
+ if do_classifier_free_guidance:
349
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
350
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
351
+
352
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
353
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
354
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
355
+
356
+ # compute the previous noisy sample x_t -> x_t-1
357
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
358
+
359
+ # call the callback, if provided
360
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
361
+ progress_bar.update()
362
+ if callback is not None and i % callback_steps == 0:
363
+ callback(i, t, latents)
364
+
365
+ if not output_type == "latent":
366
+ # make sure the VAE is in float32 mode, as it overflows in float16
367
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
368
+
369
+ if needs_upcasting:
370
+ self.upcast_vae()
371
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
372
+
373
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
374
+
375
+ # cast back to fp16 if needed
376
+ if needs_upcasting:
377
+ self.vae.to(dtype=torch.float16)
378
+ else:
379
+ image = latents
380
+
381
+ if output_type != "latent":
382
+ # apply watermark if available
383
+ if self.watermark is not None:
384
+ image = self.watermark.apply_watermark(image)
385
+
386
+ image = self.image_processor.postprocess(image, output_type=output_type)
387
+
388
+ # Offload all models
389
+ self.maybe_free_model_hooks()
390
+
391
+ if not return_dict:
392
+ return (image,)
393
+
394
+ return StableDiffusionXLPipelineOutput(images=image)
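The denoising loop above gates the IP-Adapter attention scale by step index: outside the [`control_guidance_start`, `control_guidance_end`] window the `IPAttnProcessor` scale is set to 0.0, and inside it the original conditioning scale is restored. A minimal standalone sketch of that gating rule (illustrative only, not part of this commit):

# Illustrative helper mirroring the per-step gating in the denoising loop above.
def ip_scale_for_step(i: int, num_steps: int, base_scale: float,
                      start: float = 0.0, end: float = 1.0) -> float:
    # Zero the image-prompt influence outside the guidance window.
    if (i / num_steps < start) or ((i + 1) / num_steps > end):
        return 0.0
    return base_scale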
ip_adapter/ip_adapter.py ADDED
@@ -0,0 +1,424 @@
1
+ import os
2
+ from typing import List
3
+
4
+ import torch
5
+ from diffusers import StableDiffusionPipeline
6
+ from diffusers.pipelines.controlnet import MultiControlNetModel
7
+ from PIL import Image
8
+ from safetensors import safe_open
9
+ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
10
+
11
+ from .utils import is_torch2_available, get_generator
12
+
13
+ if is_torch2_available():
14
+ from .attention_processor import (
15
+ AttnProcessor2_0 as AttnProcessor,
16
+ )
17
+ from .attention_processor import (
18
+ CNAttnProcessor2_0 as CNAttnProcessor,
19
+ )
20
+ from .attention_processor import (
21
+ IPAttnProcessor2_0 as IPAttnProcessor,
22
+ )
23
+ else:
24
+ from .attention_processor import AttnProcessor, CNAttnProcessor, IPAttnProcessor
25
+ from .resampler import Resampler
26
+
27
+
28
+ class ImageProjModel(torch.nn.Module):
29
+ """Projection Model"""
30
+
31
+ def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
32
+ super().__init__()
33
+
34
+ self.generator = None
35
+ self.cross_attention_dim = cross_attention_dim
36
+ self.clip_extra_context_tokens = clip_extra_context_tokens
37
+ self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
38
+ self.norm = torch.nn.LayerNorm(cross_attention_dim)
39
+
40
+ def forward(self, image_embeds):
41
+ embeds = image_embeds
42
+ clip_extra_context_tokens = self.proj(embeds).reshape(
43
+ -1, self.clip_extra_context_tokens, self.cross_attention_dim
44
+ )
45
+ clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
46
+ return clip_extra_context_tokens
47
+
48
+
49
+ class MLPProjModel(torch.nn.Module):
50
+ """SD model with image prompt"""
51
+ def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024):
52
+ super().__init__()
53
+
54
+ self.proj = torch.nn.Sequential(
55
+ torch.nn.Linear(clip_embeddings_dim, clip_embeddings_dim),
56
+ torch.nn.GELU(),
57
+ torch.nn.Linear(clip_embeddings_dim, cross_attention_dim),
58
+ torch.nn.LayerNorm(cross_attention_dim)
59
+ )
60
+
61
+ def forward(self, image_embeds):
62
+ clip_extra_context_tokens = self.proj(image_embeds)
63
+ return clip_extra_context_tokens
64
+
65
+
66
+ class IPAdapter:
67
+ def __init__(self, sd_pipe, image_encoder_path, ip_ckpt, device, num_tokens=4):
68
+ self.device = device
69
+ self.image_encoder_path = image_encoder_path
70
+ self.ip_ckpt = ip_ckpt
71
+ self.num_tokens = num_tokens
72
+
73
+ self.pipe = sd_pipe.to(self.device)
74
+ self.set_ip_adapter()
75
+
76
+ # load image encoder
77
+ self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(
78
+ self.device, dtype=torch.float16
79
+ )
80
+ self.clip_image_processor = CLIPImageProcessor()
81
+ # image proj model
82
+ self.image_proj_model = self.init_proj()
83
+
84
+ self.load_ip_adapter()
85
+
86
+ def init_proj(self):
87
+ image_proj_model = ImageProjModel(
88
+ cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
89
+ clip_embeddings_dim=self.image_encoder.config.projection_dim,
90
+ clip_extra_context_tokens=self.num_tokens,
91
+ ).to(self.device, dtype=torch.float16)
92
+ return image_proj_model
93
+
94
+ def set_ip_adapter(self):
95
+ unet = self.pipe.unet
96
+ attn_procs = {}
97
+ for name in unet.attn_processors.keys():
98
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
99
+ if name.startswith("mid_block"):
100
+ hidden_size = unet.config.block_out_channels[-1]
101
+ elif name.startswith("up_blocks"):
102
+ block_id = int(name[len("up_blocks.")])
103
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
104
+ elif name.startswith("down_blocks"):
105
+ block_id = int(name[len("down_blocks.")])
106
+ hidden_size = unet.config.block_out_channels[block_id]
107
+ if cross_attention_dim is None:
108
+ attn_procs[name] = AttnProcessor()
109
+ else:
110
+ attn_procs[name] = IPAttnProcessor(
111
+ hidden_size=hidden_size,
112
+ cross_attention_dim=cross_attention_dim,
113
+ scale=1.0,
114
+ num_tokens=self.num_tokens,
115
+ ).to(self.device, dtype=torch.float16)
116
+ unet.set_attn_processor(attn_procs)
117
+ if hasattr(self.pipe, "controlnet"):
118
+ if isinstance(self.pipe.controlnet, MultiControlNetModel):
119
+ for controlnet in self.pipe.controlnet.nets:
120
+ controlnet.set_attn_processor(CNAttnProcessor(num_tokens=self.num_tokens))
121
+ else:
122
+ self.pipe.controlnet.set_attn_processor(CNAttnProcessor(num_tokens=self.num_tokens))
123
+
124
+ def load_ip_adapter(self):
125
+ if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors":
126
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
127
+ with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f:
128
+ for key in f.keys():
129
+ if key.startswith("image_proj."):
130
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
131
+ elif key.startswith("ip_adapter."):
132
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
133
+ else:
134
+ state_dict = torch.load(self.ip_ckpt, map_location="cpu")
135
+ self.image_proj_model.load_state_dict(state_dict["image_proj"])
136
+ ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values())
137
+ ip_layers.load_state_dict(state_dict["ip_adapter"])
138
+
139
+ @torch.inference_mode()
140
+ def get_image_embeds(self, pil_image=None, clip_image_embeds=None):
141
+ if pil_image is not None:
142
+ if isinstance(pil_image, Image.Image):
143
+ pil_image = [pil_image]
144
+ clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values
145
+ clip_image_embeds = self.image_encoder(clip_image.to(self.device, dtype=torch.float16)).image_embeds
146
+ else:
147
+ clip_image_embeds = clip_image_embeds.to(self.device, dtype=torch.float16)
148
+ image_prompt_embeds = self.image_proj_model(clip_image_embeds)
149
+ uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(clip_image_embeds))
150
+ return image_prompt_embeds, uncond_image_prompt_embeds
151
+
152
+ def set_scale(self, scale):
153
+ for attn_processor in self.pipe.unet.attn_processors.values():
154
+ if isinstance(attn_processor, IPAttnProcessor):
155
+ attn_processor.scale = scale
156
+
157
+ def generate(
158
+ self,
159
+ pil_image=None,
160
+ clip_image_embeds=None,
161
+ prompt=None,
162
+ negative_prompt=None,
163
+ scale=1.0,
164
+ num_samples=4,
165
+ seed=None,
166
+ guidance_scale=7.5,
167
+ num_inference_steps=30,
168
+ **kwargs,
169
+ ):
170
+ self.set_scale(scale)
171
+
172
+ if pil_image is not None:
173
+ num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image)
174
+ else:
175
+ num_prompts = clip_image_embeds.size(0)
176
+
177
+ if prompt is None:
178
+ prompt = "best quality, high quality"
179
+ if negative_prompt is None:
180
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
181
+
182
+ if not isinstance(prompt, List):
183
+ prompt = [prompt] * num_prompts
184
+ if not isinstance(negative_prompt, List):
185
+ negative_prompt = [negative_prompt] * num_prompts
186
+
187
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(
188
+ pil_image=pil_image, clip_image_embeds=clip_image_embeds
189
+ )
190
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
191
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
192
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
193
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
194
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
195
+
196
+ with torch.inference_mode():
197
+ prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt(
198
+ prompt,
199
+ device=self.device,
200
+ num_images_per_prompt=num_samples,
201
+ do_classifier_free_guidance=True,
202
+ negative_prompt=negative_prompt,
203
+ )
204
+ prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1)
205
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1)
206
+
207
+ generator = get_generator(seed, self.device)
208
+
209
+ images = self.pipe(
210
+ prompt_embeds=prompt_embeds,
211
+ negative_prompt_embeds=negative_prompt_embeds,
212
+ guidance_scale=guidance_scale,
213
+ num_inference_steps=num_inference_steps,
214
+ generator=generator,
215
+ **kwargs,
216
+ ).images
217
+
218
+ return images
219
+
220
+
221
+ class IPAdapterXL(IPAdapter):
222
+ """SDXL"""
223
+
224
+ def generate(
225
+ self,
226
+ pil_image,
227
+ prompt=None,
228
+ negative_prompt=None,
229
+ scale=1.0,
230
+ num_samples=4,
231
+ seed=None,
232
+ num_inference_steps=30,
233
+ **kwargs,
234
+ ):
235
+ self.set_scale(scale)
236
+
237
+ num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image)
238
+
239
+ if prompt is None:
240
+ prompt = "best quality, high quality"
241
+ if negative_prompt is None:
242
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
243
+
244
+ if not isinstance(prompt, List):
245
+ prompt = [prompt] * num_prompts
246
+ if not isinstance(negative_prompt, List):
247
+ negative_prompt = [negative_prompt] * num_prompts
248
+
249
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(pil_image)
250
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
251
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
252
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
253
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
254
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
255
+
256
+ with torch.inference_mode():
257
+ (
258
+ prompt_embeds,
259
+ negative_prompt_embeds,
260
+ pooled_prompt_embeds,
261
+ negative_pooled_prompt_embeds,
262
+ ) = self.pipe.encode_prompt(
263
+ prompt,
264
+ num_images_per_prompt=num_samples,
265
+ do_classifier_free_guidance=True,
266
+ negative_prompt=negative_prompt,
267
+ )
268
+ prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
269
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
270
+
271
+ self.generator = get_generator(seed, self.device)
272
+
273
+ images = self.pipe(
274
+ prompt_embeds=prompt_embeds,
275
+ negative_prompt_embeds=negative_prompt_embeds,
276
+ pooled_prompt_embeds=pooled_prompt_embeds,
277
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
278
+ num_inference_steps=num_inference_steps,
279
+ generator=self.generator,
280
+ **kwargs,
281
+ ).images
282
+
283
+ return images
284
+
285
+
286
+ class IPAdapterPlus(IPAdapter):
287
+ """IP-Adapter with fine-grained features"""
288
+
289
+ def init_proj(self):
290
+ image_proj_model = Resampler(
291
+ dim=self.pipe.unet.config.cross_attention_dim,
292
+ depth=4,
293
+ dim_head=64,
294
+ heads=12,
295
+ num_queries=self.num_tokens,
296
+ embedding_dim=self.image_encoder.config.hidden_size,
297
+ output_dim=self.pipe.unet.config.cross_attention_dim,
298
+ ff_mult=4,
299
+ ).to(self.device, dtype=torch.float16)
300
+ return image_proj_model
301
+
302
+ @torch.inference_mode()
303
+ def get_image_embeds(self, pil_image=None, clip_image_embeds=None):
304
+ if isinstance(pil_image, Image.Image):
305
+ pil_image = [pil_image]
306
+ clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values
307
+ clip_image = clip_image.to(self.device, dtype=torch.float16)
308
+ clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2]
309
+ image_prompt_embeds = self.image_proj_model(clip_image_embeds)
310
+ uncond_clip_image_embeds = self.image_encoder(
311
+ torch.zeros_like(clip_image), output_hidden_states=True
312
+ ).hidden_states[-2]
313
+ uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds)
314
+ return image_prompt_embeds, uncond_image_prompt_embeds
315
+
316
+
317
+ class IPAdapterFull(IPAdapterPlus):
318
+ """IP-Adapter with full features"""
319
+
320
+ def init_proj(self):
321
+ image_proj_model = MLPProjModel(
322
+ cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
323
+ clip_embeddings_dim=self.image_encoder.config.hidden_size,
324
+ ).to(self.device, dtype=torch.float16)
325
+ return image_proj_model
326
+
327
+
328
+ class IPAdapterPlusXL(IPAdapter):
329
+ """SDXL"""
330
+
331
+ def init_proj(self):
332
+ image_proj_model = Resampler(
333
+ dim=1280,
334
+ depth=4,
335
+ dim_head=64,
336
+ heads=20,
337
+ num_queries=self.num_tokens,
338
+ embedding_dim=self.image_encoder.config.hidden_size,
339
+ output_dim=self.pipe.unet.config.cross_attention_dim,
340
+ ff_mult=4,
341
+ ).to(self.device, dtype=torch.float16)
342
+ return image_proj_model
343
+
344
+ @torch.inference_mode()
345
+ def get_image_embeds(self, pil_image, clip_image_embeds=None):
346
+ if pil_image is not None:
347
+ if isinstance(pil_image, Image.Image):
348
+ pil_image = [pil_image]
349
+ clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values
350
+ clip_image = clip_image.to(self.device, dtype=torch.float16)
351
+ clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2]
352
+ else:
353
+ clip_image_embeds = clip_image_embeds.to(self.device, dtype=torch.float16)
354
+ image_prompt_embeds = self.image_proj_model(clip_image_embeds)
355
+ uncond_clip_image_embeds = self.image_encoder(
356
+ torch.zeros(clip_image_embeds.shape[0], 3, 224, 224).to(self.device, dtype=torch.float16), output_hidden_states=True
357
+ ).hidden_states[-2]
358
+ uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds)
359
+ return image_prompt_embeds, uncond_image_prompt_embeds
360
+
361
+ def generate(
362
+ self,
363
+ pil_image,
364
+ prompt=None,
365
+ clip_image_embeds=None,
366
+ negative_prompt=None,
367
+ scale=1.0,
368
+ num_samples=4,
369
+ seed=None,
370
+ num_inference_steps=30,
371
+ **kwargs,
372
+ ):
373
+ self.set_scale(scale)
374
+
375
+ if pil_image is not None:
376
+ num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image)
377
+ else:
378
+ num_prompts = clip_image_embeds.size(0)
379
+
380
+ if prompt is None:
381
+ prompt = "best quality, high quality"
382
+ if negative_prompt is None:
383
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
384
+
385
+ if not isinstance(prompt, List):
386
+ prompt = [prompt] * num_prompts
387
+ if not isinstance(negative_prompt, List):
388
+ negative_prompt = [negative_prompt] * num_prompts
389
+
390
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(pil_image, clip_image_embeds)
391
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
392
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
393
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
394
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
395
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
396
+
397
+ with torch.inference_mode():
398
+ (
399
+ prompt_embeds,
400
+ negative_prompt_embeds,
401
+ pooled_prompt_embeds,
402
+ negative_pooled_prompt_embeds,
403
+ ) = self.pipe.encode_prompt(
404
+ prompt,
405
+ num_images_per_prompt=num_samples,
406
+ do_classifier_free_guidance=True,
407
+ negative_prompt=negative_prompt,
408
+ )
409
+ prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
410
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
411
+
412
+ generator = get_generator(seed, self.device)
413
+
414
+ images = self.pipe(
415
+ prompt_embeds=prompt_embeds,
416
+ negative_prompt_embeds=negative_prompt_embeds,
417
+ pooled_prompt_embeds=pooled_prompt_embeds,
418
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
419
+ num_inference_steps=num_inference_steps,
420
+ generator=generator,
421
+ **kwargs,
422
+ ).images
423
+
424
+ return images
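A minimal usage sketch for the `IPAdapter` class above (the pipeline id and checkpoint paths are placeholders, not files shipped in this commit):

import torch
from diffusers import StableDiffusionPipeline
from PIL import Image
from ip_adapter.ip_adapter import IPAdapter

# Load a base SD 1.5 pipeline in fp16; IPAdapter moves it to the target device.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
)
ip_model = IPAdapter(
    pipe,
    image_encoder_path="models/image_encoder",   # placeholder path
    ip_ckpt="models/ip-adapter_sd15.bin",        # placeholder path
    device="cuda",
)
# Condition generation on a reference image.
images = ip_model.generate(
    pil_image=Image.open("reference.jpg"),
    num_samples=2,
    num_inference_steps=30,
    seed=42,
)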
ip_adapter/ip_adapter_faceid.py ADDED
@@ -0,0 +1,542 @@
1
+ import os
2
+ from typing import List
3
+
4
+ import torch
5
+ from diffusers import StableDiffusionPipeline
6
+ from diffusers.pipelines.controlnet import MultiControlNetModel
7
+ from PIL import Image
8
+ from safetensors import safe_open
9
+ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
10
+
11
+ from .attention_processor_faceid import LoRAAttnProcessor, LoRAIPAttnProcessor
12
+ from .utils import is_torch2_available, get_generator
13
+
14
+ USE_DAFAULT_ATTN = False # should be True for visualization_attnmap
15
+ if is_torch2_available() and (not USE_DAFAULT_ATTN):
16
+ from .attention_processor_faceid import (
17
+ LoRAAttnProcessor2_0 as LoRAAttnProcessor,
18
+ )
19
+ from .attention_processor_faceid import (
20
+ LoRAIPAttnProcessor2_0 as LoRAIPAttnProcessor,
21
+ )
22
+ else:
23
+ from .attention_processor_faceid import LoRAAttnProcessor, LoRAIPAttnProcessor
24
+ from .resampler import PerceiverAttention, FeedForward
25
+
26
+
27
+ class FacePerceiverResampler(torch.nn.Module):
28
+ def __init__(
29
+ self,
30
+ *,
31
+ dim=768,
32
+ depth=4,
33
+ dim_head=64,
34
+ heads=16,
35
+ embedding_dim=1280,
36
+ output_dim=768,
37
+ ff_mult=4,
38
+ ):
39
+ super().__init__()
40
+
41
+ self.proj_in = torch.nn.Linear(embedding_dim, dim)
42
+ self.proj_out = torch.nn.Linear(dim, output_dim)
43
+ self.norm_out = torch.nn.LayerNorm(output_dim)
44
+ self.layers = torch.nn.ModuleList([])
45
+ for _ in range(depth):
46
+ self.layers.append(
47
+ torch.nn.ModuleList(
48
+ [
49
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
50
+ FeedForward(dim=dim, mult=ff_mult),
51
+ ]
52
+ )
53
+ )
54
+
55
+ def forward(self, latents, x):
56
+ x = self.proj_in(x)
57
+ for attn, ff in self.layers:
58
+ latents = attn(x, latents) + latents
59
+ latents = ff(latents) + latents
60
+ latents = self.proj_out(latents)
61
+ return self.norm_out(latents)
62
+
63
+
64
+ class MLPProjModel(torch.nn.Module):
65
+ def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, num_tokens=4):
66
+ super().__init__()
67
+
68
+ self.cross_attention_dim = cross_attention_dim
69
+ self.num_tokens = num_tokens
70
+
71
+ self.proj = torch.nn.Sequential(
72
+ torch.nn.Linear(id_embeddings_dim, id_embeddings_dim*2),
73
+ torch.nn.GELU(),
74
+ torch.nn.Linear(id_embeddings_dim*2, cross_attention_dim*num_tokens),
75
+ )
76
+ self.norm = torch.nn.LayerNorm(cross_attention_dim)
77
+
78
+ def forward(self, id_embeds):
79
+ x = self.proj(id_embeds)
80
+ x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
81
+ x = self.norm(x)
82
+ return x
83
+
84
+
85
+ class ProjPlusModel(torch.nn.Module):
86
+ def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, clip_embeddings_dim=1280, num_tokens=4):
87
+ super().__init__()
88
+
89
+ self.cross_attention_dim = cross_attention_dim
90
+ self.num_tokens = num_tokens
91
+
92
+ self.proj = torch.nn.Sequential(
93
+ torch.nn.Linear(id_embeddings_dim, id_embeddings_dim*2),
94
+ torch.nn.GELU(),
95
+ torch.nn.Linear(id_embeddings_dim*2, cross_attention_dim*num_tokens),
96
+ )
97
+ self.norm = torch.nn.LayerNorm(cross_attention_dim)
98
+
99
+ self.perceiver_resampler = FacePerceiverResampler(
100
+ dim=cross_attention_dim,
101
+ depth=4,
102
+ dim_head=64,
103
+ heads=cross_attention_dim // 64,
104
+ embedding_dim=clip_embeddings_dim,
105
+ output_dim=cross_attention_dim,
106
+ ff_mult=4,
107
+ )
108
+
109
+ def forward(self, id_embeds, clip_embeds, shortcut=False, scale=1.0):
110
+
111
+ x = self.proj(id_embeds)
112
+ x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
113
+ x = self.norm(x)
114
+ out = self.perceiver_resampler(x, clip_embeds)
115
+ if shortcut:
116
+ out = x + scale * out
117
+ return out
118
+
119
+
120
+ class IPAdapterFaceID:
121
+ def __init__(self, sd_pipe, ip_ckpt, device, lora_rank=128, num_tokens=4, torch_dtype=torch.float16):
122
+ self.device = device
123
+ self.ip_ckpt = ip_ckpt
124
+ self.lora_rank = lora_rank
125
+ self.num_tokens = num_tokens
126
+ self.torch_dtype = torch_dtype
127
+
128
+ self.pipe = sd_pipe.to(self.device)
129
+ self.set_ip_adapter()
130
+
131
+ # image proj model
132
+ self.image_proj_model = self.init_proj()
133
+
134
+ self.load_ip_adapter()
135
+
136
+ def init_proj(self):
137
+ image_proj_model = MLPProjModel(
138
+ cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
139
+ id_embeddings_dim=512,
140
+ num_tokens=self.num_tokens,
141
+ ).to(self.device, dtype=self.torch_dtype)
142
+ return image_proj_model
143
+
144
+ def set_ip_adapter(self):
145
+ unet = self.pipe.unet
146
+ attn_procs = {}
147
+ for name in unet.attn_processors.keys():
148
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
149
+ if name.startswith("mid_block"):
150
+ hidden_size = unet.config.block_out_channels[-1]
151
+ elif name.startswith("up_blocks"):
152
+ block_id = int(name[len("up_blocks.")])
153
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
154
+ elif name.startswith("down_blocks"):
155
+ block_id = int(name[len("down_blocks.")])
156
+ hidden_size = unet.config.block_out_channels[block_id]
157
+ if cross_attention_dim is None:
158
+ attn_procs[name] = LoRAAttnProcessor(
159
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=self.lora_rank,
160
+ ).to(self.device, dtype=self.torch_dtype)
161
+ else:
162
+ attn_procs[name] = LoRAIPAttnProcessor(
163
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0, rank=self.lora_rank, num_tokens=self.num_tokens,
164
+ ).to(self.device, dtype=self.torch_dtype)
165
+ unet.set_attn_processor(attn_procs)
166
+
167
+ def load_ip_adapter(self):
168
+ if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors":
169
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
170
+ with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f:
171
+ for key in f.keys():
172
+ if key.startswith("image_proj."):
173
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
174
+ elif key.startswith("ip_adapter."):
175
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
176
+ else:
177
+ state_dict = torch.load(self.ip_ckpt, map_location="cpu")
178
+ self.image_proj_model.load_state_dict(state_dict["image_proj"])
179
+ ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values())
180
+ ip_layers.load_state_dict(state_dict["ip_adapter"])
181
+
182
+ @torch.inference_mode()
183
+ def get_image_embeds(self, faceid_embeds):
184
+
185
+ faceid_embeds = faceid_embeds.to(self.device, dtype=self.torch_dtype)
186
+ image_prompt_embeds = self.image_proj_model(faceid_embeds)
187
+ uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(faceid_embeds))
188
+ return image_prompt_embeds, uncond_image_prompt_embeds
189
+
190
+ def set_scale(self, scale):
191
+ for attn_processor in self.pipe.unet.attn_processors.values():
192
+ if isinstance(attn_processor, LoRAIPAttnProcessor):
193
+ attn_processor.scale = scale
194
+
195
+ def generate(
196
+ self,
197
+ faceid_embeds=None,
198
+ prompt=None,
199
+ negative_prompt=None,
200
+ scale=1.0,
201
+ num_samples=4,
202
+ seed=None,
203
+ guidance_scale=7.5,
204
+ num_inference_steps=30,
205
+ **kwargs,
206
+ ):
207
+ self.set_scale(scale)
208
+
209
+
210
+ num_prompts = faceid_embeds.size(0)
211
+
212
+ if prompt is None:
213
+ prompt = "best quality, high quality"
214
+ if negative_prompt is None:
215
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
216
+
217
+ if not isinstance(prompt, List):
218
+ prompt = [prompt] * num_prompts
219
+ if not isinstance(negative_prompt, List):
220
+ negative_prompt = [negative_prompt] * num_prompts
221
+
222
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds)
223
+
224
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
225
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
226
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
227
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
228
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
229
+
230
+ with torch.inference_mode():
231
+ prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt(
232
+ prompt,
233
+ device=self.device,
234
+ num_images_per_prompt=num_samples,
235
+ do_classifier_free_guidance=True,
236
+ negative_prompt=negative_prompt,
237
+ )
238
+ prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1)
239
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1)
240
+
241
+ generator = get_generator(seed, self.device)
242
+
243
+ images = self.pipe(
244
+ prompt_embeds=prompt_embeds,
245
+ negative_prompt_embeds=negative_prompt_embeds,
246
+ guidance_scale=guidance_scale,
247
+ num_inference_steps=num_inference_steps,
248
+ generator=generator,
249
+ **kwargs,
250
+ ).images
251
+
252
+ return images
253
+
254
+
255
+ class IPAdapterFaceIDPlus:
256
+ def __init__(self, sd_pipe, image_encoder_path, ip_ckpt, device, lora_rank=128, num_tokens=4, torch_dtype=torch.float16):
257
+ self.device = device
258
+ self.image_encoder_path = image_encoder_path
259
+ self.ip_ckpt = ip_ckpt
260
+ self.lora_rank = lora_rank
261
+ self.num_tokens = num_tokens
262
+ self.torch_dtype = torch_dtype
263
+
264
+ self.pipe = sd_pipe.to(self.device)
265
+ self.set_ip_adapter()
266
+
267
+ # load image encoder
268
+ self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(
269
+ self.device, dtype=self.torch_dtype
270
+ )
271
+ self.clip_image_processor = CLIPImageProcessor()
272
+ # image proj model
273
+ self.image_proj_model = self.init_proj()
274
+
275
+ self.load_ip_adapter()
276
+
277
+ def init_proj(self):
278
+ image_proj_model = ProjPlusModel(
279
+ cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
280
+ id_embeddings_dim=512,
281
+ clip_embeddings_dim=self.image_encoder.config.hidden_size,
282
+ num_tokens=self.num_tokens,
283
+ ).to(self.device, dtype=self.torch_dtype)
284
+ return image_proj_model
285
+
286
+ def set_ip_adapter(self):
287
+ unet = self.pipe.unet
288
+ attn_procs = {}
289
+ for name in unet.attn_processors.keys():
290
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
291
+ if name.startswith("mid_block"):
292
+ hidden_size = unet.config.block_out_channels[-1]
293
+ elif name.startswith("up_blocks"):
294
+ block_id = int(name[len("up_blocks.")])
295
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
296
+ elif name.startswith("down_blocks"):
297
+ block_id = int(name[len("down_blocks.")])
298
+ hidden_size = unet.config.block_out_channels[block_id]
299
+ if cross_attention_dim is None:
300
+ attn_procs[name] = LoRAAttnProcessor(
301
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=self.lora_rank,
302
+ ).to(self.device, dtype=self.torch_dtype)
303
+ else:
304
+ attn_procs[name] = LoRAIPAttnProcessor(
305
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0, rank=self.lora_rank, num_tokens=self.num_tokens,
306
+ ).to(self.device, dtype=self.torch_dtype)
307
+ unet.set_attn_processor(attn_procs)
308
+
309
+ def load_ip_adapter(self):
310
+ if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors":
311
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
312
+ with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f:
313
+ for key in f.keys():
314
+ if key.startswith("image_proj."):
315
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
316
+ elif key.startswith("ip_adapter."):
317
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
318
+ else:
319
+ state_dict = torch.load(self.ip_ckpt, map_location="cpu")
320
+ self.image_proj_model.load_state_dict(state_dict["image_proj"])
321
+ ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values())
322
+ ip_layers.load_state_dict(state_dict["ip_adapter"])
323
+
324
+ @torch.inference_mode()
325
+ def get_image_embeds(self, faceid_embeds, face_image, s_scale, shortcut):
326
+ if isinstance(face_image, Image.Image):
327
+ pil_image = [face_image]
328
+ clip_image = self.clip_image_processor(images=face_image, return_tensors="pt").pixel_values
329
+ clip_image = clip_image.to(self.device, dtype=self.torch_dtype)
330
+ clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2]
331
+ uncond_clip_image_embeds = self.image_encoder(
332
+ torch.zeros_like(clip_image), output_hidden_states=True
333
+ ).hidden_states[-2]
334
+
335
+ faceid_embeds = faceid_embeds.to(self.device, dtype=self.torch_dtype)
336
+ image_prompt_embeds = self.image_proj_model(faceid_embeds, clip_image_embeds, shortcut=shortcut, scale=s_scale)
337
+ uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(faceid_embeds), uncond_clip_image_embeds, shortcut=shortcut, scale=s_scale)
338
+ return image_prompt_embeds, uncond_image_prompt_embeds
339
+
340
+ def set_scale(self, scale):
341
+ for attn_processor in self.pipe.unet.attn_processors.values():
342
+ if isinstance(attn_processor, LoRAIPAttnProcessor):
343
+ attn_processor.scale = scale
344
+
345
+ def generate(
346
+ self,
347
+ face_image=None,
348
+ faceid_embeds=None,
349
+ prompt=None,
350
+ negative_prompt=None,
351
+ scale=1.0,
352
+ num_samples=4,
353
+ seed=None,
354
+ guidance_scale=7.5,
355
+ num_inference_steps=30,
356
+ s_scale=1.0,
357
+ shortcut=False,
358
+ **kwargs,
359
+ ):
360
+ self.set_scale(scale)
361
+
362
+
363
+ num_prompts = faceid_embeds.size(0)
364
+
365
+ if prompt is None:
366
+ prompt = "best quality, high quality"
367
+ if negative_prompt is None:
368
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
369
+
370
+ if not isinstance(prompt, List):
371
+ prompt = [prompt] * num_prompts
372
+ if not isinstance(negative_prompt, List):
373
+ negative_prompt = [negative_prompt] * num_prompts
374
+
375
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds, face_image, s_scale, shortcut)
376
+
377
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
378
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
379
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
380
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
381
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
382
+
383
+ with torch.inference_mode():
384
+ prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt(
385
+ prompt,
386
+ device=self.device,
387
+ num_images_per_prompt=num_samples,
388
+ do_classifier_free_guidance=True,
389
+ negative_prompt=negative_prompt,
390
+ )
391
+ prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1)
392
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1)
393
+
394
+ generator = get_generator(seed, self.device)
395
+
396
+ images = self.pipe(
397
+ prompt_embeds=prompt_embeds,
398
+ negative_prompt_embeds=negative_prompt_embeds,
399
+ guidance_scale=guidance_scale,
400
+ num_inference_steps=num_inference_steps,
401
+ generator=generator,
402
+ **kwargs,
403
+ ).images
404
+
405
+ return images
406
+
407
+
408
+ class IPAdapterFaceIDXL(IPAdapterFaceID):
409
+ """SDXL"""
410
+
411
+ def generate(
412
+ self,
413
+ faceid_embeds=None,
414
+ prompt=None,
415
+ negative_prompt=None,
416
+ scale=1.0,
417
+ num_samples=4,
418
+ seed=None,
419
+ num_inference_steps=30,
420
+ **kwargs,
421
+ ):
422
+ self.set_scale(scale)
423
+
424
+ num_prompts = faceid_embeds.size(0)
425
+
426
+ if prompt is None:
427
+ prompt = "best quality, high quality"
428
+ if negative_prompt is None:
429
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
430
+
431
+ if not isinstance(prompt, List):
432
+ prompt = [prompt] * num_prompts
433
+ if not isinstance(negative_prompt, List):
434
+ negative_prompt = [negative_prompt] * num_prompts
435
+
436
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds)
437
+
438
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
439
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
440
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
441
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
442
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
443
+
444
+ with torch.inference_mode():
445
+ (
446
+ prompt_embeds,
447
+ negative_prompt_embeds,
448
+ pooled_prompt_embeds,
449
+ negative_pooled_prompt_embeds,
450
+ ) = self.pipe.encode_prompt(
451
+ prompt,
452
+ num_images_per_prompt=num_samples,
453
+ do_classifier_free_guidance=True,
454
+ negative_prompt=negative_prompt,
455
+ )
456
+ prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
457
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
458
+
459
+ generator = get_generator(seed, self.device)
460
+
461
+ images = self.pipe(
462
+ prompt_embeds=prompt_embeds,
463
+ negative_prompt_embeds=negative_prompt_embeds,
464
+ pooled_prompt_embeds=pooled_prompt_embeds,
465
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
466
+ num_inference_steps=num_inference_steps,
467
+ generator=generator,
468
+ **kwargs,
469
+ ).images
470
+
471
+ return images
472
+
473
+
474
+ class IPAdapterFaceIDPlusXL(IPAdapterFaceIDPlus):
475
+ """SDXL"""
476
+
477
+ def generate(
478
+ self,
479
+ face_image=None,
480
+ faceid_embeds=None,
481
+ prompt=None,
482
+ negative_prompt=None,
483
+ scale=1.0,
484
+ num_samples=4,
485
+ seed=None,
486
+ guidance_scale=7.5,
487
+ num_inference_steps=30,
488
+ s_scale=1.0,
489
+ shortcut=True,
490
+ **kwargs,
491
+ ):
492
+ self.set_scale(scale)
493
+
494
+ num_prompts = faceid_embeds.size(0)
495
+
496
+ if prompt is None:
497
+ prompt = "best quality, high quality"
498
+ if negative_prompt is None:
499
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
500
+
501
+ if not isinstance(prompt, List):
502
+ prompt = [prompt] * num_prompts
503
+ if not isinstance(negative_prompt, List):
504
+ negative_prompt = [negative_prompt] * num_prompts
505
+
506
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds, face_image, s_scale, shortcut)
507
+
508
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
509
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
510
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
511
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
512
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
513
+
514
+ with torch.inference_mode():
515
+ (
516
+ prompt_embeds,
517
+ negative_prompt_embeds,
518
+ pooled_prompt_embeds,
519
+ negative_pooled_prompt_embeds,
520
+ ) = self.pipe.encode_prompt(
521
+ prompt,
522
+ num_images_per_prompt=num_samples,
523
+ do_classifier_free_guidance=True,
524
+ negative_prompt=negative_prompt,
525
+ )
526
+ prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
527
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
528
+
529
+ generator = get_generator(seed, self.device)
530
+
531
+ images = self.pipe(
532
+ prompt_embeds=prompt_embeds,
533
+ negative_prompt_embeds=negative_prompt_embeds,
534
+ pooled_prompt_embeds=pooled_prompt_embeds,
535
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
536
+ num_inference_steps=num_inference_steps,
537
+ generator=generator,
538
+ guidance_scale=guidance_scale,
539
+ **kwargs,
540
+ ).images
541
+
542
+ return images
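A minimal usage sketch for `IPAdapterFaceID` above. The identity embedding is assumed to come from an external face-recognition model (e.g. InsightFace) as a `(1, 512)` tensor; that model and the checkpoint paths are not part of this commit:

import torch
from diffusers import StableDiffusionPipeline
from ip_adapter.ip_adapter_faceid import IPAdapterFaceID

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
)
# Stand-in for a real 512-d identity embedding (id_embeddings_dim=512 above).
faceid_embeds = torch.randn(1, 512)
ip_model = IPAdapterFaceID(
    pipe,
    ip_ckpt="models/ip-adapter-faceid_sd15.bin",  # placeholder path
    device="cuda",
)
images = ip_model.generate(
    faceid_embeds=faceid_embeds,
    prompt="portrait photo, studio lighting",
    num_samples=2,
    num_inference_steps=30,
    seed=0,
)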
ip_adapter/ip_adapter_faceid_separate.py ADDED
@@ -0,0 +1,556 @@
1
+ import os
2
+ from typing import List
3
+
4
+ import torch
5
+ from diffusers import StableDiffusionPipeline
6
+ from diffusers.pipelines.controlnet import MultiControlNetModel
7
+ from PIL import Image
8
+ from safetensors import safe_open
9
+ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
10
+
11
+ from .utils import is_torch2_available, get_generator
12
+
13
+ USE_DAFAULT_ATTN = False # should be True for visualization_attnmap
14
+ if is_torch2_available() and (not USE_DAFAULT_ATTN):
15
+ from .attention_processor import (
16
+ AttnProcessor2_0 as AttnProcessor,
17
+ )
18
+ from .attention_processor import (
19
+ IPAttnProcessor2_0 as IPAttnProcessor,
20
+ )
21
+ else:
22
+ from .attention_processor import AttnProcessor, IPAttnProcessor
23
+ from .resampler import PerceiverAttention, FeedForward
24
+
25
+
26
+ class FacePerceiverResampler(torch.nn.Module):
27
+ def __init__(
28
+ self,
29
+ *,
30
+ dim=768,
31
+ depth=4,
32
+ dim_head=64,
33
+ heads=16,
34
+ embedding_dim=1280,
35
+ output_dim=768,
36
+ ff_mult=4,
37
+ ):
38
+ super().__init__()
39
+
40
+ self.proj_in = torch.nn.Linear(embedding_dim, dim)
41
+ self.proj_out = torch.nn.Linear(dim, output_dim)
42
+ self.norm_out = torch.nn.LayerNorm(output_dim)
43
+ self.layers = torch.nn.ModuleList([])
44
+ for _ in range(depth):
45
+ self.layers.append(
46
+ torch.nn.ModuleList(
47
+ [
48
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
49
+ FeedForward(dim=dim, mult=ff_mult),
50
+ ]
51
+ )
52
+ )
53
+
54
+ def forward(self, latents, x):
55
+ x = self.proj_in(x)
56
+ for attn, ff in self.layers:
57
+ latents = attn(x, latents) + latents
58
+ latents = ff(latents) + latents
59
+ latents = self.proj_out(latents)
60
+ return self.norm_out(latents)
61
+
62
+
63
+ class MLPProjModel(torch.nn.Module):
64
+ def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, num_tokens=4):
65
+ super().__init__()
66
+
67
+ self.cross_attention_dim = cross_attention_dim
68
+ self.num_tokens = num_tokens
69
+
70
+ self.proj = torch.nn.Sequential(
71
+ torch.nn.Linear(id_embeddings_dim, id_embeddings_dim*2),
72
+ torch.nn.GELU(),
73
+ torch.nn.Linear(id_embeddings_dim*2, cross_attention_dim*num_tokens),
74
+ )
75
+ self.norm = torch.nn.LayerNorm(cross_attention_dim)
76
+
77
+ def forward(self, id_embeds):
78
+ x = self.proj(id_embeds)
79
+ x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
80
+ x = self.norm(x)
81
+ return x
82
+
83
+
84
+ class ProjPlusModel(torch.nn.Module):
85
+ def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, clip_embeddings_dim=1280, num_tokens=4):
86
+ super().__init__()
87
+
88
+ self.cross_attention_dim = cross_attention_dim
89
+ self.num_tokens = num_tokens
90
+
91
+ self.proj = torch.nn.Sequential(
92
+ torch.nn.Linear(id_embeddings_dim, id_embeddings_dim*2),
93
+ torch.nn.GELU(),
94
+ torch.nn.Linear(id_embeddings_dim*2, cross_attention_dim*num_tokens),
95
+ )
96
+ self.norm = torch.nn.LayerNorm(cross_attention_dim)
97
+
98
+ self.perceiver_resampler = FacePerceiverResampler(
99
+ dim=cross_attention_dim,
100
+ depth=4,
101
+ dim_head=64,
102
+ heads=cross_attention_dim // 64,
103
+ embedding_dim=clip_embeddings_dim,
104
+ output_dim=cross_attention_dim,
105
+ ff_mult=4,
106
+ )
107
+
108
+ def forward(self, id_embeds, clip_embeds, shortcut=False, scale=1.0):
109
+
110
+ x = self.proj(id_embeds)
111
+ x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
112
+ x = self.norm(x)
113
+ out = self.perceiver_resampler(x, clip_embeds)
114
+ if shortcut:
115
+ out = x + scale * out
116
+ return out
117
+
118
+
119
+ class IPAdapterFaceID:
120
+ def __init__(self, sd_pipe, ip_ckpt, device, num_tokens=4, n_cond=1, torch_dtype=torch.float16):
121
+ self.device = device
122
+ self.ip_ckpt = ip_ckpt
123
+ self.num_tokens = num_tokens
124
+ self.n_cond = n_cond
125
+ self.torch_dtype = torch_dtype
126
+
127
+ self.pipe = sd_pipe.to(self.device)
128
+ self.set_ip_adapter()
129
+
130
+ # image proj model
131
+ self.image_proj_model = self.init_proj()
132
+
133
+ self.load_ip_adapter()
134
+
135
+ def init_proj(self):
136
+ image_proj_model = MLPProjModel(
137
+ cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
138
+ id_embeddings_dim=512,
139
+ num_tokens=self.num_tokens,
140
+ ).to(self.device, dtype=self.torch_dtype)
141
+ return image_proj_model
142
+
143
+ def set_ip_adapter(self):
144
+ unet = self.pipe.unet
145
+ attn_procs = {}
146
+ for name in unet.attn_processors.keys():
147
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
148
+ if name.startswith("mid_block"):
149
+ hidden_size = unet.config.block_out_channels[-1]
150
+ elif name.startswith("up_blocks"):
151
+ block_id = int(name[len("up_blocks.")])
152
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
153
+ elif name.startswith("down_blocks"):
154
+ block_id = int(name[len("down_blocks.")])
155
+ hidden_size = unet.config.block_out_channels[block_id]
156
+ if cross_attention_dim is None:
157
+ attn_procs[name] = AttnProcessor()
158
+ else:
159
+ attn_procs[name] = IPAttnProcessor(
160
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0, num_tokens=self.num_tokens*self.n_cond,
161
+ ).to(self.device, dtype=self.torch_dtype)
162
+ unet.set_attn_processor(attn_procs)
163
+
164
+ def load_ip_adapter(self):
165
+ if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors":
166
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
167
+ with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f:
168
+ for key in f.keys():
169
+ if key.startswith("image_proj."):
170
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
171
+ elif key.startswith("ip_adapter."):
172
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
173
+ else:
174
+ state_dict = torch.load(self.ip_ckpt, map_location="cpu")
175
+ self.image_proj_model.load_state_dict(state_dict["image_proj"])
176
+ ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values())
177
+ ip_layers.load_state_dict(state_dict["ip_adapter"], strict=False)
178
+
179
+ @torch.inference_mode()
180
+ def get_image_embeds(self, faceid_embeds):
181
+
182
+ multi_face = False
183
+ if faceid_embeds.dim() == 3:
184
+ multi_face = True
185
+ b, n, c = faceid_embeds.shape
186
+ faceid_embeds = faceid_embeds.reshape(b*n, c)
187
+
188
+ faceid_embeds = faceid_embeds.to(self.device, dtype=self.torch_dtype)
189
+ image_prompt_embeds = self.image_proj_model(faceid_embeds)
190
+ uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(faceid_embeds))
191
+ if multi_face:
192
+ c = image_prompt_embeds.size(-1)
193
+ image_prompt_embeds = image_prompt_embeds.reshape(b, -1, c)
194
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.reshape(b, -1, c)
195
+
196
+ return image_prompt_embeds, uncond_image_prompt_embeds
197
+
198
+ def set_scale(self, scale):
199
+ for attn_processor in self.pipe.unet.attn_processors.values():
200
+ if isinstance(attn_processor, IPAttnProcessor):
201
+ attn_processor.scale = scale
202
+
203
+ def generate(
204
+ self,
205
+ faceid_embeds=None,
206
+ prompt=None,
207
+ negative_prompt=None,
208
+ scale=1.0,
209
+ num_samples=4,
210
+ seed=None,
211
+ guidance_scale=7.5,
212
+ num_inference_steps=30,
213
+ **kwargs,
214
+ ):
215
+ self.set_scale(scale)
216
+
217
+ num_prompts = faceid_embeds.size(0)
218
+
219
+ if prompt is None:
220
+ prompt = "best quality, high quality"
221
+ if negative_prompt is None:
222
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
223
+
224
+ if not isinstance(prompt, List):
225
+ prompt = [prompt] * num_prompts
226
+ else:
227
+ faceid_embeds = faceid_embeds.repeat(num_samples, 1, 1)
228
+ num_samples = 1
229
+
230
+ if not isinstance(negative_prompt, List):
231
+ negative_prompt = [negative_prompt] * num_prompts
232
+
233
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds)
234
+
235
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
236
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
237
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
238
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
239
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
240
+
241
+ with torch.inference_mode():
242
+ prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt(
243
+ prompt,
244
+ device=self.device,
245
+ num_images_per_prompt=num_samples,
246
+ do_classifier_free_guidance=True,
247
+ negative_prompt=negative_prompt,
248
+ )
249
+ prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1)
250
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1)
251
+
252
+ generator = get_generator(seed, self.device)
253
+
254
+ images = self.pipe(
255
+ prompt_embeds=prompt_embeds,
256
+ negative_prompt_embeds=negative_prompt_embeds,
257
+ guidance_scale=guidance_scale,
258
+ num_inference_steps=num_inference_steps,
259
+ generator=generator,
260
+ num_images_per_prompt=num_samples,
261
+ **kwargs,
262
+ ).images
263
+
264
+ return images
265
+
266
+
267
+ class IPAdapterFaceIDPlus:
268
+ def __init__(self, sd_pipe, image_encoder_path, ip_ckpt, device, num_tokens=4, torch_dtype=torch.float16):
269
+ self.device = device
270
+ self.image_encoder_path = image_encoder_path
271
+ self.ip_ckpt = ip_ckpt
272
+ self.num_tokens = num_tokens
273
+ self.torch_dtype = torch_dtype
274
+
275
+ self.pipe = sd_pipe.to(self.device)
276
+ self.set_ip_adapter()
277
+
278
+ # load image encoder
279
+ self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(
280
+ self.device, dtype=self.torch_dtype
281
+ )
282
+ self.clip_image_processor = CLIPImageProcessor()
283
+ # image proj model
284
+ self.image_proj_model = self.init_proj()
285
+
286
+ self.load_ip_adapter()
287
+
288
+ def init_proj(self):
289
+ image_proj_model = ProjPlusModel(
290
+ cross_attention_dim=self.pipe.unet.config.cross_attention_dim,
291
+ id_embeddings_dim=512,
292
+ clip_embeddings_dim=self.image_encoder.config.hidden_size,
293
+ num_tokens=self.num_tokens,
294
+ ).to(self.device, dtype=self.torch_dtype)
295
+ return image_proj_model
296
+
297
+ def set_ip_adapter(self):
298
+ unet = self.pipe.unet
299
+ attn_procs = {}
300
+ for name in unet.attn_processors.keys():
301
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
302
+ if name.startswith("mid_block"):
303
+ hidden_size = unet.config.block_out_channels[-1]
304
+ elif name.startswith("up_blocks"):
305
+ block_id = int(name[len("up_blocks.")])
306
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
307
+ elif name.startswith("down_blocks"):
308
+ block_id = int(name[len("down_blocks.")])
309
+ hidden_size = unet.config.block_out_channels[block_id]
310
+ if cross_attention_dim is None:
311
+ attn_procs[name] = AttnProcessor()
312
+ else:
313
+ attn_procs[name] = IPAttnProcessor(
314
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0, num_tokens=self.num_tokens,
315
+ ).to(self.device, dtype=self.torch_dtype)
316
+ unet.set_attn_processor(attn_procs)
317
+
318
+ def load_ip_adapter(self):
319
+ if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors":
320
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
321
+ with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f:
322
+ for key in f.keys():
323
+ if key.startswith("image_proj."):
324
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
325
+ elif key.startswith("ip_adapter."):
326
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
327
+ else:
328
+ state_dict = torch.load(self.ip_ckpt, map_location="cpu")
329
+ self.image_proj_model.load_state_dict(state_dict["image_proj"])
330
+ ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values())
331
+ ip_layers.load_state_dict(state_dict["ip_adapter"], strict=False)
332
+
333
+ @torch.inference_mode()
334
+ def get_image_embeds(self, faceid_embeds, face_image, s_scale, shortcut):
335
+ if isinstance(face_image, Image.Image):
336
+ pil_image = [face_image]
337
+ clip_image = self.clip_image_processor(images=face_image, return_tensors="pt").pixel_values
338
+ clip_image = clip_image.to(self.device, dtype=self.torch_dtype)
339
+ clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2]
340
+ uncond_clip_image_embeds = self.image_encoder(
341
+ torch.zeros_like(clip_image), output_hidden_states=True
342
+ ).hidden_states[-2]
343
+
344
+ faceid_embeds = faceid_embeds.to(self.device, dtype=self.torch_dtype)
345
+ image_prompt_embeds = self.image_proj_model(faceid_embeds, clip_image_embeds, shortcut=shortcut, scale=s_scale)
346
+ uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(faceid_embeds), uncond_clip_image_embeds, shortcut=shortcut, scale=s_scale)
347
+ return image_prompt_embeds, uncond_image_prompt_embeds
348
+
349
+ def set_scale(self, scale):
350
+ for attn_processor in self.pipe.unet.attn_processors.values():
351
+ if isinstance(attn_processor, LoRAIPAttnProcessor):
352
+ attn_processor.scale = scale
353
+
354
+ def generate(
355
+ self,
356
+ face_image=None,
357
+ faceid_embeds=None,
358
+ prompt=None,
359
+ negative_prompt=None,
360
+ scale=1.0,
361
+ num_samples=4,
362
+ seed=None,
363
+ guidance_scale=7.5,
364
+ num_inference_steps=30,
365
+ s_scale=1.0,
366
+ shortcut=False,
367
+ **kwargs,
368
+ ):
369
+ self.set_scale(scale)
370
+
371
+
372
+ num_prompts = faceid_embeds.size(0)
373
+
374
+ if prompt is None:
375
+ prompt = "best quality, high quality"
376
+ if negative_prompt is None:
377
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
378
+
379
+ if not isinstance(prompt, List):
380
+ prompt = [prompt] * num_prompts
381
+ if not isinstance(negative_prompt, List):
382
+ negative_prompt = [negative_prompt] * num_prompts
383
+
384
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds, face_image, s_scale, shortcut)
385
+
386
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
387
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
388
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
389
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
390
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
391
+
392
+ with torch.inference_mode():
393
+ prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt(
394
+ prompt,
395
+ device=self.device,
396
+ num_images_per_prompt=num_samples,
397
+ do_classifier_free_guidance=True,
398
+ negative_prompt=negative_prompt,
399
+ )
400
+ prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1)
401
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1)
402
+
403
+ generator = get_generator(seed, self.device)
404
+
405
+ images = self.pipe(
406
+ prompt_embeds=prompt_embeds,
407
+ negative_prompt_embeds=negative_prompt_embeds,
408
+ guidance_scale=guidance_scale,
409
+ num_inference_steps=num_inference_steps,
410
+ generator=generator,
411
+ **kwargs,
412
+ ).images
413
+
414
+ return images
415
+
416
+
417
+ class IPAdapterFaceIDXL(IPAdapterFaceID):
418
+ """SDXL"""
419
+
420
+ def generate(
421
+ self,
422
+ faceid_embeds=None,
423
+ prompt=None,
424
+ negative_prompt=None,
425
+ scale=1.0,
426
+ num_samples=4,
427
+ seed=None,
428
+ num_inference_steps=30,
429
+ **kwargs,
430
+ ):
431
+ self.set_scale(scale)
432
+
433
+ num_prompts = faceid_embeds.size(0)
434
+
435
+ if prompt is None:
436
+ prompt = "best quality, high quality"
437
+ if negative_prompt is None:
438
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
439
+
440
+ if not isinstance(prompt, List):
441
+ prompt = [prompt] * num_prompts
442
+ else:
443
+ faceid_embeds = faceid_embeds.repeat(num_samples, 1, 1)
444
+ num_samples = 1
445
+
446
+ if not isinstance(negative_prompt, List):
447
+ negative_prompt = [negative_prompt] * num_prompts
448
+
449
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds)
450
+
451
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
452
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
453
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
454
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
455
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
456
+
457
+ with torch.inference_mode():
458
+ (
459
+ prompt_embeds,
460
+ negative_prompt_embeds,
461
+ pooled_prompt_embeds,
462
+ negative_pooled_prompt_embeds,
463
+ ) = self.pipe.encode_prompt(
464
+ prompt,
465
+ num_images_per_prompt=num_samples,
466
+ do_classifier_free_guidance=True,
467
+ negative_prompt=negative_prompt,
468
+ )
469
+ prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
470
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
471
+
472
+ generator = get_generator(seed, self.device)
473
+
474
+ images = self.pipe(
475
+ prompt_embeds=prompt_embeds,
476
+ negative_prompt_embeds=negative_prompt_embeds,
477
+ pooled_prompt_embeds=pooled_prompt_embeds,
478
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
479
+ num_inference_steps=num_inference_steps,
480
+ generator=generator,
481
+ num_images_per_prompt=num_samples,
482
+ **kwargs,
483
+ ).images
484
+
485
+ return images
486
+
487
+
488
+ class IPAdapterFaceIDPlusXL(IPAdapterFaceIDPlus):
489
+ """SDXL"""
490
+
491
+ def generate(
492
+ self,
493
+ face_image=None,
494
+ faceid_embeds=None,
495
+ prompt=None,
496
+ negative_prompt=None,
497
+ scale=1.0,
498
+ num_samples=4,
499
+ seed=None,
500
+ guidance_scale=7.5,
501
+ num_inference_steps=30,
502
+ s_scale=1.0,
503
+ shortcut=True,
504
+ **kwargs,
505
+ ):
506
+ self.set_scale(scale)
507
+
508
+ num_prompts = faceid_embeds.size(0)
509
+
510
+ if prompt is None:
511
+ prompt = "best quality, high quality"
512
+ if negative_prompt is None:
513
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
514
+
515
+ if not isinstance(prompt, List):
516
+ prompt = [prompt] * num_prompts
517
+ if not isinstance(negative_prompt, List):
518
+ negative_prompt = [negative_prompt] * num_prompts
519
+
520
+ image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(faceid_embeds, face_image, s_scale, shortcut)
521
+
522
+ bs_embed, seq_len, _ = image_prompt_embeds.shape
523
+ image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
524
+ image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
525
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
526
+ uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1)
527
+
528
+ with torch.inference_mode():
529
+ (
530
+ prompt_embeds,
531
+ negative_prompt_embeds,
532
+ pooled_prompt_embeds,
533
+ negative_pooled_prompt_embeds,
534
+ ) = self.pipe.encode_prompt(
535
+ prompt,
536
+ num_images_per_prompt=num_samples,
537
+ do_classifier_free_guidance=True,
538
+ negative_prompt=negative_prompt,
539
+ )
540
+ prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
541
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
542
+
543
+ generator = get_generator(seed, self.device)
544
+
545
+ images = self.pipe(
546
+ prompt_embeds=prompt_embeds,
547
+ negative_prompt_embeds=negative_prompt_embeds,
548
+ pooled_prompt_embeds=pooled_prompt_embeds,
549
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
550
+ num_inference_steps=num_inference_steps,
551
+ generator=generator,
552
+ guidance_scale=guidance_scale,
553
+ **kwargs,
554
+ ).images
555
+
556
+ return images
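A hedged end-to-end sketch of how the FaceID adapters above are typically driven; the insightface face-analysis step, the checkpoint filename, the image path, and the `sd_pipe` pipeline are assumptions, not part of this file:

import cv2
import torch
from insightface.app import FaceAnalysis

# extract a 512-d identity embedding from a reference photo (hypothetical path)
app = FaceAnalysis(name="buffalo_l")
app.prepare(ctx_id=0, det_size=(640, 640))
faces = app.get(cv2.imread("face.jpg"))
faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)  # (1, 512)

# sd_pipe: a diffusers StableDiffusionPipeline prepared elsewhere
ip_model = IPAdapterFaceID(sd_pipe, "ip-adapter-faceid_sd15.bin", "cuda")
images = ip_model.generate(faceid_embeds=faceid_embeds,
                           prompt="photo of a person in a garden",
                           num_samples=2, num_inference_steps=30, seed=42)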
ip_adapter/resampler.py ADDED
@@ -0,0 +1,158 @@
1
+ # modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
2
+ # and https://github.com/lucidrains/imagen-pytorch/blob/main/imagen_pytorch/imagen_pytorch.py
3
+
4
+ import math
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from einops import rearrange
9
+ from einops.layers.torch import Rearrange
10
+
11
+
12
+ # FFN
13
+ def FeedForward(dim, mult=4):
14
+ inner_dim = int(dim * mult)
15
+ return nn.Sequential(
16
+ nn.LayerNorm(dim),
17
+ nn.Linear(dim, inner_dim, bias=False),
18
+ nn.GELU(),
19
+ nn.Linear(inner_dim, dim, bias=False),
20
+ )
21
+
22
+
23
+ def reshape_tensor(x, heads):
24
+ bs, length, width = x.shape
25
+ # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
26
+ x = x.view(bs, length, heads, -1)
27
+ # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
28
+ x = x.transpose(1, 2)
29
+ # shape remains (bs, n_heads, length, dim_per_head)
30
+ x = x.reshape(bs, heads, length, -1)
31
+ return x
32
+
33
+
34
+ class PerceiverAttention(nn.Module):
35
+ def __init__(self, *, dim, dim_head=64, heads=8):
36
+ super().__init__()
37
+ self.scale = dim_head**-0.5
38
+ self.dim_head = dim_head
39
+ self.heads = heads
40
+ inner_dim = dim_head * heads
41
+
42
+ self.norm1 = nn.LayerNorm(dim)
43
+ self.norm2 = nn.LayerNorm(dim)
44
+
45
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
46
+ self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
47
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
48
+
49
+ def forward(self, x, latents):
50
+ """
51
+ Args:
52
+ x (torch.Tensor): image features
53
+ shape (b, n1, D)
54
+ latent (torch.Tensor): latent features
55
+ shape (b, n2, D)
56
+ """
57
+ x = self.norm1(x)
58
+ latents = self.norm2(latents)
59
+
60
+ b, l, _ = latents.shape
61
+
62
+ q = self.to_q(latents)
63
+ kv_input = torch.cat((x, latents), dim=-2)
64
+ k, v = self.to_kv(kv_input).chunk(2, dim=-1)
65
+
66
+ q = reshape_tensor(q, self.heads)
67
+ k = reshape_tensor(k, self.heads)
68
+ v = reshape_tensor(v, self.heads)
69
+
70
+ # attention
71
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
72
+ weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
73
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
74
+ out = weight @ v
75
+
76
+ out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
77
+
78
+ return self.to_out(out)
79
+
80
+
81
+ class Resampler(nn.Module):
82
+ def __init__(
83
+ self,
84
+ dim=1024,
85
+ depth=8,
86
+ dim_head=64,
87
+ heads=16,
88
+ num_queries=8,
89
+ embedding_dim=768,
90
+ output_dim=1024,
91
+ ff_mult=4,
92
+ max_seq_len: int = 257, # CLIP tokens + CLS token
93
+ apply_pos_emb: bool = False,
94
+ num_latents_mean_pooled: int = 0, # number of latents derived from mean pooled representation of the sequence
95
+ ):
96
+ super().__init__()
97
+ self.pos_emb = nn.Embedding(max_seq_len, embedding_dim) if apply_pos_emb else None
98
+
99
+ self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
100
+
101
+ self.proj_in = nn.Linear(embedding_dim, dim)
102
+
103
+ self.proj_out = nn.Linear(dim, output_dim)
104
+ self.norm_out = nn.LayerNorm(output_dim)
105
+
106
+ self.to_latents_from_mean_pooled_seq = (
107
+ nn.Sequential(
108
+ nn.LayerNorm(dim),
109
+ nn.Linear(dim, dim * num_latents_mean_pooled),
110
+ Rearrange("b (n d) -> b n d", n=num_latents_mean_pooled),
111
+ )
112
+ if num_latents_mean_pooled > 0
113
+ else None
114
+ )
115
+
116
+ self.layers = nn.ModuleList([])
117
+ for _ in range(depth):
118
+ self.layers.append(
119
+ nn.ModuleList(
120
+ [
121
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
122
+ FeedForward(dim=dim, mult=ff_mult),
123
+ ]
124
+ )
125
+ )
126
+
127
+ def forward(self, x):
128
+ if self.pos_emb is not None:
129
+ n, device = x.shape[1], x.device
130
+ pos_emb = self.pos_emb(torch.arange(n, device=device))
131
+ x = x + pos_emb
132
+
133
+ latents = self.latents.repeat(x.size(0), 1, 1)
134
+
135
+ x = self.proj_in(x)
136
+
137
+ if self.to_latents_from_mean_pooled_seq:
138
+ meanpooled_seq = masked_mean(x, dim=1, mask=torch.ones(x.shape[:2], device=x.device, dtype=torch.bool))
139
+ meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)
140
+ latents = torch.cat((meanpooled_latents, latents), dim=-2)
141
+
142
+ for attn, ff in self.layers:
143
+ latents = attn(x, latents) + latents
144
+ latents = ff(latents) + latents
145
+
146
+ latents = self.proj_out(latents)
147
+ return self.norm_out(latents)
148
+
149
+
150
+ def masked_mean(t, *, dim, mask=None):
151
+ if mask is None:
152
+ return t.mean(dim=dim)
153
+
154
+ denom = mask.sum(dim=dim, keepdim=True)
155
+ mask = rearrange(mask, "b n -> b n 1")
156
+ masked_t = t.masked_fill(~mask, 0.0)
157
+
158
+ return masked_t.sum(dim=dim) / denom.clamp(min=1e-5)
ip_adapter/sd3_attention_processor.py ADDED
@@ -0,0 +1,179 @@
1
+ from typing import Callable, List, Optional, Union
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from torch import nn
6
+ from diffusers.models.attention_processor import Attention
7
+
8
+
9
+ class JointAttnProcessor2_0:
10
+ """Attention processor used typically in processing the SD3-like self-attention projections."""
11
+
12
+ def __init__(self):
13
+ if not hasattr(F, "scaled_dot_product_attention"):
14
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
15
+
16
+ def __call__(
17
+ self,
18
+ attn: Attention,
19
+ hidden_states: torch.FloatTensor,
20
+ encoder_hidden_states: torch.FloatTensor = None,
21
+ attention_mask: Optional[torch.FloatTensor] = None,
22
+ *args,
23
+ **kwargs,
24
+ ) -> torch.FloatTensor:
25
+ residual = hidden_states
26
+
27
+ input_ndim = hidden_states.ndim
28
+ if input_ndim == 4:
29
+ batch_size, channel, height, width = hidden_states.shape
30
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
31
+ context_input_ndim = encoder_hidden_states.ndim
32
+ if context_input_ndim == 4:
33
+ batch_size, channel, height, width = encoder_hidden_states.shape
34
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
35
+
36
+ batch_size = encoder_hidden_states.shape[0]
37
+
38
+ # `sample` projections.
39
+ query = attn.to_q(hidden_states)
40
+ key = attn.to_k(hidden_states)
41
+ value = attn.to_v(hidden_states)
42
+
43
+ # `context` projections.
44
+ encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states)
45
+ encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
46
+ encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
47
+
48
+ # attention
49
+ query = torch.cat([query, encoder_hidden_states_query_proj], dim=1)
50
+ key = torch.cat([key, encoder_hidden_states_key_proj], dim=1)
51
+ value = torch.cat([value, encoder_hidden_states_value_proj], dim=1)
52
+
53
+ inner_dim = key.shape[-1]
54
+ head_dim = inner_dim // attn.heads
55
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
56
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
57
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
58
+
59
+ hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
60
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
61
+ hidden_states = hidden_states.to(query.dtype)
62
+
63
+ # Split the attention outputs.
64
+ hidden_states, encoder_hidden_states = (
65
+ hidden_states[:, : residual.shape[1]],
66
+ hidden_states[:, residual.shape[1] :],
67
+ )
68
+
69
+ # linear proj
70
+ hidden_states = attn.to_out[0](hidden_states)
71
+ # dropout
72
+ hidden_states = attn.to_out[1](hidden_states)
73
+ if not attn.context_pre_only:
74
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
75
+
76
+ if input_ndim == 4:
77
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
78
+ if context_input_ndim == 4:
79
+ encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
80
+
81
+ return hidden_states, encoder_hidden_states
82
+
83
+
84
+ class IPJointAttnProcessor2_0(torch.nn.Module):
85
+ """Attention processor used typically in processing the SD3-like self-attention projections."""
86
+
87
+ def __init__(self, context_dim, hidden_dim, scale=1.0):
88
+ if not hasattr(F, "scaled_dot_product_attention"):
89
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
90
+ super().__init__()
91
+ self.scale = scale
92
+
93
+ self.add_k_proj_ip = nn.Linear(context_dim, hidden_dim)
94
+ self.add_v_proj_ip = nn.Linear(context_dim, hidden_dim)
95
+
96
+
97
+ def __call__(
98
+ self,
99
+ attn: Attention,
100
+ hidden_states: torch.FloatTensor,
101
+ encoder_hidden_states: torch.FloatTensor = None,
102
+ attention_mask: Optional[torch.FloatTensor] = None,
103
+ ip_hidden_states: torch.FloatTensor = None,
104
+ *args,
105
+ **kwargs,
106
+ ) -> torch.FloatTensor:
107
+ residual = hidden_states
108
+
109
+ input_ndim = hidden_states.ndim
110
+ if input_ndim == 4:
111
+ batch_size, channel, height, width = hidden_states.shape
112
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
113
+ context_input_ndim = encoder_hidden_states.ndim
114
+ if context_input_ndim == 4:
115
+ batch_size, channel, height, width = encoder_hidden_states.shape
116
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
117
+
118
+ batch_size = encoder_hidden_states.shape[0]
119
+
120
+ # `sample` projections.
121
+ query = attn.to_q(hidden_states)
122
+ key = attn.to_k(hidden_states)
123
+ value = attn.to_v(hidden_states)
124
+
125
+ sample_query = query # latent query
126
+
127
+ # `context` projections.
128
+ encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states)
129
+ encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
130
+ encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
131
+
132
+ # attention
133
+ query = torch.cat([query, encoder_hidden_states_query_proj], dim=1)
134
+ key = torch.cat([key, encoder_hidden_states_key_proj], dim=1)
135
+ value = torch.cat([value, encoder_hidden_states_value_proj], dim=1)
136
+
137
+ inner_dim = key.shape[-1]
138
+ head_dim = inner_dim // attn.heads
139
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
140
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
141
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
142
+
143
+ hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
144
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
145
+ hidden_states = hidden_states.to(query.dtype)
146
+
147
+ # Split the attention outputs.
148
+ hidden_states, encoder_hidden_states = (
149
+ hidden_states[:, : residual.shape[1]],
150
+ hidden_states[:, residual.shape[1] :],
151
+ )
152
+
153
+ # for ip-adapter
154
+ ip_key = self.add_k_proj_ip(ip_hidden_states)
155
+ ip_value = self.add_v_proj_ip(ip_hidden_states)
156
+ ip_query = sample_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
157
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
158
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
159
+
160
+ ip_hidden_states = F.scaled_dot_product_attention(ip_query, ip_key, ip_value, dropout_p=0.0, is_causal=False)
161
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
162
+ ip_hidden_states = ip_hidden_states.to(ip_query.dtype)
163
+
164
+ hidden_states = hidden_states + self.scale * ip_hidden_states
165
+
166
+ # linear proj
167
+ hidden_states = attn.to_out[0](hidden_states)
168
+ # dropout
169
+ hidden_states = attn.to_out[1](hidden_states)
170
+ if not attn.context_pre_only:
171
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
172
+
173
+ if input_ndim == 4:
174
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
175
+ if context_input_ndim == 4:
176
+ encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
177
+
178
+ return hidden_states, encoder_hidden_states
179
+
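A hedged sketch of wiring IPJointAttnProcessor2_0 into an SD3-style transformer; the `transformer` object, the chosen dimensions, and how `ip_hidden_states` reaches each call (for example through a pipeline's joint_attention_kwargs) are assumptions, not defined in this file:

ip_procs = {
    name: IPJointAttnProcessor2_0(context_dim=1280, hidden_dim=1536, scale=0.5)
    for name in transformer.attn_processors
}
transformer.set_attn_processor(ip_procs)
# every attention call must then receive ip_hidden_states (the projected image tokens),
# which the processor blends in via hidden_states + scale * ip_hidden_states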
ip_adapter/test_resampler.py ADDED
@@ -0,0 +1,44 @@
1
+ import torch
2
+ from resampler import Resampler
3
+ from transformers import CLIPVisionModel
4
+
5
+ BATCH_SIZE = 2
6
+ OUTPUT_DIM = 1280
7
+ NUM_QUERIES = 8
8
+ NUM_LATENTS_MEAN_POOLED = 4 # 0 for no mean pooling (previous behavior)
9
+ APPLY_POS_EMB = True # False for no positional embeddings (previous behavior)
10
+ IMAGE_ENCODER_NAME_OR_PATH = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
11
+
12
+
13
+ def main():
14
+ image_encoder = CLIPVisionModel.from_pretrained(IMAGE_ENCODER_NAME_OR_PATH)
15
+ embedding_dim = image_encoder.config.hidden_size
16
+ print(f"image_encoder hidden size: ", embedding_dim)
17
+
18
+ image_proj_model = Resampler(
19
+ dim=1024,
20
+ depth=2,
21
+ dim_head=64,
22
+ heads=16,
23
+ num_queries=NUM_QUERIES,
24
+ embedding_dim=embedding_dim,
25
+ output_dim=OUTPUT_DIM,
26
+ ff_mult=2,
27
+ max_seq_len=257,
28
+ apply_pos_emb=APPLY_POS_EMB,
29
+ num_latents_mean_pooled=NUM_LATENTS_MEAN_POOLED,
30
+ )
31
+
32
+ dummy_images = torch.randn(BATCH_SIZE, 3, 224, 224)
33
+ with torch.no_grad():
34
+ image_embeds = image_encoder(dummy_images, output_hidden_states=True).hidden_states[-2]
35
+ print("image_embds shape: ", image_embeds.shape)
36
+
37
+ with torch.no_grad():
38
+ ip_tokens = image_proj_model(image_embeds)
39
+ print("ip_tokens shape:", ip_tokens.shape)
40
+ assert ip_tokens.shape == (BATCH_SIZE, NUM_QUERIES + NUM_LATENTS_MEAN_POOLED, OUTPUT_DIM)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ main()
ip_adapter/utils.py ADDED
@@ -0,0 +1,93 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import numpy as np
4
+ from PIL import Image
5
+
6
+ attn_maps = {}
7
+ def hook_fn(name):
8
+ def forward_hook(module, input, output):
9
+ if hasattr(module.processor, "attn_map"):
10
+ attn_maps[name] = module.processor.attn_map
11
+ del module.processor.attn_map
12
+
13
+ return forward_hook
14
+
15
+ def register_cross_attention_hook(unet):
16
+ for name, module in unet.named_modules():
17
+ if name.split('.')[-1].startswith('attn2'):
18
+ module.register_forward_hook(hook_fn(name))
19
+
20
+ return unet
21
+
22
+ def upscale(attn_map, target_size):
23
+ attn_map = torch.mean(attn_map, dim=0)
24
+ attn_map = attn_map.permute(1,0)
25
+ temp_size = None
26
+
27
+ for i in range(0,5):
28
+ scale = 2 ** i
29
+ if ( target_size[0] // scale ) * ( target_size[1] // scale) == attn_map.shape[1]*64:
30
+ temp_size = (target_size[0]//(scale*8), target_size[1]//(scale*8))
31
+ break
32
+
33
+ assert temp_size is not None, "temp_size cannot be None"
34
+
35
+ attn_map = attn_map.view(attn_map.shape[0], *temp_size)
36
+
37
+ attn_map = F.interpolate(
38
+ attn_map.unsqueeze(0).to(dtype=torch.float32),
39
+ size=target_size,
40
+ mode='bilinear',
41
+ align_corners=False
42
+ )[0]
43
+
44
+ attn_map = torch.softmax(attn_map, dim=0)
45
+ return attn_map
46
+ def get_net_attn_map(image_size, batch_size=2, instance_or_negative=False, detach=True):
47
+
48
+ idx = 0 if instance_or_negative else 1
49
+ net_attn_maps = []
50
+
51
+ for name, attn_map in attn_maps.items():
52
+ attn_map = attn_map.cpu() if detach else attn_map
53
+ attn_map = torch.chunk(attn_map, batch_size)[idx].squeeze()
54
+ attn_map = upscale(attn_map, image_size)
55
+ net_attn_maps.append(attn_map)
56
+
57
+ net_attn_maps = torch.mean(torch.stack(net_attn_maps,dim=0),dim=0)
58
+
59
+ return net_attn_maps
60
+
61
+ def attnmaps2images(net_attn_maps):
62
+
63
+ #total_attn_scores = 0
64
+ images = []
65
+
66
+ for attn_map in net_attn_maps:
67
+ attn_map = attn_map.cpu().numpy()
68
+ #total_attn_scores += attn_map.mean().item()
69
+
70
+ normalized_attn_map = (attn_map - np.min(attn_map)) / (np.max(attn_map) - np.min(attn_map)) * 255
71
+ normalized_attn_map = normalized_attn_map.astype(np.uint8)
72
+ #print("norm: ", normalized_attn_map.shape)
73
+ image = Image.fromarray(normalized_attn_map)
74
+
75
+ #image = fix_save_attn_map(attn_map)
76
+ images.append(image)
77
+
78
+ #print(total_attn_scores)
79
+ return images
80
+ def is_torch2_available():
81
+ return hasattr(F, "scaled_dot_product_attention")
82
+
83
+ def get_generator(seed, device):
84
+
85
+ if seed is not None:
86
+ if isinstance(seed, list):
87
+ generator = [torch.Generator(device).manual_seed(seed_item) for seed_item in seed]
88
+ else:
89
+ generator = torch.Generator(device).manual_seed(seed)
90
+ else:
91
+ generator = None
92
+
93
+ return generator
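A hedged sketch of using the attention-map helpers above; this only yields maps if the installed attention processors store an `attn_map` attribute on themselves, and `pipe` plus the 512x512 output size are assumptions:

pipe.unet = register_cross_attention_hook(pipe.unet)   # hooks fill the global attn_maps dict
result = pipe("a cat wearing a hat")
net_attn = get_net_attn_map(image_size=(512, 512), batch_size=2)
for i, attn_img in enumerate(attnmaps2images(net_attn)):
    attn_img.save(f"attn_token_{i}.png")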
ipadapter_model.py ADDED
@@ -0,0 +1,314 @@
1
+ """
2
+ IP-Adapter Model Interface
3
+
4
+ This module provides utilities for working with IP-Adapter models, including:
5
+ - Loading Stable Diffusion pipelines with IP-Adapter
6
+ - Extracting CLIP embeddings from images
7
+ - Generating images from CLIP embeddings
8
+ - Utility functions for image processing
9
+ """
10
+
11
+ from typing import List, Optional, Union, Tuple
12
+
13
+ import numpy as np
14
+ import torch
15
+ from PIL import Image
16
+ from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline, DDIMScheduler, AutoencoderKL
17
+
18
+ # Fix for torch 2.5.0 compatibility
19
+ torch.backends.cuda.enable_cudnn_sdp(False)
20
+
21
+ from ip_adapter import IPAdapterPlus, IPAdapterPlusXL
22
+
23
+
24
+ # ===== Image Utility Functions =====
25
+
26
+ def create_image_grid(images: List[Image.Image], rows: int, cols: int) -> Image.Image:
27
+ # Get dimensions from first image (assumes all images are same size)
28
+ width, height = images[0].size
29
+
30
+ # Create empty grid canvas
31
+ grid = Image.new('RGB', size=(cols * width, rows * height))
32
+
33
+ # Paste each image into the grid
34
+ for i, img in enumerate(images):
35
+ x_pos = (i % cols) * width
36
+ y_pos = (i // cols) * height
37
+ grid.paste(img, box=(x_pos, y_pos))
38
+
39
+ return grid
40
+
41
+
42
+ # ===== CLIP Embedding Extraction Functions =====
43
+
44
+ @torch.inference_mode()
45
+ def extract_clip_embeddings_from_pil(pil_image: Union[Image.Image, List[Image.Image]],
46
+ ip_model) -> torch.Tensor:
47
+ """
48
+ Returns:
49
+ torch.Tensor: CLIP embeddings of shape (batch_size, seq_len, embed_dim)
50
+ """
51
+ if isinstance(pil_image, Image.Image):
52
+ pil_image = [pil_image]
53
+
54
+ # Process images through CLIP processor
55
+ processed_images = ip_model.clip_image_processor(
56
+ images=pil_image, return_tensors="pt"
57
+ ).pixel_values
58
+
59
+ # Move to model device with appropriate dtype
60
+ processed_images = processed_images.to(ip_model.device, dtype=torch.float16)
61
+
62
+ # Extract embeddings from penultimate layer (better for downstream tasks)
63
+ clip_embeddings = ip_model.image_encoder(
64
+ processed_images, output_hidden_states=True
65
+ ).hidden_states[-2]
66
+
67
+ # Convert to float32 for better numerical stability
68
+ return clip_embeddings.float()
69
+
70
+
71
+ @torch.inference_mode()
72
+ def extract_clip_embeddings_from_pil_batch(pil_images: List[Image.Image],
73
+ ip_model) -> torch.Tensor:
74
+ """
75
+ Returns:
76
+ torch.Tensor: Concatenated CLIP embeddings of shape (batch, seq_len, embed_dim)
77
+ """
78
+ embeddings_batch = []
79
+
80
+ for image in pil_images:
81
+ embeddings = extract_clip_embeddings_from_pil(image, ip_model)
82
+ embeddings_batch.append(embeddings)
83
+
84
+ return torch.cat(embeddings_batch, dim=0)
85
+
86
+
87
+ @torch.inference_mode()
88
+ def extract_clip_embeddings_from_tensor(tensor_image: torch.Tensor,
89
+ ip_model,
90
+ resize: bool = True) -> torch.Tensor:
91
+ """
92
+ Returns:
93
+ torch.Tensor: CLIP embeddings of shape (batch_size, seq_len, embed_dim)
94
+ """
95
+ # Move tensor to model device with appropriate dtype
96
+ tensor_image = tensor_image.to(ip_model.device, dtype=torch.float16)
97
+
98
+ # Resize to CLIP input resolution if requested
99
+ if resize:
100
+ tensor_image = torch.nn.functional.interpolate(
101
+ tensor_image,
102
+ size=(224, 224),
103
+ mode="bilinear",
104
+ align_corners=False
105
+ )
106
+
107
+ # Extract embeddings with positional encoding interpolation
108
+ clip_embeddings = ip_model.image_encoder(
109
+ tensor_image,
110
+ output_hidden_states=True,
111
+ interpolate_pos_encoding=True
112
+ ).hidden_states[-2]
113
+
114
+ # Convert to float32 for numerical stability
115
+ return clip_embeddings.float()
116
+
117
+
118
+ # ===== IP-Adapter Helper Functions =====
119
+
120
+ @torch.inference_mode()
121
+ def _enhanced_get_image_embeds(self, pil_image=None, clip_image_embeds=None):
122
+ """
123
+ Enhanced version of IP-Adapter's get_image_embeds method.
124
+
125
+ This method processes either PIL images or pre-computed CLIP embeddings
126
+ and returns both conditional and unconditional embeddings for generation.
127
+
128
+ Args:
129
+ pil_image: PIL Image(s) to process (optional)
130
+ clip_image_embeds: Pre-computed CLIP embeddings (optional)
131
+
132
+ Returns:
133
+ Tuple of (conditional_embeds, unconditional_embeds)
134
+ """
135
+ # Process PIL images if provided
136
+ if pil_image is not None:
137
+ if isinstance(pil_image, Image.Image):
138
+ pil_image = [pil_image]
139
+
140
+ # Convert PIL to tensor and extract CLIP embeddings
141
+ processed_images = self.clip_image_processor(
142
+ images=pil_image, return_tensors="pt"
143
+ ).pixel_values
144
+ processed_images = processed_images.to(self.device, dtype=torch.float16)
145
+
146
+ clip_image_embeds = self.image_encoder(
147
+ processed_images, output_hidden_states=True
148
+ ).hidden_states[-2]
149
+
150
+ # Project CLIP embeddings to IP-Adapter space
151
+ conditional_embeds = self.image_proj_model(clip_image_embeds)
152
+
153
+ # Generate unconditional embeddings (for classifier-free guidance)
154
+ zero_tensor = torch.zeros(1, 3, 224, 224).to(self.device, dtype=torch.float16)
155
+ uncond_clip_embeds = self.image_encoder(
156
+ zero_tensor, output_hidden_states=True
157
+ ).hidden_states[-2]
158
+ unconditional_embeds = self.image_proj_model(uncond_clip_embeds)
159
+
160
+ return conditional_embeds, unconditional_embeds
161
+
162
+
163
+ # ===== Model Loading Functions =====
164
+
165
+ @torch.inference_mode()
166
+ def load_stable_diffusion_pipeline(device: str = "cuda") -> StableDiffusionPipeline:
167
+ # Model paths
168
+ base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
169
+ vae_model_path = "stabilityai/sd-vae-ft-mse"
170
+
171
+ # Configure DDIM scheduler for high-quality sampling
172
+ noise_scheduler = DDIMScheduler(
173
+ num_train_timesteps=1000,
174
+ beta_start=0.00085,
175
+ beta_end=0.012,
176
+ beta_schedule="scaled_linear",
177
+ clip_sample=False,
178
+ set_alpha_to_one=False,
179
+ steps_offset=1,
180
+ )
181
+
182
+ # Load VAE separately for better quality
183
+ vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
184
+
185
+ # Create Stable Diffusion pipeline
186
+ pipeline = StableDiffusionPipeline.from_pretrained(
187
+ base_model_path,
188
+ torch_dtype=torch.float16,
189
+ scheduler=noise_scheduler,
190
+ vae=vae,
191
+ feature_extractor=None, # Disable the feature extractor and safety checker for faster inference
192
+ safety_checker=None,
193
+ )
194
+
195
+ return pipeline
196
+
197
+
198
+ @torch.inference_mode()
199
+ def load_ip_adapter_model(device: str = "cuda", sd_only: bool = False) -> IPAdapterPlus:
200
+ # Model and checkpoint paths
201
+ base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
202
+ vae_model_path = "stabilityai/sd-vae-ft-mse"
203
+ image_encoder_path = "./downloads/models/image_encoder"
204
+ ip_checkpoint_path = "./downloads/models/ip-adapter-plus_sd15.bin"
205
+
206
+ # Configure DDIM scheduler
207
+ noise_scheduler = DDIMScheduler(
208
+ num_train_timesteps=1000,
209
+ beta_start=0.00085,
210
+ beta_end=0.012,
211
+ beta_schedule="scaled_linear",
212
+ clip_sample=False,
213
+ set_alpha_to_one=False,
214
+ steps_offset=1,
215
+ )
216
+
217
+ # Load high-quality VAE
218
+ vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
219
+
220
+ # Create base Stable Diffusion pipeline
221
+ pipeline = StableDiffusionPipeline.from_pretrained(
222
+ base_model_path,
223
+ torch_dtype=torch.float16,
224
+ scheduler=noise_scheduler,
225
+ vae=vae,
226
+ feature_extractor=None,
227
+ safety_checker=None,
228
+ )
229
+
230
+ if sd_only:
231
+ return pipeline
232
+
233
+ # Initialize IP-Adapter with 16 tokens for better image conditioning
234
+ ip_model = IPAdapterPlus(
235
+ pipeline,
236
+ image_encoder_path,
237
+ ip_checkpoint_path,
238
+ device,
239
+ num_tokens=16
240
+ )
241
+
242
+ # Enhance the model with our improved get_image_embeds method
243
+ setattr(ip_model.__class__, "get_image_embeds", _enhanced_get_image_embeds)
244
+
245
+ return ip_model
246
+
247
+
248
+ def load_ip_adapter_xl_model(device: str = "cuda") -> IPAdapterPlusXL:
249
+ base_model_path = "SG161222/RealVisXL_V1.0"
250
+ image_encoder_path = "./downloads/models/image_encoder"
251
+ ip_ckpt = "./downloads/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin"
252
+
253
+ pipe = StableDiffusionXLPipeline.from_pretrained(
254
+ base_model_path,
255
+ torch_dtype=torch.float16,
256
+ add_watermarker=False,
257
+ )
258
+ ip_model = IPAdapterPlusXL(pipe, image_encoder_path, ip_ckpt, device, num_tokens=16)
259
+
260
+ return ip_model
261
+
262
+ def load_ipadapter(version: str = "sd15", device: str = "cuda") -> IPAdapterPlus | IPAdapterPlusXL:
263
+ if version == "sd15":
264
+ return load_ip_adapter_model(device)
265
+ elif version == "sdxl":
266
+ return load_ip_adapter_xl_model(device)
267
+ else:
268
+ raise ValueError(f"Invalid version: {version}")
269
+
270
+
271
+ # ===== Image Generation Functions =====
272
+
273
+ @torch.inference_mode()
274
+ def generate_images_from_clip_embeddings(ip_model: IPAdapterPlus,
275
+ clip_embeddings: torch.Tensor,
276
+ num_samples: int = 4,
277
+ num_inference_steps: int = 50,
278
+ seed: Optional[int] = 42) -> List[Image.Image]:
279
+ """Generate images from CLIP embeddings using IP-Adapter.
280
+ clip_embeddings is (batch, seq_len, embed_dim)
281
+ """
282
+ # Ensure embeddings have correct shape and dtype
283
+ if clip_embeddings.ndim == 2:
284
+ clip_embeddings = clip_embeddings.unsqueeze(0)
285
+
286
+ if clip_embeddings.ndim != 3:
287
+ raise ValueError(f"Expected 3D embeddings (batch, seq, dim), got {clip_embeddings.shape}")
288
+
289
+ # Move to appropriate device and dtype
290
+ clip_embeddings = clip_embeddings.half().to(ip_model.device)
291
+
292
+ # Generate images using IP-Adapter
293
+ negative_prompt = "nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"
294
+ generated_images = ip_model.generate(
295
+ clip_image_embeds=clip_embeddings,
296
+ negative_prompt=negative_prompt,
297
+ pil_image=None,
298
+ num_samples=num_samples,
299
+ num_inference_steps=num_inference_steps,
300
+ seed=seed
301
+ )
302
+
303
+ return generated_images
304
+
305
+
306
+ # ===== Legacy Function Aliases =====
307
+
308
+ # Maintain backward compatibility with existing code
309
+ image_grid = create_image_grid
310
+ extract_clip_embedding_pil = extract_clip_embeddings_from_pil
311
+ extract_clip_embedding_pil_batch = extract_clip_embeddings_from_pil_batch
312
+ extract_clip_embedding_tensor = extract_clip_embeddings_from_tensor
313
+ load_sdxl = load_stable_diffusion_pipeline
314
+ generate = generate_images_from_clip_embeddings
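Putting this module together, a minimal sketch of the intended flow (the reference image path is an assumption; the checkpoints under ./downloads must already exist):

from PIL import Image
from ipadapter_model import (load_ipadapter, extract_clip_embeddings_from_pil,
                             generate_images_from_clip_embeddings, create_image_grid)

ip_model = load_ipadapter("sd15", device="cuda")
clip_embeds = extract_clip_embeddings_from_pil(Image.open("reference.jpg"), ip_model)
images = generate_images_from_clip_embeddings(ip_model, clip_embeds,
                                               num_samples=4, num_inference_steps=50, seed=42)
create_image_grid(images, rows=2, cols=2).save("variations.png")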
requirements.txt ADDED
@@ -0,0 +1,14 @@
1
+ torch
2
+ torchvision
3
+ einops
4
+ matplotlib
5
+ opencv-python
6
+ pillow
7
+ scikit-image
8
+ omegaconf
9
+ scikit-dimension
10
+ pytorch-lightning==1.9.4
11
+ diffusers==0.33.1
12
+ transformers==4.47.0
13
+ triton==3.0.0
14
+ ncut_pytorch==2.3.0
vibe_blending.py ADDED
@@ -0,0 +1,230 @@
1
+ import numpy as np
2
+ import torch
3
+ from PIL import Image
4
+ from typing import List, Optional, Tuple
5
+ import torch.nn.functional as F
6
+ from omegaconf import OmegaConf
7
+ from ipadapter_model import generate_images_from_clip_embeddings
8
+ from ipadapter_model import load_ipadapter
9
+ from intrinsic_dim import estimate_intrinsic_dimension
10
+ from vibespace_model import VibeSpaceModel, train_vibe_space, clear_gpu_memory
11
+ from dino_correspondence import kway_cluster_per_image, match_centers_two_images, get_cluster_center_features
12
+
13
+ from extract_features import extract_dino_features, extract_clip_features, dino_image_transform, clip_image_transform
14
+ import logging
15
+ import gradio as gr
16
+
17
+
18
+
19
+ DEFAULT_CONFIG_PATH = "./config.yaml"
20
+ def load_config(config_path: str):
21
+ cfg_base = OmegaConf.load(DEFAULT_CONFIG_PATH)
22
+ cfg = OmegaConf.load(config_path)
23
+ cfg_base.update(cfg)
24
+ return cfg_base
25
+
26
+
27
+ def run_vibe_blend_safe(image1, image2, extra_images, negative_images, config_path, interpolation_weights: List[float], n_clusters: int = 25):
28
+ success = False
29
+ while not success:
30
+ try:
31
+ model, trainer = run_vibe_space_training(
32
+ positive_images=[image1, image2, *extra_images],
33
+ negative_images=negative_images,
34
+ config_path=config_path,
35
+ )
36
+ success = True
37
+ except Exception as e:
38
+ logging.error(f"Error training model: {e}")
39
+ torch.cuda.empty_cache()
40
+ continue
41
+
42
+ success = False
43
+ while not success:
44
+ try:
45
+ blended_images = generate_blend_images(
46
+ image1,
47
+ image2,
48
+ model,
49
+ interpolation_weights,
50
+ n_clusters=n_clusters,
51
+ )
52
+ success = True
53
+ except Exception as e:
54
+ logging.error(f"Error generating images: {e}")
55
+ torch.cuda.empty_cache()
56
+ continue
57
+
58
+ return blended_images
59
+
60
+
61
+ def run_vibe_blend_not_safe(image1, image2, extra_images, negative_images, config_path, interpolation_weights: List[float], n_clusters: int = 20):
62
+
63
+ model, trainer = run_vibe_space_training(
64
+ positive_images=[image1, image2, *extra_images],
65
+ negative_images=negative_images,
66
+ config_path=config_path,
67
+ )
68
+ blended_images = generate_blend_images(
69
+ image1,
70
+ image2,
71
+ model,
72
+ interpolation_weights,
73
+ n_clusters=n_clusters,
74
+ )
75
+ return blended_images
76
+
77
+
78
+ def run_vibe_space_training(positive_images: List[Image.Image],
79
+ negative_images: List[Image.Image],
80
+ config_path: str = DEFAULT_CONFIG_PATH) -> Tuple[VibeSpaceModel, object]:
81
+ """
82
+ Train a Vibe Space compression model from input images.
83
+
84
+ This function extracts DINO and CLIP features from the input images,
85
+ estimates the intrinsic dimensionality of the feature space, and trains
86
+ a neural compression model to learn a meaningful embedding space.
87
+
88
+ Args:
89
+ positive_images: List of PIL Images that define the target vibe
+ negative_images: List of PIL Images to steer away from (may be empty or None)
+ config_path: Path to the training configuration YAML
90
+ """
91
+ # Load and configure training parameters
92
+ config = load_config(config_path)
93
+ positive_images = [img for img in positive_images if img is not None]
94
+ negative_images = [img for img in negative_images or [] if img is not None]
95
+ if len(positive_images) == 0:
96
+ raise ValueError("No valid positive images provided for Vibe Space training")
97
+ has_negative_images = len(negative_images) > 0
98
+
99
+ # Transform images for feature extraction
100
+ dino_input_images = torch.stack([dino_image_transform(image) for image in positive_images])
101
+ clip_input_images = torch.stack([clip_image_transform(image) for image in positive_images])
102
+ if has_negative_images:
103
+ negative_dino_input_images = torch.stack([dino_image_transform(image) for image in negative_images])
104
+ else:
105
+ negative_dino_input_images = None
106
+
107
+ # Extract features using pre-trained models
108
+ dino_image_embeds = extract_dino_features(dino_input_images)
109
+ clip_image_embeds = extract_clip_features(clip_input_images)
110
+ if has_negative_images:
111
+ negative_dino_embeds = extract_dino_features(negative_dino_input_images)
112
+ else:
113
+ negative_dino_embeds = None
114
+
115
+ # Determine intrinsic dimensionality
116
+ flattened_features = dino_image_embeds.flatten(end_dim=-2)
117
+ estimated_dim = estimate_intrinsic_dimension(flattened_features)
118
+ hidden_dim = int(estimated_dim)
119
+ config.vibe_dim = hidden_dim
120
+
121
+ if len(positive_images) > 2:
122
+ # increase training steps for extra images
123
+ config.steps = config.steps * 2
124
+
125
+ # Create and train model
126
+ model = VibeSpaceModel(config, enable_gradio_progress=True)
127
+ trainer = train_vibe_space(
128
+ model,
129
+ config,
130
+ dino_image_embeds,
131
+ clip_image_embeds,
132
+ negative_dino_embeds,
133
+ )
134
+
135
+ return model, trainer
136
+
137
+
138
+ def _compute_direction_from_two_images(image_embeds: torch.Tensor,
139
+ eigenvectors: torch.Tensor | List[torch.Tensor],
140
+ a_to_b_mapping: np.ndarray,
141
+ use_unit_norm: bool = False) -> torch.Tensor:
142
+
143
+ # Compute cluster centers
144
+ a_center_features = get_cluster_center_features(
145
+ image_embeds[0], eigenvectors[0].argmax(-1).cpu(), eigenvectors[0].shape[-1])
146
+ b_center_features = get_cluster_center_features(
147
+ image_embeds[1], eigenvectors[1].argmax(-1).cpu(), eigenvectors[1].shape[-1])
148
+
149
+ # Compute direction vectors
150
+ direction_vectors = []
151
+ for i_a, i_b in enumerate(a_to_b_mapping):
152
+ direction = b_center_features[i_b] - a_center_features[i_a]
153
+ if use_unit_norm:
154
+ direction = F.normalize(direction, dim=-1)
155
+ direction_vectors.append(direction)
156
+ direction_vectors = torch.stack(direction_vectors)
157
+
158
+
159
+ # Apply direction based on cluster assignments
160
+ cluster_labels = eigenvectors[0].argmax(-1).cpu()
161
+ direction_field = torch.zeros_like(image_embeds[0])
162
+
163
+ for i_cluster in range(eigenvectors[0].shape[-1]):
164
+ cluster_mask = cluster_labels == i_cluster
165
+ if cluster_mask.sum() > 0:
166
+ direction_field[cluster_mask] = direction_vectors[i_cluster]
167
+
168
+ return direction_field
169
+
170
+
171
+ def generate_blend_images(image1: Image.Image,
172
+ image2: Image.Image,
173
+ model: VibeSpaceModel,
174
+ interpolation_weights: List[float],
175
+ n_clusters: int = 20,
176
+ seed: Optional[int] = None,
177
+ ) -> List[Image.Image]:
178
+ """
179
+ Interpolate between two images using the trained compression model.
180
+
181
+ Args:
182
+ image1, image2: Input PIL Images
183
+ model: Trained compression model
184
+ interpolation_weights: Weights for interpolation
185
+ n_clusters: Number of clusters for correspondence matching
186
+ seed: Random seed for generation
187
+
188
+ Returns:
189
+ List[Image.Image]: Generated interpolated images
190
+ """
191
+ clear_gpu_memory()
192
+
193
+ # Prepare images and extract features
194
+ images = torch.stack([dino_image_transform(img) for img in [image1, image2]])
195
+ dino_image_embeds = extract_dino_features(images)
196
+ compressed_image_embeds = model.encoder(dino_image_embeds)
197
+
198
+ cluster_eigenvectors = kway_cluster_per_image(dino_image_embeds, n_clusters=n_clusters, gamma=None)
199
+ a_to_b_mapping = match_centers_two_images(
200
+ dino_image_embeds[0], dino_image_embeds[1],
201
+ cluster_eigenvectors[0], cluster_eigenvectors[1],
202
+ match_method='hungarian'
203
+ )
204
+ direction_field = _compute_direction_from_two_images(
205
+ compressed_image_embeds, cluster_eigenvectors, a_to_b_mapping, use_unit_norm=False
206
+ )
207
+
208
+ # Generate interpolated images
209
+ ip_model = load_ipadapter()
210
+
211
+ progress_tracker = gr.Progress()
212
+ generated_images = []
213
+ for i, weight in enumerate(interpolation_weights):
214
+ progress_tracker(i / len(interpolation_weights), desc=f"Generating images, α = {weight:.2f}")
215
+ interpolated_embedding = compressed_image_embeds[0] + direction_field * weight
216
+ decompressed_embedding = model.decoder(interpolated_embedding)
217
+
218
+ batch_images = generate_images_from_clip_embeddings(
219
+ ip_model, decompressed_embedding, num_samples=1, seed=seed
220
+ )
221
+ if np.all(np.array(batch_images[0]) == 0):
222
+ raise ValueError("Generated image is all black")
223
+ generated_images.extend(batch_images)
224
+
225
+ # Clean up
226
+ del ip_model
227
+ clear_gpu_memory()
228
+
229
+ return generated_images
230
+
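A minimal sketch of running the blending pipeline end to end; the image paths are assumptions, ./config.yaml is the module's default config, and generate_blend_images reports progress through gr.Progress, so this is meant to run from the Gradio app:

from PIL import Image
from vibe_blending import run_vibe_blend_not_safe

image_a = Image.open("images/a.jpg").convert("RGB")
image_b = Image.open("images/b.jpg").convert("RGB")
blends = run_vibe_blend_not_safe(
    image_a, image_b,
    extra_images=[], negative_images=[],
    config_path="./config.yaml",
    interpolation_weights=[0.0, 0.25, 0.5, 0.75, 1.0],
    n_clusters=20,
)
for i, img in enumerate(blends):
    img.save(f"blend_{i}.png")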
vibespace_model.py ADDED
@@ -0,0 +1,504 @@
1
+ """
2
+ Neural Compression Model for Feature Space Learning
3
+
4
+ This module implements a compression model that learns to compress and decompress
5
+ image features while preserving their geometric and semantic properties using
6
+ normalized cuts (NCut).
7
+ """
8
+
9
+ import gc
10
+ from collections import defaultdict
11
+ from typing import List, Optional, Tuple
12
+
13
+ import torch
14
+ import torch.nn as nn
15
+ import torch.nn.functional as F
16
+ import pytorch_lightning as pl
17
+ from einops import rearrange
18
+ from omegaconf import DictConfig
19
+ import gradio as gr
20
+
21
+ from ncut_pytorch.ncuts.ncut_nystrom import _plain_ncut
22
+ from ncut_pytorch.utils.math import rbf_affinity
23
+
24
+
25
+ def compute_ncut_eigenvectors(features: torch.Tensor, n_eig: int) -> Tuple[torch.Tensor, torch.Tensor]:
26
+ gamma = features.var(0).sum().item()
27
+ affinity_matrix = rbf_affinity(features, gamma=gamma)
28
+ eigenvectors, eigenvalues = _plain_ncut(affinity_matrix, n_eig)
29
+ return eigenvectors, eigenvalues
30
+
31
+
32
+ # ===== Neural Network Components =====
33
+
34
+ class MultiLayerPerceptron(nn.Module):
35
+
36
+ def __init__(self, input_dim: int, output_dim: int, num_layers: int = 4, hidden_dim: int = 4096):
37
+ super().__init__()
38
+
39
+ layers = [nn.Linear(input_dim, hidden_dim), nn.GELU()]
40
+
41
+ # Add hidden layers
42
+ for _ in range(num_layers):
43
+ layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.GELU()])
44
+
45
+ # Output layer
46
+ layers.append(nn.Linear(hidden_dim, output_dim))
47
+
48
+ self.mlp = nn.Sequential(*layers)
49
+
50
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
51
+ return self.mlp(x)
52
+
53
+ class SpatialPoolingAvgPool(nn.Module):
54
+ """
55
+ AvgPool layer for spatial pooling of feature maps with support for sequence inputs.
56
+
57
+ Handles inputs with CLS tokens and reshapes appropriately for 2D convolution.
58
+ """
59
+ def __init__(self, downsample_factor: int = 2):
60
+ super().__init__()
61
+ self.downsample_factor = downsample_factor
62
+ self.avg_pool = nn.AvgPool2d(downsample_factor)
63
+
64
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
65
+ """
66
+ Forward pass supporting both (batch, seq_len, channels) and (seq_len, channels) inputs.
67
+ """
68
+ # Handle input shape variations
69
+ added_batch_dim = False
70
+ if x.dim() == 2:
71
+ x = x.unsqueeze(0)
72
+ added_batch_dim = True
73
+ elif x.dim() != 3:
74
+ raise ValueError(f"Expected input shape (B, L, C) or (L, C), got {x.shape}")
75
+
76
+ batch_size, seq_len, channels = x.shape
77
+
78
+ if seq_len < 2:
79
+ raise ValueError("Sequence length must be at least 2 (1 CLS token + 1 patch)")
80
+
81
+ # Validate that seq_len-1 is a perfect square (for spatial arrangement)
82
+ spatial_size = int(round((seq_len - 1) ** 0.5))
83
+ if spatial_size * spatial_size != (seq_len - 1):
84
+ raise ValueError(f"seq_len-1 must be perfect square. Got {seq_len-1}")
85
+
86
+ # Separate CLS token and spatial features
87
+ cls_tokens = x[:, :1, :] # (B, 1, C)
88
+ spatial_features = x[:, 1:, :] # (B, H*W, C)
89
+
90
+ # Reshape patch tokens to a 2D grid for average pooling
91
+ spatial_2d = rearrange(
92
+ spatial_features, 'b (h w) c -> b c h w',
93
+ h=spatial_size, w=spatial_size
94
+ )
95
+
96
+ # Apply pooling
97
+ pooled_features = self.avg_pool(spatial_2d)
98
+
99
+ # Reshape back to sequence format
100
+ pooled_sequence = rearrange(pooled_features, 'b c h w -> b (h w) c')
101
+
102
+ # Concatenate CLS token back
103
+ output = torch.cat([cls_tokens, pooled_sequence], dim=1)
104
+
105
+ # Remove batch dimension if it was added
106
+ if added_batch_dim:
107
+ output = output.squeeze(0)
108
+
109
+ return output
110
+
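# --- Editorial sketch: the shape contract of SpatialPoolingAvgPool (illustrative) ---
# A CLS token plus a 16x16 patch grid, pooled by a factor of 2, becomes CLS plus 8x8.
def _sketch_pooling_shapes() -> torch.Size:
    pool = SpatialPoolingAvgPool(downsample_factor=2)
    tokens = torch.randn(4, 1 + 16 * 16, 768)   # (batch, 1 + H*W, channels)
    pooled = pool(tokens)
    assert pooled.shape == (4, 1 + 8 * 8, 768)  # the CLS token is passed through unpooled
    return pooled.shape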
111
+ class MLPWithSpatialPooling(nn.Module):
112
+ def __init__(self, input_dim: int, output_dim: int, num_layers: int = 4,
113
+ hidden_dim: int = 4096, downsample_factor: int = 2):
114
+ super().__init__()
115
+
116
+ self.pooling = SpatialPoolingAvgPool(downsample_factor)
117
+
118
+ layers = [nn.Linear(input_dim, hidden_dim), nn.GELU()]
119
+
120
+ # Add hidden layers
121
+ for _ in range(num_layers):
122
+ layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.GELU()])
123
+
124
+ # Output layer
125
+ layers.append(nn.Linear(hidden_dim, output_dim))
126
+
127
+ self.network = nn.Sequential(*layers)
128
+
129
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
130
+ x = self.pooling(x)
131
+ return self.network(x)
132
+
133
+
134
+ # ===== Main Compression Model =====
135
+
136
+ class VibeSpaceModel(pl.LightningModule):
137
+ """
138
+ Neural compression model for learning compressed feature representations.
139
+
140
+ This model compresses input features to a lower-dimensional "vibe space" and
141
+ then decompresses them back, while preserving geometric and semantic properties
142
+ through various loss functions including NCut-based losses.
143
+ """
144
+
145
+ def __init__(self, config: DictConfig, enable_gradio_progress: bool = False, downsample_factor: int = 2):
146
+ super().__init__()
147
+
148
+ self.config = config
149
+ self.downsample_factor = downsample_factor
150
+
151
+ self.encoder = MultiLayerPerceptron(
152
+ config.in_dim, config.vibe_dim, config.n_layer, config.latent_dim
153
+ )
154
+
155
+ self.decoder = MLPWithSpatialPooling(
156
+ config.vibe_dim, config.out_dim, config.n_layer,
157
+ config.latent_dim, self.downsample_factor
158
+ )
159
+
160
+ self.loss_history = defaultdict(list)
161
+ self.enable_gradio_progress = enable_gradio_progress
162
+ if enable_gradio_progress:
163
+ self.progress_tracker = gr.Progress()
164
+
165
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
166
+ compressed = self.encoder(x)
167
+ reconstructed = self.decoder(compressed)
168
+ return reconstructed
169
+
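# Editorial note: because the decoder starts with SpatialPoolingAvgPool, the
# reconstruction has fewer patch tokens than the input (by downsample_factor^2),
# so the targets compared in the reconstruction loss are expected at the pooled
# resolution.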
170
+ def training_step(self, batch, batch_idx):
171
+ # Update progress bar if using Gradio
172
+ if (self.enable_gradio_progress and
173
+ self.trainer.global_step % 10 == 0 and
174
+ self.trainer.global_step > 0 and
175
+ self.loss_history['recon']):
176
+
177
+ progress = self.trainer.global_step / self.config.steps
178
+ recent_loss = self.loss_history['recon'][-1]
179
+ self.progress_tracker(progress, desc=f"Training Vibe Space, loss = {recent_loss:.4f}")
180
+
181
+ positive_features, negative_features, target_features, negative_mask = batch
182
+ negative_mask = negative_mask.bool()
183
+ has_negatives = bool(negative_mask.any().item())
184
+
185
+ if has_negatives:
186
+ if bool(negative_mask.all().item()):
187
+ batch_negative_features = negative_features
188
+ else:
189
+ batch_negative_features = negative_features[negative_mask]
190
+ else:
191
+ batch_negative_features = None
192
+
193
+ compressed_features = self.encoder(positive_features)
194
+ reconstructed_features = self.decoder(compressed_features)
195
+
196
+
197
+ total_loss = self._compute_total_loss(
198
+ positive_features,
199
+ batch_negative_features,
200
+ target_features,
201
+ compressed_features,
202
+ reconstructed_features,
203
+ )
204
+
205
+ self.log("loss/total", total_loss, prog_bar=True)
206
+ return total_loss
207
+
208
+ def _compute_ncut_eigenvectors(self, features: torch.Tensor) -> torch.Tensor:
209
+ """Compute NCut eigenvectors for features."""
210
+ # Accept inputs shaped either (batch, length, channels) or (length, channels)
211
+ flattened_features = features
212
+ if flattened_features.dim() >= 3:
213
+ flattened_features = flattened_features.flatten(0, 1)
214
+ elif flattened_features.dim() == 1:
215
+ # rbf_affinity expects at least 2D; treat single vector as one sample with channels
216
+ flattened_features = flattened_features.unsqueeze(0)
217
+
218
+ if flattened_features.numel() > 0 and flattened_features.dim() == 2:
219
+ eigenvectors, _ = compute_ncut_eigenvectors(flattened_features, self.config.n_eig)
220
+ return eigenvectors
221
+ else:
222
+ # Return zero tensor if no features
223
+ device = features.device if isinstance(features, torch.Tensor) else 'cpu'
224
+ return torch.zeros((1, self.config.n_eig), device=device)
225
+
226
+ def _compute_multiscale_similarity(self, eigenvectors: torch.Tensor,
227
+ start_n_eig: int = 4, step_mult: int = 2) -> torch.Tensor:
228
+ """Compute multi-scale similarity matrix from eigenvectors.
229
+ eigenvectors is (batch*length, n_eig)
230
+ """
231
+ total_similarity = 0.0
232
+ num_scales = 0
233
+ max_available = eigenvectors.shape[1]
234
+ current_n_eig = min(start_n_eig, max_available)
235
+
236
+ if self.config.single_scale_flag:
237
+ current_n_eig = max_available
238
+
239
+ while current_n_eig <= max_available:
240
+ eigvec_subset = eigenvectors[:, :current_n_eig]
241
+ eigvec_normalized = F.normalize(eigvec_subset, dim=-1)
242
+
243
+ total_similarity += eigvec_normalized @ eigvec_normalized.T
244
+
245
+ num_scales += 1
246
+ current_n_eig *= step_mult
247
+
248
+ return total_similarity / num_scales if num_scales > 0 else total_similarity
249
+
250
+ def _compute_flag_decoder_loss(
251
+ self,
252
+ compressed_features: torch.Tensor,
253
+ reconstructed_features: torch.Tensor,
254
+ negative_input_features: Optional[torch.Tensor] = None,
255
+ ) -> torch.Tensor:
256
+ """
257
+ compressed_features is (batch, length, channels)
258
+ reconstructed_features is (batch, length, channels)
259
+ """
260
+ pooled_compressed = self.decoder.pooling(compressed_features)
261
+ pooled_compressed = pooled_compressed.flatten(0, 1)
262
+ reconstructed_features = reconstructed_features.flatten(0, 1)
263
+
264
+ has_negative = (
265
+ negative_input_features is not None and negative_input_features.numel() > 0
266
+ )
267
+
268
+ # sample points from the compressed feature space (only when no negatives available)
269
+ dim_mins = pooled_compressed.min(0).values
270
+ dim_maxs = pooled_compressed.max(0).values
271
+ dim_mins -= 0.25 * (dim_maxs - dim_mins) * torch.rand_like(dim_mins)
272
+ dim_maxs += 0.25 * (dim_maxs - dim_mins) * torch.rand_like(dim_maxs)
273
+
274
+ num_samples = 0 if has_negative else self.config.n_negative_sample
275
+ sample_points = torch.rand(num_samples, pooled_compressed.shape[1], device=pooled_compressed.device)
276
+ sample_points = sample_points * (dim_maxs - dim_mins) + dim_mins
277
+
278
+ # reconstruct the sample points
279
+ sample_reconstructed = self.decoder.network(sample_points)
280
+
281
+ all_compressed = torch.cat([pooled_compressed, sample_points], dim=0)
282
+ all_reconstructed = torch.cat([reconstructed_features, sample_reconstructed], dim=0)
283
+
284
+ # flag loss on the sample points
285
+ similarity = all_compressed @ all_compressed.T
286
+ eigenvectors_pos, _ = compute_ncut_eigenvectors(all_reconstructed, self.config.n_eig)
287
+
288
+ if has_negative and self.config.get('do_decoder_negative_flag', False):
289
+ negative_compressed = self.encoder(negative_input_features)
290
+ negative_reconstructed = self.decoder(negative_compressed)
291
+ negative_reconstructed = negative_reconstructed.flatten(0, 1)
292
+
293
+ neg_eigenvectors, _ = compute_ncut_eigenvectors(negative_reconstructed, self.config.n_eig)
294
+
295
+ max_available = min(eigenvectors_pos.shape[1], neg_eigenvectors.shape[1])
296
+ if max_available == 0:
297
+ eig_similarity = self._compute_multiscale_similarity(eigenvectors_pos)
298
+ else:
299
+ if self.config.single_scale_flag:
300
+ current_n_eig = max_available
301
+ else:
302
+ current_n_eig = min(self.config.get('start_n_eig', 4), max_available)
303
+ current_n_eig = max(current_n_eig, 1)
304
+
305
+ total_filtered_similarity = similarity.new_zeros(similarity.shape)
306
+ num_scales = 0
307
+ beta = self.config.get('decoder_negative_beta', self.config.get('negative_beta', 1.0))
308
+ step_mult = self.config.get('step_mult', 2)
309
+
310
+ while current_n_eig <= max_available:
311
+ P = eigenvectors_pos[:, :current_n_eig]
312
+ N = neg_eigenvectors[:, :current_n_eig]
313
+
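# Editorial note: N_norm has unit-norm columns, so N_norm @ (N_norm.T @ P) is
# (approximately) the projection of the positive eigenvectors onto the span of the
# negative eigenvectors; subtracting beta times it suppresses directions shared
# with the negative set before building the similarity matrix.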
314
+ N_norm = F.normalize(N, dim=0)
315
+ projection = torch.matmul(N_norm.T, P)
316
+ P_filtered = P - beta * torch.matmul(N_norm, projection)
317
+
318
+ P_filtered_norm = F.normalize(P_filtered, dim=-1)
319
+ total_filtered_similarity += P_filtered_norm @ P_filtered_norm.T
320
+
321
+ num_scales += 1
322
+ current_n_eig *= step_mult
323
+
324
+ if num_scales > 0:
325
+ eig_similarity = total_filtered_similarity / num_scales
326
+ else:
327
+ eig_similarity = self._compute_multiscale_similarity(eigenvectors_pos)
328
+ else:
329
+ eig_similarity = self._compute_multiscale_similarity(eigenvectors_pos)
330
+
331
+ loss = F.smooth_l1_loss(eig_similarity, similarity)
332
+ return loss
333
+
334
+ def _compute_flag_encoder_loss(self, input_features: torch.Tensor, compressed_features: torch.Tensor) -> torch.Tensor:
335
+ """
336
+ input_features is (batch, length, channels)
337
+ compressed_features is (batch, length, channels)
338
+ """
339
+ sample_indices = torch.randperm(input_features.shape[0] * input_features.shape[1])[:self.config.n_sample_eigsolve]
340
+ gt_eigenvectors = self._compute_ncut_eigenvectors(input_features.flatten(0, 1)[sample_indices])
341
+ gt_similarity = self._compute_multiscale_similarity(gt_eigenvectors)
342
+ flattened_compressed = compressed_features.flatten(0, 1)[sample_indices]
343
+ pred_similarity = flattened_compressed @ flattened_compressed.T
344
+ loss = F.smooth_l1_loss(gt_similarity, pred_similarity)
345
+ return loss
346
+
347
+ def _compute_total_loss(
348
+ self,
349
+ positive_features: torch.Tensor,
350
+ negative_features: Optional[torch.Tensor],
351
+ target_features: torch.Tensor,
352
+ compressed_features: torch.Tensor,
353
+ reconstructed_features: torch.Tensor,
354
+ ) -> torch.Tensor:
355
+ """
356
+ positive_features is (batch, length, channels)
357
+ target_features is (batch, length, channels)
358
+ compressed_features is (batch, length, channels)
359
+ reconstructed_features is (batch, length, channels)
360
+ """
361
+ total_loss = positive_features.new_tensor(0.0)
362
+ has_negative_features = (
363
+ negative_features is not None and negative_features.numel() > 0
364
+ )
365
+ beta = self.config.get('negative_beta', 1.0)
366
+
367
+ # Flag encoder loss - guide the structure from the input features to the compressed features
368
+ if self.config.flag_encoder_loss > 0 and has_negative_features:
369
+ gt_eigenvectors_pos = self._compute_ncut_eigenvectors(positive_features)
370
+ gt_eigenvectors_neg = self._compute_ncut_eigenvectors(negative_features)
371
+
372
+ total_filtered_similarity = 0.0
373
+ num_scales = 0
374
+ max_available = min(gt_eigenvectors_pos.shape[1], gt_eigenvectors_neg.shape[1])
375
+
376
+ if max_available == 0:
377
+ gt_similarity = self._compute_multiscale_similarity(gt_eigenvectors_pos)
378
+ else:
379
+ if self.config.single_scale_flag:
380
+ current_n_eig = max_available
381
+ else:
382
+ current_n_eig = min(self.config.get('start_n_eig', 4), max_available)
383
+ current_n_eig = max(current_n_eig, 1)
384
+
385
+ step_mult = self.config.get('step_mult', 2)
386
+ while current_n_eig <= max_available and current_n_eig > 0:
387
+ P = gt_eigenvectors_pos[:, :current_n_eig]
388
+ N = gt_eigenvectors_neg[:, :current_n_eig]
389
+
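# Editorial note: same beta-weighted projection filtering as in _compute_flag_decoder_loss,
# here applied to the ground-truth eigenvectors of the raw positive/negative features.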
390
+ N_norm = F.normalize(N, dim=0)
391
+ projection = torch.matmul(N_norm.T, P)
392
+ P_filtered = P - beta * torch.matmul(N_norm, projection)
393
+
394
+ P_filtered_norm = F.normalize(P_filtered, dim=-1)
395
+ total_filtered_similarity += P_filtered_norm @ P_filtered_norm.T
396
+
397
+ num_scales += 1
398
+ current_n_eig *= step_mult
399
+
400
+ if num_scales > 0:
401
+ gt_similarity = total_filtered_similarity / num_scales
402
+ else:
403
+ gt_similarity = self._compute_multiscale_similarity(gt_eigenvectors_pos)
404
+ flattened_compressed = compressed_features.flatten(0, 1)
405
+ pred_similarity = flattened_compressed @ flattened_compressed.T
406
+
407
+ flag_encoder_loss = F.smooth_l1_loss(gt_similarity, pred_similarity)
408
+ self.log("loss/flag_encoder", flag_encoder_loss, prog_bar=True)
409
+ total_loss += flag_encoder_loss * self.config.flag_encoder_loss
410
+ self.loss_history['flag_encoder'].append(flag_encoder_loss.item())
411
+ elif self.config.flag_encoder_loss > 0:
412
+ flag_encoder_loss = self._compute_flag_encoder_loss(positive_features, compressed_features)
413
+ self.log("loss/flag_encoder", flag_encoder_loss, prog_bar=True)
414
+ total_loss += flag_encoder_loss * self.config.flag_encoder_loss
415
+ self.loss_history['flag_encoder'].append(flag_encoder_loss.item())
416
+
417
+ # Flag decoder loss - guide the structure from compressed to decoded features
418
+ if self.config.flag_decoder_loss > 0:
419
+ if self.trainer.global_step >= 500: # warmup period
420
+ flag_decoder_loss = self._compute_flag_decoder_loss(
421
+ compressed_features,
422
+ reconstructed_features,
423
+ negative_features,
424
+ )
425
+ self.log("loss/flag_decoder", flag_decoder_loss, prog_bar=True)
426
+ total_loss += flag_decoder_loss * self.config.flag_decoder_loss
427
+ self.loss_history['flag_decoder'].append(flag_decoder_loss.item())
428
+
429
+ # Reconstruction loss
430
+ if self.config.recon_loss > 0:
431
+ recon_loss = F.smooth_l1_loss(target_features, reconstructed_features)
432
+ self.log("loss/recon", recon_loss, prog_bar=True)
433
+ total_loss += recon_loss * self.config.recon_loss
434
+ self.loss_history['recon'].append(recon_loss.item())
435
+
436
+ return total_loss
437
+
438
+ def configure_optimizers(self):
439
+ return torch.optim.NAdam(self.parameters(), lr=self.config.lr)
440
+
441
+
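# --- Editorial sketch: an example config for VibeSpaceModel (field names collected from
# this module; the values are illustrative, not recommended settings). Optional fields
# read via config.get(): start_n_eig, step_mult, negative_beta, decoder_negative_beta,
# do_decoder_negative_flag. ---
def _sketch_model_roundtrip() -> torch.Size:
    from omegaconf import OmegaConf
    cfg = OmegaConf.create({
        "in_dim": 768, "vibe_dim": 3, "out_dim": 768,
        "n_layer": 2, "latent_dim": 512, "n_eig": 32,
        "single_scale_flag": False, "n_negative_sample": 64, "n_sample_eigsolve": 256,
        "flag_encoder_loss": 1.0, "flag_decoder_loss": 1.0, "recon_loss": 1.0,
        "steps": 1000, "lr": 1e-4,
    })
    model = VibeSpaceModel(cfg)
    x = torch.randn(2, 1 + 16 * 16, cfg.in_dim)    # CLS + 16x16 patch tokens
    y = model(x)                                   # encode -> spatially pool -> decode
    assert y.shape == (2, 1 + 8 * 8, cfg.out_dim)  # pooled by the default downsample_factor=2
    return y.shape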
442
+ # ===== Dataset and Training Utilities =====
443
+
444
+ class FeatureDataset(torch.utils.data.Dataset):
445
+
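# Editorial note: each item is a (positive, negative, target, has_negative) tuple;
# when no negative features are provided, a zero tensor shaped like the positive
# item is returned together with has_negative=False, which training_step uses to
# skip the negative-filtering branches.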
446
+ def __init__(
447
+ self,
448
+ positive_features: torch.Tensor,
449
+ target_features: torch.Tensor,
450
+ negative_features: Optional[torch.Tensor] = None,
451
+ ):
452
+ self.positive_features = positive_features
453
+ self.target_features = target_features
454
+ if negative_features is not None and negative_features.numel() > 0:
455
+ self.negative_features = negative_features
456
+ else:
457
+ self.negative_features = None
458
+
459
+ def __len__(self) -> int:
460
+ return len(self.positive_features)
461
+
462
+ def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
463
+ positive = self.positive_features[idx]
464
+ target = self.target_features[idx]
465
+
466
+ if self.negative_features is None:
467
+ negative = torch.zeros_like(positive)
468
+ has_negative = torch.tensor(False, dtype=torch.bool)
469
+ else:
470
+ neg_idx = torch.randint(0, self.negative_features.shape[0], (1,)).item()
471
+ negative = self.negative_features[neg_idx]
472
+ has_negative = torch.tensor(True, dtype=torch.bool)
473
+
474
+ return positive, negative, target, has_negative
475
+
476
+
477
+ def clear_gpu_memory():
478
+ torch.cuda.empty_cache()
479
+ torch.cuda.ipc_collect()
480
+ gc.collect()
481
+
482
+
483
+ def train_vibe_space(model: VibeSpaceModel,
484
+ config: DictConfig,
485
+ input_features: torch.Tensor,
486
+ target_features: torch.Tensor,
487
+ negative_features: Optional[torch.Tensor] = None,
488
+ devices: List[int] = [0]) -> pl.Trainer:
489
+ clear_gpu_memory()
490
+ dataset = FeatureDataset(input_features, target_features, negative_features)
491
+ dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True, num_workers=0)
492
+ trainer = pl.Trainer(
493
+ max_steps=config.steps,
494
+ gradient_clip_val=1.0,
495
+ accelerator="gpu",
496
+ devices=devices,
497
+ enable_checkpointing=False,
498
+ enable_progress_bar=True,
499
+ logger=False # Disable default logger
500
+ )
501
+
502
+ trainer.fit(model, dataloader)
503
+
504
+ return trainer
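# --- Editorial sketch: end-to-end usage under assumed shapes (illustrative only).
# Targets are expected at the decoder's pooled resolution (1 + 8*8 tokens for a 16x16
# patch grid with downsample_factor=2), and a CUDA device is required because
# train_vibe_space builds its Trainer with accelerator="gpu". ---
def _sketch_train_vibe_space() -> pl.Trainer:
    from omegaconf import OmegaConf
    cfg = OmegaConf.create({
        "in_dim": 768, "vibe_dim": 3, "out_dim": 768,
        "n_layer": 2, "latent_dim": 512, "n_eig": 32,
        "single_scale_flag": False, "n_negative_sample": 64, "n_sample_eigsolve": 256,
        "flag_encoder_loss": 1.0, "flag_decoder_loss": 1.0, "recon_loss": 1.0,
        "steps": 100, "lr": 1e-4,
    })
    model = VibeSpaceModel(cfg)
    inputs = torch.randn(32, 1 + 16 * 16, cfg.in_dim)    # e.g. ViT tokens of positive images
    targets = torch.randn(32, 1 + 8 * 8, cfg.out_dim)    # pooled-resolution reconstruction targets
    negatives = torch.randn(8, 1 + 16 * 16, cfg.in_dim)  # optional negative-image tokens
    return train_vibe_space(model, cfg, inputs, targets, negatives, devices=[0])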