devanshsrivastav committed
Commit 33edc32 · verified · 1 Parent(s): 60ed8f7

Add files using upload-large-folder tool

Files changed (50)
  1. .gitattributes +3 -35
  2. app.py +334 -0
  3. cache/.locks/models--openai--clip-vit-large-patch14/2c19f6666e0e163c7954df66cb901353fcad088e.lock +0 -0
  4. cache/.locks/models--openai--clip-vit-large-patch14/4297ea6a8d2bae1fea8f48b45e257814dcb11f69.lock +0 -0
  5. cache/.locks/models--openai--clip-vit-large-patch14/580c79c6862f31d1f9bd08dd1a415ba0d0502cd9.lock +0 -0
  6. cache/.locks/models--openai--clip-vit-large-patch14/702bb12920b291cade3706cf215c1604d2255d93.lock +0 -0
  7. cache/.locks/models--openai--clip-vit-large-patch14/76e821f1b6f0a9709293c3b6b51ed90980b3166b.lock +0 -0
  8. cache/.locks/models--openai--clip-vit-large-patch14/9bfb42aa97dcd61e89f279ccaee988bccb4fabae.lock +0 -0
  9. cache/.locks/models--openai--clip-vit-large-patch14/a2bf730a0c7debf160f7a6b50b3aaf3703e7e88ac73de7a314903141db026dcb.lock +0 -0
  10. cache/models--openai--clip-vit-large-patch14/.no_exist/32bd64288804d66eefd0ccbe215aa642df71cc41/added_tokens.json +0 -0
  11. cache/models--openai--clip-vit-large-patch14/blobs/2c19f6666e0e163c7954df66cb901353fcad088e +171 -0
  12. cache/models--openai--clip-vit-large-patch14/blobs/4297ea6a8d2bae1fea8f48b45e257814dcb11f69 +0 -0
  13. cache/models--openai--clip-vit-large-patch14/blobs/580c79c6862f31d1f9bd08dd1a415ba0d0502cd9 +0 -0
  14. cache/models--openai--clip-vit-large-patch14/blobs/702bb12920b291cade3706cf215c1604d2255d93 +34 -0
  15. cache/models--openai--clip-vit-large-patch14/blobs/76e821f1b6f0a9709293c3b6b51ed90980b3166b +0 -0
  16. cache/models--openai--clip-vit-large-patch14/blobs/9bfb42aa97dcd61e89f279ccaee988bccb4fabae +1 -0
  17. cache/models--openai--clip-vit-large-patch14/refs/main +1 -0
  18. cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/config.json +171 -0
  19. cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/merges.txt +0 -0
  20. cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/special_tokens_map.json +1 -0
  21. cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/tokenizer.json +0 -0
  22. cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/tokenizer_config.json +34 -0
  23. cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/vocab.json +0 -0
  24. generate_sd.yaml +70 -0
  25. stable_diffusion/assets/stable-samples/img2img/upscaling-in.png.REMOVED.git-id +1 -0
  26. stable_diffusion/ldm/__pycache__/util.cpython-38.pyc +0 -0
  27. stable_diffusion/ldm/modules/__pycache__/attention.cpython-38.pyc +0 -0
  28. stable_diffusion/ldm/modules/__pycache__/ema.cpython-38.pyc +0 -0
  29. stable_diffusion/ldm/modules/__pycache__/x_transformer.cpython-38.pyc +0 -0
  30. stable_diffusion/ldm/modules/attention.py +328 -0
  31. stable_diffusion/ldm/modules/diffusionmodules/__init__.py +0 -0
  32. stable_diffusion/ldm/modules/diffusionmodules/model.py +837 -0
  33. stable_diffusion/ldm/modules/diffusionmodules/openaimodel.py +1021 -0
  34. stable_diffusion/ldm/modules/diffusionmodules/util.py +321 -0
  35. stable_diffusion/ldm/modules/distributions/__init__.py +0 -0
  36. stable_diffusion/ldm/modules/ema.py +86 -0
  37. stable_diffusion/ldm/modules/encoders/__init__.py +0 -0
  38. stable_diffusion/ldm/modules/encoders/__pycache__/__init__.cpython-38.pyc +0 -0
  39. stable_diffusion/ldm/modules/encoders/__pycache__/modules.cpython-38.pyc +0 -0
  40. stable_diffusion/ldm/modules/encoders/modules.py +422 -0
  41. stable_diffusion/ldm/modules/evaluate/adm_evaluator.py +676 -0
  42. stable_diffusion/ldm/modules/evaluate/evaluate_perceptualsim.py +632 -0
  43. stable_diffusion/ldm/modules/evaluate/frechet_video_distance.py +147 -0
  44. stable_diffusion/ldm/modules/evaluate/ssim.py +124 -0
  45. stable_diffusion/ldm/modules/evaluate/torch_frechet_video_distance.py +294 -0
  46. stable_diffusion/ldm/modules/image_degradation/__init__.py +2 -0
  47. stable_diffusion/ldm/modules/image_degradation/bsrgan.py +733 -0
  48. stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py +652 -0
  49. stable_diffusion/ldm/modules/image_degradation/utils_image.py +916 -0
  50. stable_diffusion/ldm/modules/x_transformer.py +641 -0
.gitattributes CHANGED
@@ -1,35 +1,3 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ cache/** filter=lfs diff=lfs merge=lfs -text
+ weights/** filter=lfs diff=lfs merge=lfs -text
+ stable_diffusion/** filter=lfs diff=lfs merge=lfs -text
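
These replacement patterns route everything under cache/, weights/, and stable_diffusion/ through Git LFS instead of tracking file types by extension. A rough way to sanity-check what the new patterns cover, as a minimal sketch (Python's fnmatch only approximates Git's wildmatch semantics, so edge cases may differ):

```python
# Rough check of the new LFS patterns. Illustrative only: fnmatch is an
# approximation of Git's wildmatch, not an exact reimplementation.
from fnmatch import fnmatch

patterns = ["cache/**", "weights/**", "stable_diffusion/**"]
for path in [
    "cache/models--openai--clip-vit-large-patch14/refs/main",
    "weights/Van_Gogh.pth",
    "app.py",
]:
    print(path, "->", any(fnmatch(path, p) for p in patterns))
# cache/... -> True, weights/... -> True, app.py -> False
```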
app.py ADDED
@@ -0,0 +1,334 @@
+ from __future__ import annotations
+
+ import gc
+ import sys
+ from pathlib import Path
+
+ import numpy as np
+ import streamlit as st
+ import torch
+ from omegaconf import OmegaConf
+ from PIL import Image
+ from pytorch_lightning import seed_everything
+ from torch import autocast
+
+ ROOT = Path(__file__).resolve().parent
+ PARENT = ROOT.parent
+
+ if str(PARENT) not in sys.path:
+     sys.path.insert(0, str(PARENT))
+
+ STABLE_DIFFUSION_DIR = ROOT / "stable_diffusion"
+ if str(STABLE_DIFFUSION_DIR) not in sys.path:
+     sys.path.insert(0, str(STABLE_DIFFUSION_DIR))
+
+ from stable_diffusion.ldm.models.diffusion.ddim import DDIMSampler
+ from stable_diffusion.ldm.util import instantiate_from_config
+
+ WEIGHTS_DIR = ROOT / "weights"
+ CONFIG_PATH = ROOT / "generate_sd.yaml"
+
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+
+ theme_available = [
+     "Abstractionism", "Artist_Sketch", "Blossom_Season", "Bricks", "Byzantine", "Cartoon",
+     "Cold_Warm", "Color_Fantasy", "Comic_Etch", "Crayon", "Cubism", "Dadaism", "Dapple",
+     "Defoliation", "Early_Autumn", "Expressionism", "Fauvism", "French", "Glowing_Sunset",
+     "Gorgeous_Love", "Greenfield", "Impressionism", "Ink_Art", "Joy", "Liquid_Dreams",
+     "Magic_Cube", "Meta_Physics", "Meteor_Shower", "Monet", "Mosaic", "Neon_Lines", "On_Fire",
+     "Pastel", "Pencil_Drawing", "Picasso", "Pop_Art", "Red_Blue_Ink", "Rust", "Seed_Images",
+     "Sketch", "Sponge_Dabbed", "Structuralism", "Superstring", "Surrealism", "Ukiyoe",
+     "Van_Gogh", "Vibrant_Flow", "Warm_Love", "Warm_Smear", "Watercolor", "Winter",
+ ]
+
+ class_available = [
+     "Architectures", "Bears", "Birds", "Butterfly", "Cats", "Dogs", "Fishes", "Flame", "Flowers",
+     "Frogs", "Horses", "Human", "Jellyfish", "Rabbits", "Sandwiches", "Sea", "Statues", "Towers",
+     "Trees", "Waterfalls",
+ ]
+
+ if not WEIGHTS_DIR.exists():
+     raise FileNotFoundError(f"Weights directory not found: {WEIGHTS_DIR}")
+
+ MODEL_CONFIGS = {}
+
+ original_display_name = None
+ theme_model_for = {}
+ class_model_for = {}
+ other_models = set()
+
+ for pattern in ("*.pth", "*.ckpt"):
+     for ckpt in WEIGHTS_DIR.glob(pattern):
+         stem = ckpt.stem
+
+         if stem.lower() == "original":
+             display_name = "Original (no unlearning)"
+             category = "original"
+             original_display_name = display_name
+
+         elif stem in theme_available:
+             display_name = f"Style Unlearned: {stem}"
+             category = "theme"
+             theme_model_for[stem] = display_name
+
+         elif stem in class_available:
+             display_name = f"Object Unlearned: {stem}"
+             category = "class"
+             class_model_for[stem] = display_name
+
+         else:
+             display_name = stem
+             category = "other"
+             other_models.add(display_name)
+
+         MODEL_CONFIGS[display_name] = {
+             "ckpt": str(ckpt),
+             "config": str(CONFIG_PATH),
+             "category": category,
+             "raw_name": stem,
+         }
+
+ if not MODEL_CONFIGS:
+     raise RuntimeError(f"No .pth or .ckpt files found in {WEIGHTS_DIR}")
+
+ def load_model_from_config(config, ckpt_path: str, verbose: bool = False):
+     """
+     Load model from checkpoint + config, move to DEVICE, eval mode.
+     """
+     print(f"Loading model from {ckpt_path}")
+     pl_sd = torch.load(ckpt_path, map_location="cpu")
+     if "global_step" in pl_sd:
+         print(f"Global Step: {pl_sd['global_step']}")
+     sd = pl_sd["state_dict"]
+
+     model = instantiate_from_config(config.model)
+     missing, unexpected = model.load_state_dict(sd, strict=False)
+     if verbose:
+         if len(missing) > 0:
+             print("missing keys:")
+             print(missing)
+         if len(unexpected) > 0:
+             print("unexpected keys:")
+             print(unexpected)
+
+     model.to(DEVICE)
+     model.eval()
+     return model
+
+ def generate_image_single(
+     model_name: str,
+     prompt: str,
+     steps: int,
+     cfg_text: float,
+     seed: int,
+     H: int,
+     W: int,
+     ddim_eta: float,
+ ):
+     """
+     Load the selected checkpoint, generate one image for the given prompt,
+     then free all model memory (CPU + GPU).
+     """
+     model_cfg = MODEL_CONFIGS[model_name]
+     ckpt_path = model_cfg["ckpt"]
+     config_path = model_cfg["config"]
+
+     # Load config and model
+     config = OmegaConf.load(config_path)
+     model = load_model_from_config(config, ckpt_path)
+     sampler = DDIMSampler(model)
+
+     seed_everything(seed)
+
+     print(f"Prompt: {prompt}")
+
+     # Choose autocast context only for CUDA
+     if DEVICE == "cuda":
+         autocast_ctx = autocast("cuda")
+     else:
+         from contextlib import nullcontext
+         autocast_ctx = nullcontext()
+
+     with torch.no_grad():
+         with autocast_ctx:
+             try:
+                 ema_ctx = model.ema_scope()
+             except AttributeError:
+                 from contextlib import nullcontext
+                 ema_ctx = nullcontext()
+
+             with ema_ctx:
+                 uc = model.get_learned_conditioning([""])
+                 c = model.get_learned_conditioning(prompt)
+                 shape = [4, H // 8, W // 8]  # downsampling factor 8
+
+                 samples_ddim, _ = sampler.sample(
+                     S=steps,
+                     conditioning=c,
+                     batch_size=1,
+                     shape=shape,
+                     verbose=False,
+                     unconditional_guidance_scale=cfg_text,
+                     unconditional_conditioning=uc,
+                     eta=ddim_eta,
+                     x_T=None,
+                 )
+
+                 x_samples_ddim = model.decode_first_stage(samples_ddim)
+                 x_samples_ddim = torch.clamp(
+                     (x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0
+                 )
+                 x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1)
+                 assert len(x_samples_ddim) == 1
+                 x_sample = x_samples_ddim[0].numpy()
+
+     # Convert to PIL
+     x_sample = (255.0 * x_sample).round().astype(np.uint8)
+     img = Image.fromarray(x_sample)
+
+     # Free GPU + CPU memory
+     del sampler
+     del model
+     if DEVICE == "cuda":
+         torch.cuda.empty_cache()
+     gc.collect()
+
+     return img, prompt
+
+ # Streamlit UI
+
+ st.set_page_config(page_title="Unlearning Styles Demo", layout="wide")
+
+ st.title("Machine Unlearning Demo - Styles and Objects")
+
+ st.sidebar.header("Model selection")
+
+ model_family_options = []
+ if original_display_name is not None:
+     model_family_options.append("Original")
+ if theme_model_for:
+     model_family_options.append("Style Unlearned")
+ if class_model_for:
+     model_family_options.append("Object Unlearned")
+ if other_models:
+     model_family_options.append("Other")
+
+ model_family = st.sidebar.radio(
+     "Which model family?",
+     model_family_options,
+     label_visibility='hidden',
+ )
+
+ selected_model_display_name = None
+
+ if model_family == "Original":
+     st.sidebar.markdown(f"**Using Model:** \n {original_display_name}")
+     selected_model_display_name = original_display_name
+
+ elif model_family == "Style Unlearned":
+     available_theme_keys = sorted(theme_model_for.keys())
+     chosen_theme_model = st.sidebar.selectbox(
+         "Unlearned style model",
+         available_theme_keys,
+     )
+     selected_model_display_name = theme_model_for[chosen_theme_model]
+     st.sidebar.markdown(f"**Using Model:** \n {selected_model_display_name}")
+
+ elif model_family == "Object Unlearned":
+     available_class_keys = sorted(class_model_for.keys())
+     chosen_class_model = st.sidebar.selectbox(
+         "Unlearned object model",
+         available_class_keys,
+     )
+     selected_model_display_name = class_model_for[chosen_class_model]
+     st.sidebar.markdown(f"**Using Model:** \n {selected_model_display_name}")
+
+ elif model_family == "Other":
+     other_list = sorted(other_models)
+     selected_model_display_name = st.sidebar.selectbox(
+         "Other models",
+         other_list,
+     )
+
+ st.sidebar.header("Generation settings")
+ seed = st.sidebar.number_input("Random seed", value=256, step=1)
+ steps = 100
+ cfg_text = 9.0
+ H = 512
+ W = 512
+ ddim_eta = 0.0
+
+
+ prompt_mode = st.radio(
+     "Prompt mode",
+     ["Preset Style/Object", "Free Text Prompt"],
+     horizontal=True,
+ )
+
+ if prompt_mode == "Preset Style/Object":
+     st.subheader("Style")
+     theme = st.pills("Choose style", theme_available)
+
+     st.subheader("Object")
+     object_class = st.pills("Choose object", class_available)
+
+     prompt = None
+     if theme and object_class:
+         prompt = f"A {object_class} image in {theme.replace('_', ' ')} style."
+ else:
+     st.subheader("Free Text Prompt")
+     prompt = st.text_area(
+         "Enter your prompt",
+         placeholder="e.g., A beautiful sunset over mountains, digital art",
+         height=100,
+     )
+     theme = None
+     object_class = None
+
+ st.markdown("---")
+
+ if st.button("Generate"):
+     if selected_model_display_name is None:
+         st.error("Please select a model in the sidebar.")
+     elif prompt_mode == "Preset Style/Object":
+         if theme is None:
+             st.error("Please select a style.")
+         elif object_class is None:
+             st.error("Please select an object.")
+         else:
+             with st.spinner("Generating image..."):
+                 img, used_prompt = generate_image_single(
+                     model_name=selected_model_display_name,
+                     prompt=prompt,
+                     steps=int(steps),
+                     cfg_text=float(cfg_text),
+                     seed=int(seed),
+                     H=int(H),
+                     W=int(W),
+                     ddim_eta=float(ddim_eta),
+                 )
+
+             st.image(
+                 img,
+                 caption=f"Model: {selected_model_display_name} | Prompt: {used_prompt}",
+             )
+     else:  # Free Text Prompt mode
+         if not prompt or not prompt.strip():
+             st.error("Please enter a prompt.")
+         else:
+             with st.spinner("Generating image..."):
+                 img, used_prompt = generate_image_single(
+                     model_name=selected_model_display_name,
+                     prompt=prompt.strip(),
+                     steps=int(steps),
+                     cfg_text=float(cfg_text),
+                     seed=int(seed),
+                     H=int(H),
+                     W=int(W),
+                     ddim_eta=float(ddim_eta),
+                 )
+
+             st.image(
+                 img,
+                 caption=f"Model: {selected_model_display_name} | Prompt: {used_prompt}",
+             )
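
Outside the UI, the generation path can be smoke-tested directly. A minimal sketch, assuming weights/ holds at least one checkpoint; note that importing app executes its module-level Streamlit calls, which in recent Streamlit versions only log warnings when run outside `streamlit run`:

```python
# Hypothetical smoke test for generate_image_single (not part of the app).
from app import MODEL_CONFIGS, generate_image_single

name = next(iter(MODEL_CONFIGS))  # pick any discovered checkpoint
img, used_prompt = generate_image_single(
    model_name=name,
    prompt="A Dogs image in Van Gogh style.",
    steps=50,          # fewer than the UI's fixed 100 steps, for speed
    cfg_text=9.0,
    seed=256,
    H=512,
    W=512,
    ddim_eta=0.0,
)
img.save("sample.png")
print(f"{name}: {used_prompt}")
```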
cache/.locks/models--openai--clip-vit-large-patch14/2c19f6666e0e163c7954df66cb901353fcad088e.lock ADDED
File without changes
cache/.locks/models--openai--clip-vit-large-patch14/4297ea6a8d2bae1fea8f48b45e257814dcb11f69.lock ADDED
File without changes
cache/.locks/models--openai--clip-vit-large-patch14/580c79c6862f31d1f9bd08dd1a415ba0d0502cd9.lock ADDED
File without changes
cache/.locks/models--openai--clip-vit-large-patch14/702bb12920b291cade3706cf215c1604d2255d93.lock ADDED
File without changes
cache/.locks/models--openai--clip-vit-large-patch14/76e821f1b6f0a9709293c3b6b51ed90980b3166b.lock ADDED
File without changes
cache/.locks/models--openai--clip-vit-large-patch14/9bfb42aa97dcd61e89f279ccaee988bccb4fabae.lock ADDED
File without changes
cache/.locks/models--openai--clip-vit-large-patch14/a2bf730a0c7debf160f7a6b50b3aaf3703e7e88ac73de7a314903141db026dcb.lock ADDED
File without changes
cache/models--openai--clip-vit-large-patch14/.no_exist/32bd64288804d66eefd0ccbe215aa642df71cc41/added_tokens.json ADDED
File without changes
cache/models--openai--clip-vit-large-patch14/blobs/2c19f6666e0e163c7954df66cb901353fcad088e ADDED
@@ -0,0 +1,171 @@
+ {
+   "_name_or_path": "clip-vit-large-patch14/",
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 768,
+   "text_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 77,
+     "min_length": 0,
+     "model_type": "clip_text_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim" : 768,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.16.0.dev0",
+     "use_bfloat16": false,
+     "vocab_size": 49408
+   },
+   "text_config_dict": {
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12,
+     "projection_dim": 768
+   },
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "clip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 24,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim" : 768,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.16.0.dev0",
+     "use_bfloat16": false
+   },
+   "vision_config_dict": {
+     "hidden_size": 1024,
+     "intermediate_size": 4096,
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768
+   }
+ }
cache/models--openai--clip-vit-large-patch14/blobs/4297ea6a8d2bae1fea8f48b45e257814dcb11f69 ADDED
The diff for this file is too large to render. See raw diff
 
cache/models--openai--clip-vit-large-patch14/blobs/580c79c6862f31d1f9bd08dd1a415ba0d0502cd9 ADDED
The diff for this file is too large to render. See raw diff
 
cache/models--openai--clip-vit-large-patch14/blobs/702bb12920b291cade3706cf215c1604d2255d93 ADDED
@@ -0,0 +1,34 @@
+ {
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "pad_token": "<|endoftext|>",
+   "add_prefix_space": false,
+   "errors": "replace",
+   "do_lower_case": true,
+   "name_or_path": "openai/clip-vit-base-patch32",
+   "model_max_length": 77,
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer"
+ }
cache/models--openai--clip-vit-large-patch14/blobs/76e821f1b6f0a9709293c3b6b51ed90980b3166b ADDED
The diff for this file is too large to render. See raw diff
 
cache/models--openai--clip-vit-large-patch14/blobs/9bfb42aa97dcd61e89f279ccaee988bccb4fabae ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
cache/models--openai--clip-vit-large-patch14/refs/main ADDED
@@ -0,0 +1 @@
+ 32bd64288804d66eefd0ccbe215aa642df71cc41
cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/config.json ADDED
Content identical to cache/models--openai--clip-vit-large-patch14/blobs/2c19f6666e0e163c7954df66cb901353fcad088e above (the snapshot entry points at that blob).
cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/special_tokens_map.json ADDED
Content identical to blobs/9bfb42aa97dcd61e89f279ccaee988bccb4fabae above.
cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/tokenizer_config.json ADDED
Content identical to blobs/702bb12920b291cade3706cf215c1604d2255d93 above.
cache/models--openai--clip-vit-large-patch14/snapshots/32bd64288804d66eefd0ccbe215aa642df71cc41/vocab.json ADDED
The diff for this file is too large to render. See raw diff
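
Taken together, the cached merges.txt, vocab.json, tokenizer.json, tokenizer_config.json, and special_tokens_map.json are enough for FrozenCLIPEmbedder to build its CLIP tokenizer offline. A minimal sketch, assuming the Hugging Face cache is pointed at this repo's cache/ directory (the cache_dir value is an assumption about how the app wires this up):

```python
# Sketch: build the tokenizer from the cached snapshot above, offline.
from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained(
    "openai/clip-vit-large-patch14",
    cache_dir="cache",  # assumption: HF cache rooted at this repo's cache/
)
print(tok.model_max_length)  # 77, per tokenizer_config.json
```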
 
generate_sd.yaml ADDED
@@ -0,0 +1,70 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false   # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
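
This config is consumed exactly as load_model_from_config in app.py does it: OmegaConf parses the YAML, and instantiate_from_config builds the class named by `target` with `params`. A minimal sketch, assuming stable_diffusion/ is on sys.path as app.py arranges (no checkpoint weights are loaded here):

```python
# Sketch of how generate_sd.yaml is turned into a model object
# (mirrors load_model_from_config in app.py, minus the state_dict).
from omegaconf import OmegaConf
from stable_diffusion.ldm.util import instantiate_from_config

config = OmegaConf.load("generate_sd.yaml")
model = instantiate_from_config(config.model)  # builds LatentDiffusion
print(type(model).__name__)                    # -> "LatentDiffusion"
```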
stable_diffusion/assets/stable-samples/img2img/upscaling-in.png.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+ 501c31c21751664957e69ce52cad1818b6d2f4ce
stable_diffusion/ldm/__pycache__/util.cpython-38.pyc ADDED
Binary file (6.17 kB). View file
 
stable_diffusion/ldm/modules/__pycache__/attention.cpython-38.pyc ADDED
Binary file (9.54 kB). View file
 
stable_diffusion/ldm/modules/__pycache__/ema.cpython-38.pyc ADDED
Binary file (3.72 kB). View file
 
stable_diffusion/ldm/modules/__pycache__/x_transformer.cpython-38.pyc ADDED
Binary file (18.3 kB). View file
 
stable_diffusion/ldm/modules/attention.py ADDED
@@ -0,0 +1,328 @@
+ # File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+ # See more details in LICENSE.
+
+ from inspect import isfunction
+ import math
+ import torch
+ import torch.nn.functional as F
+ from torch import nn, einsum
+ from einops import rearrange, repeat
+ import sys
+ sys.path.append('.')
+
+ from stable_diffusion.ldm.modules.diffusionmodules.util import checkpoint
+
+
+ def exists(val):
+     return val is not None
+
+
+ def uniq(arr):
+     return {el: True for el in arr}.keys()
+
+
+ def default(val, d):  # val ? val : d
+     if exists(val):
+         return val
+     return d() if isfunction(d) else d
+
+
+ def max_neg_value(t):
+     return -torch.finfo(t.dtype).max
+
+
+ def init_(tensor):
+     dim = tensor.shape[-1]
+     std = 1 / math.sqrt(dim)
+     tensor.uniform_(-std, std)
+     return tensor
+
+
+ # feedforward
+ class GEGLU(nn.Module):
+     """
+     The input x is first passed through the linear layer self.proj.
+     The output of the linear layer is then split into two equal chunks
+     along the last dimension (dim=-1), which serve as the value and the
+     gate. The gating mechanism applies the GELU activation to the gate
+     and multiplies it element-wise with the value.
+     """
+     def __init__(self, dim_in, dim_out):
+         super().__init__()
+         self.proj = nn.Linear(dim_in, dim_out * 2)
+
+     def forward(self, x):
+         x, gate = self.proj(x).chunk(2, dim=-1)
+         return x * F.gelu(gate)
+
+
+ class FeedForward(nn.Module):
+     def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
+         super().__init__()
+         inner_dim = int(dim * mult)
+         dim_out = default(dim_out, dim)
+         project_in = nn.Sequential(
+             nn.Linear(dim, inner_dim),
+             nn.GELU()
+         ) if not glu else GEGLU(dim, inner_dim)
+
+         self.net = nn.Sequential(
+             project_in,
+             nn.Dropout(dropout),
+             nn.Linear(inner_dim, dim_out)
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+
+ def zero_module(module):
+     """
+     Zero out the parameters of a module and return it.
+     """
+     for p in module.parameters():
+         p.detach().zero_()
+     return module
+
+
+ def Normalize(in_channels):
+     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+
+
+ class LinearAttention(nn.Module):
+     def __init__(self, dim, heads=4, dim_head=32):
+         super().__init__()
+         self.heads = heads
+         hidden_dim = dim_head * heads
+         self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
+         self.to_out = nn.Conv2d(hidden_dim, dim, 1)
+
+     def forward(self, x):
+         b, c, h, w = x.shape
+         qkv = self.to_qkv(x)
+         q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
+         k = k.softmax(dim=-1)
+         context = torch.einsum('bhdn,bhen->bhde', k, v)
+         out = torch.einsum('bhde,bhdn->bhen', context, q)
+         out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
+         return self.to_out(out)
+
+
+ class SpatialSelfAttention(nn.Module):
+     def __init__(self, in_channels):
+         super().__init__()
+         self.in_channels = in_channels
+
+         self.norm = Normalize(in_channels)
+         self.q = torch.nn.Conv2d(in_channels,
+                                  in_channels,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+         self.k = torch.nn.Conv2d(in_channels,
+                                  in_channels,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+         self.v = torch.nn.Conv2d(in_channels,
+                                  in_channels,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+         self.proj_out = torch.nn.Conv2d(in_channels,
+                                         in_channels,
+                                         kernel_size=1,
+                                         stride=1,
+                                         padding=0)
+
+     def forward(self, x):
+         h_ = x
+         h_ = self.norm(h_)
+         q = self.q(h_)
+         k = self.k(h_)
+         v = self.v(h_)
+
+         # compute attention
+         b, c, h, w = q.shape
+         q = rearrange(q, 'b c h w -> b (h w) c')
+         k = rearrange(k, 'b c h w -> b c (h w)')
+         w_ = torch.einsum('bij,bjk->bik', q, k)
+
+         w_ = w_ * (int(c)**(-0.5))
+         w_ = torch.nn.functional.softmax(w_, dim=2)
+
+         # attend to values
+         v = rearrange(v, 'b c h w -> b c (h w)')
+         w_ = rearrange(w_, 'b i j -> b j i')
+         h_ = torch.einsum('bij,bjk->bik', v, w_)
+         h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
+         h_ = self.proj_out(h_)
+
+         return x + h_
+
+
+ class CrossAttention(nn.Module):
+     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
+         super().__init__()
+         inner_dim = dim_head * heads  # total dim across heads
+         # if context_dim is None, this is self-attention,
+         # and context_dim should be exactly the same as query_dim (input dim)
+         context_dim = default(context_dim, query_dim)  # context_dim ? context_dim : query_dim
+         self.scale = dim_head ** -0.5  # 1/sqrt(d)
+         self.heads = heads
+
+         self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
+         self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
+         self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
+
+         self.to_out = nn.Sequential(
+             nn.Linear(inner_dim, query_dim),
+             nn.Dropout(dropout)
+         )
+
+         self.prompt_to_prompt = False
+
+     def forward(self, x, context=None, mask=None):
+         is_self_attn = context is None
+         h = self.heads
+         q = self.to_q(x)
+
+         # if context is None, this is self-attention, otherwise cross-attention
+         context = default(context, x)
+         k = self.to_k(context)
+         v = self.to_v(context)
+
+         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
+         sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+         """
+         When self.prompt_to_prompt is True and the layer is performing
+         self-attention, the attention maps for the first half of each
+         conditional/unconditional pair are duplicated onto the second
+         half. This is used when the batch has the structure
+         {conditional, unconditional} x {prompt 1, prompt 2}: for
+         self-attention, prompt 1 and prompt 2 are treated as if they
+         had the same attention map.
+         """
+         if is_self_attn and self.prompt_to_prompt:
+             # Unlike the original Prompt-to-Prompt, which uses cross-attention layers,
+             # we copy attention maps for self-attention layers.
+             # There must be 4 elements in the batch: {conditional, unconditional} x {prompt 1, prompt 2}
+             assert x.size(0) == 4
+             sims = sim.chunk(4)
+             sim = torch.cat((sims[0], sims[0], sims[2], sims[2]))
+         """
+         In attention mechanisms, a mask is often used to prevent certain
+         positions in the input from attending to specific other positions,
+         e.g. to keep future positions from being attended to in a sequence
+         (ensuring causality in autoregressive models), or to mask out
+         padding positions.
+         """
+         if exists(mask):
+             """
+             Positions where the mask is False are filled with a very
+             negative value (effectively negative infinity under softmax),
+             so they contribute almost nothing to the attention weights.
+             """
+             mask = rearrange(mask, 'b ... -> b (...)')
+             max_neg_value = -torch.finfo(sim.dtype).max
+             mask = repeat(mask, 'b j -> (b h) () j', h=h)
+             sim.masked_fill_(~mask, max_neg_value)
+
+         attn = sim.softmax(dim=-1)
+
+         out = einsum('b i j, b j d -> b i d', attn, v)
+         out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+         out = self.to_out(out)
+         return out
+
+
+ class BasicTransformerBlock(nn.Module):
+     def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
+                  disable_self_attn=False):
+         super().__init__()
+         self.disable_self_attn = disable_self_attn
+         self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
+                                     context_dim=context_dim if self.disable_self_attn else None)  # a self-attention unless self.disable_self_attn
+         self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
+         self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
+                                     heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
+         self.norm1 = nn.LayerNorm(dim)
+         self.norm2 = nn.LayerNorm(dim)
+         self.norm3 = nn.LayerNorm(dim)
+         self.checkpoint = checkpoint
+
+     def forward(self, x, context=None):
+         return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
+
+     def _forward(self, x, context=None):
+         x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
+         x = self.attn2(self.norm2(x), context=context) + x
+         x = self.ff(self.norm3(x)) + x
+         return x
+
+
+ class SpatialTransformer(nn.Module):
+     """
+     Transformer block for image-like data.
+     First, project the input (aka embedding)
+     and reshape to b, t, d.
+     Then apply standard transformer action.
+     Finally, reshape back to an image.
+     """
+     def __init__(self, in_channels, n_heads, d_head,
+                  depth=1, dropout=0., context_dim=None,
+                  disable_self_attn=False):
+         super().__init__()
+         self.in_channels = in_channels
+         inner_dim = n_heads * d_head
+         self.norm = Normalize(in_channels)  # GroupNorm, by default 32 groups
+
+         self.proj_in = nn.Conv2d(in_channels,
+                                  inner_dim,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+
+         self.transformer_blocks = nn.ModuleList(
+             [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim,
+                                    disable_self_attn=disable_self_attn)
+              for d in range(depth)]
+         )
+
+         self.proj_out = zero_module(nn.Conv2d(inner_dim,
+                                               in_channels,
+                                               kernel_size=1,
+                                               stride=1,
+                                               padding=0))
+
+     def forward(self, x, context=None):
+         # note: if no context is given, cross-attention defaults to self-attention
+         # context: [bs, 77, 768]
+         b, c, h, w = x.shape
+         x_in = x
+         x = self.norm(x)
+         x = self.proj_in(x)
+         x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
+         for block in self.transformer_blocks:
+             x = block(x, context=context)
+         x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
+         x = self.proj_out(x)
+         return x + x_in
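
A quick shape check of the CrossAttention block defined above (illustrative values; assumes the repo root is on sys.path so the stable_diffusion package imports):

```python
# Shape check for CrossAttention: queries keep their shape, context only
# feeds keys/values. Dimensions below are illustrative.
import torch
from stable_diffusion.ldm.modules.attention import CrossAttention

attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
x = torch.randn(2, 64 * 64, 320)  # b, (h w), c - flattened latent tokens
ctx = torch.randn(2, 77, 768)     # CLIP text embedding: [bs, 77, 768]
out = attn(x, context=ctx)
assert out.shape == x.shape       # output keeps the query shape
```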
stable_diffusion/ldm/modules/diffusionmodules/__init__.py ADDED
File without changes
stable_diffusion/ldm/modules/diffusionmodules/model.py ADDED
@@ -0,0 +1,837 @@
+ # pytorch_diffusion + derived encoder decoder
+ import math
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ from einops import rearrange
+
+ import sys
+ sys.path.append('.')
+
+ from stable_diffusion.ldm.util import instantiate_from_config
+ from stable_diffusion.ldm.modules.attention import LinearAttention
+ from stable_diffusion.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
+
+
+ def get_timestep_embedding(timesteps, embedding_dim):
+     """
+     This matches the implementation in Denoising Diffusion Probabilistic Models:
+     From Fairseq.
+     Build sinusoidal embeddings.
+     This matches the implementation in tensor2tensor, but differs slightly
+     from the description in Section 3.5 of "Attention Is All You Need".
+     """
+     assert len(timesteps.shape) == 1
+
+     half_dim = embedding_dim // 2
+     emb = math.log(10000) / (half_dim - 1)
+     emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
+     emb = emb.to(device=timesteps.device)
+     emb = timesteps.float()[:, None] * emb[None, :]
+     emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+     if embedding_dim % 2 == 1:  # zero pad
+         emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
+     return emb
+
+
+ def nonlinearity(x):
+     # swish
+     return x * torch.sigmoid(x)
+
+
+ def Normalize(in_channels, num_groups=32):
+     return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
+
+
+ class Upsample(nn.Module):
+     def __init__(self, in_channels, with_conv):
+         super().__init__()
+         self.with_conv = with_conv  # True
+         if self.with_conv:
+             self.conv = torch.nn.Conv2d(in_channels,
+                                         in_channels,
+                                         kernel_size=3,
+                                         stride=1,
+                                         padding=1)
+
+     def forward(self, x):
+         x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
+         if self.with_conv:
+             x = self.conv(x)
+         return x
+
+
+ class Downsample(nn.Module):
+     def __init__(self, in_channels: int, with_conv: bool):
+         super().__init__()
+         self.with_conv = with_conv
+         if self.with_conv:
+             # no asymmetric padding in torch conv, must do it ourselves
+             self.conv = torch.nn.Conv2d(in_channels,
+                                         in_channels,
+                                         kernel_size=3,
+                                         stride=2,
+                                         padding=0)
+
+     def forward(self, x):
+         if self.with_conv:
+             pad = (0, 1, 0, 1)
+             x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
+             x = self.conv(x)
+         else:
+             x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
+         return x
+
+
+ class ResnetBlock(nn.Module):
+     def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
+                  dropout, temb_channels=512):
+         super().__init__()
+         self.in_channels = in_channels
+         out_channels = in_channels if out_channels is None else out_channels
+         self.out_channels = out_channels
+         self.use_conv_shortcut = conv_shortcut
+
+         self.norm1 = Normalize(in_channels)
+         self.conv1 = torch.nn.Conv2d(in_channels,
+                                      out_channels,
+                                      kernel_size=3,
+                                      stride=1,
+                                      padding=1)
+         if temb_channels > 0:
+             self.temb_proj = torch.nn.Linear(temb_channels,
+                                              out_channels)
+         self.norm2 = Normalize(out_channels)
+         self.dropout = torch.nn.Dropout(dropout)
+         self.conv2 = torch.nn.Conv2d(out_channels,
+                                      out_channels,
+                                      kernel_size=3,
+                                      stride=1,
+                                      padding=1)
+         if self.in_channels != self.out_channels:
+             if self.use_conv_shortcut:
+                 self.conv_shortcut = torch.nn.Conv2d(in_channels,
+                                                      out_channels,
+                                                      kernel_size=3,
+                                                      stride=1,
+                                                      padding=1)
+             else:
+                 self.nin_shortcut = torch.nn.Conv2d(in_channels,
+                                                     out_channels,
+                                                     kernel_size=1,
+                                                     stride=1,
+                                                     padding=0)
+
+     def forward(self, x, temb):
+         h = x
+         h = self.norm1(h)
+         h = nonlinearity(h)
+         h = self.conv1(h)
+
+         if temb is not None:
+             h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
+
+         h = self.norm2(h)
+         h = nonlinearity(h)
+         h = self.dropout(h)
+         h = self.conv2(h)
+
+         if self.in_channels != self.out_channels:
+             if self.use_conv_shortcut:
+                 x = self.conv_shortcut(x)
+             else:
+                 x = self.nin_shortcut(x)
+
+         return x + h
+
+
+ class LinAttnBlock(LinearAttention):
+     """to match AttnBlock usage"""
+     def __init__(self, in_channels):
+         super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
+
+
+ class AttnBlock(nn.Module):
+     def __init__(self, in_channels):
+         super().__init__()
+         self.in_channels = in_channels
+
+         self.norm = Normalize(in_channels)
+         self.q = torch.nn.Conv2d(in_channels,
+                                  in_channels,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+         self.k = torch.nn.Conv2d(in_channels,
+                                  in_channels,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+         self.v = torch.nn.Conv2d(in_channels,
+                                  in_channels,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0)
+         self.proj_out = torch.nn.Conv2d(in_channels,
+                                         in_channels,
+                                         kernel_size=1,
+                                         stride=1,
+                                         padding=0)
+
+     def forward(self, x):
+         h_ = x
+         h_ = self.norm(h_)
+         q = self.q(h_)
+         k = self.k(h_)
+         v = self.v(h_)
+
+         # compute attention
+         b, c, h, w = q.shape
+         q = q.reshape(b, c, h*w)
+         q = q.permute(0, 2, 1)    # b,hw,c
+         k = k.reshape(b, c, h*w)  # b,c,hw
+         w_ = torch.bmm(q, k)      # b,hw,hw    w[b,i,j] = sum_c q[b,i,c] k[b,c,j]
+         w_ = w_ * (int(c)**(-0.5))
+         w_ = torch.nn.functional.softmax(w_, dim=2)
+
+         # attend to values
+         v = v.reshape(b, c, h*w)
+         w_ = w_.permute(0, 2, 1)  # b,hw,hw (first hw of k, second of q)
+         h_ = torch.bmm(v, w_)     # b,c,hw (hw of q)   h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
+         h_ = h_.reshape(b, c, h, w)
+
+         h_ = self.proj_out(h_)
+
+         return x + h_
+
+
+ def make_attn(in_channels, attn_type="vanilla"):
+     assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
+     print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
+     if attn_type == "vanilla":
+         return AttnBlock(in_channels)
+     elif attn_type == "none":
+         return nn.Identity(in_channels)
+     else:
+         return LinAttnBlock(in_channels)
+
+
+ class Model(nn.Module):  # This seems useless so far
+     def __init__(self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks,
+                  attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
+                  resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
+         super().__init__()
+         if use_linear_attn: attn_type = "linear"
+         self.ch = ch
+         self.temb_ch = self.ch * 4
+         self.num_resolutions = len(ch_mult)
+         self.num_res_blocks = num_res_blocks
+         self.resolution = resolution
+         self.in_channels = in_channels
+
+         self.use_timestep = use_timestep
+         if self.use_timestep:
+             # timestep embedding
+             self.temb = nn.Module()
+             self.temb.dense = nn.ModuleList([
+                 torch.nn.Linear(self.ch,
+                                 self.temb_ch),
+                 torch.nn.Linear(self.temb_ch,
+                                 self.temb_ch),
+             ])
+
+         # downsampling
+         self.conv_in = torch.nn.Conv2d(in_channels,
+                                        self.ch,
+                                        kernel_size=3,
+                                        stride=1,
+                                        padding=1)
+
+         curr_res = resolution
+         in_ch_mult = (1,) + tuple(ch_mult)
+         self.down = nn.ModuleList()
+         for i_level in range(self.num_resolutions):
+             block = nn.ModuleList()
+             attn = nn.ModuleList()
+             block_in = ch * in_ch_mult[i_level]
+             block_out = ch * ch_mult[i_level]
+             for i_block in range(self.num_res_blocks):
+                 block.append(ResnetBlock(in_channels=block_in,
+                                          out_channels=block_out,
+                                          temb_channels=self.temb_ch,
+                                          dropout=dropout))
+                 block_in = block_out
+                 if curr_res in attn_resolutions:
+                     attn.append(make_attn(block_in, attn_type=attn_type))
+             down = nn.Module()
+             down.block = block
+             down.attn = attn
+             if i_level != self.num_resolutions - 1:
+                 down.downsample = Downsample(block_in, resamp_with_conv)
+                 curr_res = curr_res // 2
+             self.down.append(down)
+
+         # middle
+         self.mid = nn.Module()
+         self.mid.block_1 = ResnetBlock(in_channels=block_in,
+                                        out_channels=block_in,
+                                        temb_channels=self.temb_ch,
+                                        dropout=dropout)
+         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
+         self.mid.block_2 = ResnetBlock(in_channels=block_in,
+                                        out_channels=block_in,
+                                        temb_channels=self.temb_ch,
+                                        dropout=dropout)
+
+         # upsampling
+         self.up = nn.ModuleList()
+         for i_level in reversed(range(self.num_resolutions)):
+             block = nn.ModuleList()
+             attn = nn.ModuleList()
+             block_out = ch * ch_mult[i_level]
+             skip_in = ch * ch_mult[i_level]
+             for i_block in range(self.num_res_blocks + 1):
+                 if i_block == self.num_res_blocks:
+                     skip_in = ch * in_ch_mult[i_level]
+                 block.append(ResnetBlock(in_channels=block_in + skip_in,
+                                          out_channels=block_out,
+                                          temb_channels=self.temb_ch,
+                                          dropout=dropout))
+                 block_in = block_out
+                 if curr_res in attn_resolutions:
+                     attn.append(make_attn(block_in, attn_type=attn_type))
+             up = nn.Module()
+             up.block = block
+             up.attn = attn
+             if i_level != 0:
+                 up.upsample = Upsample(block_in, resamp_with_conv)
+                 curr_res = curr_res * 2
+             self.up.insert(0, up)  # prepend to get consistent order
+
+         # end
+         self.norm_out = Normalize(block_in)
+         self.conv_out = torch.nn.Conv2d(block_in,
+                                         out_ch,
+                                         kernel_size=3,
+                                         stride=1,
+                                         padding=1)
+
+     def forward(self, x, t=None, context=None):
+         # assert x.shape[2] == x.shape[3] == self.resolution
+         if context is not None:
+             # assume aligned context, cat along channel axis
+             x = torch.cat((x, context), dim=1)
+         if self.use_timestep:
+             # timestep embedding
+             assert t is not None
+             temb = get_timestep_embedding(t, self.ch)
+             temb = self.temb.dense[0](temb)
+             temb = nonlinearity(temb)
+             temb = self.temb.dense[1](temb)
+         else:
+             temb = None
+
+         # downsampling
+         hs = [self.conv_in(x)]
+         for i_level in range(self.num_resolutions):
+             for i_block in range(self.num_res_blocks):
339
+ h = self.down[i_level].block[i_block](hs[-1], temb)
340
+ if len(self.down[i_level].attn) > 0:
341
+ h = self.down[i_level].attn[i_block](h)
342
+ hs.append(h)
343
+ if i_level != self.num_resolutions-1:
344
+ hs.append(self.down[i_level].downsample(hs[-1]))
345
+
346
+ # middle
347
+ h = hs[-1]
348
+ h = self.mid.block_1(h, temb)
349
+ h = self.mid.attn_1(h)
350
+ h = self.mid.block_2(h, temb)
351
+
352
+ # upsampling
353
+ for i_level in reversed(range(self.num_resolutions)):
354
+ for i_block in range(self.num_res_blocks+1):
355
+ h = self.up[i_level].block[i_block](
356
+ torch.cat([h, hs.pop()], dim=1), temb)
357
+ if len(self.up[i_level].attn) > 0:
358
+ h = self.up[i_level].attn[i_block](h)
359
+ if i_level != 0:
360
+ h = self.up[i_level].upsample(h)
361
+
362
+ # end
363
+ h = self.norm_out(h)
364
+ h = nonlinearity(h)
365
+ h = self.conv_out(h)
366
+ return h
367
+
368
+ def get_last_layer(self):
369
+ return self.conv_out.weight
370
+
371
+
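Model wires these pieces into a full UNet: the hs stack collects every down-path activation, and the up path pops and concatenates them as skip connections. A hypothetical small configuration to watch the shapes go through (a sketch, not a training setup):

import torch

model = Model(ch=64, out_ch=3, ch_mult=(1, 2), num_res_blocks=1,
              attn_resolutions=[], in_channels=3, resolution=32)
x = torch.randn(2, 3, 32, 32)
t = torch.randint(0, 1000, (2,))        # use_timestep defaults to True, so t is required
assert model(x, t).shape == x.shape     # the up path restores the spatial shape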
372
+ class Encoder(nn.Module):
373
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
374
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
375
+ resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
376
+ **ignore_kwargs): # ignore_kwargs lets Encoder and Decoder share the same constructor signature
377
+ super().__init__()
378
+ if use_linear_attn: attn_type = "linear"
379
+ self.ch = ch # 128
380
+ self.temb_ch = 0 # 0
381
+ self.num_resolutions = len(ch_mult) # 4
382
+ self.num_res_blocks = num_res_blocks # 2
383
+ self.resolution = resolution # 256
384
+ self.in_channels = in_channels # 3
385
+
386
+ # downsampling
387
+ self.conv_in = torch.nn.Conv2d(in_channels,
388
+ self.ch,
389
+ kernel_size=3,
390
+ stride=1,
391
+ padding=1)
392
+
393
+ curr_res = resolution
394
+ in_ch_mult = (1,)+tuple(ch_mult) # (1, 1, 2, 4, 4)
395
+ self.in_ch_mult = in_ch_mult # (1, 1, 2, 4, 4)
396
+ self.down = nn.ModuleList()
397
+ for i_level in range(self.num_resolutions): # 4
398
+ block = nn.ModuleList()
399
+ attn = nn.ModuleList()
400
+ block_in = ch * in_ch_mult[i_level]
401
+ block_out = ch * ch_mult[i_level]
402
+ for i_block in range(self.num_res_blocks): # 2
403
+ block.append(ResnetBlock(in_channels=block_in,
404
+ out_channels=block_out,
405
+ temb_channels=self.temb_ch,
406
+ dropout=dropout))
407
+ block_in = block_out
408
+ if curr_res in attn_resolutions: # [], not used, no attention between ResBlocks
409
+ attn.append(make_attn(block_in, attn_type=attn_type))
410
+ down = nn.Module()
411
+ down.block = block
412
+ down.attn = attn
413
+ if i_level != self.num_resolutions - 1: # no downsampling after the last level
414
+ down.downsample = Downsample(block_in, resamp_with_conv) # conv layer with a stride of 2
415
+ curr_res = curr_res // 2
416
+ self.down.append(down)
417
+
418
+ # middle
419
+ self.mid = nn.Module()
420
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
421
+ out_channels=block_in,
422
+ temb_channels=self.temb_ch,
423
+ dropout=dropout)
424
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
425
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
426
+ out_channels=block_in,
427
+ temb_channels=self.temb_ch,
428
+ dropout=dropout)
429
+
430
+ # end
431
+ self.norm_out = Normalize(block_in)
432
+ self.conv_out = torch.nn.Conv2d(block_in,
433
+ 2 * z_channels if double_z else z_channels, # double_z must be True here: this is a VAE, so the encoder output does not feed the decoder directly but parameterizes a Gaussian; the channels are doubled so the first half holds the mean and the second half the log-variance
434
+ kernel_size=3,
435
+ stride=1,
436
+ padding=1)
437
+
438
+ def forward(self, x):
439
+ # timestep embedding
440
+ temb = None
441
+
442
+ # downsampling
443
+ hs = [self.conv_in(x)]
444
+ for i_level in range(self.num_resolutions):
445
+ for i_block in range(self.num_res_blocks):
446
+ h = self.down[i_level].block[i_block](hs[-1], temb)
447
+ if len(self.down[i_level].attn) > 0:
448
+ h = self.down[i_level].attn[i_block](h)
449
+ hs.append(h)
450
+ if i_level != self.num_resolutions-1:
451
+ hs.append(self.down[i_level].downsample(hs[-1]))
452
+
453
+ # middle
454
+ h = hs[-1]
455
+ h = self.mid.block_1(h, temb)
456
+ h = self.mid.attn_1(h)
457
+ h = self.mid.block_2(h, temb)
458
+
459
+ # end
460
+ h = self.norm_out(h)
461
+ h = nonlinearity(h)
462
+ h = self.conv_out(h)
463
+ return h
464
+
465
+
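With the configuration annotated in the comments above (ch=128, ch_mult=(1, 2, 4, 4), resolution=256, z_channels=4, double_z=True), the Encoder downsamples three times, so a 3x256x256 image becomes an 8x32x32 moment tensor: 4 mean channels and 4 log-variance channels of the latent Gaussian. A sketch under those assumptions:

import torch

enc = Encoder(ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2,
              attn_resolutions=[], in_channels=3, resolution=256, z_channels=4)
x = torch.randn(1, 3, 256, 256)
moments = enc(x)                        # mean and log-variance, stacked channel-wise
assert moments.shape == (1, 8, 32, 32)  # 2 * z_channels, resolution / 2**3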
466
+ class Decoder(nn.Module):
467
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
468
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
469
+ resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
470
+ attn_type="vanilla", **ignorekwargs): # ignorekwargs lets Encoder and Decoder share the same constructor signature
471
+ super().__init__()
472
+ if use_linear_attn: attn_type = "linear"
473
+ self.ch = ch # ch is the base channel count and remains unchanged throughout the class
474
+ self.temb_ch = 0
475
+ self.num_resolutions = len(ch_mult) # 4; ch_mult (e.g. [1, 2, 4, 4]) is shared with the Encoder
476
+ self.num_res_blocks = num_res_blocks # 2, the number of ResnetBlocks per upsampling level
477
+ self.resolution = resolution
478
+ self.in_channels = in_channels # accepted for signature compatibility but unused
479
+ self.give_pre_end = give_pre_end # False; if True, forward returns the features before the final norm/conv projection
480
+ self.tanh_out = tanh_out # False; whether to apply tanh to the output in forward
481
+
482
+ # compute in_ch_mult, block_in and curr_res at lowest res
483
+ in_ch_mult = (1,) + tuple(ch_mult)
484
+ block_in = ch * ch_mult[self.num_resolutions-1]
485
+ curr_res = resolution // 2**(self.num_resolutions-1)
486
+ self.z_shape = (1, z_channels, curr_res, curr_res)
487
+ print("Working with z of shape {} = {} dimensions.".format(
488
+ self.z_shape, np.prod(self.z_shape)))
489
+
490
+ # z to block_in
491
+ self.conv_in = torch.nn.Conv2d(z_channels,
492
+ block_in,
493
+ kernel_size=3,
494
+ stride=1,
495
+ padding=1)
496
+
497
+ # middle
498
+ self.mid = nn.Module()
499
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
500
+ out_channels=block_in,
501
+ temb_channels=self.temb_ch,
502
+ dropout=dropout)
503
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
504
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
505
+ out_channels=block_in,
506
+ temb_channels=self.temb_ch,
507
+ dropout=dropout)
508
+
509
+ # upsampling
510
+ self.up = nn.ModuleList()
511
+ for i_level in reversed(range(self.num_resolutions)): # 4
512
+ block = nn.ModuleList()
513
+ attn = nn.ModuleList()
514
+ block_out = ch * ch_mult[i_level]
515
+ for i_block in range(self.num_res_blocks+1): # 3
516
+ block.append(ResnetBlock(in_channels=block_in,
517
+ out_channels=block_out,
518
+ temb_channels=self.temb_ch,
519
+ dropout=dropout))
520
+ block_in = block_out
521
+ if curr_res in attn_resolutions: # [], not used, no attention block between ResBlocks
522
+ attn.append(make_attn(block_in, attn_type=attn_type))
523
+ up = nn.Module()
524
+ up.block = block
525
+ up.attn = attn
526
+ if i_level != 0: # upsample between levels, with none after the final full-resolution level
527
+ up.upsample = Upsample(block_in, resamp_with_conv)
528
+ curr_res = curr_res * 2
529
+ self.up.insert(0, up) # prepend to get consistent order: the blocks are built from the deepest level up, so inserting at 0 indexes self.up from shallow to deep, and forward then iterates it in reverse
530
+
531
+ # end
532
+ self.norm_out = Normalize(block_in) # GroupNorm with 32 groups by default
533
+ self.conv_out = torch.nn.Conv2d(block_in,
534
+ out_ch,
535
+ kernel_size=3,
536
+ stride=1,
537
+ padding=1)
538
+
539
+ def forward(self, z):
540
+ #assert z.shape[1:] == self.z_shape[1:]
541
+ self.last_z_shape = z.shape
542
+
543
+ # timestep embedding
544
+ temb = None
545
+
546
+ # z to block_in
547
+ h = self.conv_in(z)
548
+
549
+ # middle
550
+ h = self.mid.block_1(h, temb)
551
+ h = self.mid.attn_1(h)
552
+ h = self.mid.block_2(h, temb)
553
+
554
+ # upsampling
555
+ for i_level in reversed(range(self.num_resolutions)):
556
+ for i_block in range(self.num_res_blocks+1):
557
+ h = self.up[i_level].block[i_block](h, temb)
558
+ if len(self.up[i_level].attn) > 0:
559
+ h = self.up[i_level].attn[i_block](h)
560
+ if i_level != 0:
561
+ h = self.up[i_level].upsample(h)
562
+
563
+ # end
564
+ if self.give_pre_end:
565
+ return h
566
+
567
+ h = self.norm_out(h)
568
+ h = nonlinearity(h) # swish
569
+ h = self.conv_out(h)
570
+ if self.tanh_out:
571
+ h = torch.tanh(h)
572
+ return h
573
+
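Decoder inverts that mapping. Note that it consumes a sampled z with z_channels channels (not the 2 * z_channels moments), since sampling from the DiagonalGaussianDistribution happens outside this module. A matching sketch:

import torch

dec = Decoder(ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2,
              attn_resolutions=[], in_channels=3, resolution=256, z_channels=4)
z = torch.randn(1, 4, 32, 32)            # a sampled latent, not the encoder moments
assert dec(z).shape == (1, 3, 256, 256)  # three 2x upsamples restore 256x256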
574
+
575
+ class SimpleDecoder(nn.Module):
576
+ def __init__(self, in_channels, out_channels, *args, **kwargs):
577
+ super().__init__()
578
+ self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
579
+ ResnetBlock(in_channels=in_channels,
580
+ out_channels=2 * in_channels,
581
+ temb_channels=0, dropout=0.0),
582
+ ResnetBlock(in_channels=2 * in_channels,
583
+ out_channels=4 * in_channels,
584
+ temb_channels=0, dropout=0.0),
585
+ ResnetBlock(in_channels=4 * in_channels,
586
+ out_channels=2 * in_channels,
587
+ temb_channels=0, dropout=0.0),
588
+ nn.Conv2d(2*in_channels, in_channels, 1),
589
+ Upsample(in_channels, with_conv=True)])
590
+ # end
591
+ self.norm_out = Normalize(in_channels)
592
+ self.conv_out = torch.nn.Conv2d(in_channels,
593
+ out_channels,
594
+ kernel_size=3,
595
+ stride=1,
596
+ padding=1)
597
+
598
+ def forward(self, x):
599
+ for i, layer in enumerate(self.model):
600
+ if i in [1,2,3]:
601
+ x = layer(x, None)
602
+ else:
603
+ x = layer(x)
604
+
605
+ h = self.norm_out(x)
606
+ h = nonlinearity(h)
607
+ x = self.conv_out(h)
608
+ return x
609
+
610
+
611
+ class UpsampleDecoder(nn.Module):
612
+ def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
613
+ ch_mult=(2,2), dropout=0.0):
614
+ super().__init__()
615
+ # upsampling
616
+ self.temb_ch = 0
617
+ self.num_resolutions = len(ch_mult)
618
+ self.num_res_blocks = num_res_blocks
619
+ block_in = in_channels
620
+ curr_res = resolution // 2 ** (self.num_resolutions - 1)
621
+ self.res_blocks = nn.ModuleList()
622
+ self.upsample_blocks = nn.ModuleList()
623
+ for i_level in range(self.num_resolutions):
624
+ res_block = []
625
+ block_out = ch * ch_mult[i_level]
626
+ for i_block in range(self.num_res_blocks + 1):
627
+ res_block.append(ResnetBlock(in_channels=block_in,
628
+ out_channels=block_out,
629
+ temb_channels=self.temb_ch,
630
+ dropout=dropout))
631
+ block_in = block_out
632
+ self.res_blocks.append(nn.ModuleList(res_block))
633
+ if i_level != self.num_resolutions - 1:
634
+ self.upsample_blocks.append(Upsample(block_in, True))
635
+ curr_res = curr_res * 2
636
+
637
+ # end
638
+ self.norm_out = Normalize(block_in)
639
+ self.conv_out = torch.nn.Conv2d(block_in,
640
+ out_channels,
641
+ kernel_size=3,
642
+ stride=1,
643
+ padding=1)
644
+
645
+ def forward(self, x):
646
+ # upsampling
647
+ h = x
648
+ for k, i_level in enumerate(range(self.num_resolutions)):
649
+ for i_block in range(self.num_res_blocks + 1):
650
+ h = self.res_blocks[i_level][i_block](h, None)
651
+ if i_level != self.num_resolutions - 1:
652
+ h = self.upsample_blocks[k](h)
653
+ h = self.norm_out(h)
654
+ h = nonlinearity(h)
655
+ h = self.conv_out(h)
656
+ return h
657
+
658
+
659
+ class LatentRescaler(nn.Module):
660
+ def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
661
+ super().__init__()
662
+ # residual block, interpolate, residual block
663
+ self.factor = factor
664
+ self.conv_in = nn.Conv2d(in_channels,
665
+ mid_channels,
666
+ kernel_size=3,
667
+ stride=1,
668
+ padding=1)
669
+ self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
670
+ out_channels=mid_channels,
671
+ temb_channels=0,
672
+ dropout=0.0) for _ in range(depth)])
673
+ self.attn = AttnBlock(mid_channels)
674
+ self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
675
+ out_channels=mid_channels,
676
+ temb_channels=0,
677
+ dropout=0.0) for _ in range(depth)])
678
+
679
+ self.conv_out = nn.Conv2d(mid_channels,
680
+ out_channels,
681
+ kernel_size=1,
682
+ )
683
+
684
+ def forward(self, x):
685
+ x = self.conv_in(x)
686
+ for block in self.res_block1:
687
+ x = block(x, None)
688
+ x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
689
+ x = self.attn(x)
690
+ for block in self.res_block2:
691
+ x = block(x, None)
692
+ x = self.conv_out(x)
693
+ return x
694
+
695
+
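LatentRescaler changes spatial resolution by plain interpolation sandwiched between two residual stacks and one attention block; factor may be fractional, and channels are remapped by the final 1x1 conv. A sketch with hypothetical sizes:

import torch

rescaler = LatentRescaler(factor=0.5, in_channels=16, mid_channels=32, out_channels=8)
x = torch.randn(1, 16, 64, 64)
assert rescaler(x).shape == (1, 8, 32, 32)  # spatial size scaled by factor, channels by conv_out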
696
+ class MergedRescaleEncoder(nn.Module):
697
+ def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
698
+ attn_resolutions, dropout=0.0, resamp_with_conv=True,
699
+ ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
700
+ super().__init__()
701
+ intermediate_chn = ch * ch_mult[-1]
702
+ self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
703
+ z_channels=intermediate_chn, double_z=False, resolution=resolution,
704
+ attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
705
+ out_ch=None)
706
+ self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
707
+ mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
708
+
709
+ def forward(self, x):
710
+ x = self.encoder(x)
711
+ x = self.rescaler(x)
712
+ return x
713
+
714
+
715
+ class MergedRescaleDecoder(nn.Module):
716
+ def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
717
+ dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
718
+ super().__init__()
719
+ tmp_chn = z_channels*ch_mult[-1]
720
+ self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
721
+ resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
722
+ ch_mult=ch_mult, resolution=resolution, ch=ch)
723
+ self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
724
+ out_channels=tmp_chn, depth=rescale_module_depth)
725
+
726
+ def forward(self, x):
727
+ x = self.rescaler(x)
728
+ x = self.decoder(x)
729
+ return x
730
+
731
+
732
+ class Upsampler(nn.Module):
733
+ def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
734
+ super().__init__()
735
+ assert out_size >= in_size
736
+ num_blocks = int(np.log2(out_size//in_size))+1
737
+ factor_up = 1.+ (out_size % in_size)
738
+ print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
739
+ self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
740
+ out_channels=in_channels)
741
+ self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
742
+ attn_resolutions=[], in_channels=None, ch=in_channels,
743
+ ch_mult=[ch_mult for _ in range(num_blocks)])
744
+
745
+ def forward(self, x):
746
+ x = self.rescaler(x)
747
+ x = self.decoder(x)
748
+ return x
749
+
750
+
751
+ class Resize(nn.Module):
752
+ def __init__(self, in_channels=None, learned=False, mode="bilinear"):
753
+ super().__init__()
754
+ self.with_conv = learned
755
+ self.mode = mode
756
+ if self.with_conv:
757
+ print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode")
758
+ raise NotImplementedError()
759
+ assert in_channels is not None
760
+ # no asymmetric padding in torch conv, must do it ourselves
761
+ self.conv = torch.nn.Conv2d(in_channels,
762
+ in_channels,
763
+ kernel_size=4,
764
+ stride=2,
765
+ padding=1)
766
+
767
+ def forward(self, x, scale_factor=1.0):
768
+ if scale_factor==1.0:
769
+ return x
770
+ else:
771
+ x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
772
+ return x
773
+
774
+
775
+ class FirstStagePostProcessor(nn.Module):
776
+
777
+ def __init__(self, ch_mult:list, in_channels,
778
+ pretrained_model:nn.Module=None,
779
+ reshape=False,
780
+ n_channels=None,
781
+ dropout=0.,
782
+ pretrained_config=None):
783
+ super().__init__()
784
+ if pretrained_config is None:
785
+ assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
786
+ self.pretrained_model = pretrained_model
787
+ else:
788
+ assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
789
+ self.instantiate_pretrained(pretrained_config)
790
+
791
+ self.do_reshape = reshape
792
+
793
+ if n_channels is None:
794
+ n_channels = self.pretrained_model.encoder.ch
795
+
796
+ self.proj_norm = Normalize(in_channels, num_groups=in_channels//2)
797
+ self.proj = nn.Conv2d(in_channels, n_channels, kernel_size=3, stride=1,padding=1)
798
+
799
+ blocks = []
800
+ downs = []
801
+ ch_in = n_channels
802
+ for m in ch_mult:
803
+ blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
804
+ ch_in = m * n_channels
805
+ downs.append(Downsample(ch_in, with_conv=False))
806
+
807
+ self.model = nn.ModuleList(blocks)
808
+ self.downsampler = nn.ModuleList(downs)
809
+
810
+ def instantiate_pretrained(self, config):
811
+ model = instantiate_from_config(config)
812
+ self.pretrained_model = model.eval()
813
+ # self.pretrained_model.train = False
814
+ for param in self.pretrained_model.parameters():
815
+ param.requires_grad = False
816
+
817
+ @torch.no_grad()
818
+ def encode_with_pretrained(self,x):
819
+ c = self.pretrained_model.encode(x)
820
+ if isinstance(c, DiagonalGaussianDistribution):
821
+ c = c.mode()
822
+ return c
823
+
824
+ def forward(self, x):
825
+ z_fs = self.encode_with_pretrained(x)
826
+ z = self.proj_norm(z_fs)
827
+ z = self.proj(z)
828
+ z = nonlinearity(z)
829
+
830
+ for submodel, downmodel in zip(self.model,self.downsampler):
831
+ z = submodel(z, temb=None)
832
+ z = downmodel(z)
833
+
834
+ if self.do_reshape:
835
+ z = rearrange(z,'b c h w -> b (h w) c')
836
+ return z
837
+
stable_diffusion/ldm/modules/diffusionmodules/openaimodel.py ADDED
@@ -0,0 +1,1021 @@
1
+ from abc import abstractmethod
2
+ import math
3
+
4
+ import numpy as np
5
+ import torch as th
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ import sys
10
+ sys.path.append('.')
11
+
12
+ from stable_diffusion.ldm.modules.diffusionmodules.util import (
13
+ checkpoint,
14
+ conv_nd, # nn.Conv{1,2,3}d depending on dims
15
+ linear, # nn.Linear
16
+ avg_pool_nd,
17
+ zero_module,
18
+ normalization,
19
+ timestep_embedding,
20
+ )
21
+ from stable_diffusion.ldm.modules.attention import SpatialTransformer
22
+ from stable_diffusion.ldm.util import exists
23
+
24
+ # dummy replace
25
+ def convert_module_to_f16(x):
26
+ pass
27
+
28
+ def convert_module_to_f32(x):
29
+ pass
30
+
31
+
32
+ # go
33
+ class AttentionPool2d(nn.Module):
34
+ """
35
+ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ spacial_dim: int,
41
+ embed_dim: int,
42
+ num_heads_channels: int,
43
+ output_dim: int = None,
44
+ ):
45
+ super().__init__()
46
+ self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
47
+ self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
48
+ self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
49
+ self.num_heads = embed_dim // num_heads_channels
50
+ self.attention = QKVAttention(self.num_heads)
51
+
52
+ def forward(self, x):
53
+ b, c, *_spatial = x.shape
54
+ x = x.reshape(b, c, -1) # NC(HW)
55
+ x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
56
+ x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
57
+ x = self.qkv_proj(x)
58
+ x = self.attention(x)
59
+ x = self.c_proj(x)
60
+ return x[:, :, 0]
61
+
62
+
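AttentionPool2d prepends the spatial mean as an extra token, runs one multi-head attention pass over the flattened positions, and returns that token's output as the pooled vector. A sketch with hypothetical sizes:

pool = AttentionPool2d(spacial_dim=8, embed_dim=64, num_heads_channels=8, output_dim=32)
x = th.randn(2, 64, 8, 8)
assert pool(x).shape == (2, 32)   # one pooled vector per batch element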
63
+ class TimestepBlock(nn.Module):
64
+ """
65
+ Any module where forward() takes timestep embeddings as a second argument.
66
+ """
67
+
68
+ @abstractmethod
69
+ def forward(self, x, emb):
70
+ """
71
+ Apply the module to `x` given `emb` timestep embeddings.
72
+ """
73
+
74
+
75
+ class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
76
+ """
77
+ TimestepBlock above is just an abstract marker class.
78
+ A sequential module that passes timestep embeddings to the children that
79
+ support it as an extra input.
80
+ """
81
+
82
+ def forward(self, x, emb, context=None):
83
+ for layer in self:
84
+ if isinstance(layer, TimestepBlock):
85
+ x = layer(x, emb)
86
+ elif isinstance(layer, SpatialTransformer):
87
+ x = layer(x, context)
88
+ else:
89
+ x = layer(x)
90
+ return x
91
+
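TimestepEmbedSequential is what lets one sequential container mix plain layers, timestep-conditioned ResBlocks, and cross-attending SpatialTransformers: each child receives only the arguments it understands. A hypothetical mini-stack:

block = TimestepEmbedSequential(
    conv_nd(2, 4, 8, 3, padding=1),             # plain layer: called as layer(x)
    ResBlock(8, emb_channels=16, dropout=0.0),  # TimestepBlock: called as layer(x, emb)
    # a SpatialTransformer here would be called as layer(x, context)
)
x, emb = th.randn(1, 4, 32, 32), th.randn(1, 16)
out = block(x, emb, context=None)               # (1, 8, 32, 32)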
92
+
93
+ class Upsample(nn.Module):
94
+ """
95
+ An upsampling layer with an optional convolution.
96
+ :param channels: channels in the inputs and outputs.
97
+ :param use_conv: a bool determining if a convolution is applied.
98
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
99
+ upsampling occurs in the inner-two dimensions.
100
+ """
101
+
102
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
103
+ super().__init__()
104
+ self.channels = channels
105
+ self.out_channels = out_channels or channels
106
+ self.use_conv = use_conv
107
+ self.dims = dims
108
+ if use_conv:
109
+ self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
110
+
111
+ def forward(self, x):
112
+ assert x.shape[1] == self.channels
113
+ if self.dims == 3:
114
+ x = F.interpolate(
115
+ x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
116
+ )
117
+ else:
118
+ x = F.interpolate(x, scale_factor=2, mode="nearest")
119
+ if self.use_conv:
120
+ x = self.conv(x)
121
+ return x
122
+
123
+
124
+ class TransposedUpsample(nn.Module):
125
+ 'Learned 2x upsampling without padding'
126
+ def __init__(self, channels, out_channels=None, ks=5):
127
+ super().__init__()
128
+ self.channels = channels
129
+ self.out_channels = out_channels or channels
130
+
131
+ self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
132
+
133
+ def forward(self,x):
134
+ return self.up(x)
135
+
136
+
137
+ class Downsample(nn.Module):
138
+ """
139
+ A down-sampling layer with an optional convolution.
140
+ :param channels: channels in the inputs and outputs.
141
+ :param use_conv: a bool determining if a convolution is applied.
142
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
143
+ downsampling occurs in the inner-two dimensions.
144
+ """
145
+
146
+ def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
147
+ super().__init__()
148
+ self.channels = channels
149
+ self.out_channels = out_channels or channels
150
+ self.use_conv = use_conv
151
+ self.dims = dims
152
+ stride = 2 if dims != 3 else (1, 2, 2)
153
+ if use_conv:
154
+ self.op = conv_nd(
155
+ dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
156
+ )
157
+ else:
158
+ assert self.channels == self.out_channels
159
+ self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
160
+
161
+ def forward(self, x):
162
+ assert x.shape[1] == self.channels
163
+ return self.op(x)
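The use_conv flag chooses between a learned strided convolution and parameter-free average pooling; the pooling path cannot change the channel count, hence the assert in __init__. A quick sketch:

down = Downsample(64, use_conv=True)    # strided 3x3 conv, stride 2
x = th.randn(1, 64, 32, 32)
assert down(x).shape == (1, 64, 16, 16)
pool = Downsample(64, use_conv=False)   # 2x avg-pool; in/out channels must match
assert pool(x).shape == (1, 64, 16, 16)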
164
+
165
+
166
+ class ResBlock(TimestepBlock):
167
+ """
168
+ This ResBlock is for UNet only.
169
+ A similar class, stable_diffusion.ldm.modules.diffusionmodules.model.ResnetBlock, builds the autoencoder.
170
+
171
+ A residual block that can optionally change the number of channels.
172
+ :param channels: the number of input channels.
173
+ :param emb_channels: the number of timestep embedding channels.
174
+ :param dropout: the rate of dropout.
175
+ :param out_channels: if specified, the number of out channels.
176
+ :param use_conv: if True and out_channels is specified, use a spatial
177
+ convolution instead of a smaller 1x1 convolution to change the
178
+ channels in the skip connection.
179
+ :param dims: determines if the signal is 1D, 2D, or 3D.
180
+ :param use_checkpoint: if True, use gradient checkpointing on this module.
181
+ :param up: if True, use this block for upsampling.
182
+ :param down: if True, use this block for downsampling.
183
+ """
184
+
185
+ def __init__(
186
+ self,
187
+ channels,
188
+ emb_channels,
189
+ dropout,
190
+ out_channels=None,
191
+ use_conv=False,
192
+ use_scale_shift_norm=False,
193
+ dims=2,
194
+ use_checkpoint=False,
195
+ up=False,
196
+ down=False,
197
+ ):
198
+ super().__init__()
199
+ self.channels = channels
200
+ self.emb_channels = emb_channels
201
+ self.dropout = dropout
202
+ self.out_channels = out_channels or channels # if out_channels is None then keep the input channel number unchanged.
203
+ self.use_conv = use_conv
204
+ self.use_checkpoint = use_checkpoint
205
+ self.use_scale_shift_norm = use_scale_shift_norm
206
+
207
+ self.in_layers = nn.Sequential(
208
+ normalization(channels), # GroupNorm
209
+ nn.SiLU(),
210
+ conv_nd(dims, channels, self.out_channels, 3, padding=1),
211
+ )
212
+ # This flag marks whether the block changes the feature-map size
213
+ self.updown = up or down # True when the block up- or down-samples
214
+
215
+ if up:
216
+ self.h_upd = Upsample(channels, False, dims) # False: not using conv
217
+ self.x_upd = Upsample(channels, False, dims)
218
+ elif down:
219
+ self.h_upd = Downsample(channels, False, dims)
220
+ self.x_upd = Downsample(channels, False, dims)
221
+ else:
222
+ self.h_upd = self.x_upd = nn.Identity()
223
+
224
+ self.emb_layers = nn.Sequential(
225
+ nn.SiLU(),
226
+ linear(
227
+ emb_channels,
228
+ # use_scale_shift_norm = False
229
+ 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
230
+ ),
231
+ )
232
+ self.out_layers = nn.Sequential(
233
+ normalization(self.out_channels), # channel-wise normalization
234
+ nn.SiLU(),
235
+ nn.Dropout(p=dropout),
236
+ zero_module( # zero_module just means zero initialization
237
+ conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
238
+ ),
239
+ )
240
+
241
+ if self.out_channels == channels:
242
+ self.skip_connection = nn.Identity()
243
+ elif use_conv:
244
+ self.skip_connection = conv_nd(
245
+ dims, channels, self.out_channels, 3, padding=1
246
+ )
247
+ else:
248
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
249
+
250
+ def forward(self, x, emb):
251
+ """
252
+ Apply the block to a Tensor, conditioned on a timestep embedding.
253
+ :param x: an [N x C x ...] Tensor of features.
254
+ :param emb: an [N x emb_channels] Tensor of timestep embeddings.
255
+ :return: an [N x C x ...] Tensor of outputs.
256
+ """
257
+ return checkpoint(
258
+ self._forward, (x, emb), self.parameters(), self.use_checkpoint
259
+ )
260
+
261
+ def _forward(self, x, emb):
262
+ # Does this block involve up/down-sampling?
263
+ if self.updown:
264
+ # this splits the in_layers with [norm, SiLU] (in_rest) and Conv2d (in_conv)
265
+ in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
266
+ h = in_rest(x)
267
+ h = self.h_upd(h)
268
+ x = self.x_upd(x)
269
+ h = in_conv(h)
270
+ else:
271
+ h = self.in_layers(x)
272
+ emb_out = self.emb_layers(emb).type(h.dtype)
273
+ while len(emb_out.shape) < len(h.shape):
274
+ emb_out = emb_out[..., None]
275
+
276
+ # use_scale_shift_norm defaults to False and is left at False throughout this codebase
277
+ if self.use_scale_shift_norm:
278
+ out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
279
+ scale, shift = th.chunk(emb_out, 2, dim=1)
280
+ h = out_norm(h) * (1 + scale) + shift
281
+ h = out_rest(h)
282
+ else:
283
+ h = h + emb_out
284
+ h = self.out_layers(h)
285
+ return self.skip_connection(x) + h
286
+
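Because resampling and channel remapping both live inside the block, a single ResBlock can resize and rewidth the feature map while the skip path is resized in lockstep by x_upd. A sketch with hypothetical sizes:

blk = ResBlock(channels=64, emb_channels=256, dropout=0.0, out_channels=32, up=True)
x, emb = th.randn(1, 64, 16, 16), th.randn(1, 256)
assert blk(x, emb).shape == (1, 32, 32, 32)  # 2x upsample plus 64 -> 32 channels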
287
+
288
+ class AttentionBlock(nn.Module):
289
+ """
290
+ An attention block that allows spatial positions to attend to each other.
291
+ Originally ported from here, but adapted to the N-d case.
292
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
293
+ """
294
+
295
+ def __init__(
296
+ self,
297
+ channels,
298
+ num_heads=1,
299
+ num_head_channels=-1,
300
+ use_checkpoint=False,
301
+ use_new_attention_order=False,
302
+ ):
303
+ super().__init__()
304
+ self.channels = channels
305
+ if num_head_channels == -1:
306
+ self.num_heads = num_heads
307
+ else:
308
+ assert (
309
+ channels % num_head_channels == 0
310
+ ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
311
+ self.num_heads = channels // num_head_channels
312
+ self.use_checkpoint = use_checkpoint
313
+ self.norm = normalization(channels)
314
+ self.qkv = conv_nd(1, channels, channels * 3, 1)
315
+ if use_new_attention_order:
316
+ # split qkv before split heads
317
+ self.attention = QKVAttention(self.num_heads)
318
+ else:
319
+ # split heads before split qkv
320
+ self.attention = QKVAttentionLegacy(self.num_heads)
321
+
322
+ self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
323
+
324
+ def forward(self, x):
325
+ return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
326
+ #return pt_checkpoint(self._forward, x) # pytorch
327
+
328
+ def _forward(self, x):
329
+ b, c, *spatial = x.shape
330
+ x = x.reshape(b, c, -1)
331
+ qkv = self.qkv(self.norm(x))
332
+ h = self.attention(qkv)
333
+ h = self.proj_out(h)
334
+ return (x + h).reshape(b, c, *spatial)
335
+
336
+
337
+ def count_flops_attn(model, _x, y):
338
+ """
339
+ A counter for the `thop` package to count the operations in an
340
+ attention operation.
341
+ Meant to be used like:
342
+ macs, params = thop.profile(
343
+ model,
344
+ inputs=(inputs, timestamps),
345
+ custom_ops={QKVAttention: QKVAttention.count_flops},
346
+ )
347
+ """
348
+ b, c, *spatial = y[0].shape
349
+ num_spatial = int(np.prod(spatial))
350
+ # We perform two matmuls with the same number of ops.
351
+ # The first computes the weight matrix, the second computes
352
+ # the combination of the value vectors.
353
+ matmul_ops = 2 * b * (num_spatial ** 2) * c
354
+ model.total_ops += th.DoubleTensor([matmul_ops])
355
+
356
+
357
+ class QKVAttentionLegacy(nn.Module):
358
+ """
359
+ A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
360
+ """
361
+
362
+ def __init__(self, n_heads):
363
+ super().__init__()
364
+ self.n_heads = n_heads
365
+
366
+ def forward(self, qkv):
367
+ """
368
+ Apply QKV attention.
369
+ :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
370
+ :return: an [N x (H * C) x T] tensor after attention.
371
+ """
372
+ bs, width, length = qkv.shape
373
+ assert width % (3 * self.n_heads) == 0
374
+ ch = width // (3 * self.n_heads)
375
+ q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
376
+ scale = 1 / math.sqrt(math.sqrt(ch))
377
+ weight = th.einsum(
378
+ "bct,bcs->bts", q * scale, k * scale
379
+ ) # More stable with f16 than dividing afterwards
380
+ weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
381
+ a = th.einsum("bts,bcs->bct", weight, v)
382
+ return a.reshape(bs, -1, length)
383
+
384
+ @staticmethod
385
+ def count_flops(model, _x, y):
386
+ return count_flops_attn(model, _x, y)
387
+
388
+
389
+ class QKVAttention(nn.Module):
390
+ """
391
+ A module which performs QKV attention and splits in a different order.
392
+ """
393
+
394
+ def __init__(self, n_heads):
395
+ super().__init__()
396
+ self.n_heads = n_heads
397
+
398
+ def forward(self, qkv):
399
+ """
400
+ Apply QKV attention.
401
+ :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
402
+ :return: an [N x (H * C) x T] tensor after attention.
403
+ """
404
+ bs, width, length = qkv.shape
405
+ assert width % (3 * self.n_heads) == 0
406
+ ch = width // (3 * self.n_heads)
407
+ q, k, v = qkv.chunk(3, dim=1)
408
+ scale = 1 / math.sqrt(math.sqrt(ch))
409
+ weight = th.einsum(
410
+ "bct,bcs->bts",
411
+ (q * scale).view(bs * self.n_heads, ch, length),
412
+ (k * scale).view(bs * self.n_heads, ch, length),
413
+ ) # More stable with f16 than dividing afterwards
414
+ weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
415
+ a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
416
+ return a.reshape(bs, -1, length)
417
+
418
+ @staticmethod
419
+ def count_flops(model, _x, y):
420
+ return count_flops_attn(model, _x, y)
421
+
422
+
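The two modules expect different qkv layouts: QKVAttentionLegacy assumes heads-major [N x (H * 3 * C) x T] (heads split first, then q/k/v), while QKVAttention assumes qkv-major [N x (3 * H * C) x T]. Both return [N x (H * C) x T]. A sketch with hypothetical sizes:

n_heads, ch, T = 4, 8, 16
qkv = th.randn(2, 3 * n_heads * ch, T)
out_new = QKVAttention(n_heads)(qkv)         # splits q, k, v first, then heads
out_old = QKVAttentionLegacy(n_heads)(qkv)   # splits heads first, then q, k, v
assert out_new.shape == out_old.shape == (2, n_heads * ch, T)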
423
+ class UNetModel(nn.Module):
424
+ """
425
+ The full UNet model with attention and timestep embedding.
426
+ :param in_channels: channels in the input Tensor.
427
+ :param model_channels: base channel count for the model.
428
+ :param out_channels: channels in the output Tensor.
429
+ :param num_res_blocks: number of residual blocks per downsample.
430
+ :param attention_resolutions: a collection of downsample rates at which
431
+ attention will take place. May be a set, list, or tuple.
432
+ For example, if this contains 4, then at 4x downsampling, attention
433
+ will be used.
434
+ :param dropout: the dropout probability.
435
+ :param channel_mult: channel multiplier for each level of the UNet.
436
+ :param conv_resample: if True, use learned convolutions for upsampling and
437
+ downsampling.
438
+ :param dims: determines if the signal is 1D, 2D, or 3D.
439
+ :param num_classes: if specified (as an int), then this model will be
440
+ class-conditional with `num_classes` classes.
441
+ :param use_checkpoint: use gradient checkpointing to reduce memory usage.
442
+ :param num_heads: the number of attention heads in each attention layer.
443
+ :param num_heads_channels: if specified, ignore num_heads and instead use
444
+ a fixed channel width per attention head.
445
+ :param num_heads_upsample: works with num_heads to set a different number
446
+ of heads for upsampling. Deprecated.
447
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
448
+ :param resblock_updown: use residual blocks for up/downsampling.
449
+ :param use_new_attention_order: use a different attention pattern for potentially
450
+ increased efficiency.
451
+ """
452
+
453
+ def __init__(
454
+ self,
455
+ image_size,
456
+ in_channels, # 8
457
+ model_channels, # 320
458
+ out_channels, # 4
459
+ num_res_blocks, # 2
460
+ attention_resolutions, # [ 4, 2, 1 ]
461
+ dropout=0,
462
+ channel_mult=(1, 2, 4, 8), # [ 1, 2, 4, 4 ]
463
+ conv_resample=True,
464
+ dims=2,
465
+ num_classes=None,
466
+ use_checkpoint=False, # True
467
+ use_fp16=False,
468
+ num_heads=-1, # 8
469
+ num_head_channels=-1,
470
+ num_heads_upsample=-1,
471
+ use_scale_shift_norm=False,
472
+ resblock_updown=False,
473
+ use_new_attention_order=False,
474
+ use_spatial_transformer=False, # True, custom transformer support
475
+ transformer_depth=1, # custom transformer support
476
+ context_dim=None, # 768, custom transformer support
477
+ n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
478
+ legacy=True, # False
479
+ disable_self_attentions=None,
480
+ num_attention_blocks=None
481
+ ):
482
+ super().__init__()
483
+
484
+ # Parameter sanity check and preparation
485
+ if use_spatial_transformer:
486
+ assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
487
+
488
+ if context_dim is not None:
489
+ assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
490
+ from omegaconf.listconfig import ListConfig
491
+ if type(context_dim) == ListConfig:
492
+ context_dim = list(context_dim)
493
+
494
+ if num_heads_upsample == -1:
495
+ num_heads_upsample = num_heads
496
+
497
+ if num_heads == -1:
498
+ assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
499
+
500
+ if num_head_channels == -1:
501
+ assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
502
+
503
+ self.image_size = image_size
504
+ self.in_channels = in_channels
505
+ self.model_channels = model_channels
506
+ self.out_channels = out_channels
507
+ if isinstance(num_res_blocks, int):
508
+ self.num_res_blocks = len(channel_mult) * [num_res_blocks]
509
+ else:
510
+ if len(num_res_blocks) != len(channel_mult):
511
+ raise ValueError("provide num_res_blocks either as an int (globally constant) or "
512
+ "as a list/tuple (per-level) with the same length as channel_mult")
513
+ self.num_res_blocks = num_res_blocks
514
+ #self.num_res_blocks = num_res_blocks
515
+ if disable_self_attentions is not None:
516
+ # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
517
+ assert len(disable_self_attentions) == len(channel_mult)
518
+ if num_attention_blocks is not None:
519
+ assert len(num_attention_blocks) == len(self.num_res_blocks)
520
+ assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
521
+ print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
522
+ f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
523
+ f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
524
+ f"attention will still not be set.") # todo: convert to warning
525
+
526
+ self.attention_resolutions = attention_resolutions
527
+ self.dropout = dropout
528
+ self.channel_mult = channel_mult
529
+ self.conv_resample = conv_resample
530
+ self.num_classes = num_classes
531
+ self.use_checkpoint = use_checkpoint
532
+ self.dtype = th.float16 if use_fp16 else th.float32
533
+ self.num_heads = num_heads
534
+ self.num_head_channels = num_head_channels
535
+ self.num_heads_upsample = num_heads_upsample
536
+ self.predict_codebook_ids = n_embed is not None
537
+ self.dim_heads = []
538
+ # model_channels 320, time_embed_dim 1280
539
+ # a simple MLP
540
+ time_embed_dim = model_channels * 4 # 1280, this is fixed throughout the class.
541
+ self.time_embed = nn.Sequential(
542
+ linear(model_channels, time_embed_dim),
543
+ nn.SiLU(),
544
+ linear(time_embed_dim, time_embed_dim),
545
+ )
546
+
547
+ if self.num_classes is not None: # None
548
+ self.label_emb = nn.Embedding(num_classes, time_embed_dim)
549
+
550
+ # The input blocks downsample the feature map and increase the channel width
551
+ self.input_blocks = nn.ModuleList(
552
+ [
553
+ # TimestepEmbedSequential is just a wrapper that passes the timestep embedding to children that accept it
554
+ TimestepEmbedSequential(
555
+ # a normal conv2d layer, dims=2, in_channels=8, out_channels=320, kernel_size=3, padding=1
556
+ conv_nd(dims, in_channels, model_channels, 3, padding=1)
557
+ )
558
+ ]
559
+ )
560
+ self._feature_size = model_channels # 320
561
+ input_block_chans = [model_channels] # [320]
562
+ ch = model_channels # 320
563
+ ds = 1 # current downsampling rate; only the first three levels below add downsampling blocks
564
+ for level, mult in enumerate(channel_mult): # [ 1, 2, 4, 4 ]
565
+ for nr in range(self.num_res_blocks[level]): # 2
566
+ layers = [
567
+ ResBlock(
568
+ ch,
569
+ time_embed_dim,
570
+ dropout, # 0
571
+ out_channels=mult * model_channels,
572
+ dims=dims, # 2
573
+ use_checkpoint=use_checkpoint, # True
574
+ use_scale_shift_norm=use_scale_shift_norm, # False
575
+ )
576
+ ]
577
+ ch = mult * model_channels
578
+ if ds in attention_resolutions: # [ 4, 2, 1 ]
579
+ if num_head_channels == -1:
580
+ dim_head = ch // num_heads # num_heads = 8
581
+ else:
582
+ num_heads = ch // num_head_channels
583
+ dim_head = num_head_channels
584
+ if legacy: # False
585
+ #num_heads = 1
586
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
587
+ if exists(disable_self_attentions):
588
+ disabled_sa = disable_self_attentions[level]
589
+ else:
590
+ disabled_sa = False
591
+
592
+ if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
593
+ self.dim_heads.append(dim_head)
594
+ layers.append(
595
+ AttentionBlock(
596
+ ch,
597
+ use_checkpoint=use_checkpoint,
598
+ num_heads=num_heads,
599
+ num_head_channels=dim_head,
600
+ use_new_attention_order=use_new_attention_order,
601
+ ) if not use_spatial_transformer else SpatialTransformer(
602
+ ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
603
+ disable_self_attn=disabled_sa
604
+ )
605
+ )
606
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
607
+ self._feature_size += ch
608
+ input_block_chans.append(ch)
609
+ if level != len(channel_mult) - 1: # no downsampling after the last level
610
+ out_ch = ch
611
+ self.input_blocks.append(
612
+ TimestepEmbedSequential(
613
+ ResBlock(
614
+ ch,
615
+ time_embed_dim,
616
+ dropout,
617
+ out_channels=out_ch,
618
+ dims=dims,
619
+ use_checkpoint=use_checkpoint,
620
+ use_scale_shift_norm=use_scale_shift_norm,
621
+ down=True,
622
+ )
623
+ if resblock_updown
624
+ else Downsample( # either a strided conv or an avg-pool layer (the pooling path requires input and output channels to match)
625
+ ch, conv_resample, dims=dims, out_channels=out_ch
626
+ )
627
+ )
628
+ )
629
+ ch = out_ch
630
+ input_block_chans.append(ch)
631
+ ds *= 2
632
+ self._feature_size += ch
633
+
634
+ if num_head_channels == -1:
635
+ dim_head = ch // num_heads
636
+ else:
637
+ num_heads = ch // num_head_channels
638
+ dim_head = num_head_channels
639
+ if legacy:
640
+ #num_heads = 1
641
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
642
+ print(dim_head)
643
+ print('legacy')
644
+ self.dim_heads.append(dim_head)
645
+ self.middle_block = TimestepEmbedSequential(
646
+ ResBlock( # input channel equals to output channel
647
+ ch,
648
+ time_embed_dim,
649
+ dropout,
650
+ dims=dims,
651
+ use_checkpoint=use_checkpoint,
652
+ use_scale_shift_norm=use_scale_shift_norm,
653
+ ),
654
+ AttentionBlock(
655
+ ch,
656
+ use_checkpoint=use_checkpoint,
657
+ num_heads=num_heads,
658
+ num_head_channels=dim_head,
659
+ use_new_attention_order=use_new_attention_order,
660
+ ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
661
+ ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
662
+ ),
663
+ ResBlock(
664
+ ch,
665
+ time_embed_dim,
666
+ dropout,
667
+ dims=dims,
668
+ use_checkpoint=use_checkpoint,
669
+ use_scale_shift_norm=use_scale_shift_norm,
670
+ ),
671
+ )
672
+ self._feature_size += ch
673
+
674
+ self.output_blocks = nn.ModuleList([])
675
+ for level, mult in list(enumerate(channel_mult))[::-1]:
676
+ for i in range(self.num_res_blocks[level] + 1):
677
+ ich = input_block_chans.pop()
678
+ layers = [
679
+ ResBlock(
680
+ ch + ich,
681
+ time_embed_dim,
682
+ dropout,
683
+ out_channels=model_channels * mult,
684
+ dims=dims,
685
+ use_checkpoint=use_checkpoint,
686
+ use_scale_shift_norm=use_scale_shift_norm,
687
+ )
688
+ ]
689
+ ch = model_channels * mult
690
+ if ds in attention_resolutions: # ds now steps through [8, 4, 2, 1], the reverse of the input_blocks order
691
+ if num_head_channels == -1:
692
+ dim_head = ch // num_heads
693
+ else:
694
+ num_heads = ch // num_head_channels
695
+ dim_head = num_head_channels
696
+ if legacy:
697
+ #num_heads = 1
698
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
699
+ if exists(disable_self_attentions):
700
+ disabled_sa = disable_self_attentions[level]
701
+ else:
702
+ disabled_sa = False
703
+
704
+ if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
705
+ self.dim_heads.append(dim_head)
706
+ layers.append(
707
+ AttentionBlock(
708
+ ch,
709
+ use_checkpoint=use_checkpoint,
710
+ num_heads=num_heads_upsample,
711
+ num_head_channels=dim_head,
712
+ use_new_attention_order=use_new_attention_order,
713
+ ) if not use_spatial_transformer else SpatialTransformer(
714
+ ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
715
+ disable_self_attn=disabled_sa
716
+ )
717
+ )
718
+ if level and i == self.num_res_blocks[level]:
719
+ out_ch = ch
720
+ layers.append(
721
+ ResBlock(
722
+ ch,
723
+ time_embed_dim,
724
+ dropout,
725
+ out_channels=out_ch,
726
+ dims=dims,
727
+ use_checkpoint=use_checkpoint,
728
+ use_scale_shift_norm=use_scale_shift_norm,
729
+ up=True,
730
+ )
731
+ if resblock_updown
732
+ else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
733
+ )
734
+ ds //= 2
735
+ self.output_blocks.append(TimestepEmbedSequential(*layers))
736
+ self._feature_size += ch
737
+
738
+ self.out = nn.Sequential(
739
+ normalization(ch), # GroupNorm32
740
+ nn.SiLU(),
741
+ zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), # input_channels: model_channels, output_channels: out_channels
742
+ )
743
+ if self.predict_codebook_ids: # False
744
+ self.id_predictor = nn.Sequential(
745
+ normalization(ch),
746
+ conv_nd(dims, model_channels, n_embed, 1),
747
+ #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
748
+ )
749
+
750
+ def convert_to_fp16(self):
751
+ """
752
+ Convert the torso of the model to float16.
753
+ """
754
+ self.input_blocks.apply(convert_module_to_f16)
755
+ self.middle_block.apply(convert_module_to_f16)
756
+ self.output_blocks.apply(convert_module_to_f16)
757
+
758
+ def convert_to_fp32(self):
759
+ """
760
+ Convert the torso of the model to float32.
761
+ """
762
+ self.input_blocks.apply(convert_module_to_f32)
763
+ self.middle_block.apply(convert_module_to_f32)
764
+ self.output_blocks.apply(convert_module_to_f32)
765
+
766
+ def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
767
+ """
768
+ Apply the model to an input batch.
769
+ :param x: an [bs, 8, 32, 32] Tensor of inputs.
770
+ :param timesteps: a 1-D batch of timesteps [bs]
771
+ :param context: [bs, 77, 768]
772
+ :param y: an [N] Tensor of labels, if class-conditional.
773
+ :return: an [N x C x ...] Tensor of outputs.
774
+ """
775
+ assert (y is not None) == (
776
+ self.num_classes is not None
777
+ ), "must specify y if and only if the model is class-conditional"
778
+ hs = []
779
+ # timesteps [bs], self.model_channels = 320
780
+ t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
781
+ # t_emb [bs, 320], emb [bs, 1280]
782
+ emb = self.time_embed(t_emb)
783
+
784
+ if self.num_classes is not None: # we do not use num_classes here
785
+ assert y.shape == (x.shape[0],)
786
+ emb = emb + self.label_emb(y)
787
+
788
+ h = x.type(self.dtype)
789
+ for module in self.input_blocks:
790
+ h = module(h, emb, context)
791
+ hs.append(h)
792
+ h = self.middle_block(h, emb, context)
793
+
794
+ for module in self.output_blocks:
795
+ h = th.cat([h, hs.pop()], dim=1)
796
+ h = module(h, emb, context)
797
+ h = h.type(x.dtype)
798
+ if self.predict_codebook_ids:
799
+ return self.id_predictor(h)
800
+ else:
801
+ h = self.out(h)
802
+ return h
803
+
804
+
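Putting the values annotated in the comments together (in_channels=8, model_channels=320, out_channels=4, channel_mult=(1, 2, 4, 4), attention_resolutions=(4, 2, 1), context_dim=768, use_spatial_transformer=True, legacy=False), forward maps a [bs, 8, 32, 32] latent plus a [bs, 77, 768] text embedding to a [bs, 4, 32, 32] prediction. A hypothetical smoke test of that wiring (this instantiates a large model; it is a sketch, not a benchmark):

unet = UNetModel(image_size=32, in_channels=8, model_channels=320, out_channels=4,
                 num_res_blocks=2, attention_resolutions=(4, 2, 1),
                 channel_mult=(1, 2, 4, 4), num_heads=8,
                 use_spatial_transformer=True, transformer_depth=1,
                 context_dim=768, legacy=False)
x = th.randn(1, 8, 32, 32)
t = th.randint(0, 1000, (1,))
ctx = th.randn(1, 77, 768)                      # e.g. CLIP text embeddings
assert unet(x, t, context=ctx).shape == (1, 4, 32, 32)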
805
+ class EncoderUNetModel(nn.Module):
806
+ """
807
+ The half UNet model with attention and timestep embedding.
808
+ For usage, see UNet.
809
+ """
810
+
811
+ def __init__(
812
+ self,
813
+ image_size,
814
+ in_channels,
815
+ model_channels,
816
+ out_channels,
817
+ num_res_blocks,
818
+ attention_resolutions,
819
+ dropout=0,
820
+ channel_mult=(1, 2, 4, 8),
821
+ conv_resample=True,
822
+ dims=2,
823
+ use_checkpoint=False,
824
+ use_fp16=False,
825
+ num_heads=1,
826
+ num_head_channels=-1,
827
+ num_heads_upsample=-1,
828
+ use_scale_shift_norm=False,
829
+ resblock_updown=False,
830
+ use_new_attention_order=False,
831
+ pool="adaptive",
832
+ *args,
833
+ **kwargs
834
+ ):
835
+ super().__init__()
836
+
837
+ if num_heads_upsample == -1:
838
+ num_heads_upsample = num_heads
839
+
840
+ self.in_channels = in_channels
841
+ self.model_channels = model_channels
842
+ self.out_channels = out_channels
843
+ self.num_res_blocks = num_res_blocks
844
+ self.attention_resolutions = attention_resolutions
845
+ self.dropout = dropout
846
+ self.channel_mult = channel_mult
847
+ self.conv_resample = conv_resample
848
+ self.use_checkpoint = use_checkpoint
849
+ self.dtype = th.float16 if use_fp16 else th.float32
850
+ self.num_heads = num_heads
851
+ self.num_head_channels = num_head_channels
852
+ self.num_heads_upsample = num_heads_upsample
853
+
854
+ time_embed_dim = model_channels * 4
855
+ self.time_embed = nn.Sequential(
856
+ linear(model_channels, time_embed_dim),
857
+ nn.SiLU(),
858
+ linear(time_embed_dim, time_embed_dim),
859
+ )
860
+
861
+ self.input_blocks = nn.ModuleList(
862
+ [
863
+ TimestepEmbedSequential(
864
+ conv_nd(dims, in_channels, model_channels, 3, padding=1)
865
+ )
866
+ ]
867
+ )
868
+ self._feature_size = model_channels
869
+ input_block_chans = [model_channels]
870
+ ch = model_channels
871
+ ds = 1
872
+ for level, mult in enumerate(channel_mult):
873
+ for _ in range(num_res_blocks):
874
+ layers = [
875
+ ResBlock(
876
+ ch,
877
+ time_embed_dim,
878
+ dropout,
879
+ out_channels=mult * model_channels,
880
+ dims=dims,
881
+ use_checkpoint=use_checkpoint,
882
+ use_scale_shift_norm=use_scale_shift_norm,
883
+ )
884
+ ]
885
+ ch = mult * model_channels
886
+ if ds in attention_resolutions:
887
+ layers.append(
888
+ AttentionBlock(
889
+ ch,
890
+ use_checkpoint=use_checkpoint,
891
+ num_heads=num_heads,
892
+ num_head_channels=num_head_channels,
893
+ use_new_attention_order=use_new_attention_order,
894
+ )
895
+ )
896
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
897
+ self._feature_size += ch
898
+ input_block_chans.append(ch)
899
+ if level != len(channel_mult) - 1:
900
+ out_ch = ch
901
+ self.input_blocks.append(
902
+ TimestepEmbedSequential(
903
+ ResBlock(
904
+ ch,
905
+ time_embed_dim,
906
+ dropout,
907
+ out_channels=out_ch,
908
+ dims=dims,
909
+ use_checkpoint=use_checkpoint,
910
+ use_scale_shift_norm=use_scale_shift_norm,
911
+ down=True,
912
+ )
913
+ if resblock_updown
914
+ else Downsample(
915
+ ch, conv_resample, dims=dims, out_channels=out_ch
916
+ )
917
+ )
918
+ )
919
+ ch = out_ch
920
+ input_block_chans.append(ch)
921
+ ds *= 2
922
+ self._feature_size += ch
923
+
924
+ self.middle_block = TimestepEmbedSequential(
925
+ ResBlock(
926
+ ch,
927
+ time_embed_dim,
928
+ dropout,
929
+ dims=dims,
930
+ use_checkpoint=use_checkpoint,
931
+ use_scale_shift_norm=use_scale_shift_norm,
932
+ ),
933
+ AttentionBlock(
934
+ ch,
935
+ use_checkpoint=use_checkpoint,
936
+ num_heads=num_heads,
937
+ num_head_channels=num_head_channels,
938
+ use_new_attention_order=use_new_attention_order,
939
+ ),
940
+ ResBlock(
941
+ ch,
942
+ time_embed_dim,
943
+ dropout,
944
+ dims=dims,
945
+ use_checkpoint=use_checkpoint,
946
+ use_scale_shift_norm=use_scale_shift_norm,
947
+ ),
948
+ )
949
+ self._feature_size += ch
950
+ self.pool = pool
951
+ if pool == "adaptive":
952
+ self.out = nn.Sequential(
953
+ normalization(ch),
954
+ nn.SiLU(),
955
+ nn.AdaptiveAvgPool2d((1, 1)),
956
+ zero_module(conv_nd(dims, ch, out_channels, 1)),
957
+ nn.Flatten(),
958
+ )
959
+ elif pool == "attention":
960
+ assert num_head_channels != -1
961
+ self.out = nn.Sequential(
962
+ normalization(ch),
963
+ nn.SiLU(),
964
+ AttentionPool2d(
965
+ (image_size // ds), ch, num_head_channels, out_channels
966
+ ),
967
+ )
968
+ elif pool == "spatial":
969
+ self.out = nn.Sequential(
970
+ nn.Linear(self._feature_size, 2048),
971
+ nn.ReLU(),
972
+ nn.Linear(2048, self.out_channels),
973
+ )
974
+ elif pool == "spatial_v2":
975
+ self.out = nn.Sequential(
976
+ nn.Linear(self._feature_size, 2048),
977
+ normalization(2048),
978
+ nn.SiLU(),
979
+ nn.Linear(2048, self.out_channels),
980
+ )
981
+ else:
982
+ raise NotImplementedError(f"Unexpected {pool} pooling")
983
+
984
+ def convert_to_fp16(self):
985
+ """
986
+ Convert the torso of the model to float16.
987
+ """
988
+ self.input_blocks.apply(convert_module_to_f16)
989
+ self.middle_block.apply(convert_module_to_f16)
990
+
991
+ def convert_to_fp32(self):
992
+ """
993
+ Convert the torso of the model to float32.
994
+ """
995
+ self.input_blocks.apply(convert_module_to_f32)
996
+ self.middle_block.apply(convert_module_to_f32)
997
+
998
+ def forward(self, x, timesteps):
999
+ """
1000
+ Apply the model to an input batch.
1001
+ :param x: an [N x C x ...] Tensor of inputs.
1002
+ :param timesteps: a 1-D batch of timesteps.
1003
+ :return: an [N x K] Tensor of outputs.
1004
+ """
1005
+ emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
1006
+
1007
+ results = []
1008
+ h = x.type(self.dtype)
1009
+ for module in self.input_blocks:
1010
+ h = module(h, emb)
1011
+ if self.pool.startswith("spatial"):
1012
+ results.append(h.type(x.dtype).mean(dim=(2, 3)))
1013
+ h = self.middle_block(h, emb)
1014
+ if self.pool.startswith("spatial"):
1015
+ results.append(h.type(x.dtype).mean(dim=(2, 3)))
1016
+ h = th.cat(results, dim=-1)
1017
+ return self.out(h)
1018
+ else:
1019
+ h = h.type(x.dtype)
1020
+ return self.out(h)
1021
+
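For the spatial pooling modes, the forward pass above collects a spatially averaged feature vector after every input block and after the middle block, then concatenates them for the final linear head. A minimal sketch of that readout (shapes are illustrative only, not taken from any real config):

import torch

# three fake feature maps, as if taken after successive input blocks
feature_maps = [torch.randn(2, c, s, s) for c, s in [(32, 16), (64, 8), (128, 4)]]
pooled = [h.mean(dim=(2, 3)) for h in feature_maps]  # each [N x C_i]
flat = torch.cat(pooled, dim=-1)                     # [N x sum(C_i)], fed to self.out
print(flat.shape)                                    # torch.Size([2, 224])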
stable_diffusion/ldm/modules/diffusionmodules/util.py ADDED
@@ -0,0 +1,321 @@
1
+ # adapted from
2
+ # https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
3
+ # and
4
+ # https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
5
+ # and
6
+ # https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
7
+ #
8
+ # thanks!
9
+
10
+
11
+ import os
12
+ import math
13
+ import torch
14
+ import torch.nn as nn
15
+ import numpy as np
16
+ from einops import repeat
17
+
18
+ import sys
19
+ sys.path.append('.')
20
+
21
+ from stable_diffusion.ldm.util import instantiate_from_config
22
+
23
+
24
+ def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
25
+ if schedule == "linear":
26
+ betas = (
27
+ torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
28
+ )
29
+
30
+ elif schedule == "cosine":
31
+ timesteps = (
32
+ torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
33
+ )
34
+ alphas = timesteps / (1 + cosine_s) * np.pi / 2
35
+ alphas = torch.cos(alphas).pow(2)
36
+ alphas = alphas / alphas[0]
37
+ betas = 1 - alphas[1:] / alphas[:-1]
38
+ betas = np.clip(betas, a_min=0, a_max=0.999)
39
+
40
+ elif schedule == "sqrt_linear":
41
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
42
+ elif schedule == "sqrt":
43
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
44
+ else:
45
+ raise ValueError(f"schedule '{schedule}' unknown.")
46
+ return betas.numpy()
47
+
48
+
49
+ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
50
+ if ddim_discr_method == 'uniform':
51
+ c = num_ddpm_timesteps // num_ddim_timesteps
52
+ ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
53
+ elif ddim_discr_method == 'quad':
54
+ ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
55
+ else:
56
+ raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
57
+
58
+ # assert ddim_timesteps.shape[0] == num_ddim_timesteps
59
+ # add one to get the final alpha values right (the ones from first scale to data during sampling)
60
+ steps_out = ddim_timesteps + 1
61
+ if verbose:
62
+ print(f'Selected timesteps for ddim sampler: {steps_out}')
63
+ return steps_out
64
+
65
+
66
+ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
67
+ # select alphas for computing the variance schedule
68
+ alphas = alphacums[ddim_timesteps]
69
+ alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
70
+
71
+ # according to the formula provided in https://arxiv.org/abs/2010.02502
72
+ sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
73
+ if verbose:
74
+ print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
75
+ print(f'For the chosen value of eta, which is {eta}, '
76
+ f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
77
+ return sigmas, alphas, alphas_prev
78
+
79
+
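# Quick numeric check of the sigma formula above (from https://arxiv.org/abs/2010.02502):
# eta = 0 gives a fully deterministic DDIM sampler (all sigmas are zero), and larger
# eta injects more noise per step. alphacums below is a toy array, not a real schedule.
import numpy as np
alphacums = np.linspace(0.999, 0.01, 1000)
ddim_timesteps = np.arange(1, 1000, 100)
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
for eta in (0.0, 1.0):
    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
    print(eta, sigmas.max())  # 0.0 -> 0.0; 1.0 -> strictly positive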
80
+ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
81
+ """
82
+ Create a beta schedule that discretizes the given alpha_t_bar function,
83
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
84
+ :param num_diffusion_timesteps: the number of betas to produce.
85
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
86
+ produces the cumulative product of (1-beta) up to that
87
+ part of the diffusion process.
88
+ :param max_beta: the maximum beta to use; use values lower than 1 to
89
+ prevent singularities.
90
+ """
91
+ betas = []
92
+ for i in range(num_diffusion_timesteps):
93
+ t1 = i / num_diffusion_timesteps
94
+ t2 = (i + 1) / num_diffusion_timesteps
95
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
96
+ return np.array(betas)
97
+
98
+
99
+ def extract_into_tensor(a, t, x_shape):
100
+ b, *_ = t.shape
101
+ out = a.gather(-1, t)
102
+ return out.reshape(b, *((1,) * (len(x_shape) - 1)))
103
+
104
+
105
+ def checkpoint(func, inputs, params, flag):
106
+ """
107
+ Evaluate a function without caching intermediate activations, allowing for
108
+ reduced memory at the expense of extra compute in the backward pass.
109
+ :param func: the function to evaluate.
110
+ :param inputs: the argument sequence to pass to `func`.
111
+ :param params: a sequence of parameters `func` depends on but does not
112
+ explicitly take as arguments. These are also the parameters with respect to which we calculate the gradient.
113
+ :param flag: if False, disable gradient checkpointing.
114
+ """
115
+
116
+ # Filter out parameters that don't require gradients
117
+ params_with_grads = [p for p in params if p.requires_grad]
118
+
119
+ if flag:
120
+ args = tuple(inputs) + tuple(params_with_grads)
121
+ return CheckpointFunction.apply(func, len(inputs), *args)
122
+ else:
123
+ return func(*inputs)
124
+
125
+
126
+ class CheckpointFunction(torch.autograd.Function):
127
+ """
128
+ Gradient checkpointing implemented as a torch.autograd.Function; saves memory during training by recomputing activations in the backward pass.
129
+ """
130
+ @staticmethod
131
+ def forward(ctx, run_function, length, *args): # args = inputs + params
132
+ """
133
+ The forward pass calls the function without calculating gradients,
134
+ as indicated by the torch.no_grad() context. The parameter args contains
135
+ the inputs and also the parameters w.r.t. which we want to calculate
136
+ the gradient.
137
+ """
138
+ ctx.run_function = run_function
139
+ ctx.input_tensors = list(args[:length]) # inputs
140
+ ctx.input_params = list(args[length:]) # params
141
+
142
+ with torch.no_grad():
143
+ output_tensors = ctx.run_function(*ctx.input_tensors)
144
+ return output_tensors
145
+
146
+ @staticmethod
147
+ def backward(ctx, *output_grads):
148
+ """
149
+ The backward method takes in the gradients of the outputs
150
+ (from the forward method), and computes the gradients of
151
+ the inputs and parameters. This is done by re-running the
152
+ run_function with gradients enabled, and then using
153
+ torch.autograd.grad to calculate the gradients.
154
+ These gradients are then returned.
155
+
156
+ This technique of re-running the forward pass during the
157
+ backward pass to re-compute the values required for gradient
158
+ computation is known as gradient checkpointing. It can
159
+ significantly reduce memory usage during training at the cost
160
+ of increased computation time, as certain intermediate values
161
+ from the forward pass don't need to be stored for the backward pass.
162
+ """
163
+ ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
164
+ with torch.enable_grad():
165
+ # Fixes a bug where the first op in run_function modifies the
166
+ # Tensor storage in place, which is not allowed for detach()'d
167
+ # Tensors.
168
+ shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
169
+ output_tensors = ctx.run_function(*shallow_copies)
170
+ input_grads = torch.autograd.grad(
171
+ output_tensors,
172
+ ctx.input_tensors + ctx.input_params,
173
+ output_grads,
174
+ allow_unused=True,
175
+ )
176
+ del ctx.input_tensors
177
+ del ctx.input_params
178
+ del output_tensors
179
+ return (None, None) + input_grads
180
+
181
+
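# Hedged usage sketch for checkpoint()/CheckpointFunction above: wrap an
# expensive sub-network so its activations are recomputed in backward rather
# than stored. The small Sequential net is a stand-in, not part of this module.
import torch
import torch.nn as nn
net = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
x = torch.randn(8, 64, requires_grad=True)
y = checkpoint(lambda inp: net(inp), (x,), net.parameters(), True)  # flag=False -> plain call
y.sum().backward()
print(x.grad.shape)  # torch.Size([8, 64])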
182
+ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
183
+ """
184
+ Create sinusoidal timestep embeddings.
185
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
186
+ These may be fractional.
187
+ :param dim: the dimension of the output.
188
+ :param max_period: controls the minimum frequency of the embeddings.
189
+ :return: an [N x dim] Tensor of positional embeddings.
190
+ """
191
+ if not repeat_only:
192
+ half = dim // 2
193
+ freqs = torch.exp(
194
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
195
+ ).to(device=timesteps.device)
196
+ args = timesteps[:, None].float() * freqs[None]
197
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
198
+ if dim % 2:
199
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
200
+ else:
201
+ embedding = repeat(timesteps, 'b -> b d', d=dim)
202
+ return embedding
203
+
204
+
205
+ def zero_module(module):
206
+ """
207
+ Zero out the parameters of a module and return it.
208
+ """
209
+ for p in module.parameters():
210
+ p.detach().zero_()
211
+ return module
212
+
213
+
214
+ def scale_module(module, scale):
215
+ """
216
+ Scale the parameters of a module and return it.
217
+ """
218
+ for p in module.parameters():
219
+ p.detach().mul_(scale)
220
+ return module
221
+
222
+
223
+ def mean_flat(tensor):
224
+ """
225
+ Take the mean over all non-batch dimensions.
226
+ """
227
+ return tensor.mean(dim=list(range(1, len(tensor.shape))))
228
+
229
+
230
+ def normalization(channels):
231
+ """
232
+ Make a standard normalization layer.
233
+ :param channels: number of input channels.
234
+ :return: an nn.Module for normalization.
235
+ """
236
+ return GroupNorm32(32, channels)
237
+
238
+
239
+ # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
240
+ class SiLU(nn.Module):
241
+ def forward(self, x):
242
+ return x * torch.sigmoid(x)
243
+
244
+
245
+ class GroupNorm32(nn.GroupNorm):
246
+ """
247
+ Group Normalization is a method that divides the channels of your
248
+ inputs into groups and normalizes these groups separately. This is
249
+ in contrast to Batch Normalization, which normalizes each channel
250
+ across the batch dimension.
251
+
252
+ torch.nn.GroupNorm is used to apply Group Normalization to a 4D or 5D
253
+ input (a mini-batch of input features).
254
+
255
+ The difference in the GroupNorm32 class is that it changes the data
256
+ type of the input tensor to float before passing it to the original
257
+ GroupNorm layer, and then changes it back to the original data type
258
+ after the GroupNorm layer has processed it.
259
+
260
+ This can be useful in situations where the input tensor is not of
261
+ type float. PyTorch's normalization layers expect the input to be
262
+ a float tensor, and may behave unexpectedly if given an integer
263
+ tensor or a tensor of a different floating-point precision. By
264
+ explicitly changing the tensor to float before the normalization,
265
+ this class ensures that the normalization layer always receives a
266
+ tensor of the correct type.
267
+ """
268
+ def forward(self, x):
269
+ return super().forward(x.float()).type(x.dtype)
270
+
271
+
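# Tiny demo of why GroupNorm32 upcasts: given a float16 input, the statistics
# are computed in float32 and the result is cast back to the input dtype.
import torch
gn = GroupNorm32(32, 64)
x = torch.randn(2, 64, 8, 8, dtype=torch.float16)
print(gn(x).dtype)  # torch.float16, but the normalization itself ran in float32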
272
+ def conv_nd(dims, *args, **kwargs):
273
+ """
274
+ Create a 1D, 2D, or 3D convolution module.
275
+ """
276
+ if dims == 1:
277
+ return nn.Conv1d(*args, **kwargs)
278
+ elif dims == 2:
279
+ return nn.Conv2d(*args, **kwargs)
280
+ elif dims == 3:
281
+ return nn.Conv3d(*args, **kwargs)
282
+ raise ValueError(f"unsupported dimensions: {dims}")
283
+
284
+
285
+ def linear(*args, **kwargs):
286
+ """
287
+ Create a linear module.
288
+ """
289
+ return nn.Linear(*args, **kwargs)
290
+
291
+
292
+ def avg_pool_nd(dims, *args, **kwargs):
293
+ """
294
+ Create a 1D, 2D, or 3D average pooling module.
295
+ """
296
+ if dims == 1:
297
+ return nn.AvgPool1d(*args, **kwargs)
298
+ elif dims == 2:
299
+ return nn.AvgPool2d(*args, **kwargs)
300
+ elif dims == 3:
301
+ return nn.AvgPool3d(*args, **kwargs)
302
+ raise ValueError(f"unsupported dimensions: {dims}")
303
+
304
+
305
+ class HybridConditioner(nn.Module):
306
+
307
+ def __init__(self, c_concat_config, c_crossattn_config):
308
+ super().__init__()
309
+ self.concat_conditioner = instantiate_from_config(c_concat_config)
310
+ self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
311
+
312
+ def forward(self, c_concat, c_crossattn):
313
+ c_concat = self.concat_conditioner(c_concat)
314
+ c_crossattn = self.crossattn_conditioner(c_crossattn)
315
+ return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
316
+
317
+
318
+ def noise_like(shape, device, repeat=False):
319
+ repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
320
+ noise = lambda: torch.randn(shape, device=device)
321
+ return repeat_noise() if repeat else noise()
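Two of the helpers above can be exercised directly; a minimal sketch (the dim=320 embedding width is illustrative only, not a value from this file):

import torch

betas = make_beta_schedule("linear", n_timestep=1000)  # numpy array, shape (1000,)
print(betas[0], betas[-1])                             # ~1e-4 ... ~2e-2

t = torch.tensor([0, 250, 999])
emb = timestep_embedding(t, dim=320)                   # sinusoidal, fed to time_embed
print(emb.shape)                                       # torch.Size([3, 320])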
stable_diffusion/ldm/modules/distributions/__init__.py ADDED
File without changes
stable_diffusion/ldm/modules/ema.py ADDED
@@ -0,0 +1,86 @@
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class LitEma(nn.Module):  # Exponential Moving Average (EMA) of model parameters: keeps a decayed, smoothed copy of the weights over training, which stabilizes learning and helps avoid overfitting.
6
+ def __init__(self, model, decay=0.9999, use_num_upates=True, handle_non_trainable=False):
7
+ super().__init__()
8
+ if decay < 0.0 or decay > 1.0:
9
+ raise ValueError('Decay must be between 0 and 1')
10
+ self.frozen_param_names = set(name for name, p in model.named_parameters() if not p.requires_grad)
11
+ self.handle_non_trainable = handle_non_trainable
12
+ self.m_name2s_name = {}
13
+ self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) # defines the rate at which the importance of older observations decreases
14
+ self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
15
+ else torch.tensor(-1, dtype=torch.int)) # whether or not to use the number of updates in the decay calculation
16
+ # store a clone of the model's parameters that will be used to hold the EMA parameters.
17
+ for name, p in model.named_parameters():
18
+ if p.requires_grad or handle_non_trainable:
19
+ # remove '.' since the character is not allowed in buffer names
20
+ s_name = name.replace('.', '')
21
+ self.m_name2s_name.update({name: s_name})
22
+ self.register_buffer(s_name, p.clone().detach().data)
23
+
24
+ self.collected_params = []
25
+
26
+ def update_frozen_params(self, model):
27
+ self.frozen_param_names = set(name for name, p in model.named_parameters() if not p.requires_grad)
28
+
29
+ def forward(self, model):
30
+ decay = self.decay
31
+
32
+ if self.num_updates >= 0:
33
+ self.num_updates += 1
34
+ decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
35
+
36
+ one_minus_decay = 1.0 - decay
37
+
38
+ with torch.no_grad():
39
+ m_param = dict(model.named_parameters())
40
+ shadow_params = dict(self.named_buffers())
41
+
42
+ for key in m_param:
43
+ # Check if this parameter is frozen
44
+ if self.handle_non_trainable and key in self.frozen_param_names:
45
+ continue # Skip EMA update for frozen parameter
46
+ # CHANGED: Added a condition to handle non-trainable parameters
47
+ if m_param[key].requires_grad or (self.handle_non_trainable and not m_param[key].requires_grad):
48
+ sname = self.m_name2s_name[key]
49
+ shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
50
+ shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
51
+ else:
52
+ assert key not in self.m_name2s_name
53
+
54
+ def copy_to(self, model): # copies the current EMA parameters to the model parameters.
55
+ m_param = dict(model.named_parameters())
56
+ shadow_params = dict(self.named_buffers())
57
+ for key in m_param:
58
+ # CHANGED: Added a condition to handle non-trainable parameters
59
+ if m_param[key].requires_grad or (self.handle_non_trainable and not m_param[key].requires_grad):
60
+ # if m_param[key].requires_grad:
61
+ m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
62
+ else:
63
+ assert key not in self.m_name2s_name, f"key {key} not found in shadow parameters"
64
+
65
+ def store(self, parameters):
66
+ """
67
+ Save the current parameters for restoring later.
68
+ Args:
69
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
70
+ temporarily stored.
71
+ """
72
+ self.collected_params = [param.clone() for param in parameters]
73
+
74
+ def restore(self, parameters):
75
+ """
76
+ Restore the parameters stored with the `store` method.
77
+ Useful to validate the model with EMA parameters without affecting the
78
+ original optimization process. Store the parameters before the
79
+ `copy_to` method. After validation (or model saving), use this to
80
+ restore the former parameters.
81
+ Args:
82
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
83
+ updated with the stored parameters.
84
+ """
85
+ for c_param, param in zip(self.collected_params, parameters):
86
+ param.data.copy_(c_param.data)
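The intended LitEma lifecycle, sketched below: update the shadow weights after each optimizer step, then temporarily swap them in for evaluation (nn.Linear here is only a placeholder model):

import torch.nn as nn

model = nn.Linear(4, 4)
ema = LitEma(model)

# after each training step:
ema(model)  # forward() nudges the shadow params toward the current weights

# evaluate with EMA weights, then restore the raw training weights:
ema.store(model.parameters())
ema.copy_to(model)
# ... run validation ...
ema.restore(model.parameters())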
stable_diffusion/ldm/modules/encoders/__init__.py ADDED
File without changes
stable_diffusion/ldm/modules/encoders/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (207 Bytes). View file
 
stable_diffusion/ldm/modules/encoders/__pycache__/modules.cpython-38.pyc ADDED
Binary file (16.9 kB). View file
 
stable_diffusion/ldm/modules/encoders/modules.py ADDED
@@ -0,0 +1,422 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import numpy as np
4
+ from functools import partial
5
+ import kornia
6
+ import sys
7
+ import clip
8
+ sys.path.append(".")
9
+
10
+ from stable_diffusion.ldm.util import instantiate_from_config
11
+ from stable_diffusion.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
12
+ from stable_diffusion.ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
13
+ from stable_diffusion.ldm.util import default
14
+ from stable_diffusion.ldm.thirdp.psp.id_loss import IDFeatures
15
+ import kornia.augmentation as K
16
+
17
+ class AbstractEncoder(nn.Module):
18
+ def __init__(self):
19
+ super().__init__()
20
+
21
+ def encode(self, *args, **kwargs):
22
+ raise NotImplementedError
23
+
24
+ class IdentityEncoder(AbstractEncoder):
25
+
26
+ def encode(self, x):
27
+ return x
28
+
29
+
30
+ class ClassEmbedder(nn.Module):
31
+ def __init__(self, embed_dim, n_classes=1000, key='class'):
32
+ super().__init__()
33
+ self.key = key
34
+ self.embedding = nn.Embedding(n_classes, embed_dim)
35
+
36
+ def forward(self, batch, key=None):
37
+ if key is None:
38
+ key = self.key
39
+ # this is for use in crossattn
40
+ c = batch[key][:, None]
41
+ c = self.embedding(c)
42
+ return c
43
+
44
+
45
+ class TransformerEmbedder(AbstractEncoder):
46
+ """Some transformer encoder layers"""
47
+ def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
48
+ super().__init__()
49
+ self.device = device
50
+ self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
51
+ attn_layers=Encoder(dim=n_embed, depth=n_layer))
52
+
53
+ def forward(self, tokens):
54
+ tokens = tokens.to(self.device) # meh
55
+ z = self.transformer(tokens, return_embeddings=True)
56
+ return z
57
+
58
+ def encode(self, x):
59
+ return self(x)
60
+
61
+
62
+ class BERTTokenizer(AbstractEncoder):
63
+ """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
64
+ def __init__(self, device="cuda", vq_interface=True, max_length=77):
65
+ super().__init__()
66
+ from transformers import BertTokenizerFast # TODO: add to requirements
67
+ self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased", cache_dir="./cache")
68
+ self.device = device
69
+ self.vq_interface = vq_interface
70
+ self.max_length = max_length
71
+
72
+ def forward(self, text):
73
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
74
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
75
+ tokens = batch_encoding["input_ids"].to(self.device)
76
+ return tokens
77
+
78
+ @torch.no_grad()
79
+ def encode(self, text):
80
+ tokens = self(text)
81
+ if not self.vq_interface:
82
+ return tokens
83
+ return None, None, [None, None, tokens]
84
+
85
+ def decode(self, text):
86
+ return text
87
+
88
+
89
+ class BERTEmbedder(AbstractEncoder):
90
+ """Uses the BERT tokenizer and adds some transformer encoder layers"""
91
+ def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
92
+ device="cuda",use_tokenizer=True, embedding_dropout=0.0):
93
+ super().__init__()
94
+ self.use_tknz_fn = use_tokenizer
95
+ if self.use_tknz_fn:
96
+ self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
97
+ self.device = device
98
+ self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
99
+ attn_layers=Encoder(dim=n_embed, depth=n_layer),
100
+ emb_dropout=embedding_dropout)
101
+
102
+ def forward(self, text):
103
+ if self.use_tknz_fn:
104
+ tokens = self.tknz_fn(text)#.to(self.device)
105
+ else:
106
+ tokens = text
107
+ z = self.transformer(tokens, return_embeddings=True)
108
+ return z
109
+
110
+ def encode(self, text):
111
+ # output of length 77
112
+ return self(text)
113
+
114
+
115
+ from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
116
+
117
+ def disabled_train(self, mode=True):
118
+ """Overwrite model.train with this function to make sure train/eval mode
119
+ does not change anymore."""
120
+ return self
121
+
122
+
123
+ class FrozenT5Embedder(AbstractEncoder):
124
+ """Uses the T5 transformer encoder for text"""
125
+ def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
126
+ super().__init__()
127
+ self.tokenizer = T5Tokenizer.from_pretrained(version, cache_dir="./cache")
128
+ self.transformer = T5EncoderModel.from_pretrained(version, cache_dir="./cache")
129
+ self.device = device
130
+ self.max_length = max_length # TODO: typical value?
131
+ self.freeze()
132
+
133
+ def freeze(self):
134
+ self.transformer = self.transformer.eval()
135
+ #self.train = disabled_train
136
+ for param in self.parameters():
137
+ param.requires_grad = False
138
+
139
+ def forward(self, text):
140
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
141
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
142
+ tokens = batch_encoding["input_ids"].to(self.device)
143
+ outputs = self.transformer(input_ids=tokens)
144
+
145
+ z = outputs.last_hidden_state
146
+ return z
147
+
148
+ def encode(self, text):
149
+ return self(text)
150
+
151
+ class FrozenFaceEncoder(AbstractEncoder):
152
+ def __init__(self, model_path, augment=False):
153
+ super().__init__()
154
+ self.loss_fn = IDFeatures(model_path)
155
+ # face encoder is frozen
156
+ for p in self.loss_fn.parameters():
157
+ p.requires_grad = False
158
+ # Mapper is trainable
159
+ self.mapper = torch.nn.Linear(512, 768)
160
+ p = 0.25
161
+ if augment:
162
+ self.augment = K.AugmentationSequential(
163
+ K.RandomHorizontalFlip(p=0.5),
164
+ K.RandomEqualize(p=p),
165
+ K.RandomPlanckianJitter(p=p),
166
+ K.RandomPlasmaBrightness(p=p),
167
+ K.RandomPlasmaContrast(p=p),
168
+ K.ColorJiggle(0.02, 0.2, 0.2, p=p),
169
+ )
170
+ else:
171
+ self.augment = None  # forward() tests "is not None", so use None (not False) to disable augmentation
172
+
173
+ def forward(self, img):
174
+ if isinstance(img, list):
175
+ # Uncondition
176
+ return torch.zeros((1, 1, 768), device=self.mapper.weight.device)
177
+
178
+ if self.augment is not None:
179
+ # Transforms require 0-1
180
+ img = self.augment((img + 1)/2)
181
+ img = 2*img - 1
182
+
183
+ feat = self.loss_fn(img, crop=True)
184
+ feat = self.mapper(feat.unsqueeze(1))
185
+ return feat
186
+
187
+ def encode(self, img):
188
+ return self(img)
189
+
190
+ class FrozenCLIPEmbedder(AbstractEncoder):
191
+ """Uses the CLIP transformer encoder for text (from huggingface)"""
192
+ def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): # clip-vit-base-patch32
193
+ super().__init__()
194
+ self.tokenizer = CLIPTokenizer.from_pretrained(version, cache_dir="./cache")
195
+ self.transformer = CLIPTextModel.from_pretrained(version, cache_dir="./cache")
196
+ self.device = device
197
+ self.max_length = max_length # TODO: typical value?
198
+ self.freeze()
199
+
200
+ def freeze(self):
201
+ self.transformer = self.transformer.eval()
202
+ #self.train = disabled_train
203
+ for param in self.parameters():
204
+ param.requires_grad = False
205
+
206
+ def forward(self, text):
207
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
208
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
209
+ tokens = batch_encoding["input_ids"].to(self.device)
210
+ outputs = self.transformer(input_ids=tokens)
211
+
212
+ z = outputs.last_hidden_state
213
+ return z
214
+
215
+ def encode(self, text):
216
+ return self(text)
217
+
218
+ import torch.nn.functional as F
219
+ from transformers import CLIPVisionModel
220
+ class ClipImageProjector(AbstractEncoder):
221
+ """
222
+ Uses the CLIP image encoder.
223
+ """
224
+ def __init__(self, version="openai/clip-vit-large-patch14", max_length=77): # clip-vit-base-patch32
225
+ super().__init__()
226
+ self.model = CLIPVisionModel.from_pretrained(version, cache_dir="./cache")
227
+ self.model.train()
228
+ self.max_length = max_length # TODO: typical value?
229
+ self.antialias = True
230
+ self.mapper = torch.nn.Linear(1024, 768)
231
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
232
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
233
+ null_cond = self.get_null_cond(version, max_length)
234
+ self.register_buffer('null_cond', null_cond)
235
+
236
+ @torch.no_grad()
237
+ def get_null_cond(self, version, max_length):
238
+ device = self.mean.device
239
+ embedder = FrozenCLIPEmbedder(version=version, device=device, max_length=max_length)
240
+ null_cond = embedder([""])
241
+ return null_cond
242
+
243
+ def preprocess(self, x):
244
+ # Expects inputs in the range -1, 1
245
+ x = kornia.geometry.resize(x, (224, 224),
246
+ interpolation='bicubic',align_corners=True,
247
+ antialias=self.antialias)
248
+ x = (x + 1.) / 2.
249
+ # renormalize according to clip
250
+ x = kornia.enhance.normalize(x, self.mean, self.std)
251
+ return x
252
+
253
+ def forward(self, x):
254
+ if isinstance(x, list):
255
+ return self.null_cond
256
+ # x is assumed to be in range [-1,1]
257
+ x = self.preprocess(x)
258
+ outputs = self.model(pixel_values=x)
259
+ last_hidden_state = outputs.last_hidden_state
260
+ last_hidden_state = self.mapper(last_hidden_state)
261
+ return F.pad(last_hidden_state, [0,0, 0,self.max_length-last_hidden_state.shape[1], 0,0])
262
+
263
+ def encode(self, im):
264
+ return self(im)
265
+
266
+ class ProjectedFrozenCLIPEmbedder(AbstractEncoder):
267
+ def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): # clip-vit-base-patch32
268
+ super().__init__()
269
+ self.embedder = FrozenCLIPEmbedder(version=version, device=device, max_length=max_length)
270
+ self.projection = torch.nn.Linear(768, 768)
271
+
272
+ def forward(self, text):
273
+ z = self.embedder(text)
274
+ return self.projection(z)
275
+
276
+ def encode(self, text):
277
+ return self(text)
278
+
279
+ class FrozenCLIPImageEmbedder(AbstractEncoder):
280
+ """
281
+ Uses the CLIP image encoder.
282
+ Not actually frozen; to freeze it, set cond_stage_trainable=False in the config.
283
+ """
284
+ def __init__(
285
+ self,
286
+ model='ViT-L/14',
287
+ jit=False,
288
+ device='cpu',
289
+ antialias=False,
290
+ ):
291
+ super().__init__()
292
+ self.model, _ = clip.load(name=model, device=device, jit=jit)
293
+ # We don't use the text part so delete it
294
+ del self.model.transformer
295
+ self.antialias = antialias
296
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
297
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
298
+
299
+ def preprocess(self, x):
300
+ # Expects inputs in the range -1, 1
301
+ x = kornia.geometry.resize(x, (224, 224),
302
+ interpolation='bicubic',align_corners=True,
303
+ antialias=self.antialias)
304
+ x = (x + 1.) / 2.
305
+ # renormalize according to clip
306
+ x = kornia.enhance.normalize(x, self.mean, self.std)
307
+ return x
308
+
309
+ def forward(self, x):
310
+ # x is assumed to be in range [-1,1]
311
+ if isinstance(x, list):
312
+ # [""] denotes condition dropout for ucg
313
+ device = self.model.visual.conv1.weight.device
314
+ return torch.zeros(1, 768, device=device)
315
+ return self.model.encode_image(self.preprocess(x)).float()
316
+
317
+ def encode(self, im):
318
+ return self(im).unsqueeze(1)
319
+
320
+ class SpatialRescaler(nn.Module):
321
+ def __init__(self,
322
+ n_stages=1,
323
+ method='bilinear',
324
+ multiplier=0.5,
325
+ in_channels=3,
326
+ out_channels=None,
327
+ bias=False):
328
+ super().__init__()
329
+ self.n_stages = n_stages
330
+ assert self.n_stages >= 0
331
+ assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
332
+ self.multiplier = multiplier
333
+ self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
334
+ self.remap_output = out_channels is not None
335
+ if self.remap_output:
336
+ print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
337
+ self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
338
+
339
+ def forward(self, x):
340
+ for stage in range(self.n_stages):
341
+ x = self.interpolator(x, scale_factor=self.multiplier)
342
+
345
+ x = self.channel_mapper(x)
346
+ return x
347
+
348
+ def encode(self, x):
349
+ return self(x)
350
+
351
+ class LowScaleEncoder(nn.Module):
352
+ def __init__(self, model_config, linear_start, linear_end, timesteps=1000, max_noise_level=250, output_size=64,
353
+ scale_factor=1.0):
354
+ super().__init__()
355
+ self.max_noise_level = max_noise_level
356
+ self.model = instantiate_from_config(model_config)
357
+ self.augmentation_schedule = self.register_schedule(timesteps=timesteps, linear_start=linear_start,
358
+ linear_end=linear_end)
359
+ self.out_size = output_size
360
+ self.scale_factor = scale_factor
361
+
362
+ def register_schedule(self, beta_schedule="linear", timesteps=1000,
363
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
364
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
365
+ cosine_s=cosine_s)
366
+ alphas = 1. - betas
367
+ alphas_cumprod = np.cumprod(alphas, axis=0)
368
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
369
+
370
+ timesteps, = betas.shape
371
+ self.num_timesteps = int(timesteps)
372
+ self.linear_start = linear_start
373
+ self.linear_end = linear_end
374
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
375
+
376
+ to_torch = partial(torch.tensor, dtype=torch.float32)
377
+
378
+ self.register_buffer('betas', to_torch(betas))
379
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
380
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
381
+
382
+ # calculations for diffusion q(x_t | x_{t-1}) and others
383
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
384
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
385
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
386
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
387
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
388
+
389
+ def q_sample(self, x_start, t, noise=None):
390
+ noise = default(noise, lambda: torch.randn_like(x_start))
391
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
392
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
393
+
394
+ def forward(self, x):
395
+ z = self.model.encode(x).sample()
396
+ z = z * self.scale_factor
397
+ noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
398
+ z = self.q_sample(z, noise_level)
399
+ if self.out_size is not None:
400
+ z = torch.nn.functional.interpolate(z, size=self.out_size, mode="nearest") # TODO: experiment with mode
401
+ # z = z.repeat_interleave(2, -2).repeat_interleave(2, -1)
402
+ return z, noise_level
403
+
404
+ def decode(self, z):
405
+ z = z / self.scale_factor
406
+ return self.model.decode(z)
407
+
408
+
409
+ if __name__ == "__main__":
410
+ from stable_diffusion.ldm.util import count_params
411
+ sentences = ["a hedgehog drinking a whiskey", "der mond ist aufgegangen", "Ein Satz mit vielen Sonderzeichen: äöü ß ?! : 'xx-y/@s'"]
412
+ model = FrozenT5Embedder(version="google/t5-v1_1-xl").cuda()
413
+ count_params(model, True)
414
+ z = model(sentences)
415
+ print(z.shape)
416
+
417
+ model = FrozenCLIPEmbedder().cuda()
418
+ count_params(model, True)
419
+ z = model(sentences)
420
+ print(z.shape)
421
+
422
+ print("done.")
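The image encoders above share one preprocessing convention: model-space images live in [-1, 1], while CLIP expects 224x224 inputs normalized with its own statistics. A standalone sketch of that pipeline (the mean/std values mirror the registered buffers):

import torch
import kornia

mean = torch.tensor([0.48145466, 0.4578275, 0.40821073])
std = torch.tensor([0.26862954, 0.26130258, 0.27577711])

img = torch.rand(1, 3, 512, 512) * 2 - 1  # model space, [-1, 1]
img = kornia.geometry.resize(img, (224, 224), interpolation='bicubic', align_corners=True)
img = (img + 1.) / 2.                     # back to [0, 1]
img = kornia.enhance.normalize(img, mean, std)
print(img.shape)                          # torch.Size([1, 3, 224, 224])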
stable_diffusion/ldm/modules/evaluate/adm_evaluator.py ADDED
@@ -0,0 +1,676 @@
1
+ import argparse
2
+ import io
3
+ import os
4
+ import random
5
+ import warnings
6
+ import zipfile
7
+ from abc import ABC, abstractmethod
8
+ from contextlib import contextmanager
9
+ from functools import partial
10
+ from multiprocessing import cpu_count
11
+ from multiprocessing.pool import ThreadPool
12
+ from typing import Iterable, Optional, Tuple
13
+ import yaml
14
+
15
+ import numpy as np
16
+ import requests
17
+ import tensorflow.compat.v1 as tf
18
+ from scipy import linalg
19
+ from tqdm.auto import tqdm
20
+
21
+ INCEPTION_V3_URL = "https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/classify_image_graph_def.pb"
22
+ INCEPTION_V3_PATH = "classify_image_graph_def.pb"
23
+
24
+ FID_POOL_NAME = "pool_3:0"
25
+ FID_SPATIAL_NAME = "mixed_6/conv:0"
26
+
27
+ REQUIREMENTS = "This script has the following requirements: \n" \
28
+ 'tensorflow-gpu>=2.0' + "\n" + 'scipy' + "\n" + "requests" + "\n" + "tqdm"
29
+
30
+
31
+ def main():
32
+ parser = argparse.ArgumentParser()
33
+ parser.add_argument("--ref_batch", help="path to reference batch npz file")
34
+ parser.add_argument("--sample_batch", help="path to sample batch npz file")
35
+ args = parser.parse_args()
36
+
37
+ config = tf.ConfigProto(
38
+ allow_soft_placement=True # allows DecodeJpeg to run on CPU in Inception graph
39
+ )
40
+ config.gpu_options.allow_growth = True
41
+ evaluator = Evaluator(tf.Session(config=config))
42
+
43
+ print("warming up TensorFlow...")
44
+ # This will cause TF to print a bunch of verbose stuff now rather
45
+ # than after the next print(), to help prevent confusion.
46
+ evaluator.warmup()
47
+
48
+ print("computing reference batch activations...")
49
+ ref_acts = evaluator.read_activations(args.ref_batch)
50
+ print("computing/reading reference batch statistics...")
51
+ ref_stats, ref_stats_spatial = evaluator.read_statistics(args.ref_batch, ref_acts)
52
+
53
+ print("computing sample batch activations...")
54
+ sample_acts = evaluator.read_activations(args.sample_batch)
55
+ print("computing/reading sample batch statistics...")
56
+ sample_stats, sample_stats_spatial = evaluator.read_statistics(args.sample_batch, sample_acts)
57
+
58
+ print("Computing evaluations...")
59
+ is_ = evaluator.compute_inception_score(sample_acts[0])
60
+ print("Inception Score:", is_)
61
+ fid = sample_stats.frechet_distance(ref_stats)
62
+ print("FID:", fid)
63
+ sfid = sample_stats_spatial.frechet_distance(ref_stats_spatial)
64
+ print("sFID:", sfid)
65
+ prec, recall = evaluator.compute_prec_recall(ref_acts[0], sample_acts[0])
66
+ print("Precision:", prec)
67
+ print("Recall:", recall)
68
+
69
+ savepath = '/'.join(args.sample_batch.split('/')[:-1])
70
+ results_file = os.path.join(savepath,'evaluation_metrics.yaml')
71
+ print(f'Saving evaluation results to "{results_file}"')
72
+
73
+ results = {
74
+ 'IS': is_,
75
+ 'FID': fid,
76
+ 'sFID': sfid,
77
+ 'Precision': prec,
78
+ 'Recall': recall
79
+ }
80
+
81
+ with open(results_file, 'w') as f:
82
+ yaml.dump(results, f, default_flow_style=False)
83
+
84
+ class InvalidFIDException(Exception):
85
+ pass
86
+
87
+
88
+ class FIDStatistics:
89
+ def __init__(self, mu: np.ndarray, sigma: np.ndarray):
90
+ self.mu = mu
91
+ self.sigma = sigma
92
+
93
+ def frechet_distance(self, other, eps=1e-6):
94
+ """
95
+ Compute the Frechet distance between two sets of statistics.
96
+ """
97
+ # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L132
98
+ mu1, sigma1 = self.mu, self.sigma
99
+ mu2, sigma2 = other.mu, other.sigma
100
+
101
+ mu1 = np.atleast_1d(mu1)
102
+ mu2 = np.atleast_1d(mu2)
103
+
104
+ sigma1 = np.atleast_2d(sigma1)
105
+ sigma2 = np.atleast_2d(sigma2)
106
+
107
+ assert (
108
+ mu1.shape == mu2.shape
109
+ ), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}"
110
+ assert (
111
+ sigma1.shape == sigma2.shape
112
+ ), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}"
113
+
114
+ diff = mu1 - mu2
115
+
116
+ # product might be almost singular
117
+ covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
118
+ if not np.isfinite(covmean).all():
119
+ msg = (
120
+ "fid calculation produces singular product; adding %s to diagonal of cov estimates"
121
+ % eps
122
+ )
123
+ warnings.warn(msg)
124
+ offset = np.eye(sigma1.shape[0]) * eps
125
+ covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
126
+
127
+ # numerical error might give slight imaginary component
128
+ if np.iscomplexobj(covmean):
129
+ if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
130
+ m = np.max(np.abs(covmean.imag))
131
+ raise ValueError("Imaginary component {}".format(m))
132
+ covmean = covmean.real
133
+
134
+ tr_covmean = np.trace(covmean)
135
+
136
+ return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
137
+
138
+
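# Toy check of FIDStatistics.frechet_distance: identical Gaussians give ~0,
# and with equal covariances a mean shift d contributes exactly ||d||^2.
import numpy as np
rng = np.random.default_rng(0)
acts = rng.normal(size=(5000, 8))
mu, sigma = acts.mean(axis=0), np.cov(acts, rowvar=False)
a = FIDStatistics(mu, sigma)
b = FIDStatistics(mu + 1.0, sigma)
print(a.frechet_distance(a))  # ~0.0
print(a.frechet_distance(b))  # ~8.0 = squared L2 norm of the mean shift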
139
+ class Evaluator:
140
+ def __init__(
141
+ self,
142
+ session,
143
+ batch_size=64,
144
+ softmax_batch_size=512,
145
+ ):
146
+ self.sess = session
147
+ self.batch_size = batch_size
148
+ self.softmax_batch_size = softmax_batch_size
149
+ self.manifold_estimator = ManifoldEstimator(session)
150
+ with self.sess.graph.as_default():
151
+ self.image_input = tf.placeholder(tf.float32, shape=[None, None, None, 3])
152
+ self.softmax_input = tf.placeholder(tf.float32, shape=[None, 2048])
153
+ self.pool_features, self.spatial_features = _create_feature_graph(self.image_input)
154
+ self.softmax = _create_softmax_graph(self.softmax_input)
155
+
156
+ def warmup(self):
157
+ self.compute_activations(np.zeros([1, 8, 64, 64, 3]))
158
+
159
+ def read_activations(self, npz_path: str) -> Tuple[np.ndarray, np.ndarray]:
160
+ with open_npz_array(npz_path, "arr_0") as reader:
161
+ return self.compute_activations(reader.read_batches(self.batch_size))
162
+
163
+ def compute_activations(self, batches: Iterable[np.ndarray],silent=False) -> Tuple[np.ndarray, np.ndarray]:
164
+ """
165
+ Compute image features for downstream evals.
166
+
167
+ :param batches: an iterator over NHWC numpy arrays in [0, 255].
168
+ :return: a tuple of numpy arrays of shape [N x X], where X is a feature
169
+ dimension. The tuple is (pool_3, spatial).
170
+ """
171
+ preds = []
172
+ spatial_preds = []
173
+ it = batches if silent else tqdm(batches)
174
+ for batch in it:
175
+ batch = batch.astype(np.float32)
176
+ pred, spatial_pred = self.sess.run(
177
+ [self.pool_features, self.spatial_features], {self.image_input: batch}
178
+ )
179
+ preds.append(pred.reshape([pred.shape[0], -1]))
180
+ spatial_preds.append(spatial_pred.reshape([spatial_pred.shape[0], -1]))
181
+ return (
182
+ np.concatenate(preds, axis=0),
183
+ np.concatenate(spatial_preds, axis=0),
184
+ )
185
+
186
+ def read_statistics(
187
+ self, npz_path: str, activations: Tuple[np.ndarray, np.ndarray]
188
+ ) -> Tuple[FIDStatistics, FIDStatistics]:
189
+ obj = np.load(npz_path)
190
+ if "mu" in list(obj.keys()):
191
+ return FIDStatistics(obj["mu"], obj["sigma"]), FIDStatistics(
192
+ obj["mu_s"], obj["sigma_s"]
193
+ )
194
+ return tuple(self.compute_statistics(x) for x in activations)
195
+
196
+ def compute_statistics(self, activations: np.ndarray) -> FIDStatistics:
197
+ mu = np.mean(activations, axis=0)
198
+ sigma = np.cov(activations, rowvar=False)
199
+ return FIDStatistics(mu, sigma)
200
+
201
+ def compute_inception_score(self, activations: np.ndarray, split_size: int = 5000) -> float:
202
+ softmax_out = []
203
+ for i in range(0, len(activations), self.softmax_batch_size):
204
+ acts = activations[i : i + self.softmax_batch_size]
205
+ softmax_out.append(self.sess.run(self.softmax, feed_dict={self.softmax_input: acts}))
206
+ preds = np.concatenate(softmax_out, axis=0)
207
+ # https://github.com/openai/improved-gan/blob/4f5d1ec5c16a7eceb206f42bfc652693601e1d5c/inception_score/model.py#L46
208
+ scores = []
209
+ for i in range(0, len(preds), split_size):
210
+ part = preds[i : i + split_size]
211
+ kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
212
+ kl = np.mean(np.sum(kl, 1))
213
+ scores.append(np.exp(kl))
214
+ return float(np.mean(scores))
215
+
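# The Inception Score computed above is exp(E_x[KL(p(y|x) || p(y))]). Toy
# check: perfectly confident predictions spread evenly over K classes give K.
import numpy as np
K = 10
preds = np.eye(K)  # one fully confident sample per class
kl = preds * (np.log(preds + 1e-12) - np.log(preds.mean(axis=0, keepdims=True)))
print(np.exp(np.mean(kl.sum(axis=1))))  # ~10.0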
216
+ def compute_prec_recall(
217
+ self, activations_ref: np.ndarray, activations_sample: np.ndarray
218
+ ) -> Tuple[float, float]:
219
+ radii_1 = self.manifold_estimator.manifold_radii(activations_ref)
220
+ radii_2 = self.manifold_estimator.manifold_radii(activations_sample)
221
+ pr = self.manifold_estimator.evaluate_pr(
222
+ activations_ref, radii_1, activations_sample, radii_2
223
+ )
224
+ return (float(pr[0][0]), float(pr[1][0]))
225
+
226
+
227
+ class ManifoldEstimator:
228
+ """
229
+ A helper for comparing manifolds of feature vectors.
230
+
231
+ Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L57
232
+ """
233
+
234
+ def __init__(
235
+ self,
236
+ session,
237
+ row_batch_size=10000,
238
+ col_batch_size=10000,
239
+ nhood_sizes=(3,),
240
+ clamp_to_percentile=None,
241
+ eps=1e-5,
242
+ ):
243
+ """
244
+ Estimate the manifold of given feature vectors.
245
+
246
+ :param session: the TensorFlow session.
247
+ :param row_batch_size: row batch size to compute pairwise distances
248
+ (parameter to trade-off between memory usage and performance).
249
+ :param col_batch_size: column batch size to compute pairwise distances.
250
+ :param nhood_sizes: number of neighbors used to estimate the manifold.
251
+ :param clamp_to_percentile: prune hyperspheres that have radius larger than
252
+ the given percentile.
253
+ :param eps: small number for numerical stability.
254
+ """
255
+ self.distance_block = DistanceBlock(session)
256
+ self.row_batch_size = row_batch_size
257
+ self.col_batch_size = col_batch_size
258
+ self.nhood_sizes = nhood_sizes
259
+ self.num_nhoods = len(nhood_sizes)
260
+ self.clamp_to_percentile = clamp_to_percentile
261
+ self.eps = eps
262
+
263
+ def warmup(self):
264
+ feats, radii = (
265
+ np.zeros([1, 2048], dtype=np.float32),
266
+ np.zeros([1, 1], dtype=np.float32),
267
+ )
268
+ self.evaluate_pr(feats, radii, feats, radii)
269
+
270
+ def manifold_radii(self, features: np.ndarray) -> np.ndarray:
271
+ num_images = len(features)
272
+
273
+ # Estimate manifold of features by calculating distances to k-NN of each sample.
274
+ radii = np.zeros([num_images, self.num_nhoods], dtype=np.float32)
275
+ distance_batch = np.zeros([self.row_batch_size, num_images], dtype=np.float32)
276
+ seq = np.arange(max(self.nhood_sizes) + 1, dtype=np.int32)
277
+
278
+ for begin1 in range(0, num_images, self.row_batch_size):
279
+ end1 = min(begin1 + self.row_batch_size, num_images)
280
+ row_batch = features[begin1:end1]
281
+
282
+ for begin2 in range(0, num_images, self.col_batch_size):
283
+ end2 = min(begin2 + self.col_batch_size, num_images)
284
+ col_batch = features[begin2:end2]
285
+
286
+ # Compute distances between batches.
287
+ distance_batch[
288
+ 0 : end1 - begin1, begin2:end2
289
+ ] = self.distance_block.pairwise_distances(row_batch, col_batch)
290
+
291
+ # Find the k-nearest neighbor from the current batch.
292
+ radii[begin1:end1, :] = np.concatenate(
293
+ [
294
+ x[:, self.nhood_sizes]
295
+ for x in _numpy_partition(distance_batch[0 : end1 - begin1, :], seq, axis=1)
296
+ ],
297
+ axis=0,
298
+ )
299
+
300
+ if self.clamp_to_percentile is not None:
301
+ max_distances = np.percentile(radii, self.clamp_to_percentile, axis=0)
302
+ radii[radii > max_distances] = 0
303
+ return radii
304
+
305
+ def evaluate(self, features: np.ndarray, radii: np.ndarray, eval_features: np.ndarray):
306
+ """
307
+ Evaluate whether new feature vectors lie on the estimated manifold.
308
+ """
309
+ num_eval_images = eval_features.shape[0]
310
+ num_ref_images = radii.shape[0]
311
+ distance_batch = np.zeros([self.row_batch_size, num_ref_images], dtype=np.float32)
312
+ batch_predictions = np.zeros([num_eval_images, self.num_nhoods], dtype=np.int32)
313
+ max_realism_score = np.zeros([num_eval_images], dtype=np.float32)
314
+ nearest_indices = np.zeros([num_eval_images], dtype=np.int32)
315
+
316
+ for begin1 in range(0, num_eval_images, self.row_batch_size):
317
+ end1 = min(begin1 + self.row_batch_size, num_eval_images)
318
+ feature_batch = eval_features[begin1:end1]
319
+
320
+ for begin2 in range(0, num_ref_images, self.col_batch_size):
321
+ end2 = min(begin2 + self.col_batch_size, num_ref_images)
322
+ ref_batch = features[begin2:end2]
323
+
324
+ distance_batch[
325
+ 0 : end1 - begin1, begin2:end2
326
+ ] = self.distance_block.pairwise_distances(feature_batch, ref_batch)
327
+
328
+ # From the minibatch of new feature vectors, determine if they are in the estimated manifold.
329
+ # If a feature vector is inside a hypersphere of some reference sample, then
330
+ # the new sample lies at the estimated manifold.
331
+ # The radii of the hyperspheres are determined from distances of neighborhood size k.
332
+ samples_in_manifold = distance_batch[0 : end1 - begin1, :, None] <= radii
333
+ batch_predictions[begin1:end1] = np.any(samples_in_manifold, axis=1).astype(np.int32)
334
+
335
+ max_realism_score[begin1:end1] = np.max(
336
+ radii[:, 0] / (distance_batch[0 : end1 - begin1, :] + self.eps), axis=1
337
+ )
338
+ nearest_indices[begin1:end1] = np.argmin(distance_batch[0 : end1 - begin1, :], axis=1)
339
+
340
+ return {
341
+ "fraction": float(np.mean(batch_predictions)),
342
+ "batch_predictions": batch_predictions,
343
+ "max_realism_score": max_realism_score,
344
+ "nearest_indices": nearest_indices,
345
+ }
346
+
347
+ def evaluate_pr(
348
+ self,
349
+ features_1: np.ndarray,
350
+ radii_1: np.ndarray,
351
+ features_2: np.ndarray,
352
+ radii_2: np.ndarray,
353
+ ) -> Tuple[np.ndarray, np.ndarray]:
354
+ """
355
+ Evaluate precision and recall efficiently.
356
+
357
+ :param features_1: [N1 x D] feature vectors for reference batch.
358
+ :param radii_1: [N1 x K1] radii for reference vectors.
359
+ :param features_2: [N2 x D] feature vectors for the other batch.
360
+ :param radii_2: [N x K2] radii for other vectors.
361
+ :return: a tuple of arrays for (precision, recall):
362
+ - precision: an np.ndarray of length K1
363
+ - recall: an np.ndarray of length K2
364
+ """
365
+ features_1_status = np.zeros([len(features_1), radii_2.shape[1]], dtype=bool)  # np.bool was removed in NumPy 1.24
366
+ features_2_status = np.zeros([len(features_2), radii_1.shape[1]], dtype=bool)
367
+ for begin_1 in range(0, len(features_1), self.row_batch_size):
368
+ end_1 = begin_1 + self.row_batch_size
369
+ batch_1 = features_1[begin_1:end_1]
370
+ for begin_2 in range(0, len(features_2), self.col_batch_size):
371
+ end_2 = begin_2 + self.col_batch_size
372
+ batch_2 = features_2[begin_2:end_2]
373
+ batch_1_in, batch_2_in = self.distance_block.less_thans(
374
+ batch_1, radii_1[begin_1:end_1], batch_2, radii_2[begin_2:end_2]
375
+ )
376
+ features_1_status[begin_1:end_1] |= batch_1_in
377
+ features_2_status[begin_2:end_2] |= batch_2_in
378
+ return (
379
+ np.mean(features_2_status.astype(np.float64), axis=0),
380
+ np.mean(features_1_status.astype(np.float64), axis=0),
381
+ )
382
+
383
+
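# Intuition for evaluate_pr above: a sample is "covered" if it falls inside
# any hypersphere around the other set (radius = its k-NN distance).
# Precision = fraction of generated samples covered by the reference manifold;
# recall = fraction of reference samples covered by the generated manifold.
# A 1-D toy with a single hand-picked radius:
import numpy as np
ref, gen, radius = np.array([0.0, 1.0]), np.array([0.1, 5.0]), 0.5
precision = np.mean([np.any(np.abs(g - ref) <= radius) for g in gen])
recall = np.mean([np.any(np.abs(r - gen) <= radius) for r in ref])
print(precision, recall)  # 0.5 0.5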
384
+ class DistanceBlock:
385
+ """
386
+ Calculate pairwise distances between vectors.
387
+
388
+ Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L34
389
+ """
390
+
391
+ def __init__(self, session):
392
+ self.session = session
393
+
394
+ # Initialize TF graph to calculate pairwise distances.
395
+ with session.graph.as_default():
396
+ self._features_batch1 = tf.placeholder(tf.float32, shape=[None, None])
397
+ self._features_batch2 = tf.placeholder(tf.float32, shape=[None, None])
398
+ distance_block_16 = _batch_pairwise_distances(
399
+ tf.cast(self._features_batch1, tf.float16),
400
+ tf.cast(self._features_batch2, tf.float16),
401
+ )
402
+ self.distance_block = tf.cond(
403
+ tf.reduce_all(tf.math.is_finite(distance_block_16)),
404
+ lambda: tf.cast(distance_block_16, tf.float32),
405
+ lambda: _batch_pairwise_distances(self._features_batch1, self._features_batch2),
406
+ )
407
+
408
+ # Extra logic for less thans.
409
+ self._radii1 = tf.placeholder(tf.float32, shape=[None, None])
410
+ self._radii2 = tf.placeholder(tf.float32, shape=[None, None])
411
+ dist32 = tf.cast(self.distance_block, tf.float32)[..., None]
412
+ self._batch_1_in = tf.math.reduce_any(dist32 <= self._radii2, axis=1)
413
+ self._batch_2_in = tf.math.reduce_any(dist32 <= self._radii1[:, None], axis=0)
414
+
415
+ def pairwise_distances(self, U, V):
416
+ """
417
+ Evaluate pairwise distances between two batches of feature vectors.
418
+ """
419
+ return self.session.run(
420
+ self.distance_block,
421
+ feed_dict={self._features_batch1: U, self._features_batch2: V},
422
+ )
423
+
424
+ def less_thans(self, batch_1, radii_1, batch_2, radii_2):
425
+ return self.session.run(
426
+ [self._batch_1_in, self._batch_2_in],
427
+ feed_dict={
428
+ self._features_batch1: batch_1,
429
+ self._features_batch2: batch_2,
430
+ self._radii1: radii_1,
431
+ self._radii2: radii_2,
432
+ },
433
+ )
434
+
435
+
436
+ def _batch_pairwise_distances(U, V):
437
+ """
438
+ Compute pairwise distances between two batches of feature vectors.
439
+ """
440
+ with tf.variable_scope("pairwise_dist_block"):
441
+ # Squared norms of each row in U and V.
442
+ norm_u = tf.reduce_sum(tf.square(U), 1)
443
+ norm_v = tf.reduce_sum(tf.square(V), 1)
444
+
445
+ # norm_u as a column and norm_v as a row vectors.
446
+ norm_u = tf.reshape(norm_u, [-1, 1])
447
+ norm_v = tf.reshape(norm_v, [1, -1])
448
+
449
+ # Pairwise squared Euclidean distances.
450
+ D = tf.maximum(norm_u - 2 * tf.matmul(U, V, False, True) + norm_v, 0.0)
451
+
452
+ return D
453
+
454
+
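# Numeric check of the expansion used above: ||u - v||^2 = ||u||^2 - 2 u.v + ||v||^2,
# compared against a brute-force computation on small random batches.
import numpy as np
rng = np.random.default_rng(0)
U, V = rng.normal(size=(4, 16)), rng.normal(size=(5, 16))
fast = (U ** 2).sum(1)[:, None] - 2 * U @ V.T + (V ** 2).sum(1)[None, :]
slow = ((U[:, None, :] - V[None, :, :]) ** 2).sum(-1)
print(np.allclose(fast, slow))  # True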
455
+ class NpzArrayReader(ABC):
456
+ @abstractmethod
457
+ def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
458
+ pass
459
+
460
+ @abstractmethod
461
+ def remaining(self) -> int:
462
+ pass
463
+
464
+ def read_batches(self, batch_size: int) -> Iterable[np.ndarray]:
465
+ def gen_fn():
466
+ while True:
467
+ batch = self.read_batch(batch_size)
468
+ if batch is None:
469
+ break
470
+ yield batch
471
+
472
+ rem = self.remaining()
473
+ num_batches = rem // batch_size + int(rem % batch_size != 0)
474
+ return BatchIterator(gen_fn, num_batches)
475
+
476
+
477
+ class BatchIterator:
478
+ def __init__(self, gen_fn, length):
479
+ self.gen_fn = gen_fn
480
+ self.length = length
481
+
482
+ def __len__(self):
483
+ return self.length
484
+
485
+ def __iter__(self):
486
+ return self.gen_fn()
487
+
488
+
489
+ class StreamingNpzArrayReader(NpzArrayReader):
490
+ def __init__(self, arr_f, shape, dtype):
491
+ self.arr_f = arr_f
492
+ self.shape = shape
493
+ self.dtype = dtype
494
+ self.idx = 0
495
+
496
+ def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
497
+ if self.idx >= self.shape[0]:
498
+ return None
499
+
500
+ bs = min(batch_size, self.shape[0] - self.idx)
501
+ self.idx += bs
502
+
503
+ if self.dtype.itemsize == 0:
504
+ return np.ndarray([bs, *self.shape[1:]], dtype=self.dtype)
505
+
506
+ read_count = bs * np.prod(self.shape[1:])
507
+ read_size = int(read_count * self.dtype.itemsize)
508
+ data = _read_bytes(self.arr_f, read_size, "array data")
509
+ return np.frombuffer(data, dtype=self.dtype).reshape([bs, *self.shape[1:]])
510
+
511
+ def remaining(self) -> int:
512
+ return max(0, self.shape[0] - self.idx)
513
+
514
+
515
+ class MemoryNpzArrayReader(NpzArrayReader):
516
+ def __init__(self, arr):
517
+ self.arr = arr
518
+ self.idx = 0
519
+
520
+ @classmethod
521
+ def load(cls, path: str, arr_name: str):
522
+ with open(path, "rb") as f:
523
+ arr = np.load(f)[arr_name]
524
+ return cls(arr)
525
+
526
+ def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
527
+ if self.idx >= self.arr.shape[0]:
528
+ return None
529
+
530
+ res = self.arr[self.idx : self.idx + batch_size]
531
+ self.idx += batch_size
532
+ return res
533
+
534
+ def remaining(self) -> int:
535
+ return max(0, self.arr.shape[0] - self.idx)
536
+
537
+
538
+ @contextmanager
539
+ def open_npz_array(path: str, arr_name: str) -> NpzArrayReader:
540
+ with _open_npy_file(path, arr_name) as arr_f:
541
+ version = np.lib.format.read_magic(arr_f)
542
+ if version == (1, 0):
543
+ header = np.lib.format.read_array_header_1_0(arr_f)
544
+ elif version == (2, 0):
545
+ header = np.lib.format.read_array_header_2_0(arr_f)
546
+ else:
547
+ yield MemoryNpzArrayReader.load(path, arr_name)
548
+ return
549
+ shape, fortran, dtype = header
550
+ if fortran or dtype.hasobject:
551
+ yield MemoryNpzArrayReader.load(path, arr_name)
552
+ else:
553
+ yield StreamingNpzArrayReader(arr_f, shape, dtype)
554
+
555
+
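A minimal usage sketch for the reader machinery above; the path "acts.npz", the array key "arr_0", and the consume() helper are hypothetical placeholders:

with open_npz_array("acts.npz", "arr_0") as reader:
    for batch in reader.read_batches(batch_size=1024):
        consume(batch)  # hypothetical per-batch consumer; batches stream from disk when possible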
556
+ def _read_bytes(fp, size, error_template="ran out of data"):
557
+ """
558
+ Copied from: https://github.com/numpy/numpy/blob/fb215c76967739268de71aa4bda55dd1b062bc2e/numpy/lib/format.py#L788-L886
559
+
560
+ Read from file-like object until size bytes are read.
561
+ Raises ValueError if EOF is encountered before size bytes are read.
562
+ Non-blocking objects only supported if they derive from io objects.
563
+ Required as e.g. ZipExtFile in python 2.6 can return less data than
564
+ requested.
565
+ """
566
+ data = bytes()
567
+ while True:
568
+ # io files (default in python3) return None or raise on
569
+ # would-block, python2 file will truncate, probably nothing can be
570
+ # done about that. note that regular files can't be non-blocking
571
+ try:
572
+ r = fp.read(size - len(data))
573
+ data += r
574
+ if len(r) == 0 or len(data) == size:
575
+ break
576
+ except io.BlockingIOError:
577
+ pass
578
+ if len(data) != size:
579
+ msg = "EOF: reading %s, expected %d bytes got %d"
580
+ raise ValueError(msg % (error_template, size, len(data)))
581
+ else:
582
+ return data
583
+
584
+
585
+ @contextmanager
586
+ def _open_npy_file(path: str, arr_name: str):
587
+ with open(path, "rb") as f:
588
+ with zipfile.ZipFile(f, "r") as zip_f:
589
+ if f"{arr_name}.npy" not in zip_f.namelist():
590
+ raise ValueError(f"missing {arr_name} in npz file")
591
+ with zip_f.open(f"{arr_name}.npy", "r") as arr_f:
592
+ yield arr_f
593
+
594
+
595
+ def _download_inception_model():
596
+ if os.path.exists(INCEPTION_V3_PATH):
597
+ return
598
+ print("downloading InceptionV3 model...")
599
+ with requests.get(INCEPTION_V3_URL, stream=True) as r:
600
+ r.raise_for_status()
601
+ tmp_path = INCEPTION_V3_PATH + ".tmp"
602
+ with open(tmp_path, "wb") as f:
603
+ for chunk in tqdm(r.iter_content(chunk_size=8192)):
604
+ f.write(chunk)
605
+ os.rename(tmp_path, INCEPTION_V3_PATH)
606
+
607
+
608
+ def _create_feature_graph(input_batch):
609
+ _download_inception_model()
610
+ prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
611
+ with open(INCEPTION_V3_PATH, "rb") as f:
612
+ graph_def = tf.GraphDef()
613
+ graph_def.ParseFromString(f.read())
614
+ pool3, spatial = tf.import_graph_def(
615
+ graph_def,
616
+ input_map={"ExpandDims:0": input_batch},
617
+ return_elements=[FID_POOL_NAME, FID_SPATIAL_NAME],
618
+ name=prefix,
619
+ )
620
+ _update_shapes(pool3)
621
+ spatial = spatial[..., :7]
622
+ return pool3, spatial
623
+
624
+
625
+ def _create_softmax_graph(input_batch):
626
+ _download_inception_model()
627
+ prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
628
+ with open(INCEPTION_V3_PATH, "rb") as f:
629
+ graph_def = tf.GraphDef()
630
+ graph_def.ParseFromString(f.read())
631
+ (matmul,) = tf.import_graph_def(
632
+ graph_def, return_elements=["softmax/logits/MatMul"], name=prefix
633
+ )
634
+ w = matmul.inputs[1]
635
+ logits = tf.matmul(input_batch, w)
636
+ return tf.nn.softmax(logits)
637
+
638
+
639
+ def _update_shapes(pool3):
640
+ # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L50-L63
641
+ ops = pool3.graph.get_operations()
642
+ for op in ops:
643
+ for o in op.outputs:
644
+ shape = o.get_shape()
645
+ if shape._dims is not None: # pylint: disable=protected-access
646
+ # shape = [s.value for s in shape] TF 1.x
647
+ shape = [s for s in shape] # TF 2.x
648
+ new_shape = []
649
+ for j, s in enumerate(shape):
650
+ if s == 1 and j == 0:
651
+ new_shape.append(None)
652
+ else:
653
+ new_shape.append(s)
654
+ o.__dict__["_shape_val"] = tf.TensorShape(new_shape)
655
+ return pool3
656
+
657
+
658
+ def _numpy_partition(arr, kth, **kwargs):
659
+ num_workers = min(cpu_count(), len(arr))
660
+ chunk_size = len(arr) // num_workers
661
+ extra = len(arr) % num_workers
662
+
663
+ start_idx = 0
664
+ batches = []
665
+ for i in range(num_workers):
666
+ size = chunk_size + (1 if i < extra else 0)
667
+ batches.append(arr[start_idx : start_idx + size])
668
+ start_idx += size
669
+
670
+ with ThreadPool(num_workers) as pool:
671
+ return list(pool.map(partial(np.partition, kth=kth, **kwargs), batches))
672
+
673
+
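_numpy_partition splits the rows of arr across a thread pool so each chunk is partitioned independently, which is how the k-nearest-neighbor radii are computed in parallel. A hedged example of recovering the value at index k per row (the k-th smallest, 0-indexed) from a toy distance matrix:

import numpy as np
dists = np.random.rand(10, 100)               # toy pairwise-distance matrix
k = 3
chunks = _numpy_partition(dists, k, axis=-1)  # list of partitioned row chunks
radii = np.concatenate(chunks, axis=0)[:, k]  # k-th smallest distance in each row
assert radii.shape == (10,)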
674
+ if __name__ == "__main__":
675
+ print(REQUIREMENTS)
676
+ main()
stable_diffusion/ldm/modules/evaluate/evaluate_perceptualsim.py ADDED
@@ -0,0 +1,632 @@
1
+ import argparse
2
+ import glob
3
+ import os
4
+ from tqdm import tqdm
5
+ from collections import namedtuple
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torchvision.transforms as transforms
10
+ from torchvision import models
11
+ from PIL import Image
12
+
13
+ import sys
14
+ sys.path.append(".")
15
+ from stable_diffusion.ldm.modules.evaluate.ssim import ssim
16
+
17
+
18
+ transform = transforms.Compose([transforms.ToTensor()])
19
+
20
+ def normalize_tensor(in_feat, eps=1e-10):
21
+ norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1)).view(
22
+ in_feat.size()[0], 1, in_feat.size()[2], in_feat.size()[3]
23
+ )
24
+ return in_feat / (norm_factor.expand_as(in_feat) + eps)
25
+
26
+
27
+ def cos_sim(in0, in1):
28
+ in0_norm = normalize_tensor(in0)
29
+ in1_norm = normalize_tensor(in1)
30
+ N = in0.size()[0]
31
+ X = in0.size()[2]
32
+ Y = in0.size()[3]
33
+
34
+ return torch.mean(
35
+ torch.mean(
36
+ torch.sum(in0_norm * in1_norm, dim=1).view(N, 1, X, Y), dim=2
37
+ ).view(N, 1, 1, Y),
38
+ dim=3,
39
+ ).view(N)
40
+
41
+
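cos_sim normalizes both feature maps along the channel dimension, takes the per-pixel dot product, and then averages over the spatial grid; the chained view/mean calls are equivalent (up to the eps handling) to a single reduction, e.g. this hedged restatement:

import torch

def cos_sim_flat(in0, in1, eps=1e-10):
    # per-pixel cosine similarity, then mean over H and W -> one score per batch item
    in0n = in0 / (in0.norm(dim=1, keepdim=True) + eps)
    in1n = in1 / (in1.norm(dim=1, keepdim=True) + eps)
    return (in0n * in1n).sum(dim=1).mean(dim=(1, 2))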
42
+ class squeezenet(torch.nn.Module):
43
+ def __init__(self, requires_grad=False, pretrained=True):
44
+ super(squeezenet, self).__init__()
45
+ pretrained_features = models.squeezenet1_1(
46
+ pretrained=pretrained
47
+ ).features
48
+ self.slice1 = torch.nn.Sequential()
49
+ self.slice2 = torch.nn.Sequential()
50
+ self.slice3 = torch.nn.Sequential()
51
+ self.slice4 = torch.nn.Sequential()
52
+ self.slice5 = torch.nn.Sequential()
53
+ self.slice6 = torch.nn.Sequential()
54
+ self.slice7 = torch.nn.Sequential()
55
+ self.N_slices = 7
56
+ for x in range(2):
57
+ self.slice1.add_module(str(x), pretrained_features[x])
58
+ for x in range(2, 5):
59
+ self.slice2.add_module(str(x), pretrained_features[x])
60
+ for x in range(5, 8):
61
+ self.slice3.add_module(str(x), pretrained_features[x])
62
+ for x in range(8, 10):
63
+ self.slice4.add_module(str(x), pretrained_features[x])
64
+ for x in range(10, 11):
65
+ self.slice5.add_module(str(x), pretrained_features[x])
66
+ for x in range(11, 12):
67
+ self.slice6.add_module(str(x), pretrained_features[x])
68
+ for x in range(12, 13):
69
+ self.slice7.add_module(str(x), pretrained_features[x])
70
+ if not requires_grad:
71
+ for param in self.parameters():
72
+ param.requires_grad = False
73
+
74
+ def forward(self, X):
75
+ h = self.slice1(X)
76
+ h_relu1 = h
77
+ h = self.slice2(h)
78
+ h_relu2 = h
79
+ h = self.slice3(h)
80
+ h_relu3 = h
81
+ h = self.slice4(h)
82
+ h_relu4 = h
83
+ h = self.slice5(h)
84
+ h_relu5 = h
85
+ h = self.slice6(h)
86
+ h_relu6 = h
87
+ h = self.slice7(h)
88
+ h_relu7 = h
89
+ vgg_outputs = namedtuple(
90
+ "SqueezeOutputs",
91
+ ["relu1", "relu2", "relu3", "relu4", "relu5", "relu6", "relu7"],
92
+ )
93
+ out = vgg_outputs(
94
+ h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7
95
+ )
96
+
97
+ return out
98
+
99
+
100
+ class alexnet(torch.nn.Module):
101
+ def __init__(self, requires_grad=False, pretrained=True):
102
+ super(alexnet, self).__init__()
103
+ alexnet_pretrained_features = models.alexnet(
104
+ pretrained=pretrained
105
+ ).features
106
+ self.slice1 = torch.nn.Sequential()
107
+ self.slice2 = torch.nn.Sequential()
108
+ self.slice3 = torch.nn.Sequential()
109
+ self.slice4 = torch.nn.Sequential()
110
+ self.slice5 = torch.nn.Sequential()
111
+ self.N_slices = 5
112
+ for x in range(2):
113
+ self.slice1.add_module(str(x), alexnet_pretrained_features[x])
114
+ for x in range(2, 5):
115
+ self.slice2.add_module(str(x), alexnet_pretrained_features[x])
116
+ for x in range(5, 8):
117
+ self.slice3.add_module(str(x), alexnet_pretrained_features[x])
118
+ for x in range(8, 10):
119
+ self.slice4.add_module(str(x), alexnet_pretrained_features[x])
120
+ for x in range(10, 12):
121
+ self.slice5.add_module(str(x), alexnet_pretrained_features[x])
122
+ if not requires_grad:
123
+ for param in self.parameters():
124
+ param.requires_grad = False
125
+
126
+ def forward(self, X):
127
+ h = self.slice1(X)
128
+ h_relu1 = h
129
+ h = self.slice2(h)
130
+ h_relu2 = h
131
+ h = self.slice3(h)
132
+ h_relu3 = h
133
+ h = self.slice4(h)
134
+ h_relu4 = h
135
+ h = self.slice5(h)
136
+ h_relu5 = h
137
+ alexnet_outputs = namedtuple(
138
+ "AlexnetOutputs", ["relu1", "relu2", "relu3", "relu4", "relu5"]
139
+ )
140
+ out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
141
+
142
+ return out
143
+
144
+
145
+ class vgg16(torch.nn.Module):
146
+ def __init__(self, requires_grad=False, pretrained=True):
147
+ super(vgg16, self).__init__()
148
+ vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
149
+ self.slice1 = torch.nn.Sequential()
150
+ self.slice2 = torch.nn.Sequential()
151
+ self.slice3 = torch.nn.Sequential()
152
+ self.slice4 = torch.nn.Sequential()
153
+ self.slice5 = torch.nn.Sequential()
154
+ self.N_slices = 5
155
+ for x in range(4):
156
+ self.slice1.add_module(str(x), vgg_pretrained_features[x])
157
+ for x in range(4, 9):
158
+ self.slice2.add_module(str(x), vgg_pretrained_features[x])
159
+ for x in range(9, 16):
160
+ self.slice3.add_module(str(x), vgg_pretrained_features[x])
161
+ for x in range(16, 23):
162
+ self.slice4.add_module(str(x), vgg_pretrained_features[x])
163
+ for x in range(23, 30):
164
+ self.slice5.add_module(str(x), vgg_pretrained_features[x])
165
+ if not requires_grad:
166
+ for param in self.parameters():
167
+ param.requires_grad = False
168
+
169
+ def forward(self, X):
170
+ h = self.slice1(X)
171
+ h_relu1_2 = h
172
+ h = self.slice2(h)
173
+ h_relu2_2 = h
174
+ h = self.slice3(h)
175
+ h_relu3_3 = h
176
+ h = self.slice4(h)
177
+ h_relu4_3 = h
178
+ h = self.slice5(h)
179
+ h_relu5_3 = h
180
+ vgg_outputs = namedtuple(
181
+ "VggOutputs",
182
+ ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"],
183
+ )
184
+ out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
185
+
186
+ return out
187
+
188
+
189
+ class resnet(torch.nn.Module):
190
+ def __init__(self, requires_grad=False, pretrained=True, num=18):
191
+ super(resnet, self).__init__()
192
+ if num == 18:
193
+ self.net = models.resnet18(pretrained=pretrained)
194
+ elif num == 34:
195
+ self.net = models.resnet34(pretrained=pretrained)
196
+ elif num == 50:
197
+ self.net = models.resnet50(pretrained=pretrained)
198
+ elif num == 101:
199
+ self.net = models.resnet101(pretrained=pretrained)
200
+ elif num == 152:
201
+ self.net = models.resnet152(pretrained=pretrained)
202
+ self.N_slices = 5
203
+
204
+ self.conv1 = self.net.conv1
205
+ self.bn1 = self.net.bn1
206
+ self.relu = self.net.relu
207
+ self.maxpool = self.net.maxpool
208
+ self.layer1 = self.net.layer1
209
+ self.layer2 = self.net.layer2
210
+ self.layer3 = self.net.layer3
211
+ self.layer4 = self.net.layer4
212
+
213
+ def forward(self, X):
214
+ h = self.conv1(X)
215
+ h = self.bn1(h)
216
+ h = self.relu(h)
217
+ h_relu1 = h
218
+ h = self.maxpool(h)
219
+ h = self.layer1(h)
220
+ h_conv2 = h
221
+ h = self.layer2(h)
222
+ h_conv3 = h
223
+ h = self.layer3(h)
224
+ h_conv4 = h
225
+ h = self.layer4(h)
226
+ h_conv5 = h
227
+
228
+ outputs = namedtuple(
229
+ "Outputs", ["relu1", "conv2", "conv3", "conv4", "conv5"]
230
+ )
231
+ out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
232
+
233
+ return out
234
+
235
+ # Off-the-shelf deep network
236
+ class PNet(torch.nn.Module):
237
+ """Pre-trained network with all channels equally weighted by default"""
238
+
239
+ def __init__(self, pnet_type="vgg", pnet_rand=False, use_gpu=True):
240
+ super(PNet, self).__init__()
241
+
242
+ self.use_gpu = use_gpu
243
+
244
+ self.pnet_type = pnet_type
245
+ self.pnet_rand = pnet_rand
246
+
247
+ self.shift = torch.Tensor([-0.030, -0.088, -0.188]).view(1, 3, 1, 1)
248
+ self.scale = torch.Tensor([0.458, 0.448, 0.450]).view(1, 3, 1, 1)
249
+
250
+ if self.pnet_type in ["vgg", "vgg16"]:
251
+ self.net = vgg16(pretrained=not self.pnet_rand, requires_grad=False)
252
+ elif self.pnet_type == "alex":
253
+ self.net = alexnet(
254
+ pretrained=not self.pnet_rand, requires_grad=False
255
+ )
256
+ elif self.pnet_type[:-2] == "resnet":
257
+ self.net = resnet(
258
+ pretrained=not self.pnet_rand,
259
+ requires_grad=False,
260
+ num=int(self.pnet_type[-2:]),
261
+ )
262
+ elif self.pnet_type == "squeeze":
263
+ self.net = squeezenet(
264
+ pretrained=not self.pnet_rand, requires_grad=False
265
+ )
266
+
267
+ self.L = self.net.N_slices
268
+
269
+ if use_gpu:
270
+ self.net.cuda()
271
+ self.shift = self.shift.cuda()
272
+ self.scale = self.scale.cuda()
273
+
274
+ def forward(self, in0, in1, retPerLayer=False):
275
+ in0_sc = (in0 - self.shift.expand_as(in0)) / self.scale.expand_as(in0)
276
+ in1_sc = (in1 - self.shift.expand_as(in0)) / self.scale.expand_as(in0)
277
+
278
+ outs0 = self.net.forward(in0_sc)
279
+ outs1 = self.net.forward(in1_sc)
280
+
281
+ if retPerLayer:
282
+ all_scores = []
283
+ for (kk, out0) in enumerate(outs0):
284
+ cur_score = 1.0 - cos_sim(outs0[kk], outs1[kk])
285
+ if kk == 0:
286
+ val = 1.0 * cur_score
287
+ else:
288
+ val = val + cur_score
289
+ if retPerLayer:
290
+ all_scores += [cur_score]
291
+
292
+ if retPerLayer:
293
+ return (val, all_scores)
294
+ else:
295
+ return val
296
+
297
+
298
+
299
+
300
+ # The SSIM metric
301
+ def ssim_metric(img1, img2, mask=None):
302
+ return ssim(img1, img2, mask=mask, size_average=False)
303
+
304
+
305
+ # The PSNR metric
306
+ def psnr(img1, img2, mask=None, reshape=False):
307
+ b = img1.size(0)
308
+ if not (mask is None):
309
+ b = img1.size(0)
310
+ mse_err = (img1 - img2).pow(2) * mask
311
+ if reshape:
312
+ mse_err = mse_err.reshape(b, -1).sum(dim=1) / (
313
+ 3 * mask.reshape(b, -1).sum(dim=1).clamp(min=1)
314
+ )
315
+ else:
316
+ mse_err = mse_err.view(b, -1).sum(dim=1) / (
317
+ 3 * mask.view(b, -1).sum(dim=1).clamp(min=1)
318
+ )
319
+ else:
320
+ if reshape:
321
+ mse_err = (img1 - img2).pow(2).reshape(b, -1).mean(dim=1)
322
+ else:
323
+ mse_err = (img1 - img2).pow(2).view(b, -1).mean(dim=1)
324
+
325
+ psnr = 10 * (1 / mse_err).log10()
326
+ return psnr
327
+
328
+
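Because images here live in [0, 1], the peak signal is 1 and the function reduces to PSNR = 10 * log10(1 / MSE); with a mask, the MSE is summed only over unmasked pixels (times the 3 channels). A quick hedged check, assuming a uniform error of 0.1:

import torch
img1 = torch.rand(2, 3, 8, 8)
img2 = img1 + 0.1        # uniform 0.1 error -> MSE = 0.01
print(psnr(img1, img2))  # ~tensor([20., 20.]), i.e. 10 * log10(1 / 0.01)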
329
+ # The perceptual similarity metric
330
+ def perceptual_sim(img1, img2, vgg16):
331
+ # First extract features
332
+ dist = vgg16(img1 * 2 - 1, img2 * 2 - 1)
333
+
334
+ return dist
335
+
336
+ def load_img(img_name, size=None):
337
+ try:
338
+ img = Image.open(img_name)
339
+
340
+ if type(size) == int:
341
+ img = img.resize((size, size))
342
+ elif size is not None:
343
+ img = img.resize((size[1], size[0]))
344
+
345
+ img = transform(img).cuda()
346
+ img = img.unsqueeze(0)
347
+ except Exception as e:
348
+ print("Failed at loading %s " % img_name)
349
+ print(e)
351
+ raise
352
+ return img
353
+
354
+
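perceptual_sim maps inputs from [0, 1] to [-1, 1] before feeding the PNet, matching the normalization the pre-trained backbones expect. A hedged usage sketch (a CUDA device is required, since PNet and load_img are hard-wired to it):

import torch
vgg16 = PNet().to("cuda").eval()
a = torch.rand(1, 3, 256, 256, device="cuda")
b = torch.rand(1, 3, 256, 256, device="cuda")
dist = perceptual_sim(a, b, vgg16)  # lower means perceptually more similar
print(dist.item())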
355
+ def compute_perceptual_similarity(folder, pred_img, tgt_img, take_every_other):
356
+
357
+ # Load VGG16 for feature similarity
358
+ vgg16 = PNet().to("cuda")
359
+ vgg16.eval()
360
+ vgg16.cuda()
361
+
362
+ values_percsim = []
363
+ values_ssim = []
364
+ values_psnr = []
365
+ folders = os.listdir(folder)
366
+ for i, f in tqdm(enumerate(sorted(folders))):
367
+ pred_imgs = glob.glob(folder + f + "/" + pred_img)
368
+ tgt_imgs = glob.glob(folder + f + "/" + tgt_img)
369
+ assert len(tgt_imgs) == 1
370
+
371
+ perc_sim = 10000
372
+ ssim_sim = -10
373
+ psnr_sim = -10
374
+ for p_img in pred_imgs:
375
+ t_img = load_img(tgt_imgs[0])
376
+ p_img = load_img(p_img, size=t_img.shape[2:])
377
+ t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item()
378
+ perc_sim = min(perc_sim, t_perc_sim)
379
+
380
+ ssim_sim = max(ssim_sim, ssim_metric(p_img, t_img).item())
381
+ psnr_sim = max(psnr_sim, psnr(p_img, t_img).item())
382
+
383
+ values_percsim += [perc_sim]
384
+ values_ssim += [ssim_sim]
385
+ values_psnr += [psnr_sim]
386
+
387
+ if take_every_other:
388
+ n_valuespercsim = []
389
+ n_valuesssim = []
390
+ n_valuespsnr = []
391
+ for i in range(0, len(values_percsim) // 2):
392
+ n_valuespercsim += [
393
+ min(values_percsim[2 * i], values_percsim[2 * i + 1])
394
+ ]
395
+ n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])]
396
+ n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])]
397
+
398
+ values_percsim = n_valuespercsim
399
+ values_ssim = n_valuesssim
400
+ values_psnr = n_valuespsnr
401
+
402
+ avg_percsim = np.mean(np.array(values_percsim))
403
+ std_percsim = np.std(np.array(values_percsim))
404
+
405
+ avg_psnr = np.mean(np.array(values_psnr))
406
+ std_psnr = np.std(np.array(values_psnr))
407
+
408
+ avg_ssim = np.mean(np.array(values_ssim))
409
+ std_ssim = np.std(np.array(values_ssim))
410
+
411
+ return {
412
+ "Perceptual similarity": (avg_percsim, std_percsim),
413
+ "PSNR": (avg_psnr, std_psnr),
414
+ "SSIM": (avg_ssim, std_ssim),
415
+ }
416
+
417
+
418
+ def compute_perceptual_similarity_from_list(pred_imgs_list, tgt_imgs_list,
419
+ take_every_other,
420
+ simple_format=True):
421
+
422
+ # Load VGG16 for feature similarity
423
+ vgg16 = PNet().to("cuda")
424
+ vgg16.eval()
425
+ vgg16.cuda()
426
+
427
+ values_percsim = []
428
+ values_ssim = []
429
+ values_psnr = []
430
+ equal_count = 0
431
+ ambig_count = 0
432
+ for i, tgt_img in enumerate(tqdm(tgt_imgs_list)):
433
+ pred_imgs = pred_imgs_list[i]
434
+ tgt_imgs = [tgt_img]
435
+ assert len(tgt_imgs) == 1
436
+
437
+ if type(pred_imgs) != list:
438
+ pred_imgs = [pred_imgs]
439
+
440
+ perc_sim = 10000
441
+ ssim_sim = -10
442
+ psnr_sim = -10
443
+ assert len(pred_imgs)>0
444
+ for p_img in pred_imgs:
445
+ t_img = load_img(tgt_imgs[0])
446
+ p_img = load_img(p_img, size=t_img.shape[2:])
447
+ t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item()
448
+ perc_sim = min(perc_sim, t_perc_sim)
449
+
450
+ ssim_sim = max(ssim_sim, ssim_metric(p_img, t_img).item())
451
+ psnr_sim = max(psnr_sim, psnr(p_img, t_img).item())
452
+
453
+ values_percsim += [perc_sim]
454
+ values_ssim += [ssim_sim]
455
+ if psnr_sim != float("inf"):  # builtin float: np.float was removed in NumPy 1.24
456
+ values_psnr += [psnr_sim]
457
+ else:
458
+ if torch.allclose(p_img, t_img):
459
+ equal_count += 1
460
+ print("{} equal src and wrp images.".format(equal_count))
461
+ else:
462
+ ambig_count += 1
463
+ print("{} ambiguous src and wrp images.".format(ambig_count))
464
+
465
+ if take_every_other:
466
+ n_valuespercsim = []
467
+ n_valuesssim = []
468
+ n_valuespsnr = []
469
+ for i in range(0, len(values_percsim) // 2):
470
+ n_valuespercsim += [
471
+ min(values_percsim[2 * i], values_percsim[2 * i + 1])
472
+ ]
473
+ n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])]
474
+ n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])]
475
+
476
+ values_percsim = n_valuespercsim
477
+ values_ssim = n_valuesssim
478
+ values_psnr = n_valuespsnr
479
+
480
+ avg_percsim = np.mean(np.array(values_percsim))
481
+ std_percsim = np.std(np.array(values_percsim))
482
+
483
+ avg_psnr = np.mean(np.array(values_psnr))
484
+ std_psnr = np.std(np.array(values_psnr))
485
+
486
+ avg_ssim = np.mean(np.array(values_ssim))
487
+ std_ssim = np.std(np.array(values_ssim))
488
+
489
+ if simple_format:
490
+ # just to make yaml formatting readable
491
+ return {
492
+ "Perceptual similarity": [float(avg_percsim), float(std_percsim)],
493
+ "PSNR": [float(avg_psnr), float(std_psnr)],
494
+ "SSIM": [float(avg_ssim), float(std_ssim)],
495
+ }
496
+ else:
497
+ return {
498
+ "Perceptual similarity": (avg_percsim, std_percsim),
499
+ "PSNR": (avg_psnr, std_psnr),
500
+ "SSIM": (avg_ssim, std_ssim),
501
+ }
502
+
503
+
504
+ def compute_perceptual_similarity_from_list_topk(pred_imgs_list, tgt_imgs_list,
505
+ take_every_other, resize=False):
506
+
507
+ # Load VGG16 for feature similarity
508
+ vgg16 = PNet().to("cuda")
509
+ vgg16.eval()
510
+ vgg16.cuda()
511
+
512
+ values_percsim = []
513
+ values_ssim = []
514
+ values_psnr = []
515
+ individual_percsim = []
516
+ individual_ssim = []
517
+ individual_psnr = []
518
+ for i, tgt_img in enumerate(tqdm(tgt_imgs_list)):
519
+ pred_imgs = pred_imgs_list[i]
520
+ tgt_imgs = [tgt_img]
521
+ assert len(tgt_imgs) == 1
522
+
523
+ if type(pred_imgs) != list:
524
+ assert False
525
+ pred_imgs = [pred_imgs]
526
+
527
+ perc_sim = 10000
528
+ ssim_sim = -10
529
+ psnr_sim = -10
530
+ sample_percsim = list()
531
+ sample_ssim = list()
532
+ sample_psnr = list()
533
+ for p_img in pred_imgs:
534
+ if resize:
535
+ t_img = load_img(tgt_imgs[0], size=(256,256))
536
+ else:
537
+ t_img = load_img(tgt_imgs[0])
538
+ p_img = load_img(p_img, size=t_img.shape[2:])
539
+
540
+ t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item()
541
+ sample_percsim.append(t_perc_sim)
542
+ perc_sim = min(perc_sim, t_perc_sim)
543
+
544
+ t_ssim = ssim_metric(p_img, t_img).item()
545
+ sample_ssim.append(t_ssim)
546
+ ssim_sim = max(ssim_sim, t_ssim)
547
+
548
+ t_psnr = psnr(p_img, t_img).item()
549
+ sample_psnr.append(t_psnr)
550
+ psnr_sim = max(psnr_sim, t_psnr)
551
+
552
+ values_percsim += [perc_sim]
553
+ values_ssim += [ssim_sim]
554
+ values_psnr += [psnr_sim]
555
+ individual_percsim.append(sample_percsim)
556
+ individual_ssim.append(sample_ssim)
557
+ individual_psnr.append(sample_psnr)
558
+
559
+ if take_every_other:
560
+ assert False, "Do this later, after specifying topk to get proper results"
561
+ n_valuespercsim = []
562
+ n_valuesssim = []
563
+ n_valuespsnr = []
564
+ for i in range(0, len(values_percsim) // 2):
565
+ n_valuespercsim += [
566
+ min(values_percsim[2 * i], values_percsim[2 * i + 1])
567
+ ]
568
+ n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])]
569
+ n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])]
570
+
571
+ values_percsim = n_valuespercsim
572
+ values_ssim = n_valuesssim
573
+ values_psnr = n_valuespsnr
574
+
575
+ avg_percsim = np.mean(np.array(values_percsim))
576
+ std_percsim = np.std(np.array(values_percsim))
577
+
578
+ avg_psnr = np.mean(np.array(values_psnr))
579
+ std_psnr = np.std(np.array(values_psnr))
580
+
581
+ avg_ssim = np.mean(np.array(values_ssim))
582
+ std_ssim = np.std(np.array(values_ssim))
583
+
584
+ individual_percsim = np.array(individual_percsim)
585
+ individual_psnr = np.array(individual_psnr)
586
+ individual_ssim = np.array(individual_ssim)
587
+
588
+ return {
589
+ "avg_of_best": {
590
+ "Perceptual similarity": [float(avg_percsim), float(std_percsim)],
591
+ "PSNR": [float(avg_psnr), float(std_psnr)],
592
+ "SSIM": [float(avg_ssim), float(std_ssim)],
593
+ },
594
+ "individual": {
595
+ "PSIM": individual_percsim,
596
+ "PSNR": individual_psnr,
597
+ "SSIM": individual_ssim,
598
+ }
599
+ }
600
+
601
+
602
+ if __name__ == "__main__":
603
+ args = argparse.ArgumentParser()
604
+ args.add_argument("--folder", type=str, default="")
605
+ args.add_argument("--pred_image", type=str, default="")
606
+ args.add_argument("--target_image", type=str, default="")
607
+ args.add_argument("--take_every_other", action="store_true", default=False)
608
+ args.add_argument("--output_file", type=str, default="")
609
+
610
+ opts = args.parse_args()
611
+
612
+ folder = opts.folder
613
+ pred_img = opts.pred_image
614
+ tgt_img = opts.target_image
615
+
616
+ results = compute_perceptual_similarity(
617
+ folder, pred_img, tgt_img, opts.take_every_other
618
+ )
619
+
620
+ f = open(opts.output_file, 'w')
621
+ for key in results:
622
+ print("%s for %s: \n" % (key, opts.folder))
623
+ print(
624
+ "\t {:0.4f} | {:0.4f} \n".format(results[key][0], results[key][1])
625
+ )
626
+
627
+ f.write("%s for %s: \n" % (key, opts.folder))
628
+ f.write(
629
+ "\t {:0.4f} | {:0.4f} \n".format(results[key][0], results[key][1])
630
+ )
631
+
632
+ f.close()
stable_diffusion/ldm/modules/evaluate/frechet_video_distance.py ADDED
@@ -0,0 +1,147 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google Research Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python2, python3
17
+ """Minimal Reference implementation for the Frechet Video Distance (FVD).
18
+
19
+ FVD is a metric for the quality of video generation models. It is inspired by
20
+ the FID (Frechet Inception Distance) used for images, but uses a different
21
+ embedding better suited to videos.
22
+ """
23
+
24
+ from __future__ import absolute_import
25
+ from __future__ import division
26
+ from __future__ import print_function
27
+
28
+
29
+ import six
30
+ import tensorflow.compat.v1 as tf
31
+ import tensorflow_gan as tfgan
32
+ import tensorflow_hub as hub
33
+
34
+
35
+ def preprocess(videos, target_resolution):
36
+ """Runs some preprocessing on the videos for I3D model.
37
+
38
+ Args:
39
+ videos: <T>[batch_size, num_frames, height, width, depth] The videos to be
40
+ preprocessed. We don't care about the specific dtype of the videos; it can
41
+ be anything that tf.image.resize_bilinear accepts. Values are expected to
42
+ be in the range 0-255.
43
+ target_resolution: (width, height): target video resolution
44
+
45
+ Returns:
46
+ videos: <float32>[batch_size, num_frames, height, width, depth]
47
+ """
48
+ videos_shape = list(videos.shape)
49
+ all_frames = tf.reshape(videos, [-1] + videos_shape[-3:])
50
+ resized_videos = tf.image.resize_bilinear(all_frames, size=target_resolution)
51
+ target_shape = [videos_shape[0], -1] + list(target_resolution) + [3]
52
+ output_videos = tf.reshape(resized_videos, target_shape)
53
+ scaled_videos = 2. * tf.cast(output_videos, tf.float32) / 255. - 1
54
+ return scaled_videos
55
+
56
+
57
+ def _is_in_graph(tensor_name):
58
+ """Checks whether a given tensor does exists in the graph."""
59
+ try:
60
+ tf.get_default_graph().get_tensor_by_name(tensor_name)
61
+ except KeyError:
62
+ return False
63
+ return True
64
+
65
+
66
+ def create_id3_embedding(videos, warmup=False, batch_size=16):
67
+ """Embeds the given videos using the Inflated 3D Convolution ne twork.
68
+
69
+ Downloads the graph of the I3D from tf.hub and adds it to the graph on the
70
+ first call.
71
+
72
+ Args:
73
+ videos: <float32>[batch_size, num_frames, height=224, width=224, depth=3].
74
+ Expected range is [-1, 1].
75
+
76
+ Returns:
77
+ embedding: <float32>[batch_size, embedding_size]. embedding_size depends
78
+ on the model used.
79
+
80
+ Raises:
81
+ ValueError: when a provided embedding_layer is not supported.
82
+ """
83
+
84
+ # batch_size = 16
85
+ module_spec = "https://tfhub.dev/deepmind/i3d-kinetics-400/1"
86
+
87
+
88
+ # Making sure that we import the graph separately for
89
+ # each different input video tensor.
90
+ module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str(
91
+ videos.name).replace(":", "_")
92
+
93
+
94
+
95
+ assert_ops = [
96
+ tf.Assert(
97
+ tf.reduce_max(videos) <= 1.001,
98
+ ["max value in frame is > 1", videos]),
99
+ tf.Assert(
100
+ tf.reduce_min(videos) >= -1.001,
101
+ ["min value in frame is < -1", videos]),
102
+ tf.assert_equal(
103
+ tf.shape(videos)[0],
104
+ batch_size, ["invalid frame batch size: ",
105
+ tf.shape(videos)],
106
+ summarize=6),
107
+ ]
108
+ with tf.control_dependencies(assert_ops):
109
+ videos = tf.identity(videos)
110
+
111
+ module_scope = "%s_apply_default/" % module_name
112
+
113
+ # To check whether the module has already been loaded into the graph, we look
114
+ # for a given tensor name. If this tensor name exists, we assume the function
115
+ # has been called before and the graph was imported. Otherwise we import it.
116
+ # Note: in theory, the tensor could exist, but have wrong shapes.
117
+ # This will happen if create_id3_embedding is called with a frames_placeholder
118
+ # of wrong size/batch size, because even though that will throw a tf.Assert
119
+ # on graph-execution time, it will insert the tensor (with wrong shape) into
120
+ # the graph. This is why we need the following assert.
121
+ if warmup:
122
+ video_batch_size = int(videos.shape[0])
123
+ assert video_batch_size in [batch_size, -1, None], f"Invalid batch size {video_batch_size}"
124
+ tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
125
+ if not _is_in_graph(tensor_name):
126
+ i3d_model = hub.Module(module_spec, name=module_name)
127
+ i3d_model(videos)
128
+
129
+ # gets the kinetics-i3d-400-logits layer
130
+ tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
131
+ tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)
132
+ return tensor
133
+
134
+
135
+ def calculate_fvd(real_activations,
136
+ generated_activations):
137
+ """Returns a list of ops that compute metrics as funcs of activations.
138
+
139
+ Args:
140
+ real_activations: <float32>[num_samples, embedding_size]
141
+ generated_activations: <float32>[num_samples, embedding_size]
142
+
143
+ Returns:
144
+ A scalar that contains the requested FVD.
145
+ """
146
+ return tfgan.eval.frechet_classifier_distance_from_activations(
147
+ real_activations, generated_activations)
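Wiring the three pieces together follows the example in the reference FVD repository: preprocess two batches of videos, embed them with the I3D module, and compare the activations. A sketch, with dummy tensors standing in for real videos in [0, 255]:

with tf.Graph().as_default():
    first_set = tf.zeros([16, 15, 64, 64, 3])
    second_set = tf.ones([16, 15, 64, 64, 3]) * 255
    fvd = calculate_fvd(
        create_id3_embedding(preprocess(first_set, (224, 224))),
        create_id3_embedding(preprocess(second_set, (224, 224))),
    )
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.tables_initializer())
        print("FVD is: %.2f." % sess.run(fvd))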
stable_diffusion/ldm/modules/evaluate/ssim.py ADDED
@@ -0,0 +1,124 @@
1
+ # MIT Licence
2
+
3
+ # Methods to predict the SSIM, taken from
4
+ # https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
5
+
6
+ from math import exp
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from torch.autograd import Variable
11
+
12
+ def gaussian(window_size, sigma):
13
+ gauss = torch.Tensor(
14
+ [
15
+ exp(-((x - window_size // 2) ** 2) / float(2 * sigma ** 2))
16
+ for x in range(window_size)
17
+ ]
18
+ )
19
+ return gauss / gauss.sum()
20
+
21
+
22
+ def create_window(window_size, channel):
23
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
24
+ _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
25
+ window = Variable(
26
+ _2D_window.expand(channel, 1, window_size, window_size).contiguous()
27
+ )
28
+ return window
29
+
30
+
31
+ def _ssim(
32
+ img1, img2, window, window_size, channel, mask=None, size_average=True
33
+ ):
34
+ mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
35
+ mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
36
+
37
+ mu1_sq = mu1.pow(2)
38
+ mu2_sq = mu2.pow(2)
39
+ mu1_mu2 = mu1 * mu2
40
+
41
+ sigma1_sq = (
42
+ F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel)
43
+ - mu1_sq
44
+ )
45
+ sigma2_sq = (
46
+ F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel)
47
+ - mu2_sq
48
+ )
49
+ sigma12 = (
50
+ F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel)
51
+ - mu1_mu2
52
+ )
53
+
54
+ C1 = (0.01) ** 2
55
+ C2 = (0.03) ** 2
56
+
57
+ ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
58
+ (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
59
+ )
60
+
61
+ if not (mask is None):
62
+ b = mask.size(0)
63
+ ssim_map = ssim_map.mean(dim=1, keepdim=True) * mask
64
+ ssim_map = ssim_map.view(b, -1).sum(dim=1) / mask.view(b, -1).sum(
65
+ dim=1
66
+ ).clamp(min=1)
67
+ return ssim_map
68
+
69
+ import pdb
70
+
71
+ pdb.set_trace
72
+
73
+ if size_average:
74
+ return ssim_map.mean()
75
+ else:
76
+ return ssim_map.mean(1).mean(1).mean(1)
77
+
78
+
79
+ class SSIM(torch.nn.Module):
80
+ def __init__(self, window_size=11, size_average=True):
81
+ super(SSIM, self).__init__()
82
+ self.window_size = window_size
83
+ self.size_average = size_average
84
+ self.channel = 1
85
+ self.window = create_window(window_size, self.channel)
86
+
87
+ def forward(self, img1, img2, mask=None):
88
+ (_, channel, _, _) = img1.size()
89
+
90
+ if (
91
+ channel == self.channel
92
+ and self.window.data.type() == img1.data.type()
93
+ ):
94
+ window = self.window
95
+ else:
96
+ window = create_window(self.window_size, channel)
97
+
98
+ if img1.is_cuda:
99
+ window = window.cuda(img1.get_device())
100
+ window = window.type_as(img1)
101
+
102
+ self.window = window
103
+ self.channel = channel
104
+
105
+ return _ssim(
106
+ img1,
107
+ img2,
108
+ window,
109
+ self.window_size,
110
+ channel,
111
+ mask,
112
+ self.size_average,
113
+ )
114
+
115
+
116
+ def ssim(img1, img2, window_size=11, mask=None, size_average=True):
117
+ (_, channel, _, _) = img1.size()
118
+ window = create_window(window_size, channel)
119
+
120
+ if img1.is_cuda:
121
+ window = window.cuda(img1.get_device())
122
+ window = window.type_as(img1)
123
+
124
+ return _ssim(img1, img2, window, window_size, channel, mask, size_average)
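A hedged usage sketch of the functional interface above; the SSIM of an image with itself is 1 by construction:

import torch
img = torch.rand(1, 3, 64, 64)
print(ssim(img, img).item())  # ~1.0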
stable_diffusion/ldm/modules/evaluate/torch_frechet_video_distance.py ADDED
@@ -0,0 +1,294 @@
1
+ # based on https://github.com/universome/fvd-comparison/blob/master/compare_models.py; huge thanks!
2
+ import os
3
+ import numpy as np
4
+ import io
5
+ import re
6
+ import requests
7
+ import html
8
+ import hashlib
9
+ import urllib
10
+ import urllib.request
11
+ import scipy.linalg
12
+ import multiprocessing as mp
13
+ import glob
14
+
15
+
16
+ from tqdm import tqdm
17
+ from typing import Any, List, Tuple, Union, Dict, Callable
18
+
19
+ from torchvision.io import read_video
20
+ import torch; torch.set_grad_enabled(False)
21
+ from einops import rearrange
22
+
23
+ from nitro.util import isvideo
24
+
25
+ def compute_frechet_distance(mu_sample, sigma_sample, mu_ref, sigma_ref) -> float:
26
+ print('Calculating Frechet distance...')
27
+ m = np.square(mu_sample - mu_ref).sum()
28
+ s, _ = scipy.linalg.sqrtm(np.dot(sigma_sample, sigma_ref), disp=False) # pylint: disable=no-member
29
+ fid = np.real(m + np.trace(sigma_sample + sigma_ref - s * 2))
30
+
31
+ return float(fid)
32
+
33
+
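compute_frechet_distance implements the closed-form 2-Wasserstein distance between two Gaussians, d^2 = ||mu_s - mu_r||^2 + Tr(Sigma_s + Sigma_r - 2 (Sigma_s Sigma_r)^(1/2)). A hedged sanity check:

import numpy as np
mu, sigma = np.zeros(4), np.eye(4)
assert abs(compute_frechet_distance(mu, sigma, mu, sigma)) < 1e-6  # identical stats -> 0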
34
+ def compute_stats(feats: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
35
+ mu = feats.mean(axis=0) # [d]
36
+ sigma = np.cov(feats, rowvar=False) # [d, d]
37
+
38
+ return mu, sigma
39
+
40
+
41
+ def open_url(url: str, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False) -> Any:
42
+ """Download the given URL and return a binary-mode file object to access the data."""
43
+ assert num_attempts >= 1
44
+
45
+ # Doesn't look like an URL scheme so interpret it as a local filename.
46
+ if not re.match('^[a-z]+://', url):
47
+ return url if return_filename else open(url, "rb")
48
+
49
+ # Handle file URLs. This code handles unusual file:// patterns that
50
+ # arise on Windows:
51
+ #
52
+ # file:///c:/foo.txt
53
+ #
54
+ # which would translate to a local '/c:/foo.txt' filename that's
55
+ # invalid. Drop the forward slash for such pathnames.
56
+ #
57
+ # If you touch this code path, you should test it on both Linux and
58
+ # Windows.
59
+ #
60
+ # Some internet resources suggest using urllib.request.url2pathname() but
61
+ # that converts forward slashes to backslashes and this causes
62
+ # its own set of problems.
63
+ if url.startswith('file://'):
64
+ filename = urllib.parse.urlparse(url).path
65
+ if re.match(r'^/[a-zA-Z]:', filename):
66
+ filename = filename[1:]
67
+ return filename if return_filename else open(filename, "rb")
68
+
69
+ url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
70
+
71
+ # Download.
72
+ url_name = None
73
+ url_data = None
74
+ with requests.Session() as session:
75
+ if verbose:
76
+ print("Downloading %s ..." % url, end="", flush=True)
77
+ for attempts_left in reversed(range(num_attempts)):
78
+ try:
79
+ with session.get(url) as res:
80
+ res.raise_for_status()
81
+ if len(res.content) == 0:
82
+ raise IOError("No data received")
83
+
84
+ if len(res.content) < 8192:
85
+ content_str = res.content.decode("utf-8")
86
+ if "download_warning" in res.headers.get("Set-Cookie", ""):
87
+ links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
88
+ if len(links) == 1:
89
+ url = requests.compat.urljoin(url, links[0])
90
+ raise IOError("Google Drive virus checker nag")
91
+ if "Google Drive - Quota exceeded" in content_str:
92
+ raise IOError("Google Drive download quota exceeded -- please try again later")
93
+
94
+ match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
95
+ url_name = match[1] if match else url
96
+ url_data = res.content
97
+ if verbose:
98
+ print(" done")
99
+ break
100
+ except KeyboardInterrupt:
101
+ raise
102
+ except:
103
+ if not attempts_left:
104
+ if verbose:
105
+ print(" failed")
106
+ raise
107
+ if verbose:
108
+ print(".", end="", flush=True)
109
+
110
+ # Return data as file object.
111
+ assert not return_filename
112
+ return io.BytesIO(url_data)
113
+
114
+ def load_video(ip):
115
+ vid, *_ = read_video(ip)
116
+ vid = rearrange(vid, 't h w c -> t c h w').to(torch.uint8)
117
+ return vid
118
+
119
+ def get_data_from_str(input_str, nprc=None):
120
+ assert os.path.isdir(input_str), f'Specified input folder "{input_str}" is not a directory'
121
+ vid_filelist = glob.glob(os.path.join(input_str,'*.mp4'))
122
+ print(f'Found {len(vid_filelist)} videos in dir {input_str}')
123
+
124
+ if nprc is None:
125
+ try:
126
+ nprc = mp.cpu_count()
127
+ except NotImplementedError:
128
+ print('WARNING: cpu_count() not available, using only 1 cpu for video loading')
129
+ nprc = 1
130
+
131
+ pool = mp.Pool(processes=nprc)
132
+
133
+ vids = []
134
+ for v in tqdm(pool.imap_unordered(load_video,vid_filelist),total=len(vid_filelist),desc='Loading videos...'):
135
+ vids.append(v)
136
+
137
+
138
+ vids = torch.stack(vids,dim=0).float()
139
+
140
+ return vids
141
+
142
+ def get_stats(stats):
143
+ assert os.path.isfile(stats) and stats.endswith('.npz'), f'no stats found under {stats}'
144
+
145
+ print(f'Using precomputed statistics under {stats}')
146
+ stats = np.load(stats)
147
+ stats = {key: stats[key] for key in stats.files}
148
+
149
+ return stats
150
+
151
+
152
+
153
+
154
+ @torch.no_grad()
155
+ def compute_fvd(ref_input, sample_input, bs=32,
156
+ ref_stats=None,
157
+ sample_stats=None,
158
+ nprc_load=None):
159
+
160
+
161
+
162
+ calc_stats = ref_stats is None or sample_stats is None
163
+
164
+ if calc_stats:
165
+
166
+ only_ref = sample_stats is not None
167
+ only_sample = ref_stats is not None
168
+
169
+
170
+ if isinstance(ref_input,str) and not only_sample:
171
+ ref_input = get_data_from_str(ref_input,nprc_load)
172
+
173
+ if isinstance(sample_input, str) and not only_ref:
174
+ sample_input = get_data_from_str(sample_input, nprc_load)
175
+
176
+ stats = compute_statistics(sample_input,ref_input,
177
+ device='cuda' if torch.cuda.is_available() else 'cpu',
178
+ bs=bs,
179
+ only_ref=only_ref,
180
+ only_sample=only_sample)
181
+
182
+ if only_ref:
183
+ stats.update(get_stats(sample_stats))
184
+ elif only_sample:
185
+ stats.update(get_stats(ref_stats))
186
+
187
+
188
+
189
+ else:
190
+ stats = get_stats(sample_stats)
191
+ stats.update(get_stats(ref_stats))
192
+
193
+ fvd = compute_frechet_distance(**stats)
194
+
195
+ return {'FVD' : fvd,}
196
+
197
+
198
+ @torch.no_grad()
199
+ def compute_statistics(videos_fake, videos_real, device: str='cuda', bs=32, only_ref=False,only_sample=False) -> Dict:
200
+ detector_url = 'https://www.dropbox.com/s/ge9e5ujwgetktms/i3d_torchscript.pt?dl=1'
201
+ detector_kwargs = dict(rescale=True, resize=True, return_features=True) # Return raw features before the softmax layer.
202
+
203
+ with open_url(detector_url, verbose=False) as f:
204
+ detector = torch.jit.load(f).eval().to(device)
205
+
206
+
207
+
208
+ assert not (only_sample and only_ref), 'only_ref and only_sample arguments are mutually exclusive'
209
+
210
+ ref_embed, sample_embed = [], []
211
+
212
+ info = f'Computing I3D activations for FVD score with batch size {bs}'
213
+
214
+ if only_ref:
215
+
216
+ if not isvideo(videos_real):
217
+ # not a video tensor: assume numpy arrays of shape (n_vids, t, h, w, c) in range [0, 255]
218
+ videos_real = torch.from_numpy(videos_real).permute(0, 4, 1, 2, 3).float()
219
+ print(videos_real.shape)
220
+
221
+ if videos_real.shape[0] % bs == 0:
222
+ n_secs = videos_real.shape[0] // bs
223
+ else:
224
+ n_secs = videos_real.shape[0] // bs + 1
225
+
226
+ videos_real = torch.tensor_split(videos_real, n_secs, dim=0)
227
+
228
+ for ref_v in tqdm(videos_real, total=len(videos_real),desc=info):
229
+
230
+ feats_ref = detector(ref_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
231
+ ref_embed.append(feats_ref)
232
+
233
+ elif only_sample:
234
+
235
+ if not isvideo(videos_fake):
236
+ # if not is video we assume to have numpy arrays pf shape (n_vids, t, h, w, c) in range [0,255]
237
+ videos_fake = torch.from_numpy(videos_fake).permute(0, 4, 1, 2, 3).float()
238
+ print(videos_fake.shape)
239
+
240
+ if videos_fake.shape[0] % bs == 0:
241
+ n_secs = videos_fake.shape[0] // bs
242
+ else:
243
+ n_secs = videos_fake.shape[0] // bs + 1
244
+
245
+ videos_fake = torch.tensor_split(videos_fake, n_secs, dim=0)  # split the samples, not the reference set
246
+
247
+ for sample_v in tqdm(videos_fake, total=len(videos_fake), desc=info):
248
+ feats_sample = detector(sample_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
249
+ sample_embed.append(feats_sample)
250
+
251
+
252
+ else:
253
+
254
+ if not isvideo(videos_real):
255
+ # not a video tensor: assume numpy arrays of shape (n_vids, t, h, w, c) in range [0, 255]
256
+ videos_real = torch.from_numpy(videos_real).permute(0, 4, 1, 2, 3).float()
257
+
258
+ if not isvideo(videos_fake):
259
+ videos_fake = torch.from_numpy(videos_fake).permute(0, 4, 1, 2, 3).float()
260
+
261
+ if videos_fake.shape[0] % bs == 0:
262
+ n_secs = videos_fake.shape[0] // bs
263
+ else:
264
+ n_secs = videos_fake.shape[0] // bs + 1
265
+
266
+ videos_real = torch.tensor_split(videos_real, n_secs, dim=0)
267
+ videos_fake = torch.tensor_split(videos_fake, n_secs, dim=0)
268
+
269
+ for ref_v, sample_v in tqdm(zip(videos_real,videos_fake),total=len(videos_fake),desc=info):
270
+ # print(ref_v.shape)
271
+ # ref_v = torch.nn.functional.interpolate(ref_v, size=(sample_v.shape[2], 256, 256), mode='trilinear', align_corners=False)
272
+ # sample_v = torch.nn.functional.interpolate(sample_v, size=(sample_v.shape[2], 256, 256), mode='trilinear', align_corners=False)
273
+
274
+
275
+ feats_sample = detector(sample_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
276
+ feats_ref = detector(ref_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
277
+ sample_embed.append(feats_sample)
278
+ ref_embed.append(feats_ref)
279
+
280
+ out = dict()
281
+ if len(sample_embed) > 0:
282
+ sample_embed = np.concatenate(sample_embed,axis=0)
283
+ mu_sample, sigma_sample = compute_stats(sample_embed)
284
+ out.update({'mu_sample': mu_sample,
285
+ 'sigma_sample': sigma_sample})
286
+
287
+ if len(ref_embed) > 0:
288
+ ref_embed = np.concatenate(ref_embed,axis=0)
289
+ mu_ref, sigma_ref = compute_stats(ref_embed)
290
+ out.update({'mu_ref': mu_ref,
291
+ 'sigma_ref': sigma_ref})
292
+
293
+
294
+ return out
stable_diffusion/ldm/modules/image_degradation/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
2
+ from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
stable_diffusion/ldm/modules/image_degradation/bsrgan.py ADDED
@@ -0,0 +1,733 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ # --------------------------------------------
4
+ # Super-Resolution
5
+ # --------------------------------------------
6
+ #
7
+ # Kai Zhang (cskaizhang@gmail.com)
8
+ # https://github.com/cszn
9
+ # From 2019/03--2021/08
10
+ # --------------------------------------------
11
+ """
12
+
13
+ import numpy as np
14
+ import cv2
15
+ import torch
16
+
17
+ from functools import partial
18
+ import random
19
+ from scipy import ndimage
20
+ import scipy
21
+ import scipy.stats as ss
22
+ from scipy.interpolate import interp2d
23
+ from scipy.linalg import orth
24
+ import albumentations
25
+
26
+ import sys
27
+ sys.path.append('.')
28
+
29
+ import stable_diffusion.ldm.modules.image_degradation.utils_image as util
30
+
31
+
32
+ def modcrop_np(img, sf):
33
+ '''
34
+ Args:
35
+ img: numpy image, WxH or WxHxC
36
+ sf: scale factor
37
+ Return:
38
+ cropped image
39
+ '''
40
+ w, h = img.shape[:2]
41
+ im = np.copy(img)
42
+ return im[:w - w % sf, :h - h % sf, ...]
43
+
44
+
45
+ """
46
+ # --------------------------------------------
47
+ # anisotropic Gaussian kernels
48
+ # --------------------------------------------
49
+ """
50
+
51
+
52
+ def analytic_kernel(k):
53
+ """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
54
+ k_size = k.shape[0]
55
+ # Calculate the big kernels size
56
+ big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
57
+ # Loop over the small kernel to fill the big one
58
+ for r in range(k_size):
59
+ for c in range(k_size):
60
+ big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
61
+ # Crop the edges of the big kernel to ignore very small values and increase run time of SR
62
+ crop = k_size // 2
63
+ cropped_big_k = big_k[crop:-crop, crop:-crop]
64
+ # Normalize to 1
65
+ return cropped_big_k / cropped_big_k.sum()
66
+
67
+
68
+ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
69
+ """ generate an anisotropic Gaussian kernel
70
+ Args:
71
+ ksize : e.g., 15, kernel size
72
+ theta : [0, pi], rotation angle range
73
+ l1 : [0.1,50], scaling of eigenvalues
74
+ l2 : [0.1,l1], scaling of eigenvalues
75
+ If l1 = l2, will get an isotropic Gaussian kernel.
76
+ Returns:
77
+ k : kernel
78
+ """
79
+
80
+ v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
81
+ V = np.array([[v[0], v[1]], [v[1], -v[0]]])
82
+ D = np.array([[l1, 0], [0, l2]])
83
+ Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
84
+ k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
85
+
86
+ return k
87
+
88
+
89
+ def gm_blur_kernel(mean, cov, size=15):
90
+ center = size / 2.0 + 0.5
91
+ k = np.zeros([size, size])
92
+ for y in range(size):
93
+ for x in range(size):
94
+ cy = y - center + 1
95
+ cx = x - center + 1
96
+ k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
97
+
98
+ k = k / np.sum(k)
99
+ return k
100
+
101
+
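gm_blur_kernel evaluates a 2-D Gaussian density on the kernel grid and renormalizes, so every kernel returned by anisotropic_Gaussian is non-negative and sums to 1. A hedged check:

k = anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=6, l2=2)
assert k.shape == (15, 15) and abs(k.sum() - 1.0) < 1e-8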
102
+ def shift_pixel(x, sf, upper_left=True):
103
+ """shift pixel for super-resolution with different scale factors
104
+ Args:
105
+ x: WxHxC or WxH
106
+ sf: scale factor
107
+ upper_left: shift direction
108
+ """
109
+ h, w = x.shape[:2]
110
+ shift = (sf - 1) * 0.5
111
+ xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
112
+ if upper_left:
113
+ x1 = xv + shift
114
+ y1 = yv + shift
115
+ else:
116
+ x1 = xv - shift
117
+ y1 = yv - shift
118
+
119
+ x1 = np.clip(x1, 0, w - 1)
120
+ y1 = np.clip(y1, 0, h - 1)
121
+
122
+ if x.ndim == 2:
123
+ x = interp2d(xv, yv, x)(x1, y1)
124
+ if x.ndim == 3:
125
+ for i in range(x.shape[-1]):
126
+ x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
127
+
128
+ return x
129
+
130
+
131
+ def blur(x, k):
132
+ '''
133
+ x: image, NxcxHxW
134
+ k: kernel, Nx1xhxw
135
+ '''
136
+ n, c = x.shape[:2]
137
+ p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
138
+ x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
139
+ k = k.repeat(1, c, 1, 1)
140
+ k = k.view(-1, 1, k.shape[2], k.shape[3])
141
+ x = x.view(1, -1, x.shape[2], x.shape[3])
142
+ x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
143
+ x = x.view(n, c, x.shape[2], x.shape[3])
144
+
145
+ return x
146
+
147
+
148
+ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
149
+ """"
150
+ # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
151
+ # Kai Zhang
152
+ # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
153
+ # max_var = 2.5 * sf
154
+ """
155
+ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
156
+ lambda_1 = min_var + np.random.rand() * (max_var - min_var)
157
+ lambda_2 = min_var + np.random.rand() * (max_var - min_var)
158
+ theta = np.random.rand() * np.pi # random theta
159
+ noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
160
+
161
+ # Set COV matrix using Lambdas and Theta
162
+ LAMBDA = np.diag([lambda_1, lambda_2])
163
+ Q = np.array([[np.cos(theta), -np.sin(theta)],
164
+ [np.sin(theta), np.cos(theta)]])
165
+ SIGMA = Q @ LAMBDA @ Q.T
166
+ INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
167
+
168
+ # Set expectation position (shifting kernel for aligned image)
169
+ MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
170
+ MU = MU[None, None, :, None]
171
+
172
+ # Create meshgrid for Gaussian
173
+ [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
174
+ Z = np.stack([X, Y], 2)[:, :, :, None]
175
+
176
+ # Calculate Gaussian for every pixel of the kernel
177
+ ZZ = Z - MU
178
+ ZZ_t = ZZ.transpose(0, 1, 3, 2)
179
+ raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
180
+
181
+ # shift the kernel so it will be centered
182
+ # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
183
+
184
+ # Normalize the kernel and return
185
+ # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
186
+ kernel = raw_kernel / np.sum(raw_kernel)
187
+ return kernel
188
+
189
+
190
+ def fspecial_gaussian(hsize, sigma):
191
+ hsize = [hsize, hsize]
192
+ siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
193
+ std = sigma
194
+ [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
195
+ arg = -(x * x + y * y) / (2 * std * std)
196
+ h = np.exp(arg)
197
+ h[h < np.finfo(float).eps * h.max()] = 0  # np.finfo: finfo is a NumPy API, not SciPy
198
+ sumh = h.sum()
199
+ if sumh != 0:
200
+ h = h / sumh
201
+ return h
202
+
203
+
204
+ def fspecial_laplacian(alpha):
205
+ alpha = max([0, min([alpha, 1])])
206
+ h1 = alpha / (alpha + 1)
207
+ h2 = (1 - alpha) / (alpha + 1)
208
+ h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
209
+ h = np.array(h)
210
+ return h
211
+
212
+
213
+ def fspecial(filter_type, *args, **kwargs):
214
+ '''
215
+ python code from:
216
+ https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
217
+ '''
218
+ if filter_type == 'gaussian':
219
+ return fspecial_gaussian(*args, **kwargs)
220
+ if filter_type == 'laplacian':
221
+ return fspecial_laplacian(*args, **kwargs)
222
+
223
+
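# Usage sketch (illustrative arguments): fspecial mimics MATLAB's fspecial for
# the two filter types implemented above; unknown types fall through to None.
g = fspecial('gaussian', 7, 1.5)     # 7x7 Gaussian kernel, sigma = 1.5
lap = fspecial('laplacian', 0.2)     # 3x3 Laplacian kernel, alpha = 0.2
assert g.shape == (7, 7) and abs(g.sum() - 1.0) < 1e-6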
224
+ """
225
+ # --------------------------------------------
226
+ # degradation models
227
+ # --------------------------------------------
228
+ """
229
+
230
+
231
+ def bicubic_degradation(x, sf=3):
232
+ '''
233
+ Args:
234
+ x: HxWxC image, [0, 1]
235
+ sf: down-scale factor
236
+ Return:
237
+ bicubically downsampled LR image
238
+ '''
239
+ x = util.imresize_np(x, scale=1 / sf)
240
+ return x
241
+
242
+
243
+ def srmd_degradation(x, k, sf=3):
244
+ ''' blur + bicubic downsampling
245
+ Args:
246
+ x: HxWxC image, [0, 1]
247
+ k: hxw, double
248
+ sf: down-scale factor
249
+ Return:
250
+ downsampled LR image
251
+ Reference:
252
+ @inproceedings{zhang2018learning,
253
+ title={Learning a single convolutional super-resolution network for multiple degradations},
254
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
255
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
256
+ pages={3262--3271},
257
+ year={2018}
258
+ }
259
+ '''
260
+ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'; scipy.ndimage.filters is deprecated
261
+ x = bicubic_degradation(x, sf=sf)
262
+ return x
263
+
264
+
265
+ def dpsr_degradation(x, k, sf=3):
266
+ ''' bicubic downsampling + blur
267
+ Args:
268
+ x: HxWxC image, [0, 1]
269
+ k: hxw, double
270
+ sf: down-scale factor
271
+ Return:
272
+ downsampled LR image
273
+ Reference:
274
+ @inproceedings{zhang2019deep,
275
+ title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
276
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
277
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
278
+ pages={1671--1681},
279
+ year={2019}
280
+ }
281
+ '''
282
+ x = bicubic_degradation(x, sf=sf)
283
+ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
284
+ return x
285
+
286
+
287
+ def classical_degradation(x, k, sf=3):
288
+ ''' blur + downsampling
289
+ Args:
290
+ x: HxWxC image, [0, 1]/[0, 255]
291
+ k: hxw, double
292
+ sf: down-scale factor
293
+ Return:
294
+ downsampled LR image
295
+ '''
296
+ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
297
+ # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
298
+ st = 0
299
+ return x[st::sf, st::sf, ...]
300
+
301
+
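# Usage sketch (illustrative shapes): the three pipelines above differ only in
# the order of blur and downsampling; all reduce a 96x96 image to 32x32 at sf=3.
x = np.random.rand(96, 96, 3).astype(np.float32)
k = fspecial('gaussian', 15, 2.0)
lr_srmd = srmd_degradation(x, k, sf=3)            # blur, then bicubic downsample
lr_dpsr = dpsr_degradation(x, k, sf=3)            # bicubic downsample, then blur
lr_classical = classical_degradation(x, k, sf=3)  # blur, then direct subsampling
print(lr_srmd.shape, lr_dpsr.shape, lr_classical.shape)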
302
+ def add_sharpening(img, weight=0.5, radius=50, threshold=10):
303
+ """USM sharpening. borrowed from real-ESRGAN
304
+ Input image: I; Blurry image: B.
305
+ 1. K = I + weight * (I - B)
306
+ 2. Mask = 1 if abs(I - B) > threshold, else: 0
307
+ 3. Blur the mask to obtain a soft mask.
308
+ 4. Out = Mask * K + (1 - Mask) * I
309
+ Args:
310
+ img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
311
+ weight (float): Sharp weight. Default: 0.5.
312
+ radius (float): Kernel size of Gaussian blur. Default: 50.
313
+ threshold (int): threshold on abs(I - B) * 255 for the sharpening mask. Default: 10.
314
+ """
315
+ if radius % 2 == 0:
316
+ radius += 1
317
+ blur = cv2.GaussianBlur(img, (radius, radius), 0)
318
+ residual = img - blur
319
+ mask = np.abs(residual) * 255 > threshold
320
+ mask = mask.astype('float32')
321
+ soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
322
+
323
+ K = img + weight * residual
324
+ K = np.clip(K, 0, 1)
325
+ return soft_mask * K + (1 - soft_mask) * img
326
+
327
+
328
+ def add_blur(img, sf=4):
329
+ wd2 = 4.0 + sf
330
+ wd = 2.0 + 0.2 * sf
331
+ if random.random() < 0.5:
332
+ l1 = wd2 * random.random()
333
+ l2 = wd2 * random.random()
334
+ k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
335
+ else:
336
+ k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
337
+ img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
338
+
339
+ return img
340
+
341
+
342
+ def add_resize(img, sf=4):
343
+ rnum = np.random.rand()
344
+ if rnum > 0.8: # up
345
+ sf1 = random.uniform(1, 2)
346
+ elif rnum < 0.7: # down
347
+ sf1 = random.uniform(0.5 / sf, 1)
348
+ else:
349
+ sf1 = 1.0
350
+ img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
351
+ img = np.clip(img, 0.0, 1.0)
352
+
353
+ return img
354
+
355
+
356
+ # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
357
+ # noise_level = random.randint(noise_level1, noise_level2)
358
+ # rnum = np.random.rand()
359
+ # if rnum > 0.6: # add color Gaussian noise
360
+ # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
361
+ # elif rnum < 0.4: # add grayscale Gaussian noise
362
+ # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
363
+ # else: # add noise
364
+ # L = noise_level2 / 255.
365
+ # D = np.diag(np.random.rand(3))
366
+ # U = orth(np.random.rand(3, 3))
367
+ # conv = np.dot(np.dot(np.transpose(U), D), U)
368
+ # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
369
+ # img = np.clip(img, 0.0, 1.0)
370
+ # return img
371
+
372
+ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
373
+ noise_level = random.randint(noise_level1, noise_level2)
374
+ rnum = np.random.rand()
375
+ if rnum > 0.6: # add color Gaussian noise
376
+ img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
377
+ elif rnum < 0.4: # add grayscale Gaussian noise
378
+ img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
379
+ else: # add noise
380
+ L = noise_level2 / 255.
381
+ D = np.diag(np.random.rand(3))
382
+ U = orth(np.random.rand(3, 3))
383
+ conv = np.dot(np.dot(np.transpose(U), D), U)
384
+ img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
385
+ img = np.clip(img, 0.0, 1.0)
386
+ return img
387
+
388
+
389
+ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
390
+ noise_level = random.randint(noise_level1, noise_level2)
391
+ img = np.clip(img, 0.0, 1.0)
392
+ rnum = random.random()
393
+ if rnum > 0.6:
394
+ img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
395
+ elif rnum < 0.4:
396
+ img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
397
+ else:
398
+ L = noise_level2 / 255.
399
+ D = np.diag(np.random.rand(3))
400
+ U = orth(np.random.rand(3, 3))
401
+ conv = np.dot(np.dot(np.transpose(U), D), U)
402
+ img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
403
+ img = np.clip(img, 0.0, 1.0)
404
+ return img
405
+
406
+
407
+ def add_Poisson_noise(img):
408
+ img = np.clip((img * 255.0).round(), 0, 255) / 255.
409
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
410
+ if random.random() < 0.5:
411
+ img = np.random.poisson(img * vals).astype(np.float32) / vals
412
+ else:
413
+ img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
414
+ img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
415
+ noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
416
+ img += noise_gray[:, :, np.newaxis]
417
+ img = np.clip(img, 0.0, 1.0)
418
+ return img
419
+
420
+
421
+ def add_JPEG_noise(img):
422
+ quality_factor = random.randint(30, 95)
423
+ img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
424
+ result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
425
+ img = cv2.imdecode(encimg, 1)
426
+ img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
427
+ return img
428
+
429
+
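# Usage sketch (illustrative shapes): add_JPEG_noise round-trips a float RGB
# image in [0, 1] through an in-memory JPEG encode/decode at a random quality
# factor in [30, 95], returning float32 of the same shape.
img = np.random.rand(64, 64, 3).astype(np.float32)
img_jpeg = add_JPEG_noise(img)
assert img_jpeg.shape == img.shape and img_jpeg.dtype == np.float32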
430
+ def random_crop(lq, hq, sf=4, lq_patchsize=64):
431
+ h, w = lq.shape[:2]
432
+ rnd_h = random.randint(0, h - lq_patchsize)
433
+ rnd_w = random.randint(0, w - lq_patchsize)
434
+ lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
435
+
436
+ rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
437
+ hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
438
+ return lq, hq
439
+
440
+
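# Usage sketch (illustrative shapes): random_crop returns spatially aligned
# LQ/HQ patches, the HQ crop being exactly sf times larger on each side.
lq = np.random.rand(100, 100, 3)
hq = np.random.rand(400, 400, 3)
lq_p, hq_p = random_crop(lq, hq, sf=4, lq_patchsize=64)
assert lq_p.shape[:2] == (64, 64) and hq_p.shape[:2] == (256, 256)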
441
+ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
442
+ """
443
+ This is the degradation model of BSRGAN from the paper
444
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
445
+ ----------
446
+ img: HxWxC, [0, 1]; its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
447
+ sf: scale factor
448
+ isp_model: camera ISP model
449
+ Returns
450
+ -------
451
+ img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
452
+ hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
453
+ """
454
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
455
+ sf_ori = sf
456
+
457
+ h1, w1 = img.shape[:2]
458
+ img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is the height h1, axis 1 the width w1)
459
+ h, w = img.shape[:2]
460
+
461
+ if h < lq_patchsize * sf or w < lq_patchsize * sf:
462
+ raise ValueError(f'img size ({h1}X{w1}) is too small!')
463
+
464
+ hq = img.copy()
465
+
466
+ if sf == 4 and random.random() < scale2_prob: # downsample1
467
+ if np.random.rand() < 0.5:
468
+ img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
469
+ interpolation=random.choice([1, 2, 3]))
470
+ else:
471
+ img = util.imresize_np(img, 1 / 2, True)
472
+ img = np.clip(img, 0.0, 1.0)
473
+ sf = 2
474
+
475
+ shuffle_order = random.sample(range(7), 7)
476
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
477
+ if idx1 > idx2: # keep downsample3 last
478
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
479
+
480
+ for i in shuffle_order:
481
+
482
+ if i == 0:
483
+ img = add_blur(img, sf=sf)
484
+
485
+ elif i == 1:
486
+ img = add_blur(img, sf=sf)
487
+
488
+ elif i == 2:
489
+ a, b = img.shape[1], img.shape[0]
490
+ # downsample2
491
+ if random.random() < 0.75:
492
+ sf1 = random.uniform(1, 2 * sf)
493
+ img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
494
+ interpolation=random.choice([1, 2, 3]))
495
+ else:
496
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
497
+ k_shifted = shift_pixel(k, sf)
498
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
499
+ img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
500
+ img = img[0::sf, 0::sf, ...] # nearest downsampling
501
+ img = np.clip(img, 0.0, 1.0)
502
+
503
+ elif i == 3:
504
+ # downsample3
505
+ img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
506
+ img = np.clip(img, 0.0, 1.0)
507
+
508
+ elif i == 4:
509
+ # add Gaussian noise
510
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
511
+
512
+ elif i == 5:
513
+ # add JPEG noise
514
+ if random.random() < jpeg_prob:
515
+ img = add_JPEG_noise(img)
516
+
517
+ elif i == 6:
518
+ # add processed camera sensor noise
519
+ if random.random() < isp_prob and isp_model is not None:
520
+ with torch.no_grad():
521
+ img, hq = isp_model.forward(img.copy(), hq)
522
+
523
+ # add final JPEG compression noise
524
+ img = add_JPEG_noise(img)
525
+
526
+ # random crop
527
+ img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
528
+
529
+ return img, hq
530
+
531
+
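# Usage sketch (illustrative shapes): run the full BSRGAN degradation on a
# float RGB image in [0, 1]; each side must be at least lq_patchsize * sf.
hq_img = np.random.rand(320, 320, 3).astype(np.float32)
lq, hq = degradation_bsrgan(hq_img, sf=4, lq_patchsize=72)
print(lq.shape, hq.shape)   # (72, 72, 3) and (288, 288, 3)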
532
+ # TODO: no isp_model?
533
+ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
534
+ """
535
+ This is the degradation model of BSRGAN from the paper
536
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
537
+ ----------
538
+ sf: scale factor
539
+ isp_model: camera ISP model
540
+ Returns
541
+ -------
542
+ example: dict with key "image" holding the degraded image (uint8, same HxWxC layout, roughly 1/sf of the input size)
544
+ """
545
+ image = util.uint2single(image)
546
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
547
+ sf_ori = sf
548
+
549
+ h1, w1 = image.shape[:2]
550
+ image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is the height h1, axis 1 the width w1)
551
+ h, w = image.shape[:2]
552
+
553
+ hq = image.copy()
554
+
555
+ if sf == 4 and random.random() < scale2_prob: # downsample1
556
+ if np.random.rand() < 0.5:
557
+ image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
558
+ interpolation=random.choice([1, 2, 3]))
559
+ else:
560
+ image = util.imresize_np(image, 1 / 2, True)
561
+ image = np.clip(image, 0.0, 1.0)
562
+ sf = 2
563
+
564
+ shuffle_order = random.sample(range(7), 7)
565
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
566
+ if idx1 > idx2: # keep downsample3 last
567
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
568
+
569
+ for i in shuffle_order:
570
+
571
+ if i == 0:
572
+ image = add_blur(image, sf=sf)
573
+
574
+ elif i == 1:
575
+ image = add_blur(image, sf=sf)
576
+
577
+ elif i == 2:
578
+ a, b = image.shape[1], image.shape[0]
579
+ # downsample2
580
+ if random.random() < 0.75:
581
+ sf1 = random.uniform(1, 2 * sf)
582
+ image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
583
+ interpolation=random.choice([1, 2, 3]))
584
+ else:
585
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
586
+ k_shifted = shift_pixel(k, sf)
587
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
588
+ image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
589
+ image = image[0::sf, 0::sf, ...] # nearest downsampling
590
+ image = np.clip(image, 0.0, 1.0)
591
+
592
+ elif i == 3:
593
+ # downsample3
594
+ image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
595
+ image = np.clip(image, 0.0, 1.0)
596
+
597
+ elif i == 4:
598
+ # add Gaussian noise
599
+ image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
600
+
601
+ elif i == 5:
602
+ # add JPEG noise
603
+ if random.random() < jpeg_prob:
604
+ image = add_JPEG_noise(image)
605
+
606
+ # elif i == 6:
607
+ # # add processed camera sensor noise
608
+ # if random.random() < isp_prob and isp_model is not None:
609
+ # with torch.no_grad():
610
+ # img, hq = isp_model.forward(img.copy(), hq)
611
+
612
+ # add final JPEG compression noise
613
+ image = add_JPEG_noise(image)
614
+ image = util.single2uint(image)
615
+ example = {"image":image}
616
+ return example
617
+
618
+
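# Usage sketch (illustrative shapes): unlike degradation_bsrgan, the variant
# takes a uint8 image (it calls util.uint2single itself) and returns a dict
# holding the degraded uint8 image, roughly 1/sf of the input size.
hq_uint8 = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
lq_uint8 = degradation_bsrgan_variant(hq_uint8, sf=4)["image"]
print(lq_uint8.dtype, lq_uint8.shape)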
619
+ # TODO: in case of a pickle error, replace a += x with a = a + x in add_speckle_noise etc.
620
+ def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
621
+ """
622
+ This is an extended degradation model by combining
623
+ the degradation models of BSRGAN and Real-ESRGAN
624
+ ----------
625
+ img: HxWxC, [0, 1]; its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
626
+ sf: scale factor
627
+ shuffle_prob: probability of shuffling the full degradation order
+ use_sharp: whether to apply USM sharpening to the image first
629
+ Returns
630
+ -------
631
+ img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
632
+ hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
633
+ """
634
+
635
+ h1, w1 = img.shape[:2]
636
+ img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is the height h1, axis 1 the width w1)
637
+ h, w = img.shape[:2]
638
+
639
+ if h < lq_patchsize * sf or w < lq_patchsize * sf:
640
+ raise ValueError(f'img size ({h1}X{w1}) is too small!')
641
+
642
+ if use_sharp:
643
+ img = add_sharpening(img)
644
+ hq = img.copy()
645
+
646
+ if random.random() < shuffle_prob:
647
+ shuffle_order = random.sample(range(13), 13)
648
+ else:
649
+ shuffle_order = list(range(13))
650
+ # local shuffle for noise, JPEG is always the last one
651
+ shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
652
+ shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
653
+
654
+ poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
655
+
656
+ for i in shuffle_order:
657
+ if i == 0:
658
+ img = add_blur(img, sf=sf)
659
+ elif i == 1:
660
+ img = add_resize(img, sf=sf)
661
+ elif i == 2:
662
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
663
+ elif i == 3:
664
+ if random.random() < poisson_prob:
665
+ img = add_Poisson_noise(img)
666
+ elif i == 4:
667
+ if random.random() < speckle_prob:
668
+ img = add_speckle_noise(img)
669
+ elif i == 5:
670
+ if random.random() < isp_prob and isp_model is not None:
671
+ with torch.no_grad():
672
+ img, hq = isp_model.forward(img.copy(), hq)
673
+ elif i == 6:
674
+ img = add_JPEG_noise(img)
675
+ elif i == 7:
676
+ img = add_blur(img, sf=sf)
677
+ elif i == 8:
678
+ img = add_resize(img, sf=sf)
679
+ elif i == 9:
680
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
681
+ elif i == 10:
682
+ if random.random() < poisson_prob:
683
+ img = add_Poisson_noise(img)
684
+ elif i == 11:
685
+ if random.random() < speckle_prob:
686
+ img = add_speckle_noise(img)
687
+ elif i == 12:
688
+ if random.random() < isp_prob and isp_model is not None:
689
+ with torch.no_grad():
690
+ img, hq = isp_model.forward(img.copy(), hq)
691
+ else:
692
+ print('check the shuffle!')
693
+
694
+ # resize to desired size
695
+ img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
696
+ interpolation=random.choice([1, 2, 3]))
697
+
698
+ # add final JPEG compression noise
699
+ img = add_JPEG_noise(img)
700
+
701
+ # random crop
702
+ img, hq = random_crop(img, hq, sf, lq_patchsize)
703
+
704
+ return img, hq
705
+
706
+
707
+ if __name__ == '__main__':
708
+ print("hey")
709
+ img = util.imread_uint('utils/test.png', 3)
710
+ print(img)
711
+ img = util.uint2single(img)
712
+ print(img)
713
+ img = img[:448, :448]
714
+ h = img.shape[0] // 4
715
+ print("resizing to", h)
716
+ sf = 4
717
+ deg_fn = partial(degradation_bsrgan_variant, sf=sf)
718
+ for i in range(20):
719
+ print(i)
720
+ img_lq = deg_fn(img)
721
+ print(img_lq)
722
+ img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
723
+ print(img_lq.shape)
724
+ print("bicubic", img_lq_bicubic.shape)
725
+ print(img_hq.shape)
726
+ lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
727
+ interpolation=0)
728
+ lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
729
+ interpolation=0)
730
+ img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
731
+ util.imsave(img_concat, str(i) + '.png')
732
+
733
+
stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py ADDED
@@ -0,0 +1,652 @@
1
+ # -*- coding: utf-8 -*-
2
+ import numpy as np
3
+ import cv2
4
+ import torch
5
+
6
+ from functools import partial
7
+ import random
8
+ from scipy import ndimage
9
+ import scipy
10
+ import scipy.stats as ss
11
+ from scipy.interpolate import interp2d
12
+ from scipy.linalg import orth
13
+ import albumentations
14
+
15
+ import sys
16
+ sys.path.append('.')
17
+ import stable_diffusion.ldm.modules.image_degradation.utils_image as util
18
+
19
+ """
20
+ # --------------------------------------------
21
+ # Super-Resolution
22
+ # --------------------------------------------
23
+ #
24
+ # Kai Zhang (cskaizhang@gmail.com)
25
+ # https://github.com/cszn
26
+ # From 2019/03--2021/08
27
+ # --------------------------------------------
28
+ """
29
+
30
+
31
+ def modcrop_np(img, sf):
32
+ '''
33
+ Args:
34
+ img: numpy image, WxH or WxHxC
35
+ sf: scale factor
36
+ Return:
37
+ cropped image
38
+ '''
39
+ w, h = img.shape[:2]
40
+ im = np.copy(img)
41
+ return im[:w - w % sf, :h - h % sf, ...]
42
+
43
+
44
+ """
45
+ # --------------------------------------------
46
+ # anisotropic Gaussian kernels
47
+ # --------------------------------------------
48
+ """
49
+
50
+
51
+ def analytic_kernel(k):
52
+ """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
53
+ k_size = k.shape[0]
54
+ # Calculate the big kernels size
55
+ big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
56
+ # Loop over the small kernel to fill the big one
57
+ for r in range(k_size):
58
+ for c in range(k_size):
59
+ big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
60
+ # Crop the edges of the big kernel to ignore very small values and increase run time of SR
61
+ crop = k_size // 2
62
+ cropped_big_k = big_k[crop:-crop, crop:-crop]
63
+ # Normalize to 1
64
+ return cropped_big_k / cropped_big_k.sum()
65
+
66
+
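# Usage sketch (illustrative kernel): for a k x k input, analytic_kernel
# returns a cropped (2k-1) x (2k-1) kernel that stays normalized.
k2 = np.ones((7, 7)) / 49.0        # 7x7 box kernel standing in for an x2 kernel
k4 = analytic_kernel(k2)
assert k4.shape == (13, 13)
assert abs(k4.sum() - 1.0) < 1e-6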
67
+ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
68
+ """ generate an anisotropic Gaussian kernel
69
+ Args:
70
+ ksize : e.g., 15, kernel size
71
+ theta : [0, pi], rotation angle range
72
+ l1 : [0.1,50], scaling of eigenvalues
73
+ l2 : [0.1,l1], scaling of eigenvalues
74
+ If l1 = l2, will get an isotropic Gaussian kernel.
75
+ Returns:
76
+ k : kernel
77
+ """
78
+
79
+ v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
80
+ V = np.array([[v[0], v[1]], [v[1], -v[0]]])
81
+ D = np.array([[l1, 0], [0, l2]])
82
+ Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
83
+ k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
84
+
85
+ return k
86
+
87
+
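# Usage sketch (illustrative parameters): a kernel elongated along a 45-degree
# axis; with l1 == l2 the same call degenerates to an isotropic Gaussian.
k = anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=6, l2=1)
assert k.shape == (15, 15) and abs(k.sum() - 1.0) < 1e-6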
88
+ def gm_blur_kernel(mean, cov, size=15):
89
+ center = size / 2.0 + 0.5
90
+ k = np.zeros([size, size])
91
+ for y in range(size):
92
+ for x in range(size):
93
+ cy = y - center + 1
94
+ cx = x - center + 1
95
+ k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
96
+
97
+ k = k / np.sum(k)
98
+ return k
99
+
100
+
101
+ def shift_pixel(x, sf, upper_left=True):
102
+ """shift pixel for super-resolution with different scale factors
103
+ Args:
104
+ x: WxHxC or WxH
105
+ sf: scale factor
106
+ upper_left: shift direction
107
+ """
108
+ h, w = x.shape[:2]
109
+ shift = (sf - 1) * 0.5
110
+ xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
111
+ if upper_left:
112
+ x1 = xv + shift
113
+ y1 = yv + shift
114
+ else:
115
+ x1 = xv - shift
116
+ y1 = yv - shift
117
+
118
+ x1 = np.clip(x1, 0, w - 1)
119
+ y1 = np.clip(y1, 0, h - 1)
120
+
121
+ if x.ndim == 2:
122
+ x = interp2d(xv, yv, x)(x1, y1)
123
+ if x.ndim == 3:
124
+ for i in range(x.shape[-1]):
125
+ x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
126
+
127
+ return x
128
+
129
+
130
+ def blur(x, k):
131
+ '''
132
+ x: image, NxcxHxW
133
+ k: kernel, Nx1xhxw
134
+ '''
135
+ n, c = x.shape[:2]
136
+ p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
137
+ x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
138
+ k = k.repeat(1, c, 1, 1)
139
+ k = k.view(-1, 1, k.shape[2], k.shape[3])
140
+ x = x.view(1, -1, x.shape[2], x.shape[3])
141
+ x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
142
+ x = x.view(n, c, x.shape[2], x.shape[3])
143
+
144
+ return x
145
+
146
+
147
+ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
148
+ """"
149
+ # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
150
+ # Kai Zhang
151
+ # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
152
+ # max_var = 2.5 * sf
153
+ """
154
+ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
155
+ lambda_1 = min_var + np.random.rand() * (max_var - min_var)
156
+ lambda_2 = min_var + np.random.rand() * (max_var - min_var)
157
+ theta = np.random.rand() * np.pi # random theta
158
+ noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
159
+
160
+ # Set COV matrix using Lambdas and Theta
161
+ LAMBDA = np.diag([lambda_1, lambda_2])
162
+ Q = np.array([[np.cos(theta), -np.sin(theta)],
163
+ [np.sin(theta), np.cos(theta)]])
164
+ SIGMA = Q @ LAMBDA @ Q.T
165
+ INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
166
+
167
+ # Set expectation position (shifting kernel for aligned image)
168
+ MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
169
+ MU = MU[None, None, :, None]
170
+
171
+ # Create meshgrid for Gaussian
172
+ [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
173
+ Z = np.stack([X, Y], 2)[:, :, :, None]
174
+
175
+ # Calculate Gaussian for every pixel of the kernel
176
+ ZZ = Z - MU
177
+ ZZ_t = ZZ.transpose(0, 1, 3, 2)
178
+ raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
179
+
180
+ # shift the kernel so it will be centered
181
+ # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
182
+
183
+ # Normalize the kernel and return
184
+ # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
185
+ kernel = raw_kernel / np.sum(raw_kernel)
186
+ return kernel
187
+
188
+
189
+ def fspecial_gaussian(hsize, sigma):
190
+ hsize = [hsize, hsize]
191
+ siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
192
+ std = sigma
193
+ [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
194
+ arg = -(x * x + y * y) / (2 * std * std)
195
+ h = np.exp(arg)
196
+ h[h < np.finfo(float).eps * h.max()] = 0  # np.finfo: the scipy.finfo alias was removed in recent SciPy
197
+ sumh = h.sum()
198
+ if sumh != 0:
199
+ h = h / sumh
200
+ return h
201
+
202
+
203
+ def fspecial_laplacian(alpha):
204
+ alpha = max([0, min([alpha, 1])])
205
+ h1 = alpha / (alpha + 1)
206
+ h2 = (1 - alpha) / (alpha + 1)
207
+ h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
208
+ h = np.array(h)
209
+ return h
210
+
211
+
212
+ def fspecial(filter_type, *args, **kwargs):
213
+ '''
214
+ python code from:
215
+ https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
216
+ '''
217
+ if filter_type == 'gaussian':
218
+ return fspecial_gaussian(*args, **kwargs)
219
+ if filter_type == 'laplacian':
220
+ return fspecial_laplacian(*args, **kwargs)
221
+
222
+
223
+ """
224
+ # --------------------------------------------
225
+ # degradation models
226
+ # --------------------------------------------
227
+ """
228
+
229
+
230
+ def bicubic_degradation(x, sf=3):
231
+ '''
232
+ Args:
233
+ x: HxWxC image, [0, 1]
234
+ sf: down-scale factor
235
+ Return:
236
+ bicubically downsampled LR image
237
+ '''
238
+ x = util.imresize_np(x, scale=1 / sf)
239
+ return x
240
+
241
+
242
+ def srmd_degradation(x, k, sf=3):
243
+ ''' blur + bicubic downsampling
244
+ Args:
245
+ x: HxWxC image, [0, 1]
246
+ k: hxw, double
247
+ sf: down-scale factor
248
+ Return:
249
+ downsampled LR image
250
+ Reference:
251
+ @inproceedings{zhang2018learning,
252
+ title={Learning a single convolutional super-resolution network for multiple degradations},
253
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
254
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
255
+ pages={3262--3271},
256
+ year={2018}
257
+ }
258
+ '''
259
+ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
260
+ x = bicubic_degradation(x, sf=sf)
261
+ return x
262
+
263
+
264
+ def dpsr_degradation(x, k, sf=3):
265
+ ''' bicubic downsampling + blur
266
+ Args:
267
+ x: HxWxC image, [0, 1]
268
+ k: hxw, double
269
+ sf: down-scale factor
270
+ Return:
271
+ downsampled LR image
272
+ Reference:
273
+ @inproceedings{zhang2019deep,
274
+ title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
275
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
276
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
277
+ pages={1671--1681},
278
+ year={2019}
279
+ }
280
+ '''
281
+ x = bicubic_degradation(x, sf=sf)
282
+ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
283
+ return x
284
+
285
+
286
+ def classical_degradation(x, k, sf=3):
287
+ ''' blur + downsampling
288
+ Args:
289
+ x: HxWxC image, [0, 1]/[0, 255]
290
+ k: hxw, double
291
+ sf: down-scale factor
292
+ Return:
293
+ downsampled LR image
294
+ '''
295
+ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
296
+ # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
297
+ st = 0
298
+ return x[st::sf, st::sf, ...]
299
+
300
+
301
+ def add_sharpening(img, weight=0.5, radius=50, threshold=10):
302
+ """USM sharpening. borrowed from real-ESRGAN
303
+ Input image: I; Blurry image: B.
304
+ 1. K = I + weight * (I - B)
305
+ 2. Mask = 1 if abs(I - B) > threshold, else: 0
306
+ 3. Blur the mask to obtain a soft mask.
307
+ 4. Out = Mask * K + (1 - Mask) * I
308
+ Args:
309
+ img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
310
+ weight (float): Sharp weight. Default: 0.5.
311
+ radius (float): Kernel size of Gaussian blur. Default: 50.
312
+ threshold (int): threshold on abs(I - B) * 255 for the sharpening mask. Default: 10.
313
+ """
314
+ if radius % 2 == 0:
315
+ radius += 1
316
+ blur = cv2.GaussianBlur(img, (radius, radius), 0)
317
+ residual = img - blur
318
+ mask = np.abs(residual) * 255 > threshold
319
+ mask = mask.astype('float32')
320
+ soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
321
+
322
+ K = img + weight * residual
323
+ K = np.clip(K, 0, 1)
324
+ return soft_mask * K + (1 - soft_mask) * img
325
+
326
+
327
+ def add_blur(img, sf=4):
328
+ wd2 = 4.0 + sf
329
+ wd = 2.0 + 0.2 * sf
330
+
331
+ wd2 = wd2/4
332
+ wd = wd/4
333
+
334
+ if random.random() < 0.5:
335
+ l1 = wd2 * random.random()
336
+ l2 = wd2 * random.random()
337
+ k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
338
+ else:
339
+ k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
340
+ img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
341
+
342
+ return img
343
+
344
+
345
+ def add_resize(img, sf=4):
346
+ rnum = np.random.rand()
347
+ if rnum > 0.8: # up
348
+ sf1 = random.uniform(1, 2)
349
+ elif rnum < 0.7: # down
350
+ sf1 = random.uniform(0.5 / sf, 1)
351
+ else:
352
+ sf1 = 1.0
353
+ img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
354
+ img = np.clip(img, 0.0, 1.0)
355
+
356
+ return img
357
+
358
+
359
+ # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
360
+ # noise_level = random.randint(noise_level1, noise_level2)
361
+ # rnum = np.random.rand()
362
+ # if rnum > 0.6: # add color Gaussian noise
363
+ # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
364
+ # elif rnum < 0.4: # add grayscale Gaussian noise
365
+ # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
366
+ # else: # add noise
367
+ # L = noise_level2 / 255.
368
+ # D = np.diag(np.random.rand(3))
369
+ # U = orth(np.random.rand(3, 3))
370
+ # conv = np.dot(np.dot(np.transpose(U), D), U)
371
+ # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
372
+ # img = np.clip(img, 0.0, 1.0)
373
+ # return img
374
+
375
+ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
376
+ noise_level = random.randint(noise_level1, noise_level2)
377
+ rnum = np.random.rand()
378
+ if rnum > 0.6: # add color Gaussian noise
379
+ img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
380
+ elif rnum < 0.4: # add grayscale Gaussian noise
381
+ img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
382
+ else: # add noise
383
+ L = noise_level2 / 255.
384
+ D = np.diag(np.random.rand(3))
385
+ U = orth(np.random.rand(3, 3))
386
+ conv = np.dot(np.dot(np.transpose(U), D), U)
387
+ img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
388
+ img = np.clip(img, 0.0, 1.0)
389
+ return img
390
+
391
+
392
+ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
393
+ noise_level = random.randint(noise_level1, noise_level2)
394
+ img = np.clip(img, 0.0, 1.0)
395
+ rnum = random.random()
396
+ if rnum > 0.6:
397
+ img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
398
+ elif rnum < 0.4:
399
+ img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
400
+ else:
401
+ L = noise_level2 / 255.
402
+ D = np.diag(np.random.rand(3))
403
+ U = orth(np.random.rand(3, 3))
404
+ conv = np.dot(np.dot(np.transpose(U), D), U)
405
+ img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
406
+ img = np.clip(img, 0.0, 1.0)
407
+ return img
408
+
409
+
410
+ def add_Poisson_noise(img):
411
+ img = np.clip((img * 255.0).round(), 0, 255) / 255.
412
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
413
+ if random.random() < 0.5:
414
+ img = np.random.poisson(img * vals).astype(np.float32) / vals
415
+ else:
416
+ img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
417
+ img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
418
+ noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
419
+ img += noise_gray[:, :, np.newaxis]
420
+ img = np.clip(img, 0.0, 1.0)
421
+ return img
422
+
423
+
424
+ def add_JPEG_noise(img):
425
+ quality_factor = random.randint(80, 95)
426
+ img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
427
+ result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
428
+ img = cv2.imdecode(encimg, 1)
429
+ img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
430
+ return img
431
+
432
+
433
+ def random_crop(lq, hq, sf=4, lq_patchsize=64):
434
+ h, w = lq.shape[:2]
435
+ rnd_h = random.randint(0, h - lq_patchsize)
436
+ rnd_w = random.randint(0, w - lq_patchsize)
437
+ lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
438
+
439
+ rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
440
+ hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
441
+ return lq, hq
442
+
443
+
444
+ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
445
+ """
446
+ This is the degradation model of BSRGAN from the paper
447
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
448
+ ----------
449
+ img: HxWxC, [0, 1]; its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
450
+ sf: scale factor
451
+ isp_model: camera ISP model
452
+ Returns
453
+ -------
454
+ img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
455
+ hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
456
+ """
457
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
458
+ sf_ori = sf
459
+
460
+ h1, w1 = img.shape[:2]
461
+ img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is the height h1, axis 1 the width w1)
462
+ h, w = img.shape[:2]
463
+
464
+ if h < lq_patchsize * sf or w < lq_patchsize * sf:
465
+ raise ValueError(f'img size ({h1}X{w1}) is too small!')
466
+
467
+ hq = img.copy()
468
+
469
+ if sf == 4 and random.random() < scale2_prob: # downsample1
470
+ if np.random.rand() < 0.5:
471
+ img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
472
+ interpolation=random.choice([1, 2, 3]))
473
+ else:
474
+ img = util.imresize_np(img, 1 / 2, True)
475
+ img = np.clip(img, 0.0, 1.0)
476
+ sf = 2
477
+
478
+ shuffle_order = random.sample(range(7), 7)
479
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
480
+ if idx1 > idx2: # keep downsample3 last
481
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
482
+
483
+ for i in shuffle_order:
484
+
485
+ if i == 0:
486
+ img = add_blur(img, sf=sf)
487
+
488
+ elif i == 1:
489
+ img = add_blur(img, sf=sf)
490
+
491
+ elif i == 2:
492
+ a, b = img.shape[1], img.shape[0]
493
+ # downsample2
494
+ if random.random() < 0.75:
495
+ sf1 = random.uniform(1, 2 * sf)
496
+ img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
497
+ interpolation=random.choice([1, 2, 3]))
498
+ else:
499
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
500
+ k_shifted = shift_pixel(k, sf)
501
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
502
+ img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
503
+ img = img[0::sf, 0::sf, ...] # nearest downsampling
504
+ img = np.clip(img, 0.0, 1.0)
505
+
506
+ elif i == 3:
507
+ # downsample3
508
+ img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
509
+ img = np.clip(img, 0.0, 1.0)
510
+
511
+ elif i == 4:
512
+ # add Gaussian noise
513
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
514
+
515
+ elif i == 5:
516
+ # add JPEG noise
517
+ if random.random() < jpeg_prob:
518
+ img = add_JPEG_noise(img)
519
+
520
+ elif i == 6:
521
+ # add processed camera sensor noise
522
+ if random.random() < isp_prob and isp_model is not None:
523
+ with torch.no_grad():
524
+ img, hq = isp_model.forward(img.copy(), hq)
525
+
526
+ # add final JPEG compression noise
527
+ img = add_JPEG_noise(img)
528
+
529
+ # random crop
530
+ img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
531
+
532
+ return img, hq
533
+
534
+
535
+ # TODO: no isp_model?
536
+ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
537
+ """
538
+ This is the degradation model of BSRGAN from the paper
539
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
540
+ ----------
541
+ sf: scale factor
542
+ isp_model: camera ISP model
543
+ Returns
544
+ -------
545
+ example: dict with key "image" holding the degraded image (uint8, same HxWxC layout, roughly 1/sf of the input size)
547
+ """
548
+ image = util.uint2single(image)
549
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
550
+ sf_ori = sf
551
+
552
+ h1, w1 = image.shape[:2]
553
+ image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is the height h1, axis 1 the width w1)
554
+ h, w = image.shape[:2]
555
+
556
+ hq = image.copy()
557
+
558
+ if sf == 4 and random.random() < scale2_prob: # downsample1
559
+ if np.random.rand() < 0.5:
560
+ image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
561
+ interpolation=random.choice([1, 2, 3]))
562
+ else:
563
+ image = util.imresize_np(image, 1 / 2, True)
564
+ image = np.clip(image, 0.0, 1.0)
565
+ sf = 2
566
+
567
+ shuffle_order = random.sample(range(7), 7)
568
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
569
+ if idx1 > idx2: # keep downsample3 last
570
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
571
+
572
+ for i in shuffle_order:
573
+
574
+         if i == 0:
+             image = add_blur(image, sf=sf)
+
+         # elif i == 1:  # the second blur stage is disabled in this light variant
+         #     image = add_blur(image, sf=sf)
582
+
583
+ elif i == 2:
584
+ a, b = image.shape[1], image.shape[0]
585
+ # downsample2
586
+ if random.random() < 0.8:
587
+ sf1 = random.uniform(1, 2 * sf)
588
+ image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
589
+ interpolation=random.choice([1, 2, 3]))
590
+ else:
591
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
592
+ k_shifted = shift_pixel(k, sf)
593
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
594
+ image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
595
+ image = image[0::sf, 0::sf, ...] # nearest downsampling
596
+
597
+ image = np.clip(image, 0.0, 1.0)
598
+
599
+ elif i == 3:
600
+ # downsample3
601
+ image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
602
+ image = np.clip(image, 0.0, 1.0)
603
+
604
+ elif i == 4:
605
+ # add Gaussian noise
606
+ image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
607
+
608
+ elif i == 5:
609
+ # add JPEG noise
610
+ if random.random() < jpeg_prob:
611
+ image = add_JPEG_noise(image)
612
+ #
613
+ # elif i == 6:
614
+ # # add processed camera sensor noise
615
+ # if random.random() < isp_prob and isp_model is not None:
616
+ # with torch.no_grad():
617
+ # img, hq = isp_model.forward(img.copy(), hq)
618
+
619
+ # add final JPEG compression noise
620
+ image = add_JPEG_noise(image)
621
+ image = util.single2uint(image)
622
+ example = {"image": image}
623
+ return example
624
+
625
+
626
+
627
+
628
+ if __name__ == '__main__':
629
+ print("hey")
630
+ img = util.imread_uint('utils/test.png', 3)
631
+ img = img[:448, :448]
632
+ h = img.shape[0] // 4
633
+ print("resizing to", h)
634
+ sf = 4
635
+ deg_fn = partial(degradation_bsrgan_variant, sf=sf)
636
+ for i in range(20):
637
+ print(i)
638
+ img_hq = img
639
+ img_lq = deg_fn(img)["image"]
640
+ img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
641
+ print(img_lq)
642
+ img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
643
+ print(img_lq.shape)
644
+ print("bicubic", img_lq_bicubic.shape)
645
+ print(img_hq.shape)
646
+ lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
647
+ interpolation=0)
648
+ lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
649
+ (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
650
+ interpolation=0)
651
+ img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
652
+ util.imsave(img_concat, str(i) + '.png')
stable_diffusion/ldm/modules/image_degradation/utils_image.py ADDED
@@ -0,0 +1,916 @@
1
+ import os
2
+ import math
3
+ import random
4
+ import numpy as np
5
+ import torch
6
+ import cv2
7
+ from torchvision.utils import make_grid
8
+ from datetime import datetime
9
+ #import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
10
+
11
+
12
+ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
13
+
14
+
15
+ '''
16
+ # --------------------------------------------
17
+ # Kai Zhang (github: https://github.com/cszn)
18
+ # 03/Mar/2019
19
+ # --------------------------------------------
20
+ # https://github.com/twhui/SRGAN-pyTorch
21
+ # https://github.com/xinntao/BasicSR
22
+ # --------------------------------------------
23
+ '''
24
+
25
+
26
+ IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
27
+
28
+
29
+ def is_image_file(filename):
30
+ return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
31
+
32
+
33
+ def get_timestamp():
34
+ return datetime.now().strftime('%y%m%d-%H%M%S')
35
+
36
+
37
+ def imshow(x, title=None, cbar=False, figsize=None):
38
+     import matplotlib.pyplot as plt  # local import: the module-level import is commented out above
+     plt.figure(figsize=figsize)
39
+ plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
40
+ if title:
41
+ plt.title(title)
42
+ if cbar:
43
+ plt.colorbar()
44
+ plt.show()
45
+
46
+
47
+ def surf(Z, cmap='rainbow', figsize=None):
48
+     import matplotlib.pyplot as plt  # local import: the module-level import is commented out above
+     plt.figure(figsize=figsize)
49
+ ax3 = plt.axes(projection='3d')
50
+
51
+ w, h = Z.shape[:2]
52
+ xx = np.arange(0,w,1)
53
+ yy = np.arange(0,h,1)
54
+ X, Y = np.meshgrid(xx, yy)
55
+ ax3.plot_surface(X,Y,Z,cmap=cmap)
56
+ #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
57
+ plt.show()
58
+
59
+
60
+ '''
61
+ # --------------------------------------------
62
+ # get image paths
63
+ # --------------------------------------------
64
+ '''
65
+
66
+
67
+ def get_image_paths(dataroot):
68
+ paths = None # return None if dataroot is None
69
+ if dataroot is not None:
70
+ paths = sorted(_get_paths_from_images(dataroot))
71
+ return paths
72
+
73
+
74
+ def _get_paths_from_images(path):
75
+ assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
76
+ images = []
77
+ for dirpath, _, fnames in sorted(os.walk(path)):
78
+ for fname in sorted(fnames):
79
+ if is_image_file(fname):
80
+ img_path = os.path.join(dirpath, fname)
81
+ images.append(img_path)
82
+ assert images, '{:s} has no valid image file'.format(path)
83
+ return images
84
+
85
+
86
+ '''
87
+ # --------------------------------------------
88
+ # split large images into small images
89
+ # --------------------------------------------
90
+ '''
91
+
92
+
93
+ def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
94
+ w, h = img.shape[:2]
95
+ patches = []
96
+ if w > p_max and h > p_max:
97
+ w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=int))  # np.int was removed in NumPy 1.24
98
+ h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=int))
99
+ w1.append(w-p_size)
100
+ h1.append(h-p_size)
101
+ # print(w1)
102
+ # print(h1)
103
+ for i in w1:
104
+ for j in h1:
105
+ patches.append(img[i:i+p_size, j:j+p_size,:])
106
+ else:
107
+ patches.append(img)
108
+
109
+ return patches
110
+
111
+
112
+ def imssave(imgs, img_path):
113
+ """
114
+ imgs: list, N images of size WxHxC
115
+ """
116
+ img_name, ext = os.path.splitext(os.path.basename(img_path))
117
+
118
+ for i, img in enumerate(imgs):
119
+ if img.ndim == 3:
120
+ img = img[:, :, [2, 1, 0]]
121
+ new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
122
+ cv2.imwrite(new_path, img)
123
+
124
+
125
+ def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
126
+ """
127
+ split the large images from original_dataroot into small overlapping images of size (p_size)x(p_size),
+ and save them into taget_dataroot; only images larger than (p_max)x(p_max) will be split.
130
+ Args:
131
+ original_dataroot:
132
+ taget_dataroot:
133
+ p_size: size of small images
134
+ p_overlap: overlap between adjacent patches; the training patch size is a good choice
135
+ p_max: images smaller than (p_max)x(p_max) are kept unchanged.
136
+ """
137
+ paths = get_image_paths(original_dataroot)
138
+ for img_path in paths:
139
+ # img_name, ext = os.path.splitext(os.path.basename(img_path))
140
+ img = imread_uint(img_path, n_channels=n_channels)
141
+ patches = patches_from_image(img, p_size, p_overlap, p_max)
142
+ imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path)))
143
+ #if original_dataroot == taget_dataroot:
144
+ #del img_path
145
+
146
+ '''
147
+ # --------------------------------------------
148
+ # makedir
149
+ # --------------------------------------------
150
+ '''
151
+
152
+
153
+ def mkdir(path):
154
+ if not os.path.exists(path):
155
+ os.makedirs(path)
156
+
157
+
158
+ def mkdirs(paths):
159
+ if isinstance(paths, str):
160
+ mkdir(paths)
161
+ else:
162
+ for path in paths:
163
+ mkdir(path)
164
+
165
+
166
+ def mkdir_and_rename(path):
167
+ if os.path.exists(path):
168
+ new_name = path + '_archived_' + get_timestamp()
169
+ print('Path already exists. Rename it to [{:s}]'.format(new_name))
170
+ os.rename(path, new_name)
171
+ os.makedirs(path)
172
+
173
+
174
+ '''
175
+ # --------------------------------------------
176
+ # read image from path
177
+ # opencv is fast, but read BGR numpy image
178
+ # --------------------------------------------
179
+ '''
180
+
181
+
182
+ # --------------------------------------------
183
+ # get uint8 image of size HxWxn_channels (RGB)
184
+ # --------------------------------------------
185
+ def imread_uint(path, n_channels=3):
186
+ # input: path
187
+ # output: HxWx3(RGB or GGG), or HxWx1 (G)
188
+ if n_channels == 1:
189
+ img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE
190
+ img = np.expand_dims(img, axis=2) # HxWx1
191
+ elif n_channels == 3:
192
+ img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G
193
+ if img.ndim == 2:
194
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG
195
+ else:
196
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB
197
+ return img
198
+
199
+
200
+ # --------------------------------------------
201
+ # matlab's imwrite
202
+ # --------------------------------------------
203
+ def imsave(img, img_path):
204
+ img = np.squeeze(img)
205
+ if img.ndim == 3:
206
+ img = img[:, :, [2, 1, 0]]
207
+ cv2.imwrite(img_path, img)
208
+
209
+ def imwrite(img, img_path):
210
+ img = np.squeeze(img)
211
+ if img.ndim == 3:
212
+ img = img[:, :, [2, 1, 0]]
213
+ cv2.imwrite(img_path, img)
214
+
215
+
216
+
217
+ # --------------------------------------------
218
+ # get single image of size HxWxn_channels (BGR)
219
+ # --------------------------------------------
220
+ def read_img(path):
221
+ # read image by cv2
222
+ # return: Numpy float32, HWC, BGR, [0,1]
223
+ img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE
224
+ img = img.astype(np.float32) / 255.
225
+ if img.ndim == 2:
226
+ img = np.expand_dims(img, axis=2)
227
+ # some images have 4 channels
228
+ if img.shape[2] > 3:
229
+ img = img[:, :, :3]
230
+ return img
231
+
232
+
233
+ '''
234
+ # --------------------------------------------
235
+ # image format conversion
236
+ # --------------------------------------------
237
+ # numpy(single) <---> numpy(uint)
238
+ # numpy(single) <---> tensor
239
+ # numpy(uint) <---> tensor
240
+ # --------------------------------------------
241
+ '''
242
+
243
+
244
+ # --------------------------------------------
245
+ # numpy(single) [0, 1] <---> numpy(uint)
246
+ # --------------------------------------------
247
+
248
+
249
+ def uint2single(img):
250
+
251
+ return np.float32(img/255.)
252
+
253
+
254
+ def single2uint(img):
255
+
256
+ return np.uint8((img.clip(0, 1)*255.).round())
257
+
258
+
259
+ def uint162single(img):
260
+
261
+ return np.float32(img/65535.)
262
+
263
+
264
+ def single2uint16(img):
265
+
266
+ return np.uint16((img.clip(0, 1)*65535.).round())
267
+
268
+
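# Usage sketch (illustrative values): the converters above map between uint
# images and float32 images in [0, 1]; a uint8 round trip is lossless.
u = np.array([[0, 128, 255]], dtype=np.uint8)
s = uint2single(u)                     # float32 in [0, 1]
assert np.array_equal(single2uint(s), u)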
269
+ # --------------------------------------------
270
+ # numpy(uint) (HxWxC or HxW) <---> tensor
271
+ # --------------------------------------------
272
+
273
+
274
+ # convert uint to 4-dimensional torch tensor
275
+ def uint2tensor4(img):
276
+ if img.ndim == 2:
277
+ img = np.expand_dims(img, axis=2)
278
+ return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
279
+
280
+
281
+ # convert uint to 3-dimensional torch tensor
282
+ def uint2tensor3(img):
283
+ if img.ndim == 2:
284
+ img = np.expand_dims(img, axis=2)
285
+ return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
286
+
287
+
288
+ # convert 2/3/4-dimensional torch tensor to uint
289
+ def tensor2uint(img):
290
+ img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
291
+ if img.ndim == 3:
292
+ img = np.transpose(img, (1, 2, 0))
293
+ return np.uint8((img*255.0).round())
294
+
295
+
296
+ # --------------------------------------------
297
+ # numpy(single) (HxWxC) <---> tensor
298
+ # --------------------------------------------
299
+
300
+
301
+ # convert single (HxWxC) to 3-dimensional torch tensor
302
+ def single2tensor3(img):
303
+ return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
304
+
305
+
306
+ # convert single (HxWxC) to 4-dimensional torch tensor
307
+ def single2tensor4(img):
308
+ return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
309
+
310
+
311
+ # convert torch tensor to single
312
+ def tensor2single(img):
313
+ img = img.data.squeeze().float().cpu().numpy()
314
+ if img.ndim == 3:
315
+ img = np.transpose(img, (1, 2, 0))
316
+
317
+ return img
318
+
319
+ # convert torch tensor to single
320
+ def tensor2single3(img):
321
+ img = img.data.squeeze().float().cpu().numpy()
322
+ if img.ndim == 3:
323
+ img = np.transpose(img, (1, 2, 0))
324
+ elif img.ndim == 2:
325
+ img = np.expand_dims(img, axis=2)
326
+ return img
327
+
328
+
329
+ def single2tensor5(img):
330
+ return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
331
+
332
+
333
+ def single32tensor5(img):
334
+ return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
335
+
336
+
337
+ def single42tensor4(img):
338
+ return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
339
+
340
+
341
+ # from skimage.io import imread, imsave
342
+ def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
343
+ '''
344
+ Converts a torch Tensor into an image Numpy array of BGR channel order
345
+ Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
346
+ Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
347
+ '''
348
+ tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp
349
+ tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
350
+ n_dim = tensor.dim()
351
+ if n_dim == 4:
352
+ n_img = len(tensor)
353
+ img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
354
+ img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
355
+ elif n_dim == 3:
356
+ img_np = tensor.numpy()
357
+ img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
358
+ elif n_dim == 2:
359
+ img_np = tensor.numpy()
360
+ else:
361
+ raise TypeError(
362
+ 'Only 4D, 3D, and 2D tensors are supported, but received a tensor of dimension {:d}'.format(n_dim))
363
+ if out_type == np.uint8:
364
+ img_np = (img_np * 255.0).round()
365
+ # Important. Unlike MATLAB, numpy.uint8() will NOT round by default.
366
+ return img_np.astype(out_type)
367
+
368
+
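# Usage sketch (illustrative shapes): tensor2img converts an RGB CHW tensor in
# [0, 1] to a uint8 HWC BGR array; 4D batches are tiled into a grid first.
t = torch.rand(3, 8, 8)
img = tensor2img(t)
assert img.shape == (8, 8, 3) and img.dtype == np.uint8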
369
+ '''
370
+ # --------------------------------------------
371
+ # Augmentation: flip and/or rotate
372
+ # --------------------------------------------
373
+ # The following two are enough.
374
+ # (1) augment_img: numpy image of WxHxC or WxH
375
+ # (2) augment_img_tensor4: tensor image 1xCxWxH
376
+ # --------------------------------------------
377
+ '''
378
+
379
+
380
+ def augment_img(img, mode=0):
381
+ '''Kai Zhang (github: https://github.com/cszn)
382
+ '''
383
+ if mode == 0:
384
+ return img
385
+ elif mode == 1:
386
+ return np.flipud(np.rot90(img))
387
+ elif mode == 2:
388
+ return np.flipud(img)
389
+ elif mode == 3:
390
+ return np.rot90(img, k=3)
391
+ elif mode == 4:
392
+ return np.flipud(np.rot90(img, k=2))
393
+ elif mode == 5:
394
+ return np.rot90(img)
395
+ elif mode == 6:
396
+ return np.rot90(img, k=2)
397
+ elif mode == 7:
398
+ return np.flipud(np.rot90(img, k=3))
399
+
400
+
401
+ def augment_img_tensor4(img, mode=0):
402
+ '''Kai Zhang (github: https://github.com/cszn)
403
+ '''
404
+ if mode == 0:
405
+ return img
406
+ elif mode == 1:
407
+ return img.rot90(1, [2, 3]).flip([2])
408
+ elif mode == 2:
409
+ return img.flip([2])
410
+ elif mode == 3:
411
+ return img.rot90(3, [2, 3])
412
+ elif mode == 4:
413
+ return img.rot90(2, [2, 3]).flip([2])
414
+ elif mode == 5:
415
+ return img.rot90(1, [2, 3])
416
+ elif mode == 6:
417
+ return img.rot90(2, [2, 3])
418
+ elif mode == 7:
419
+ return img.rot90(3, [2, 3]).flip([2])
420
+
421
+
422
+ def augment_img_tensor(img, mode=0):
423
+ '''Kai Zhang (github: https://github.com/cszn)
424
+ '''
425
+ img_size = img.size()
426
+ img_np = img.data.cpu().numpy()
427
+ if len(img_size) == 3:
428
+ img_np = np.transpose(img_np, (1, 2, 0))
429
+ elif len(img_size) == 4:
430
+ img_np = np.transpose(img_np, (2, 3, 1, 0))
431
+ img_np = augment_img(img_np, mode=mode)
432
+ img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
433
+ if len(img_size) == 3:
434
+ img_tensor = img_tensor.permute(2, 0, 1)
435
+ elif len(img_size) == 4:
436
+ img_tensor = img_tensor.permute(3, 2, 0, 1)
437
+
438
+ return img_tensor.type_as(img)
439
+
440
+
441
+ def augment_img_np3(img, mode=0):
442
+ if mode == 0:
443
+ return img
444
+ elif mode == 1:
445
+ return img.transpose(1, 0, 2)
446
+ elif mode == 2:
447
+ return img[::-1, :, :]
448
+ elif mode == 3:
449
+ img = img[::-1, :, :]
450
+ img = img.transpose(1, 0, 2)
451
+ return img
452
+ elif mode == 4:
453
+ return img[:, ::-1, :]
454
+ elif mode == 5:
455
+ img = img[:, ::-1, :]
456
+ img = img.transpose(1, 0, 2)
457
+ return img
458
+ elif mode == 6:
459
+ img = img[:, ::-1, :]
460
+ img = img[::-1, :, :]
461
+ return img
462
+ elif mode == 7:
463
+ img = img[:, ::-1, :]
464
+ img = img[::-1, :, :]
465
+ img = img.transpose(1, 0, 2)
466
+ return img
467
+
468
+
469
+ def augment_imgs(img_list, hflip=True, rot=True):
470
+ # random horizontal flip (if hflip) and random vertical flip / 90-degree rotation (if rot), each with probability 0.5
471
+ hflip = hflip and random.random() < 0.5
472
+ vflip = rot and random.random() < 0.5
473
+ rot90 = rot and random.random() < 0.5
474
+
475
+ def _augment(img):
476
+ if hflip:
477
+ img = img[:, ::-1, :]
478
+ if vflip:
479
+ img = img[::-1, :, :]
480
+ if rot90:
481
+ img = img.transpose(1, 0, 2)
482
+ return img
483
+
484
+ return [_augment(img) for img in img_list]
485
+
486
+
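A minimal consistency sketch for the augmentation helpers above: modes 0-7 enumerate the eight symmetries of the dihedral group D4 (identity, three rotations, and their flipped counterparts), and the numpy and 4-D tensor variants are intended to agree mode for mode:

import numpy as np
import torch

img_np = np.random.rand(8, 8, 3).astype(np.float32)              # HxWxC single
img_t4 = torch.from_numpy(img_np).permute(2, 0, 1).unsqueeze(0)  # 1xCxHxW

for mode in range(8):
    aug_np = augment_img(img_np, mode=mode)
    aug_t4 = augment_img_tensor4(img_t4, mode=mode)
    # both variants apply the same D4 symmetry for a given mode
    assert np.allclose(aug_t4.squeeze(0).permute(1, 2, 0).numpy(),
                       np.ascontiguousarray(aug_np), atol=1e-6)
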
487
+ '''
488
+ # --------------------------------------------
489
+ # modcrop and shave
490
+ # --------------------------------------------
491
+ '''
492
+
493
+
494
+ def modcrop(img_in, scale):
495
+ # img_in: Numpy, HWC or HW
496
+ img = np.copy(img_in)
497
+ if img.ndim == 2:
498
+ H, W = img.shape
499
+ H_r, W_r = H % scale, W % scale
500
+ img = img[:H - H_r, :W - W_r]
501
+ elif img.ndim == 3:
502
+ H, W, C = img.shape
503
+ H_r, W_r = H % scale, W % scale
504
+ img = img[:H - H_r, :W - W_r, :]
505
+ else:
506
+ raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
507
+ return img
508
+
509
+
510
+ def shave(img_in, border=0):
511
+ # img_in: Numpy, HWC or HW
512
+ img = np.copy(img_in)
513
+ h, w = img.shape[:2]
514
+ img = img[border:h-border, border:w-border]
515
+ return img
516
+
517
+
518
+ '''
519
+ # --------------------------------------------
520
+ # image processing routines on numpy images
521
+ # channel_convert(in_c, tar_type, img_list):
522
+ # rgb2ycbcr(img, only_y=True):
523
+ # bgr2ycbcr(img, only_y=True):
524
+ # ycbcr2rgb(img):
525
+ # --------------------------------------------
526
+ '''
527
+
528
+
529
+ def rgb2ycbcr(img, only_y=True):
530
+ '''same as matlab rgb2ycbcr
531
+ only_y: only return Y channel
532
+ Input:
533
+ uint8, [0, 255]
534
+ float, [0, 1]
535
+ '''
536
+ in_img_type = img.dtype
537
+ img = img.astype(np.float32)  # astype returns a copy; assign it so the math below runs in float
538
+ if in_img_type != np.uint8:
539
+ img *= 255.
540
+ # convert
541
+ if only_y:
542
+ rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
543
+ else:
544
+ rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
545
+ [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
546
+ if in_img_type == np.uint8:
547
+ rlt = rlt.round()
548
+ else:
549
+ rlt /= 255.
550
+ return rlt.astype(in_img_type)
551
+
552
+
553
+ def ycbcr2rgb(img):
554
+ '''same as matlab ycbcr2rgb
555
+ Input:
556
+ uint8, [0, 255]
557
+ float, [0, 1]
558
+ '''
559
+ in_img_type = img.dtype
560
+ img = img.astype(np.float32)  # astype returns a copy; assign it so the math below runs in float
561
+ if in_img_type != np.uint8:
562
+ img *= 255.
563
+ # convert
564
+ rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
565
+ [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
566
+ if in_img_type == np.uint8:
567
+ rlt = rlt.round()
568
+ else:
569
+ rlt /= 255.
570
+ return rlt.astype(in_img_type)
571
+
572
+
573
+ def bgr2ycbcr(img, only_y=True):
574
+ '''bgr version of rgb2ycbcr
575
+ only_y: only return Y channel
576
+ Input:
577
+ uint8, [0, 255]
578
+ float, [0, 1]
579
+ '''
580
+ in_img_type = img.dtype
581
+ img = img.astype(np.float32)  # astype returns a copy; assign it so the math below runs in float
582
+ if in_img_type != np.uint8:
583
+ img *= 255.
584
+ # convert
585
+ if only_y:
586
+ rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
587
+ else:
588
+ rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
589
+ [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
590
+ if in_img_type == np.uint8:
591
+ rlt = rlt.round()
592
+ else:
593
+ rlt /= 255.
594
+ return rlt.astype(in_img_type)
595
+
596
+
597
+ def channel_convert(in_c, tar_type, img_list):
598
+ # conversion among BGR, gray and y
599
+ if in_c == 3 and tar_type == 'gray': # BGR to gray
600
+ gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
601
+ return [np.expand_dims(img, axis=2) for img in gray_list]
602
+ elif in_c == 3 and tar_type == 'y': # BGR to y
603
+ y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
604
+ return [np.expand_dims(img, axis=2) for img in y_list]
605
+ elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR (the key says 'RGB' but the conversion targets OpenCV's BGR layout)
606
+ return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
607
+ else:
608
+ return img_list
609
+
610
+
611
+ '''
612
+ # --------------------------------------------
613
+ # metric, PSNR and SSIM
614
+ # --------------------------------------------
615
+ '''
616
+
617
+
618
+ # --------------------------------------------
619
+ # PSNR
620
+ # --------------------------------------------
621
+ def calculate_psnr(img1, img2, border=0):
622
+ # img1 and img2 have range [0, 255]
623
+ #img1 = img1.squeeze()
624
+ #img2 = img2.squeeze()
625
+ if not img1.shape == img2.shape:
626
+ raise ValueError('Input images must have the same dimensions.')
627
+ h, w = img1.shape[:2]
628
+ img1 = img1[border:h-border, border:w-border]
629
+ img2 = img2[border:h-border, border:w-border]
630
+
631
+ img1 = img1.astype(np.float64)
632
+ img2 = img2.astype(np.float64)
633
+ mse = np.mean((img1 - img2)**2)
634
+ if mse == 0:
635
+ return float('inf')
636
+ return 20 * math.log10(255.0 / math.sqrt(mse))
637
+
638
+
639
+ # --------------------------------------------
640
+ # SSIM
641
+ # --------------------------------------------
642
+ def calculate_ssim(img1, img2, border=0):
643
+ '''calculate SSIM
644
+ the same outputs as MATLAB's
645
+ img1, img2: [0, 255]
646
+ '''
647
+ #img1 = img1.squeeze()
648
+ #img2 = img2.squeeze()
649
+ if not img1.shape == img2.shape:
650
+ raise ValueError('Input images must have the same dimensions.')
651
+ h, w = img1.shape[:2]
652
+ img1 = img1[border:h-border, border:w-border]
653
+ img2 = img2[border:h-border, border:w-border]
654
+
655
+ if img1.ndim == 2:
656
+ return ssim(img1, img2)
657
+ elif img1.ndim == 3:
658
+ if img1.shape[2] == 3:
659
+ ssims = []
660
+ for i in range(3):
661
+ ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
662
+ return np.array(ssims).mean()
663
+ elif img1.shape[2] == 1:
664
+ return ssim(np.squeeze(img1), np.squeeze(img2))
665
+ else:
666
+ raise ValueError('Wrong input image dimensions.')
667
+
668
+
669
+ def ssim(img1, img2):
670
+ C1 = (0.01 * 255)**2
671
+ C2 = (0.03 * 255)**2
672
+
673
+ img1 = img1.astype(np.float64)
674
+ img2 = img2.astype(np.float64)
675
+ kernel = cv2.getGaussianKernel(11, 1.5)
676
+ window = np.outer(kernel, kernel.transpose())
677
+
678
+ mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
679
+ mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
680
+ mu1_sq = mu1**2
681
+ mu2_sq = mu2**2
682
+ mu1_mu2 = mu1 * mu2
683
+ sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
684
+ sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
685
+ sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
686
+
687
+ ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
688
+ (sigma1_sq + sigma2_sq + C2))
689
+ return ssim_map.mean()
690
+
691
+
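A minimal usage sketch for the two metrics above, assuming uint8 inputs in [0, 255]; border trims that many pixels from each edge before scoring:

import numpy as np

gt = (np.random.rand(64, 64, 3) * 255).round().astype(np.uint8)
noisy = np.clip(gt.astype(np.float64) + 5 * np.random.randn(64, 64, 3), 0, 255).astype(np.uint8)

print('PSNR: {:.2f} dB'.format(calculate_psnr(gt, noisy, border=4)))
print('SSIM: {:.4f}'.format(calculate_ssim(gt, noisy, border=4)))
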
692
+ '''
693
+ # --------------------------------------------
694
+ # matlab's bicubic imresize (numpy and torch) [0, 1]
695
+ # --------------------------------------------
696
+ '''
697
+
698
+
699
+ # matlab 'imresize' function; currently only 'bicubic' is supported
700
+ def cubic(x):
701
+ absx = torch.abs(x)
702
+ absx2 = absx**2
703
+ absx3 = absx**3
704
+ return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
705
+ (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
706
+
707
+
708
+ def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
709
+ if (scale < 1) and (antialiasing):
710
+ # Use a modified kernel to simultaneously interpolate and antialias; this enlarges the kernel width
711
+ kernel_width = kernel_width / scale
712
+
713
+ # Output-space coordinates
714
+ x = torch.linspace(1, out_length, out_length)
715
+
716
+ # Input-space coordinates. Calculate the inverse mapping such that 0.5
717
+ # in output space maps to 0.5 in input space, and 0.5+scale in output
718
+ # space maps to 1.5 in input space.
719
+ u = x / scale + 0.5 * (1 - 1 / scale)
720
+
721
+ # What is the left-most pixel that can be involved in the computation?
722
+ left = torch.floor(u - kernel_width / 2)
723
+
724
+ # What is the maximum number of pixels that can be involved in the
725
+ # computation? Note: it's OK to use an extra pixel here; if the
726
+ # corresponding weights are all zero, it will be eliminated at the end
727
+ # of this function.
728
+ P = math.ceil(kernel_width) + 2
729
+
730
+ # The indices of the input pixels involved in computing the k-th output
731
+ # pixel are in row k of the indices matrix.
732
+ indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
733
+ 1, P).expand(out_length, P)
734
+
735
+ # The weights used to compute the k-th output pixel are in row k of the
736
+ # weights matrix.
737
+ distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
738
+ # apply cubic kernel
739
+ if (scale < 1) and (antialiasing):
740
+ weights = scale * cubic(distance_to_center * scale)
741
+ else:
742
+ weights = cubic(distance_to_center)
743
+ # Normalize the weights matrix so that each row sums to 1.
744
+ weights_sum = torch.sum(weights, 1).view(out_length, 1)
745
+ weights = weights / weights_sum.expand(out_length, P)
746
+
747
+ # If a column in weights is all zero, get rid of it. only consider the first and last column.
748
+ weights_zero_tmp = torch.sum((weights == 0), 0)
749
+ if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
750
+ indices = indices.narrow(1, 1, P - 2)
751
+ weights = weights.narrow(1, 1, P - 2)
752
+ if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
753
+ indices = indices.narrow(1, 0, P - 2)
754
+ weights = weights.narrow(1, 0, P - 2)
755
+ weights = weights.contiguous()
756
+ indices = indices.contiguous()
757
+ sym_len_s = -indices.min() + 1
758
+ sym_len_e = indices.max() - in_length
759
+ indices = indices + sym_len_s - 1
760
+ return weights, indices, int(sym_len_s), int(sym_len_e)
761
+
762
+
763
+ # --------------------------------------------
764
+ # imresize for tensor image [0, 1]
765
+ # --------------------------------------------
766
+ def imresize(img, scale, antialiasing=True):
767
+ # The scale must be the same for H and W
768
+ # input: img: pytorch tensor, CHW or HW [0,1]
769
+ # output: CHW or HW [0,1] w/o round
770
+ need_squeeze = img.dim() == 2
771
+ if need_squeeze:
772
+ img.unsqueeze_(0)
773
+ in_C, in_H, in_W = img.size()
774
+ out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
775
+ kernel_width = 4
776
+ kernel = 'cubic'
777
+
778
+ # MATLAB's imresize picks the dimension order so that the resize runs
779
+ # first along the dimension with the smaller scale factor.
780
+ # That reordering is not supported here: H is always processed first,
781
+ # then W.
782
+
783
+ # get weights and indices
784
+ weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
785
+ in_H, out_H, scale, kernel, kernel_width, antialiasing)
786
+ weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
787
+ in_W, out_W, scale, kernel, kernel_width, antialiasing)
788
+ # process H dimension
789
+ # symmetric copying
790
+ img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
791
+ img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
792
+
793
+ sym_patch = img[:, :sym_len_Hs, :]
794
+ inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
795
+ sym_patch_inv = sym_patch.index_select(1, inv_idx)
796
+ img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
797
+
798
+ sym_patch = img[:, -sym_len_He:, :]
799
+ inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
800
+ sym_patch_inv = sym_patch.index_select(1, inv_idx)
801
+ img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
802
+
803
+ out_1 = torch.FloatTensor(in_C, out_H, in_W)
804
+ kernel_width = weights_H.size(1)
805
+ for i in range(out_H):
806
+ idx = int(indices_H[i][0])
807
+ for j in range(out_C):
808
+ out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
809
+
810
+ # process W dimension
811
+ # symmetric copying
812
+ out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
813
+ out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
814
+
815
+ sym_patch = out_1[:, :, :sym_len_Ws]
816
+ inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
817
+ sym_patch_inv = sym_patch.index_select(2, inv_idx)
818
+ out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
819
+
820
+ sym_patch = out_1[:, :, -sym_len_We:]
821
+ inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
822
+ sym_patch_inv = sym_patch.index_select(2, inv_idx)
823
+ out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
824
+
825
+ out_2 = torch.FloatTensor(in_C, out_H, out_W)
826
+ kernel_width = weights_W.size(1)
827
+ for i in range(out_W):
828
+ idx = int(indices_W[i][0])
829
+ for j in range(out_C):
830
+ out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
831
+ if need_squeeze:
832
+ out_2.squeeze_()
833
+ return out_2
834
+
835
+
836
+ # --------------------------------------------
837
+ # imresize for numpy image [0, 1]
838
+ # --------------------------------------------
839
+ def imresize_np(img, scale, antialiasing=True):
840
+ # The scale must be the same for H and W
841
+ # input: img: Numpy, HWC or HW [0,1]
842
+ # output: HWC or HW [0,1] w/o round
843
+ img = torch.from_numpy(img)
844
+ need_squeeze = img.dim() == 2
845
+ if need_squeeze:
846
+ img.unsqueeze_(2)
847
+
848
+ in_H, in_W, in_C = img.size()
849
+ out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
850
+ kernel_width = 4
851
+ kernel = 'cubic'
852
+
853
+ # MATLAB's imresize picks the dimension order so that the resize runs
854
+ # first along the dimension with the smaller scale factor.
855
+ # That reordering is not supported here: H is always processed first,
856
+ # then W.
857
+
858
+ # get weights and indices
859
+ weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
860
+ in_H, out_H, scale, kernel, kernel_width, antialiasing)
861
+ weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
862
+ in_W, out_W, scale, kernel, kernel_width, antialiasing)
863
+ # process H dimension
864
+ # symmetric copying
865
+ img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
866
+ img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
867
+
868
+ sym_patch = img[:sym_len_Hs, :, :]
869
+ inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
870
+ sym_patch_inv = sym_patch.index_select(0, inv_idx)
871
+ img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
872
+
873
+ sym_patch = img[-sym_len_He:, :, :]
874
+ inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
875
+ sym_patch_inv = sym_patch.index_select(0, inv_idx)
876
+ img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
877
+
878
+ out_1 = torch.FloatTensor(out_H, in_W, in_C)
879
+ kernel_width = weights_H.size(1)
880
+ for i in range(out_H):
881
+ idx = int(indices_H[i][0])
882
+ for j in range(out_C):
883
+ out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
884
+
885
+ # process W dimension
886
+ # symmetric copying
887
+ out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
888
+ out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
889
+
890
+ sym_patch = out_1[:, :sym_len_Ws, :]
891
+ inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
892
+ sym_patch_inv = sym_patch.index_select(1, inv_idx)
893
+ out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
894
+
895
+ sym_patch = out_1[:, -sym_len_We:, :]
896
+ inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
897
+ sym_patch_inv = sym_patch.index_select(1, inv_idx)
898
+ out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
899
+
900
+ out_2 = torch.FloatTensor(out_H, out_W, in_C)
901
+ kernel_width = weights_W.size(1)
902
+ for i in range(out_W):
903
+ idx = int(indices_W[i][0])
904
+ for j in range(out_C):
905
+ out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
906
+ if need_squeeze:
907
+ out_2.squeeze_()
908
+
909
+ return out_2.numpy()
910
+
911
+
912
+ if __name__ == '__main__':
913
+ print('---')
914
+ # img = imread_uint('test.bmp', 3)
915
+ # img = uint2single(img)
916
+ # img_bicubic = imresize_np(img, 1/4)
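The commented lines in the __main__ guard sketch the intended round trip. A fuller, minimal version, assuming a local 'test.bmp' (the placeholder path from the comment) and the imread_uint / uint2single / single2uint helpers defined earlier in this module:

img = imread_uint('test.bmp', 3)   # HxWx3 uint8, RGB
img = uint2single(img)             # HxWx3 float32 in [0, 1]
img = modcrop(img, 4)              # make H and W divisible by the scale
img_lr = imresize_np(img, 1 / 4)   # MATLAB-style bicubic downscale
img_sr = imresize_np(img_lr, 4)    # bicubic upscale back to the original size
# clip before quantizing: bicubic interpolation can overshoot [0, 1]
print('PSNR after 4x down/up: {:.2f} dB'.format(
    calculate_psnr(single2uint(img.clip(0, 1)), single2uint(img_sr.clip(0, 1)))))
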
stable_diffusion/ldm/modules/x_transformer.py ADDED
@@ -0,0 +1,641 @@
1
+ """shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
2
+ import torch
3
+ from torch import nn, einsum
4
+ import torch.nn.functional as F
5
+ from functools import partial
6
+ from inspect import isfunction
7
+ from collections import namedtuple
8
+ from einops import rearrange, repeat, reduce
9
+
10
+ # constants
11
+
12
+ DEFAULT_DIM_HEAD = 64
13
+
14
+ Intermediates = namedtuple('Intermediates', [
15
+ 'pre_softmax_attn',
16
+ 'post_softmax_attn'
17
+ ])
18
+
19
+ LayerIntermediates = namedtuple('LayerIntermediates', [
20
+ 'hiddens',
21
+ 'attn_intermediates'
22
+ ])
23
+
24
+
25
+ class AbsolutePositionalEmbedding(nn.Module):
26
+ def __init__(self, dim, max_seq_len):
27
+ super().__init__()
28
+ self.emb = nn.Embedding(max_seq_len, dim)
29
+ self.init_()
30
+
31
+ def init_(self):
32
+ nn.init.normal_(self.emb.weight, std=0.02)
33
+
34
+ def forward(self, x):
35
+ n = torch.arange(x.shape[1], device=x.device)
36
+ return self.emb(n)[None, :, :]
37
+
38
+
39
+ class FixedPositionalEmbedding(nn.Module):
40
+ def __init__(self, dim):
41
+ super().__init__()
42
+ inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
43
+ self.register_buffer('inv_freq', inv_freq)
44
+
45
+ def forward(self, x, seq_dim=1, offset=0):
46
+ t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
47
+ sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
48
+ emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
49
+ return emb[None, :, :]
50
+
51
+
52
+ # helpers
53
+
54
+ def exists(val):
55
+ return val is not None
56
+
57
+
58
+ def default(val, d):
59
+ if exists(val):
60
+ return val
61
+ return d() if isfunction(d) else d
62
+
63
+
64
+ def always(val):
65
+ def inner(*args, **kwargs):
66
+ return val
67
+ return inner
68
+
69
+
70
+ def not_equals(val):
71
+ def inner(x):
72
+ return x != val
73
+ return inner
74
+
75
+
76
+ def equals(val):
77
+ def inner(x):
78
+ return x == val
79
+ return inner
80
+
81
+
82
+ def max_neg_value(tensor):
83
+ return -torch.finfo(tensor.dtype).max
84
+
85
+
86
+ # keyword argument helpers
87
+
88
+ def pick_and_pop(keys, d):
89
+ values = list(map(lambda key: d.pop(key), keys))
90
+ return dict(zip(keys, values))
91
+
92
+
93
+ def group_dict_by_key(cond, d):
94
+ return_val = [dict(), dict()]
95
+ for key in d.keys():
96
+ match = bool(cond(key))
97
+ ind = int(not match)
98
+ return_val[ind][key] = d[key]
99
+ return (*return_val,)
100
+
101
+
102
+ def string_begins_with(prefix, str):
103
+ return str.startswith(prefix)
104
+
105
+
106
+ def group_by_key_prefix(prefix, d):
107
+ return group_dict_by_key(partial(string_begins_with, prefix), d)
108
+
109
+
110
+ def groupby_prefix_and_trim(prefix, d):
111
+ kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
112
+ kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
113
+ return kwargs_without_prefix, kwargs
114
+
115
+
116
+ # classes
117
+ class Scale(nn.Module):
118
+ def __init__(self, value, fn):
119
+ super().__init__()
120
+ self.value = value
121
+ self.fn = fn
122
+
123
+ def forward(self, x, **kwargs):
124
+ x, *rest = self.fn(x, **kwargs)
125
+ return (x * self.value, *rest)
126
+
127
+
128
+ class Rezero(nn.Module):
129
+ def __init__(self, fn):
130
+ super().__init__()
131
+ self.fn = fn
132
+ self.g = nn.Parameter(torch.zeros(1))
133
+
134
+ def forward(self, x, **kwargs):
135
+ x, *rest = self.fn(x, **kwargs)
136
+ return (x * self.g, *rest)
137
+
138
+
139
+ class ScaleNorm(nn.Module):
140
+ def __init__(self, dim, eps=1e-5):
141
+ super().__init__()
142
+ self.scale = dim ** -0.5
143
+ self.eps = eps
144
+ self.g = nn.Parameter(torch.ones(1))
145
+
146
+ def forward(self, x):
147
+ norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
148
+ return x / norm.clamp(min=self.eps) * self.g
149
+
150
+
151
+ class RMSNorm(nn.Module):
152
+ def __init__(self, dim, eps=1e-8):
153
+ super().__init__()
154
+ self.scale = dim ** -0.5
155
+ self.eps = eps
156
+ self.g = nn.Parameter(torch.ones(dim))
157
+
158
+ def forward(self, x):
159
+ norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
160
+ return x / norm.clamp(min=self.eps) * self.g
161
+
162
+
163
+ class Residual(nn.Module):
164
+ def forward(self, x, residual):
165
+ return x + residual
166
+
167
+
168
+ class GRUGating(nn.Module):
169
+ def __init__(self, dim):
170
+ super().__init__()
171
+ self.gru = nn.GRUCell(dim, dim)
172
+
173
+ def forward(self, x, residual):
174
+ gated_output = self.gru(
175
+ rearrange(x, 'b n d -> (b n) d'),
176
+ rearrange(residual, 'b n d -> (b n) d')
177
+ )
178
+
179
+ return gated_output.reshape_as(x)
180
+
181
+
182
+ # feedforward
183
+
184
+ class GEGLU(nn.Module):
185
+ def __init__(self, dim_in, dim_out):
186
+ super().__init__()
187
+ self.proj = nn.Linear(dim_in, dim_out * 2)
188
+
189
+ def forward(self, x):
190
+ x, gate = self.proj(x).chunk(2, dim=-1)
191
+ return x * F.gelu(gate)
192
+
193
+
194
+ class FeedForward(nn.Module):
195
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
196
+ super().__init__()
197
+ inner_dim = int(dim * mult)
198
+ dim_out = default(dim_out, dim)
199
+ project_in = nn.Sequential(
200
+ nn.Linear(dim, inner_dim),
201
+ nn.GELU()
202
+ ) if not glu else GEGLU(dim, inner_dim)
203
+
204
+ self.net = nn.Sequential(
205
+ project_in,
206
+ nn.Dropout(dropout),
207
+ nn.Linear(inner_dim, dim_out)
208
+ )
209
+
210
+ def forward(self, x):
211
+ return self.net(x)
212
+
213
+
214
+ # attention.
215
+ class Attention(nn.Module):
216
+ def __init__(
217
+ self,
218
+ dim,
219
+ dim_head=DEFAULT_DIM_HEAD,
220
+ heads=8,
221
+ causal=False,
222
+ mask=None,
223
+ talking_heads=False,
224
+ sparse_topk=None,
225
+ use_entmax15=False,
226
+ num_mem_kv=0,
227
+ dropout=0.,
228
+ on_attn=False
229
+ ):
230
+ super().__init__()
231
+ if use_entmax15:
232
+ raise NotImplementedError("Check out entmax activation instead of softmax activation!")
233
+ self.scale = dim_head ** -0.5
234
+ self.heads = heads
235
+ self.causal = causal
236
+ self.mask = mask
237
+
238
+ inner_dim = dim_head * heads
239
+
240
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
241
+ self.to_k = nn.Linear(dim, inner_dim, bias=False)
242
+ self.to_v = nn.Linear(dim, inner_dim, bias=False)
243
+ self.dropout = nn.Dropout(dropout)
244
+
245
+ # talking heads
246
+ self.talking_heads = talking_heads
247
+ if talking_heads:
248
+ self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
249
+ self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
250
+
251
+ # explicit topk sparse attention
252
+ self.sparse_topk = sparse_topk
253
+
254
+ # entmax
255
+ #self.attn_fn = entmax15 if use_entmax15 else F.softmax
256
+ self.attn_fn = F.softmax
257
+
258
+ # add memory key / values
259
+ self.num_mem_kv = num_mem_kv
260
+ if num_mem_kv > 0:
261
+ self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
262
+ self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
263
+
264
+ # attention on attention
265
+ self.attn_on_attn = on_attn
266
+ self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
267
+
268
+ def forward(
269
+ self,
270
+ x,
271
+ context=None,
272
+ mask=None,
273
+ context_mask=None,
274
+ rel_pos=None,
275
+ sinusoidal_emb=None,
276
+ prev_attn=None,
277
+ mem=None
278
+ ):
279
+ b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
280
+ kv_input = default(context, x)
281
+
282
+ q_input = x
283
+ k_input = kv_input
284
+ v_input = kv_input
285
+
286
+ if exists(mem):
287
+ k_input = torch.cat((mem, k_input), dim=-2)
288
+ v_input = torch.cat((mem, v_input), dim=-2)
289
+
290
+ if exists(sinusoidal_emb):
291
+ # in shortformer, the query would start at a position offset depending on the past cached memory
292
+ offset = k_input.shape[-2] - q_input.shape[-2]
293
+ q_input = q_input + sinusoidal_emb(q_input, offset=offset)
294
+ k_input = k_input + sinusoidal_emb(k_input)
295
+
296
+ q = self.to_q(q_input)
297
+ k = self.to_k(k_input)
298
+ v = self.to_v(v_input)
299
+
300
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
301
+
302
+ input_mask = None
303
+ if any(map(exists, (mask, context_mask))):
304
+ q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
305
+ k_mask = q_mask if not exists(context) else context_mask
306
+ k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
307
+ q_mask = rearrange(q_mask, 'b i -> b () i ()')
308
+ k_mask = rearrange(k_mask, 'b j -> b () () j')
309
+ input_mask = q_mask * k_mask
310
+
311
+ if self.num_mem_kv > 0:
312
+ mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
313
+ k = torch.cat((mem_k, k), dim=-2)
314
+ v = torch.cat((mem_v, v), dim=-2)
315
+ if exists(input_mask):
316
+ input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
317
+
318
+ dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
319
+ mask_value = max_neg_value(dots)
320
+
321
+ if exists(prev_attn):
322
+ dots = dots + prev_attn
323
+
324
+ pre_softmax_attn = dots
325
+
326
+ if talking_heads:
327
+ dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
328
+
329
+ if exists(rel_pos):
330
+ dots = rel_pos(dots)
331
+
332
+ if exists(input_mask):
333
+ dots.masked_fill_(~input_mask, mask_value)
334
+ del input_mask
335
+
336
+ if self.causal:
337
+ i, j = dots.shape[-2:]
338
+ r = torch.arange(i, device=device)
339
+ mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
340
+ mask = F.pad(mask, (j - i, 0), value=False)
341
+ dots.masked_fill_(mask, mask_value)
342
+ del mask
343
+
344
+ if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
345
+ top, _ = dots.topk(self.sparse_topk, dim=-1)
346
+ vk = top[..., -1].unsqueeze(-1).expand_as(dots)
347
+ mask = dots < vk
348
+ dots.masked_fill_(mask, mask_value)
349
+ del mask
350
+
351
+ attn = self.attn_fn(dots, dim=-1)
352
+ post_softmax_attn = attn
353
+
354
+ attn = self.dropout(attn)
355
+
356
+ if talking_heads:
357
+ attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
358
+
359
+ out = einsum('b h i j, b h j d -> b h i d', attn, v)
360
+ out = rearrange(out, 'b h n d -> b n (h d)')
361
+
362
+ intermediates = Intermediates(
363
+ pre_softmax_attn=pre_softmax_attn,
364
+ post_softmax_attn=post_softmax_attn
365
+ )
366
+
367
+ return self.to_out(out), intermediates
368
+
369
+
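A minimal usage sketch for the Attention block, with illustrative sizes; it returns both the output and the pre-/post-softmax attention maps:

import torch

attn = Attention(dim=512, heads=8)  # self-attention: 8 heads x 64 dims each
x = torch.randn(2, 16, 512)         # (batch, seq, dim)
out, inter = attn(x)                # out: (2, 16, 512)
print(out.shape, inter.post_softmax_attn.shape)  # -> (2, 16, 512), (2, 8, 16, 16)
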
370
+ class AttentionLayers(nn.Module):
371
+ def __init__(
372
+ self,
373
+ dim,
374
+ depth,
375
+ heads=8,
376
+ causal=False,
377
+ cross_attend=False,
378
+ only_cross=False,
379
+ use_scalenorm=False,
380
+ use_rmsnorm=False,
381
+ use_rezero=False,
382
+ rel_pos_num_buckets=32,
383
+ rel_pos_max_distance=128,
384
+ position_infused_attn=False,
385
+ custom_layers=None,
386
+ sandwich_coef=None,
387
+ par_ratio=None,
388
+ residual_attn=False,
389
+ cross_residual_attn=False,
390
+ macaron=False,
391
+ pre_norm=True,
392
+ gate_residual=False,
393
+ **kwargs
394
+ ):
395
+ super().__init__()
396
+ ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
397
+ attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
398
+
399
+ dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
400
+
401
+ self.dim = dim
402
+ self.depth = depth
403
+ self.layers = nn.ModuleList([])
404
+
405
+ self.has_pos_emb = position_infused_attn
406
+ self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
407
+ self.rotary_pos_emb = always(None)
408
+
409
+ assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
410
+ self.rel_pos = None
411
+
412
+ self.pre_norm = pre_norm
413
+
414
+ self.residual_attn = residual_attn
415
+ self.cross_residual_attn = cross_residual_attn
416
+
417
+ norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
418
+ norm_class = RMSNorm if use_rmsnorm else norm_class
419
+ norm_fn = partial(norm_class, dim)
420
+
421
+ norm_fn = nn.Identity if use_rezero else norm_fn
422
+ branch_fn = Rezero if use_rezero else None
423
+
424
+ if cross_attend and not only_cross:
425
+ default_block = ('a', 'c', 'f')
426
+ elif cross_attend and only_cross:
427
+ default_block = ('c', 'f')
428
+ else:
429
+ default_block = ('a', 'f')
430
+
431
+ if macaron:
432
+ default_block = ('f',) + default_block
433
+
434
+ if exists(custom_layers):
435
+ layer_types = custom_layers
436
+ elif exists(par_ratio):
437
+ par_depth = depth * len(default_block)
438
+ assert 1 < par_ratio <= par_depth, 'par ratio out of range'
439
+ default_block = tuple(filter(not_equals('f'), default_block))
440
+ par_attn = par_depth // par_ratio
441
+ depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
442
+ par_width = (depth_cut + depth_cut // par_attn) // par_attn
443
+ assert len(default_block) <= par_width, 'default block is too large for par_ratio'
444
+ par_block = default_block + ('f',) * (par_width - len(default_block))
445
+ par_head = par_block * par_attn
446
+ layer_types = par_head + ('f',) * (par_depth - len(par_head))
447
+ elif exists(sandwich_coef):
448
+ assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be at most the depth'
449
+ layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
450
+ else:
451
+ layer_types = default_block * depth
452
+
453
+ self.layer_types = layer_types
454
+ self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
455
+
456
+ for layer_type in self.layer_types:
457
+ if layer_type == 'a':
458
+ layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
459
+ elif layer_type == 'c':
460
+ layer = Attention(dim, heads=heads, **attn_kwargs)
461
+ elif layer_type == 'f':
462
+ layer = FeedForward(dim, **ff_kwargs)
463
+ layer = layer if not macaron else Scale(0.5, layer)
464
+ else:
465
+ raise ValueError(f'invalid layer type {layer_type}')
466
+
467
+ if isinstance(layer, Attention) and exists(branch_fn):
468
+ layer = branch_fn(layer)
469
+
470
+ if gate_residual:
471
+ residual_fn = GRUGating(dim)
472
+ else:
473
+ residual_fn = Residual()
474
+
475
+ self.layers.append(nn.ModuleList([
476
+ norm_fn(),
477
+ layer,
478
+ residual_fn
479
+ ]))
480
+
481
+ def forward(
482
+ self,
483
+ x,
484
+ context=None,
485
+ mask=None,
486
+ context_mask=None,
487
+ mems=None,
488
+ return_hiddens=False
489
+ ):
490
+ hiddens = []
491
+ intermediates = []
492
+ prev_attn = None
493
+ prev_cross_attn = None
494
+
495
+ mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
496
+
497
+ for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
498
+ is_last = ind == (len(self.layers) - 1)
499
+
500
+ if layer_type == 'a':
501
+ hiddens.append(x)
502
+ layer_mem = mems.pop(0)
503
+
504
+ residual = x
505
+
506
+ if self.pre_norm:
507
+ x = norm(x)
508
+
509
+ if layer_type == 'a':
510
+ out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
511
+ prev_attn=prev_attn, mem=layer_mem)
512
+ elif layer_type == 'c':
513
+ out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
514
+ elif layer_type == 'f':
515
+ out = block(x)
516
+
517
+ x = residual_fn(out, residual)
518
+
519
+ if layer_type in ('a', 'c'):
520
+ intermediates.append(inter)
521
+
522
+ if layer_type == 'a' and self.residual_attn:
523
+ prev_attn = inter.pre_softmax_attn
524
+ elif layer_type == 'c' and self.cross_residual_attn:
525
+ prev_cross_attn = inter.pre_softmax_attn
526
+
527
+ if not self.pre_norm and not is_last:
528
+ x = norm(x)
529
+
530
+ if return_hiddens:
531
+ intermediates = LayerIntermediates(
532
+ hiddens=hiddens,
533
+ attn_intermediates=intermediates
534
+ )
535
+
536
+ return x, intermediates
537
+
538
+ return x
539
+
540
+
541
+ class Encoder(AttentionLayers):
542
+ def __init__(self, **kwargs):
543
+ assert 'causal' not in kwargs, 'cannot set causality on encoder'
544
+ super().__init__(causal=False, **kwargs)
545
+
546
+
547
+
548
+ class TransformerWrapper(nn.Module):
549
+ def __init__(
550
+ self,
551
+ *,
552
+ num_tokens,
553
+ max_seq_len,
554
+ attn_layers,
555
+ emb_dim=None,
556
+ max_mem_len=0.,
557
+ emb_dropout=0.,
558
+ num_memory_tokens=None,
559
+ tie_embedding=False,
560
+ use_pos_emb=True
561
+ ):
562
+ super().__init__()
563
+ assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
564
+
565
+ dim = attn_layers.dim
566
+ emb_dim = default(emb_dim, dim)
567
+
568
+ self.max_seq_len = max_seq_len
569
+ self.max_mem_len = max_mem_len
570
+ self.num_tokens = num_tokens
571
+
572
+ self.token_emb = nn.Embedding(num_tokens, emb_dim)
573
+ self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
574
+ use_pos_emb and not attn_layers.has_pos_emb) else always(0)
575
+ self.emb_dropout = nn.Dropout(emb_dropout)
576
+
577
+ self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
578
+ self.attn_layers = attn_layers
579
+ self.norm = nn.LayerNorm(dim)
580
+
581
+ self.init_()
582
+
583
+ self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
584
+
585
+ # memory tokens (like [cls]) from Memory Transformers paper
586
+ num_memory_tokens = default(num_memory_tokens, 0)
587
+ self.num_memory_tokens = num_memory_tokens
588
+ if num_memory_tokens > 0:
589
+ self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
590
+
591
+ # let funnel encoder know number of memory tokens, if specified
592
+ if hasattr(attn_layers, 'num_memory_tokens'):
593
+ attn_layers.num_memory_tokens = num_memory_tokens
594
+
595
+ def init_(self):
596
+ nn.init.normal_(self.token_emb.weight, std=0.02)
597
+
598
+ def forward(
599
+ self,
600
+ x,
601
+ return_embeddings=False,
602
+ mask=None,
603
+ return_mems=False,
604
+ return_attn=False,
605
+ mems=None,
606
+ **kwargs
607
+ ):
608
+ b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
609
+ x = self.token_emb(x)
610
+ x += self.pos_emb(x)
611
+ x = self.emb_dropout(x)
612
+
613
+ x = self.project_emb(x)
614
+
615
+ if num_mem > 0:
616
+ mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
617
+ x = torch.cat((mem, x), dim=1)
618
+
619
+ # auto-handle masking after appending memory tokens
620
+ if exists(mask):
621
+ mask = F.pad(mask, (num_mem, 0), value=True)
622
+
623
+ x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
624
+ x = self.norm(x)
625
+
626
+ mem, x = x[:, :num_mem], x[:, num_mem:]
627
+
628
+ out = self.to_logits(x) if not return_embeddings else x
629
+
630
+ if return_mems:
631
+ hiddens = intermediates.hiddens
632
+ new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
633
+ new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
634
+ return out, new_mems
635
+
636
+ if return_attn:
637
+ attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
638
+ return out, attn_maps
639
+
640
+ return out
641
+
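A minimal usage sketch for the wrapper, with toy sizes chosen purely for illustration (mirroring the upstream x-transformers API credited at the top of the file):

import torch

model = TransformerWrapper(
    num_tokens=1000,                                # vocabulary size
    max_seq_len=77,                                 # maximum sequence length
    attn_layers=Encoder(dim=512, depth=4, heads=8)
)

tokens = torch.randint(0, 1000, (2, 77))     # batch of token ids
emb = model(tokens, return_embeddings=True)  # (2, 77, 512) sequence embeddings
logits = model(tokens)                       # (2, 77, 1000) per-token logits
print(emb.shape, logits.shape)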