Commit 56dd783
Parent(s): 10de23d
'in1'
This view is limited to 50 files because it contains too many changes. See the raw diff for the remaining files.
- .eslintignore +4 -0
- .eslintrc.js +98 -0
- .git-blame-ignore-revs +2 -0
- .gitattributes +55 -35
- .gitignore +1 -0
- .pylintrc +3 -0
- CITATION.cff +7 -0
- CODEOWNERS +12 -0
- __pycache__/launch.cpython-310.pyc +0 -0
- __pycache__/webui.cpython-310.pyc +0 -0
- configs/alt-diffusion-inference.yaml +72 -0
- configs/alt-diffusion-m18-inference.yaml +73 -0
- configs/instruct-pix2pix.yaml +98 -0
- configs/v1-inference.yaml +70 -0
- configs/v1-inpainting-inference.yaml +70 -0
- environment-wsl2.yaml +11 -0
- extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/ldsr_model_arch.py +250 -0
- extensions-builtin/LDSR/preload.py +6 -0
- extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/scripts/ldsr_model.py +68 -0
- extensions-builtin/LDSR/sd_hijack_autoencoder.py +293 -0
- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +1443 -0
- extensions-builtin/LDSR/vqvae_quantize.py +147 -0
- extensions-builtin/Lora/__pycache__/extra_networks_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lora_logger.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lora_patches.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lyco_helpers.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_full.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_glora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_hada.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_ia3.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_lokr.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_norm.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_oft.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/networks.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/preload.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/ui_edit_user_metadata.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/ui_extra_networks_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/extra_networks_lora.py +67 -0
- extensions-builtin/Lora/lora.py +9 -0
- extensions-builtin/Lora/lora_logger.py +33 -0
- extensions-builtin/Lora/lora_patches.py +31 -0
.eslintignore
ADDED
@@ -0,0 +1,4 @@
+extensions
+extensions-disabled
+repositories
+venv

.eslintrc.js
ADDED
@@ -0,0 +1,98 @@
+/* global module */
+module.exports = {
+    env: {
+        browser: true,
+        es2021: true,
+    },
+    extends: "eslint:recommended",
+    parserOptions: {
+        ecmaVersion: "latest",
+    },
+    rules: {
+        "arrow-spacing": "error",
+        "block-spacing": "error",
+        "brace-style": "error",
+        "comma-dangle": ["error", "only-multiline"],
+        "comma-spacing": "error",
+        "comma-style": ["error", "last"],
+        "curly": ["error", "multi-line", "consistent"],
+        "eol-last": "error",
+        "func-call-spacing": "error",
+        "function-call-argument-newline": ["error", "consistent"],
+        "function-paren-newline": ["error", "consistent"],
+        "indent": ["error", 4],
+        "key-spacing": "error",
+        "keyword-spacing": "error",
+        "linebreak-style": ["error", "unix"],
+        "no-extra-semi": "error",
+        "no-mixed-spaces-and-tabs": "error",
+        "no-multi-spaces": "error",
+        "no-redeclare": ["error", {builtinGlobals: false}],
+        "no-trailing-spaces": "error",
+        "no-unused-vars": "off",
+        "no-whitespace-before-property": "error",
+        "object-curly-newline": ["error", {consistent: true, multiline: true}],
+        "object-curly-spacing": ["error", "never"],
+        "operator-linebreak": ["error", "after"],
+        "quote-props": ["error", "consistent-as-needed"],
+        "semi": ["error", "always"],
+        "semi-spacing": "error",
+        "semi-style": ["error", "last"],
+        "space-before-blocks": "error",
+        "space-before-function-paren": ["error", "never"],
+        "space-in-parens": ["error", "never"],
+        "space-infix-ops": "error",
+        "space-unary-ops": "error",
+        "switch-colon-spacing": "error",
+        "template-curly-spacing": ["error", "never"],
+        "unicode-bom": "error",
+    },
+    globals: {
+        //script.js
+        gradioApp: "readonly",
+        executeCallbacks: "readonly",
+        onAfterUiUpdate: "readonly",
+        onOptionsChanged: "readonly",
+        onUiLoaded: "readonly",
+        onUiUpdate: "readonly",
+        uiCurrentTab: "writable",
+        uiElementInSight: "readonly",
+        uiElementIsVisible: "readonly",
+        //ui.js
+        opts: "writable",
+        all_gallery_buttons: "readonly",
+        selected_gallery_button: "readonly",
+        selected_gallery_index: "readonly",
+        switch_to_txt2img: "readonly",
+        switch_to_img2img_tab: "readonly",
+        switch_to_img2img: "readonly",
+        switch_to_sketch: "readonly",
+        switch_to_inpaint: "readonly",
+        switch_to_inpaint_sketch: "readonly",
+        switch_to_extras: "readonly",
+        get_tab_index: "readonly",
+        create_submit_args: "readonly",
+        restart_reload: "readonly",
+        updateInput: "readonly",
+        onEdit: "readonly",
+        //extraNetworks.js
+        requestGet: "readonly",
+        popup: "readonly",
+        // from python
+        localization: "readonly",
+        // progressbar.js
+        randomId: "readonly",
+        requestProgress: "readonly",
+        // imageviewer.js
+        modalPrevImage: "readonly",
+        modalNextImage: "readonly",
+        // token-counters.js
+        setupTokenCounters: "readonly",
+        // localStorage.js
+        localSet: "readonly",
+        localGet: "readonly",
+        localRemove: "readonly",
+        // resizeHandle.js
+        setupResizeHandle: "writable"
+    }
+};

.git-blame-ignore-revs
ADDED
@@ -0,0 +1,2 @@
+# Apply ESlint
+9c54b78d9dde5601e916f308d9a9d6953ec39430

.gitattributes
CHANGED
@@ -1,35 +1,55 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z -text
+*.arrow -text
+*.bin -text
+*.bz2 -text
+*.ckpt -text
+*.ftz -text
+*.gz -text
+*.h5 -text
+*.joblib -text
+*.lfs.* -text
+*.lz4 -text
+*.mlmodel -text
+*.model -text
+*.msgpack -text
+*.npy -text
+*.npz -text
+*.onnx -text
+*.ot -text
+*.parquet -text
+*.pb -text
+*.pickle -text
+*.pkl -text
+*.pt -text
+*.pth -text
+*.rar -text
+*.safetensors -text
+saved_model/**/* -text
+*.tar.* -text
+*.tar -text
+*.tflite -text
+*.tgz -text
+*.wasm -text
+*.xz -text
+*.zip -text
+*.zst -text
+*tfevents* -text
+# Audio files - uncompressed
+*.pcm -text
+*.sam -text
+*.raw -text
+# Audio files - compressed
+*.aac -text
+*.flac -text
+*.mp3 -text
+*.ogg -text
+*.wav -text
+# Image files - uncompressed
+*.bmp -text
+*.gif -text
+*.png -text
+*.tiff -text
+# Image files - compressed
+*.jpg -text
+*.jpeg -text
+*.webp -text

.gitignore
ADDED
@@ -0,0 +1 @@
+/venv

.pylintrc
ADDED
@@ -0,0 +1,3 @@
+# See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html
+[MESSAGES CONTROL]
+disable=C,R,W,E,I

CITATION.cff
ADDED
@@ -0,0 +1,7 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+  - given-names: AUTOMATIC1111
+title: "Stable Diffusion Web UI"
+date-released: 2022-08-22
+url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"

CODEOWNERS
ADDED
@@ -0,0 +1,12 @@
+* @AUTOMATIC1111
+
+# if you were managing a localization and were removed from this file, this is because
+# the intended way to do localizations now is via extensions. See:
+# https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions
+# Make a repo with your localization and since you are still listed as a collaborator
+# you can add it to the wiki page yourself. This change is because some people complained
+# the git commit log is cluttered with things unrelated to almost everyone and
+# because I believe this is the best overall for the project to handle localizations almost
+# entirely without my oversight.
+
+

__pycache__/launch.cpython-310.pyc
ADDED
Binary file (1.06 kB)

__pycache__/webui.cpython-310.pyc
ADDED
Binary file (3.9 kB)

configs/alt-diffusion-inference.yaml
ADDED
@@ -0,0 +1,72 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: modules.xlmr.BertSeriesModelWithTransformation
+      params:
+        name: "XLMR-Large"

configs/alt-diffusion-m18-inference.yaml
ADDED
@@ -0,0 +1,73 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_head_channels: 64
+        use_spatial_transformer: True
+        use_linear_in_transformer: True
+        transformer_depth: 1
+        context_dim: 1024
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: modules.xlmr_m18.BertSeriesModelWithTransformation
+      params:
+        name: "XLMR-Large"

configs/instruct-pix2pix.yaml
ADDED
@@ -0,0 +1,98 @@
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# See more details in LICENSE.
+
+model:
+  base_learning_rate: 1.0e-04
+  target: modules.models.diffusion.ddpm_edit.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: edited
+    cond_stage_key: edit
+    # image_size: 64
+    # image_size: 32
+    image_size: 16
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: hybrid
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: false
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 0 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 8
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+data:
+  target: main.DataModuleFromConfig
+  params:
+    batch_size: 128
+    num_workers: 1
+    wrap: false
+    validation:
+      target: edit_dataset.EditDataset
+      params:
+        path: data/clip-filtered-dataset
+        cache_dir: data/
+        cache_name: data_10k
+        split: val
+        min_text_sim: 0.2
+        min_image_sim: 0.75
+        min_direction_sim: 0.2
+        max_samples_per_prompt: 1
+        min_resize_res: 512
+        max_resize_res: 512
+        crop_res: 512
+        output_as_edit: False
+        real_input: True

configs/v1-inference.yaml
ADDED
@@ -0,0 +1,70 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

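All of the config files in this commit follow the same `target`/`params` convention, and nothing in the repo parses them by hand: `ldsr_model_arch.py` later in this diff loads its YAML with OmegaConf and resolves `target` through `instantiate_from_config`. A minimal sketch of that loading path for `v1-inference.yaml`; the file paths are placeholders and the bundled `ldm` package is assumed importable:

import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/v1-inference.yaml")

# instantiate_from_config imports the dotted path in config.model.target
# (ldm.models.diffusion.ddpm.LatentDiffusion) and calls it with **config.model.params
model = instantiate_from_config(config.model)

# weights come separately, from a checkpoint's state_dict ("model.ckpt" is a placeholder)
state_dict = torch.load("model.ckpt", map_location="cpu")["state_dict"]
model.load_state_dict(state_dict, strict=False)
model.eval()
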
configs/v1-inpainting-inference.yaml
ADDED
@@ -0,0 +1,70 @@
+model:
+  base_learning_rate: 7.5e-05
+  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: hybrid # important
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    finetune_keys: null
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

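The `in_channels: 9` comment above is the structural difference from `v1-inference.yaml`: with `conditioning_key: hybrid`, the UNet sees the noisy latent concatenated channel-wise with the image conditioning rather than the bare 4-channel latent (the same reason `instruct-pix2pix.yaml` uses `in_channels: 8`, with 4 + 4 and no mask channel). A toy check of the arithmetic; the 64x64 latent size assumes a 512x512 input and the usual 8x VAE downsampling:

import torch

noisy_latent = torch.randn(1, 4, 64, 64)       # 4 "data" channels
downscaled_image = torch.randn(1, 4, 64, 64)   # 4 channels: encoded masked image
mask = torch.randn(1, 1, 64, 64)               # 1 channel: downscaled mask

# hybrid conditioning concatenates along the channel dimension
unet_input = torch.cat([noisy_latent, downscaled_image, mask], dim=1)
assert unet_input.shape[1] == 9                # matches in_channels: 9
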
environment-wsl2.yaml
ADDED
@@ -0,0 +1,11 @@
+name: automatic
+channels:
+  - pytorch
+  - defaults
+dependencies:
+  - python=3.10
+  - pip=23.0
+  - cudatoolkit=11.8
+  - pytorch=2.0
+  - torchvision=0.15
+  - numpy=1.23

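For context: this is an ordinary Conda environment spec, so it would typically be applied with `conda env create -f environment-wsl2.yaml`, producing an environment named `automatic`; that usage is implied by the file format rather than stated anywhere in the commit.
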
extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc
ADDED
Binary file (6.68 kB)

extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (477 Bytes)

extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc
ADDED
Binary file (8.91 kB)

extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc
ADDED
Binary file (42.4 kB)

extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc
ADDED
Binary file (3.63 kB)

extensions-builtin/LDSR/ldsr_model_arch.py
ADDED
@@ -0,0 +1,250 @@
+import os
+import gc
+import time
+
+import numpy as np
+import torch
+import torchvision
+from PIL import Image
+from einops import rearrange, repeat
+from omegaconf import OmegaConf
+import safetensors.torch
+
+from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.util import instantiate_from_config, ismap
+from modules import shared, sd_hijack, devices
+
+cached_ldsr_model: torch.nn.Module = None
+
+
+# Create LDSR Class
+class LDSR:
+    def load_model_from_config(self, half_attention):
+        global cached_ldsr_model
+
+        if shared.opts.ldsr_cached and cached_ldsr_model is not None:
+            print("Loading model from cache")
+            model: torch.nn.Module = cached_ldsr_model
+        else:
+            print(f"Loading model from {self.modelPath}")
+            _, extension = os.path.splitext(self.modelPath)
+            if extension.lower() == ".safetensors":
+                pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
+            else:
+                pl_sd = torch.load(self.modelPath, map_location="cpu")
+            sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
+            config = OmegaConf.load(self.yamlPath)
+            config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
+            model: torch.nn.Module = instantiate_from_config(config.model)
+            model.load_state_dict(sd, strict=False)
+            model = model.to(shared.device)
+            if half_attention:
+                model = model.half()
+            if shared.cmd_opts.opt_channelslast:
+                model = model.to(memory_format=torch.channels_last)
+
+            sd_hijack.model_hijack.hijack(model)  # apply optimization
+            model.eval()
+
+            if shared.opts.ldsr_cached:
+                cached_ldsr_model = model
+
+        return {"model": model}
+
+    def __init__(self, model_path, yaml_path):
+        self.modelPath = model_path
+        self.yamlPath = yaml_path
+
+    @staticmethod
+    def run(model, selected_path, custom_steps, eta):
+        example = get_cond(selected_path)
+
+        n_runs = 1
+        guider = None
+        ckwargs = None
+        ddim_use_x0_pred = False
+        temperature = 1.
+        eta = eta
+        custom_shape = None
+
+        height, width = example["image"].shape[1:3]
+        split_input = height >= 128 and width >= 128
+
+        if split_input:
+            ks = 128
+            stride = 64
+            vqf = 4  #
+            model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
+                                        "vqf": vqf,
+                                        "patch_distributed_vq": True,
+                                        "tie_braker": False,
+                                        "clip_max_weight": 0.5,
+                                        "clip_min_weight": 0.01,
+                                        "clip_max_tie_weight": 0.5,
+                                        "clip_min_tie_weight": 0.01}
+        else:
+            if hasattr(model, "split_input_params"):
+                delattr(model, "split_input_params")
+
+        x_t = None
+        logs = None
+        for _ in range(n_runs):
+            if custom_shape is not None:
+                x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
+                x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
+
+            logs = make_convolutional_sample(example, model,
+                                             custom_steps=custom_steps,
+                                             eta=eta, quantize_x0=False,
+                                             custom_shape=custom_shape,
+                                             temperature=temperature, noise_dropout=0.,
+                                             corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
+                                             ddim_use_x0_pred=ddim_use_x0_pred
+                                             )
+        return logs
+
+    def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
+        model = self.load_model_from_config(half_attention)
+
+        # Run settings
+        diffusion_steps = int(steps)
+        eta = 1.0
+
+
+        gc.collect()
+        devices.torch_gc()
+
+        im_og = image
+        width_og, height_og = im_og.size
+        # If we can adjust the max upscale size, then the 4 below should be our variable
+        down_sample_rate = target_scale / 4
+        wd = width_og * down_sample_rate
+        hd = height_og * down_sample_rate
+        width_downsampled_pre = int(np.ceil(wd))
+        height_downsampled_pre = int(np.ceil(hd))
+
+        if down_sample_rate != 1:
+            print(
+                f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
+            im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
+        else:
+            print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
+
+        # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
+        pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
+        im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
+
+        logs = self.run(model["model"], im_padded, diffusion_steps, eta)
+
+        sample = logs["sample"]
+        sample = sample.detach().cpu()
+        sample = torch.clamp(sample, -1., 1.)
+        sample = (sample + 1.) / 2. * 255
+        sample = sample.numpy().astype(np.uint8)
+        sample = np.transpose(sample, (0, 2, 3, 1))
+        a = Image.fromarray(sample[0])
+
+        # remove padding
+        a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
+
+        del model
+        gc.collect()
+        devices.torch_gc()
+
+        return a
+
+
+def get_cond(selected_path):
+    example = {}
+    up_f = 4
+    c = selected_path.convert('RGB')
+    c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
+    c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
+                                                    antialias=True)
+    c_up = rearrange(c_up, '1 c h w -> 1 h w c')
+    c = rearrange(c, '1 c h w -> 1 h w c')
+    c = 2. * c - 1.
+
+    c = c.to(shared.device)
+    example["LR_image"] = c
+    example["image"] = c_up
+
+    return example
+
+
+@torch.no_grad()
+def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
+                    mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
+                    corrector_kwargs=None, x_t=None
+                    ):
+    ddim = DDIMSampler(model)
+    bs = shape[0]
+    shape = shape[1:]
+    print(f"Sampling with eta = {eta}; steps: {steps}")
+    samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
+                                         normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
+                                         mask=mask, x0=x0, temperature=temperature, verbose=False,
+                                         score_corrector=score_corrector,
+                                         corrector_kwargs=corrector_kwargs, x_t=x_t)
+
+    return samples, intermediates
+
+
+@torch.no_grad()
+def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
+                              corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
+    log = {}
+
+    z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
+                                        return_first_stage_outputs=True,
+                                        force_c_encode=not (hasattr(model, 'split_input_params')
+                                                            and model.cond_stage_key == 'coordinates_bbox'),
+                                        return_original_cond=True)
+
+    if custom_shape is not None:
+        z = torch.randn(custom_shape)
+        print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
+
+    z0 = None
+
+    log["input"] = x
+    log["reconstruction"] = xrec
+
+    if ismap(xc):
+        log["original_conditioning"] = model.to_rgb(xc)
+        if hasattr(model, 'cond_stage_key'):
+            log[model.cond_stage_key] = model.to_rgb(xc)
+
+    else:
+        log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
+        if model.cond_stage_model:
+            log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
+            if model.cond_stage_key == 'class_label':
+                log[model.cond_stage_key] = xc[model.cond_stage_key]
+
+    with model.ema_scope("Plotting"):
+        t0 = time.time()
+
+        sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
+                                                eta=eta,
+                                                quantize_x0=quantize_x0, mask=None, x0=z0,
+                                                temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
+                                                x_t=x_T)
+        t1 = time.time()
+
+        if ddim_use_x0_pred:
+            sample = intermediates['pred_x0'][-1]
+
+    x_sample = model.decode_first_stage(sample)
+
+    try:
+        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
+        log["sample_noquant"] = x_sample_noquant
+        log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
+    except Exception:
+        pass
+
+    log["sample"] = x_sample
+    log["time"] = t1 - t0
+
+    return log

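For orientation, a minimal sketch of how this class is driven, mirroring what `scripts/ldsr_model.py` below does; the file paths are placeholders and the webui's bundled `ldm`/`modules` packages are assumed importable:

from PIL import Image
from ldsr_model_arch import LDSR

# hypothetical local paths to the LDSR checkpoint and its project.yaml
ldsr = LDSR("models/LDSR/model.ckpt", "models/LDSR/project.yaml")

img = Image.open("input.png")
# the network itself always upscales 4x; super_resolution first downsamples
# the input by target_scale / 4, so the net effect here is 2x
result = ldsr.super_resolution(img, steps=100, target_scale=2)
result.save("output.png")
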
extensions-builtin/LDSR/preload.py
ADDED
@@ -0,0 +1,6 @@
+import os
+from modules import paths
+
+
+def preload(parser):
+    parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR'))

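The `preload` hook gives an extension a chance to register command-line flags on the shared argparse parser before the webui parses `sys.argv`. A self-contained sketch of the mechanism; the stand-in below mirrors the function above so it runs without the webui's `modules` package:

import argparse

# stand-in mirroring preload() above
def preload(parser):
    parser.add_argument("--ldsr-models-path", type=str, default="models/LDSR")

parser = argparse.ArgumentParser()
preload(parser)  # the hook only ever adds arguments to the shared parser

args = parser.parse_args(["--ldsr-models-path", "/tmp/ldsr-models"])
print(args.ldsr_models_path)  # /tmp/ldsr-models (argparse maps dashes to underscores)
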
extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc
ADDED
Binary file (3.18 kB)

extensions-builtin/LDSR/scripts/ldsr_model.py
ADDED
@@ -0,0 +1,68 @@
+import os
+
+from modules.modelloader import load_file_from_url
+from modules.upscaler import Upscaler, UpscalerData
+from ldsr_model_arch import LDSR
+from modules import shared, script_callbacks, errors
+import sd_hijack_autoencoder  # noqa: F401
+import sd_hijack_ddpm_v1  # noqa: F401
+
+
+class UpscalerLDSR(Upscaler):
+    def __init__(self, user_path):
+        self.name = "LDSR"
+        self.user_path = user_path
+        self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
+        self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
+        super().__init__()
+        scaler_data = UpscalerData("LDSR", None, self)
+        self.scalers = [scaler_data]
+
+    def load_model(self, path: str):
+        # Remove incorrect project.yaml file if too big
+        yaml_path = os.path.join(self.model_path, "project.yaml")
+        old_model_path = os.path.join(self.model_path, "model.pth")
+        new_model_path = os.path.join(self.model_path, "model.ckpt")
+
+        local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
+        local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
+        local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
+        local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)
+
+        if os.path.exists(yaml_path):
+            statinfo = os.stat(yaml_path)
+            if statinfo.st_size >= 10485760:
+                print("Removing invalid LDSR YAML file.")
+                os.remove(yaml_path)
+
+        if os.path.exists(old_model_path):
+            print("Renaming model from model.pth to model.ckpt")
+            os.rename(old_model_path, new_model_path)
+
+        if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
+            model = local_safetensors_path
+        else:
+            model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")
+
+        yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")
+
+        return LDSR(model, yaml)
+
+    def do_upscale(self, img, path):
+        try:
+            ldsr = self.load_model(path)
+        except Exception:
+            errors.report(f"Failed loading LDSR model {path}", exc_info=True)
+            return img
+        ddim_steps = shared.opts.ldsr_steps
+        return ldsr.super_resolution(img, ddim_steps, self.scale)
+
+
+def on_ui_settings():
+    import gradio as gr
+
+    shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling")))
+    shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")))
+
+
+script_callbacks.on_ui_settings(on_ui_settings)

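A rough sketch of the runtime flow for the file above; who constructs the upscaler and sets `scale` is webui plumbing not shown in this diff, so treat those details as assumptions:

from PIL import Image

upscaler = UpscalerLDSR("models/LDSR")  # user_path placeholder
upscaler.scale = 4                      # normally set by the upscale job

img = Image.open("input.png")
# do_upscale reads shared.opts.ldsr_steps at call time, so the option added in
# on_ui_settings takes effect without a reload; this load_model ignores `path`
result = upscaler.do_upscale(img, None)
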
extensions-builtin/LDSR/sd_hijack_autoencoder.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
|
| 2 |
+
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
|
| 3 |
+
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import pytorch_lightning as pl
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
from contextlib import contextmanager
|
| 9 |
+
|
| 10 |
+
from torch.optim.lr_scheduler import LambdaLR
|
| 11 |
+
|
| 12 |
+
from ldm.modules.ema import LitEma
|
| 13 |
+
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
|
| 14 |
+
from ldm.modules.diffusionmodules.model import Encoder, Decoder
|
| 15 |
+
from ldm.util import instantiate_from_config
|
| 16 |
+
|
| 17 |
+
import ldm.models.autoencoder
|
| 18 |
+
from packaging import version
|
| 19 |
+
|
| 20 |
+
class VQModel(pl.LightningModule):
|
| 21 |
+
def __init__(self,
|
| 22 |
+
ddconfig,
|
| 23 |
+
lossconfig,
|
| 24 |
+
n_embed,
|
| 25 |
+
embed_dim,
|
| 26 |
+
ckpt_path=None,
|
| 27 |
+
ignore_keys=None,
|
| 28 |
+
image_key="image",
|
| 29 |
+
colorize_nlabels=None,
|
| 30 |
+
monitor=None,
|
| 31 |
+
batch_resize_range=None,
|
| 32 |
+
scheduler_config=None,
|
| 33 |
+
lr_g_factor=1.0,
|
| 34 |
+
remap=None,
|
| 35 |
+
sane_index_shape=False, # tell vector quantizer to return indices as bhw
|
| 36 |
+
use_ema=False
|
| 37 |
+
):
|
| 38 |
+
super().__init__()
|
| 39 |
+
self.embed_dim = embed_dim
|
| 40 |
+
self.n_embed = n_embed
|
| 41 |
+
self.image_key = image_key
|
| 42 |
+
self.encoder = Encoder(**ddconfig)
|
| 43 |
+
self.decoder = Decoder(**ddconfig)
|
| 44 |
+
self.loss = instantiate_from_config(lossconfig)
|
| 45 |
+
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
|
| 46 |
+
remap=remap,
|
| 47 |
+
sane_index_shape=sane_index_shape)
|
| 48 |
+
self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
|
| 49 |
+
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
|
| 50 |
+
if colorize_nlabels is not None:
|
| 51 |
+
assert type(colorize_nlabels)==int
|
| 52 |
+
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
|
| 53 |
+
if monitor is not None:
|
| 54 |
+
self.monitor = monitor
|
| 55 |
+
self.batch_resize_range = batch_resize_range
|
| 56 |
+
if self.batch_resize_range is not None:
|
| 57 |
+
print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
|
| 58 |
+
|
| 59 |
+
self.use_ema = use_ema
|
| 60 |
+
if self.use_ema:
|
| 61 |
+
self.model_ema = LitEma(self)
|
| 62 |
+
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
| 63 |
+
|
| 64 |
+
if ckpt_path is not None:
|
| 65 |
+
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
|
| 66 |
+
self.scheduler_config = scheduler_config
|
| 67 |
+
self.lr_g_factor = lr_g_factor
|
| 68 |
+
|
| 69 |
+
@contextmanager
|
| 70 |
+
def ema_scope(self, context=None):
|
| 71 |
+
if self.use_ema:
|
| 72 |
+
self.model_ema.store(self.parameters())
|
| 73 |
+
self.model_ema.copy_to(self)
|
| 74 |
+
if context is not None:
|
| 75 |
+
print(f"{context}: Switched to EMA weights")
|
| 76 |
+
try:
|
| 77 |
+
yield None
|
| 78 |
+
finally:
|
| 79 |
+
if self.use_ema:
|
| 80 |
+
self.model_ema.restore(self.parameters())
|
| 81 |
+
if context is not None:
|
| 82 |
+
print(f"{context}: Restored training weights")
|
| 83 |
+
|
| 84 |
+
def init_from_ckpt(self, path, ignore_keys=None):
|
| 85 |
+
sd = torch.load(path, map_location="cpu")["state_dict"]
|
| 86 |
+
keys = list(sd.keys())
|
| 87 |
+
for k in keys:
|
| 88 |
+
for ik in ignore_keys or []:
|
| 89 |
+
if k.startswith(ik):
|
| 90 |
+
print("Deleting key {} from state_dict.".format(k))
|
| 91 |
+
del sd[k]
|
| 92 |
+
missing, unexpected = self.load_state_dict(sd, strict=False)
|
| 93 |
+
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
|
| 94 |
+
if missing:
|
| 95 |
+
print(f"Missing Keys: {missing}")
|
| 96 |
+
if unexpected:
|
| 97 |
+
print(f"Unexpected Keys: {unexpected}")
|
| 98 |
+
|
| 99 |
+
def on_train_batch_end(self, *args, **kwargs):
|
| 100 |
+
if self.use_ema:
|
| 101 |
+
self.model_ema(self)
|
| 102 |
+
|
| 103 |
+
def encode(self, x):
|
| 104 |
+
h = self.encoder(x)
|
| 105 |
+
h = self.quant_conv(h)
|
| 106 |
+
quant, emb_loss, info = self.quantize(h)
|
| 107 |
+
return quant, emb_loss, info
|
| 108 |
+
|
| 109 |
+
def encode_to_prequant(self, x):
|
| 110 |
+
h = self.encoder(x)
|
| 111 |
+
h = self.quant_conv(h)
|
| 112 |
+
return h
|
| 113 |
+
|
| 114 |
+
def decode(self, quant):
|
| 115 |
+
quant = self.post_quant_conv(quant)
|
| 116 |
+
dec = self.decoder(quant)
|
| 117 |
+
return dec
|
| 118 |
+
|
| 119 |
+
def decode_code(self, code_b):
|
| 120 |
+
quant_b = self.quantize.embed_code(code_b)
|
| 121 |
+
dec = self.decode(quant_b)
|
| 122 |
+
return dec
|
| 123 |
+
|
| 124 |
+
def forward(self, input, return_pred_indices=False):
|
| 125 |
+
quant, diff, (_,_,ind) = self.encode(input)
|
| 126 |
+
dec = self.decode(quant)
|
| 127 |
+
if return_pred_indices:
|
| 128 |
+
return dec, diff, ind
|
| 129 |
+
return dec, diff
|
| 130 |
+
|
| 131 |
+
def get_input(self, batch, k):
|
| 132 |
+
x = batch[k]
|
| 133 |
+
if len(x.shape) == 3:
|
| 134 |
+
x = x[..., None]
|
| 135 |
+
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
|
| 136 |
+
if self.batch_resize_range is not None:
|
| 137 |
+
lower_size = self.batch_resize_range[0]
|
| 138 |
+
upper_size = self.batch_resize_range[1]
|
| 139 |
+
if self.global_step <= 4:
|
| 140 |
+
# do the first few batches with max size to avoid later oom
|
| 141 |
+
new_resize = upper_size
|
| 142 |
+
else:
|
| 143 |
+
new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
|
| 144 |
+
if new_resize != x.shape[2]:
|
| 145 |
+
x = F.interpolate(x, size=new_resize, mode="bicubic")
|
| 146 |
+
x = x.detach()
|
| 147 |
+
return x
|
| 148 |
+
|
| 149 |
+
def training_step(self, batch, batch_idx, optimizer_idx):
|
| 150 |
+
# https://github.com/pytorch/pytorch/issues/37142
|
| 151 |
+
# try not to fool the heuristics
|
| 152 |
+
x = self.get_input(batch, self.image_key)
|
| 153 |
+
xrec, qloss, ind = self(x, return_pred_indices=True)
|
| 154 |
+
|
| 155 |
+
if optimizer_idx == 0:
|
| 156 |
+
# autoencode
|
| 157 |
+
aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
|
| 158 |
+
last_layer=self.get_last_layer(), split="train",
|
| 159 |
+
predicted_indices=ind)
|
| 160 |
+
|
| 161 |
+
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
|
| 162 |
+
return aeloss
|
| 163 |
+
|
| 164 |
+
if optimizer_idx == 1:
|
| 165 |
+
# discriminator
|
| 166 |
+
discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
|
| 167 |
+
last_layer=self.get_last_layer(), split="train")
|
| 168 |
+
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
|
| 169 |
+
return discloss
|
| 170 |
+
|
| 171 |
+
def validation_step(self, batch, batch_idx):
|
| 172 |
+
log_dict = self._validation_step(batch, batch_idx)
|
| 173 |
+
with self.ema_scope():
|
| 174 |
+
self._validation_step(batch, batch_idx, suffix="_ema")
|
| 175 |
+
return log_dict
|
| 176 |
+
|
| 177 |
+
def _validation_step(self, batch, batch_idx, suffix=""):
|
| 178 |
+
x = self.get_input(batch, self.image_key)
|
| 179 |
+
xrec, qloss, ind = self(x, return_pred_indices=True)
|
| 180 |
+
aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
|
| 181 |
+
self.global_step,
|
| 182 |
+
last_layer=self.get_last_layer(),
|
| 183 |
+
split="val"+suffix,
|
| 184 |
+
predicted_indices=ind
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
|
| 188 |
+
self.global_step,
|
| 189 |
+
last_layer=self.get_last_layer(),
|
| 190 |
+
split="val"+suffix,
|
| 191 |
+
predicted_indices=ind
|
| 192 |
+
)
|
| 193 |
+
rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
|
| 194 |
+
self.log(f"val{suffix}/rec_loss", rec_loss,
|
| 195 |
+
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
|
| 196 |
+
self.log(f"val{suffix}/aeloss", aeloss,
|
| 197 |
+
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
|
| 198 |
+
if version.parse(pl.__version__) >= version.parse('1.4.0'):
|
| 199 |
+
del log_dict_ae[f"val{suffix}/rec_loss"]
|
| 200 |
+
self.log_dict(log_dict_ae)
|
| 201 |
+
self.log_dict(log_dict_disc)
|
| 202 |
+
return self.log_dict
|
| 203 |
+
|
| 204 |
+
def configure_optimizers(self):
|
| 205 |
+
lr_d = self.learning_rate
|
| 206 |
+
lr_g = self.lr_g_factor*self.learning_rate
|
| 207 |
+
print("lr_d", lr_d)
|
| 208 |
+
print("lr_g", lr_g)
|
| 209 |
+
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
|
| 210 |
+
list(self.decoder.parameters())+
|
| 211 |
+
list(self.quantize.parameters())+
|
| 212 |
+
list(self.quant_conv.parameters())+
|
| 213 |
+
list(self.post_quant_conv.parameters()),
|
| 214 |
+
lr=lr_g, betas=(0.5, 0.9))
|
| 215 |
+
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
|
| 216 |
+
lr=lr_d, betas=(0.5, 0.9))
|
| 217 |
+
|
| 218 |
+
if self.scheduler_config is not None:
|
| 219 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
| 220 |
+
|
| 221 |
+
print("Setting up LambdaLR scheduler...")
|
| 222 |
+
scheduler = [
|
| 223 |
+
{
|
| 224 |
+
'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
|
| 225 |
+
'interval': 'step',
|
| 226 |
+
'frequency': 1
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
|
| 230 |
+
'interval': 'step',
|
| 231 |
+
'frequency': 1
|
| 232 |
+
},
|
| 233 |
+
]
|
| 234 |
+
return [opt_ae, opt_disc], scheduler
|
| 235 |
+
return [opt_ae, opt_disc], []
|
| 236 |
+
|
| 237 |
+
    def get_last_layer(self):
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        log = {}
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log["inputs"] = x
            return log
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        if plot_ema:
            with self.ema_scope():
                xrec_ema, _ = self(x)
                if x.shape[1] > 3:
                    xrec_ema = self.to_rgb(xrec_ema)
                log["reconstructions_ema"] = xrec_ema
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class VQModelInterface(VQModel):
    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(*args, embed_dim=embed_dim, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, h, force_not_quantize=False):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec


ldm.models.autoencoder.VQModel = VQModel
ldm.models.autoencoder.VQModelInterface = VQModelInterface

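# Editor's sketch, not part of the original file: the quantize-or-bypass
# branch in decode(), demonstrated with a hypothetical nearest-neighbour
# codebook standing in for the real VectorQuantizer.
#
#   import torch
#
#   codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])   # 2 codes of dim 2
#   h = torch.tensor([[0.9, 0.8]])                      # continuous latent
#   force_not_quantize = False
#   if force_not_quantize:
#       quant = h
#   else:
#       quant = codebook[torch.cdist(h, codebook).argmin(dim=1)]
#   print(quant)                                        # tensor([[1., 1.]])
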
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
ADDED
@@ -0,0 +1,1443 @@
# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)
# Original filename: ldm/models/diffusion/ddpm.py
# The purpose is to reinstate the old DDPM logic, which works with VQ, whereas the V2 one doesn't
# Some models such as LDSR require VQ to work correctly
# The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module

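# Editor's sketch, not part of the original file: the module-patching pattern
# this file relies on — define a class, then attach it to an already-imported
# module so downstream lookups resolve it. `demo_module` and `OldBehaviourV1`
# are hypothetical stand-ins.
#
#   import types
#
#   demo_module = types.ModuleType("demo")
#
#   class OldBehaviourV1:
#       pass
#
#   demo_module.OldBehaviourV1 = OldBehaviourV1
#   assert demo_module.OldBehaviourV1 is OldBehaviourV1
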
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only

from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler

import ldm.models.diffusion.ddpm

__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def uniform_on_device(r1, r2, shape, device):
    return (r1 - r2) * torch.rand(*shape, device=device) + r2


class DDPMV1(pl.LightningModule):
    # classic DDPM with Gaussian diffusion, in image space
    def __init__(self,
                 unet_config,
                 timesteps=1000,
                 beta_schedule="linear",
                 loss_type="l2",
                 ckpt_path=None,
                 ignore_keys=None,
                 load_only_unet=False,
                 monitor="val/loss",
                 use_ema=True,
                 first_stage_key="image",
                 image_size=256,
                 channels=3,
                 log_every_t=100,
                 clip_denoised=True,
                 linear_start=1e-4,
                 linear_end=2e-2,
                 cosine_s=8e-3,
                 given_betas=None,
                 original_elbo_weight=0.,
                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
                 l_simple_weight=1.,
                 conditioning_key=None,
                 parameterization="eps",  # all assuming fixed variance schedules
                 scheduler_config=None,
                 use_positional_encodings=False,
                 learn_logvar=False,
                 logvar_init=0.,
                 ):
        super().__init__()
        assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.model = DiffusionWrapperV1(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)

    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                       cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()

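    # Editor's sketch, not part of the original file (kept as comments): the
    # cached schedule quantities above, computed by hand for a toy 4-step
    # linear schedule with v_posterior = 0.
    #
    #   import numpy as np
    #
    #   betas = np.linspace(1e-4, 2e-2, 4)
    #   alphas_cumprod = np.cumprod(1.0 - betas)
    #   alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
    #   posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
    #   print(posterior_variance)  # first entry is 0 — hence the log clipping above
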
    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys or []:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if missing:
            print(f"Missing Keys: {missing}")
        if unexpected:
            print(f"Unexpected Keys: {unexpected}")

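    # Editor's sketch, not part of the original file (kept as comments): the
    # store/copy_to/restore round-trip that ema_scope wraps, shown with a
    # plain parameter list instead of LitEma.
    #
    #   import torch
    #
    #   params = [torch.nn.Parameter(torch.ones(2))]
    #   backup = [p.detach().clone() for p in params]   # store()
    #   shadow = [torch.zeros(2)]                       # pretend EMA weights
    #   with torch.no_grad():
    #       for p, s in zip(params, shadow):
    #           p.copy_(s)                              # copy_to(): eval with EMA
    #       for p, b in zip(params, backup):
    #           p.copy_(b)                              # restore() on exit
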
    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool):
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

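    # Editor's sketch, not part of the original file (kept as comments): a
    # scalar check that predict_start_from_noise inverts q_sample under the
    # eps parameterization.
    #
    #   import torch
    #
    #   a_bar = torch.tensor(0.7)   # alphas_cumprod[t]
    #   x0, eps = torch.tensor(0.5), torch.tensor(-0.3)
    #   x_t = a_bar.sqrt() * x0 + (1 - a_bar).sqrt() * eps            # q_sample
    #   x0_rec = (1 / a_bar).sqrt() * x_t - (1 / a_bar - 1).sqrt() * eps
    #   assert torch.allclose(x0_rec, x0)
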
    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, batch_size=16, return_intermediates=False):
        image_size = self.image_size
        channels = self.channels
        return self.p_sample_loop((batch_size, channels, image_size, image_size),
                                  return_intermediates=return_intermediates)

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

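    # Editor's sketch, not part of the original file (kept as comments): the
    # t == 0 masking trick in p_sample — noise is suppressed on the final
    # denoising step, so that step returns the posterior mean exactly.
    #
    #   import torch
    #
    #   t = torch.tensor([3, 0, 1])
    #   nonzero_mask = (1 - (t == 0).float()).reshape(t.shape[0], 1, 1, 1)
    #   print(nonzero_mask.flatten())   # tensor([1., 0., 1.])
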
    def get_loss(self, pred, target, mean=True):
        if self.loss_type == 'l1':
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == 'l2':
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

        return loss

    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

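    # Editor's sketch, not part of the original file (kept as comments): what
    # the rearrange pair above does — n snapshots for a batch of b images
    # become b rows of n images each, row-major for make_grid.
    #
    #   import torch
    #   from einops import rearrange
    #
    #   samples = torch.zeros(5, 2, 3, 8, 8)               # n=5 snapshots, b=2
    #   grid = rearrange(samples, 'n b c h w -> b n c h w')
    #   grid = rearrange(grid, 'b n c h w -> (b n) c h w')
    #   print(grid.shape)                                  # torch.Size([10, 3, 8, 8])
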
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = {}
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = []
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusionV1(DDPMV1):
    """main class"""
    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 *args, **kwargs):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

    def make_cond_schedule(self):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

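    # Editor's sketch, not part of the original file (kept as comments): the
    # std-rescaling rule in isolation — pick the scale so the rescaled latents
    # have (approximately) unit standard deviation.
    #
    #   import torch
    #
    #   z = 3.7 * torch.randn(16, 4, 8, 8)      # stand-in first-stage encodings
    #   scale_factor = 1.0 / z.flatten().std()
    #   print((scale_factor * z).std())         # ~1.0
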
    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)

        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist

    def get_weighting(self, h, w, Ly, Lx, device):
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                               self.split_input_params["clip_max_weight"])
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(L_weighting,
                                     self.split_input_params["clip_min_tie_weight"],
                                     self.split_input_params["clip_max_tie_weight"])

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting

    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting

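    # Editor's sketch, not part of the original file (kept as comments): why
    # fold(weighting) serves as a normalization map — folding overlapping
    # patches sums the overlaps, so dividing undoes the double count.
    #
    #   import torch
    #
    #   x = torch.ones(1, 1, 4, 4)
    #   unfold = torch.nn.Unfold(kernel_size=2, stride=1)
    #   fold = torch.nn.Fold(output_size=(4, 4), kernel_size=2, stride=1)
    #   patches = unfold(x)                     # (1, 4, 9): nine 2x2 crops
    #   norm = fold(torch.ones_like(patches))   # per-pixel overlap counts
    #   print(fold(patches) / norm)             # recovers the all-ones image
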
    @torch.no_grad()
    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
                  cond_key=None, return_original_cond=False, bs=None):
        x = super().get_input(batch, k)
        if bs is not None:
            x = x[:bs]
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()

        if self.model.conditioning_key is not None:
            if cond_key is None:
                cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:
                if cond_key in ['caption', 'coordinates_bbox']:
                    xc = batch[cond_key]
                elif cond_key == 'class_label':
                    xc = batch
                else:
                    xc = super().get_input(batch, cond_key).to(self.device)
            else:
                xc = x
            if not self.cond_stage_trainable or force_c_encode:
                if isinstance(xc, (dict, list)):
                    # import pudb; pudb.set_trace()
                    c = self.get_learned_conditioning(xc)
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
            else:
                c = xc
            if bs is not None:
                c = c[:bs]

            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                ckey = __conditioning_keys__[self.model.conditioning_key]
                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

        else:
            c = None
            xc = None
            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                c = {'pos_x': pos_x, 'pos_y': pos_y}
        out = [z, c]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # e.g. (128, 128)
                stride = self.split_input_params["stride"]  # e.g. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    # same as above but without decorator
    def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # e.g. (128, 128)
                stride = self.split_input_params["stride"]  # e.g. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    @torch.no_grad()
    def encode_first_stage(self, x):
        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # e.g. (128, 128)
                stride = self.split_input_params["stride"]  # e.g. (64, 64)
                df = self.split_input_params["vqf"]
                self.split_input_params['original_image_size'] = x.shape[-2:]
                bs, nc, h, w = x.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
                z = unfold(x)  # (bn, nc * prod(**ks), L)
                # Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)

                output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
                               for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)
                o = o * weighting

                # Reverse reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization
                return decoded

            else:
                return self.first_stage_model.encode(x)
        else:
            return self.first_stage_model.encode(x)

    def shared_step(self, batch, **kwargs):
        x, c = self.get_input(batch, self.first_stage_key)
        loss = self(x, c)
        return loss

    def forward(self, x, c, *args, **kwargs):
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        if self.model.conditioning_key is not None:
            assert c is not None
            if self.cond_stage_trainable:
                c = self.get_learned_conditioning(c)
            if self.shorten_cond_schedule:  # TODO: drop this option
                tc = self.cond_ids[t].to(self.device)
                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
        return self.p_losses(x, c, t, *args, **kwargs)

    def apply_model(self, x_noisy, t, cond, return_ids=False):

        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        if hasattr(self, "split_input_params"):
            assert len(cond) == 1  # todo can only deal with one conditioning atm
            assert not return_ids
            ks = self.split_input_params["ks"]  # e.g. (128, 128)
            stride = self.split_input_params["stride"]  # e.g. (64, 64)

            h, w = x_noisy.shape[-2:]

            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

            z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)
            z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

            if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
                c_key = next(iter(cond.keys()))  # get key
                c = next(iter(cond.values()))  # get value
                assert (len(c) == 1)  # todo extend to list with more than one elem
                c = c[0]  # get element

                c = unfold(c)
                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L)

                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

            elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

                # assuming padding of unfold is always 0 and its dilation is always 1
                n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
                full_img_h, full_img_w = self.split_input_params['original_image_size']
                # as we are operating on latents, we need the factor from the original image size to the
                # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                num_downs = self.first_stage_model.encoder.num_resolutions - 1
                rescale_latent = 2 ** num_downs

                # get top left positions of patches as conforming for the bbox tokenizer, therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                        for patch_nr in range(z.shape[-1])]

                # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
                patch_limits = [(x_tl, y_tl,
                                 rescale_latent * ks[0] / full_img_w,
                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
                # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

                # tokenize crop coordinates for the bounding boxes of the respective patches
                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
                print(patch_limits_tknzd[0].shape)
                # cut tknzd crop position from conditioning
                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
                cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                print(cut_cond.shape)

                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
                adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                print(adapted_cond.shape)
                adapted_cond = self.get_learned_conditioning(adapted_cond)
                print(adapted_cond.shape)
                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
                print(adapted_cond.shape)

                cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

            else:
                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

            # apply model by loop over crops
            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0],
                                  tuple)  # todo can't deal with multiple model outputs; check this never happens

            o = torch.stack(output_list, axis=-1)
            o = o * weighting
            # Reverse reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            x_recon = fold(o) / normalization

        else:
            x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

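    # Editor's sketch, not part of the original file (kept as comments): how a
    # bare conditioning tensor is wrapped before the UNet call, following the
    # concat/crossattn key convention above; the tensor shape and mode here
    # are assumptions.
    #
    #   import torch
    #
    #   cond = torch.zeros(1, 77, 768)            # e.g. a text embedding
    #   conditioning_key = 'crossattn'
    #   key = 'c_concat' if conditioning_key == 'concat' else 'c_crossattn'
    #   wrapped = {key: [cond]}                   # then: model(x_noisy, t, **wrapped)
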
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
|
| 984 |
+
return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
|
| 985 |
+
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
|
| 986 |
+
|
| 987 |
+
    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)

    def p_losses(self, x_start, cond, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        logvar_t = self.logvar[t].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        return loss, loss_dict

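    # Weighting note for p_losses above: dividing loss_simple by exp(logvar_t) and
    # adding logvar_t is a learned per-timestep uncertainty weighting (only trained
    # when learn_logvar is set); loss_vlb then mixes in the lvlb-weighted ELBO term,
    # scaled by original_elbo_weight.
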
    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs  # unreachable; kept from the upstream code
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        [x[:batch_size] for x in cond[key]] for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        [x[:batch_size] for x in cond[key]] for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):

        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates


    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):

        use_ddim = ddim_steps is not None

        log = {}
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
                log["conditioning"] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = []
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with self.ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                                             quantize_denoised=True)
                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                    #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

            if inpaint:
                # make a simple center square
                h, w = z.shape[2], z.shape[3]
                mask = torch.ones(N, h, w).to(self.device)
                # zeros will be filled in
                mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
                mask = mask[:, None, ...]
                with self.ema_scope("Plotting Inpaint"):

                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_inpainting"] = x_samples
                log["mask"] = mask

                # outpaint
                with self.ema_scope("Plotting Outpaint"):
                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with self.ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.cond_stage_trainable:
            print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
            params = params + list(self.cond_stage_model.parameters())
        if self.learn_logvar:
            print('Diffusion model optimizing logvar')
            params.append(self.logvar)
        opt = torch.optim.AdamW(params, lr=lr)
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                }]
            return [opt], scheduler
        return opt

    @torch.no_grad()
    def to_rgb(self, x):
        x = x.float()
        if not hasattr(self, "colorize"):
            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = nn.functional.conv2d(x, weight=self.colorize)
        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
        return x


class DiffusionWrapperV1(pl.LightningModule):
    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        if self.conditioning_key is None:
            out = self.diffusion_model(x, t)
        elif self.conditioning_key == 'concat':
            xc = torch.cat([x] + c_concat, dim=1)
            out = self.diffusion_model(xc, t)
        elif self.conditioning_key == 'crossattn':
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc)
        elif self.conditioning_key == 'hybrid':
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc)
        elif self.conditioning_key == 'adm':
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc)
        else:
            raise NotImplementedError()

        return out

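# Conditioning modes handled by DiffusionWrapperV1.forward above:
# 'concat' appends conditioning as extra input channels, 'crossattn' passes it as
# the cross-attention context sequence, 'hybrid' does both at once, and 'adm'
# forwards c_crossattn[0] as the class-embedding argument y.
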
class Layout2ImgDiffusionV1(LatentDiffusionV1):
    # TODO: move all layout-specific hacks to this class
    def __init__(self, cond_stage_key, *args, **kwargs):
        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
        super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

    def log_images(self, batch, N=8, *args, **kwargs):
        logs = super().log_images(*args, batch=batch, N=N, **kwargs)

        key = 'train' if self.training else 'validation'
        dset = self.trainer.datamodule.datasets[key]
        mapper = dset.conditional_builders[self.cond_stage_key]

        bbox_imgs = []
        map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
        for tknzd_bbox in batch[self.cond_stage_key][:N]:
            bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
            bbox_imgs.append(bboximg)

        cond_img = torch.stack(bbox_imgs, dim=0)
        logs['bbox_image'] = cond_img
        return logs

ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
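The four assignments above publish the V1 classes as attributes of the upstream ldm.models.diffusion.ddpm module, so dotted-path targets in model configs can resolve to them. A minimal sketch of that resolution mechanism, assuming the usual ldm-style helper (simplified here, not the exact ldm.util code):

import importlib

def get_obj_from_str(path):
    # split "pkg.module.Class" into module path and attribute name, then import
    module, cls = path.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

# after the assignments above, this dotted target resolves to the hijacked class
LatentDiffusionV1 = get_obj_from_str("ldm.models.diffusion.ddpm.LatentDiffusionV1")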
extensions-builtin/LDSR/vqvae_quantize.py
ADDED
@@ -0,0 +1,147 @@
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.

import torch
import torch.nn as nn
import numpy as np
from einops import rearrange


class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer; can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits is False, "Only for interface compatible with Gumbel"
        assert return_logits is False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z

        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

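    # Note on "preserve gradients" above: z + (z_q - z).detach() equals z_q in the
    # forward pass, but the backward pass sees only z, i.e. gradients flow straight
    # through the non-differentiable argmin codebook lookup to the encoder output.
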
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
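A minimal usage sketch for the quantizer above (hyperparameters illustrative, not from the diff):

import torch

quantizer = VectorQuantizer2(n_e=512, e_dim=4, beta=0.25)
z = torch.randn(2, 4, 8, 8)                # (batch, e_dim, H, W) encoder output
z_q, loss, (_, _, indices) = quantizer(z)  # quantized latents, codebook loss, indices
assert z_q.shape == z.shape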
extensions-builtin/Lora/__pycache__/extra_networks_lora.cpython-310.pyc
ADDED
Binary file (2.68 kB)
extensions-builtin/Lora/__pycache__/lora.cpython-310.pyc
ADDED
Binary file (486 Bytes)
extensions-builtin/Lora/__pycache__/lora_logger.cpython-310.pyc
ADDED
Binary file (1.05 kB)
extensions-builtin/Lora/__pycache__/lora_patches.cpython-310.pyc
ADDED
Binary file (1.86 kB)
extensions-builtin/Lora/__pycache__/lyco_helpers.cpython-310.pyc
ADDED
Binary file (2.36 kB)
extensions-builtin/Lora/__pycache__/network.cpython-310.pyc
ADDED
Binary file (5.76 kB)
extensions-builtin/Lora/__pycache__/network_full.cpython-310.pyc
ADDED
Binary file (1.53 kB)
extensions-builtin/Lora/__pycache__/network_glora.cpython-310.pyc
ADDED
Binary file (1.77 kB)
extensions-builtin/Lora/__pycache__/network_hada.cpython-310.pyc
ADDED
Binary file (2.17 kB)
extensions-builtin/Lora/__pycache__/network_ia3.cpython-310.pyc
ADDED
Binary file (1.56 kB)
extensions-builtin/Lora/__pycache__/network_lokr.cpython-310.pyc
ADDED
Binary file (2.37 kB)
extensions-builtin/Lora/__pycache__/network_lora.cpython-310.pyc
ADDED
Binary file (3.44 kB)
extensions-builtin/Lora/__pycache__/network_norm.cpython-310.pyc
ADDED
Binary file (1.54 kB)
extensions-builtin/Lora/__pycache__/network_oft.cpython-310.pyc
ADDED
Binary file (2.78 kB)
extensions-builtin/Lora/__pycache__/networks.cpython-310.pyc
ADDED
Binary file (16.1 kB)
extensions-builtin/Lora/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (626 Bytes)
extensions-builtin/Lora/__pycache__/ui_edit_user_metadata.cpython-310.pyc
ADDED
Binary file (7.48 kB)
extensions-builtin/Lora/__pycache__/ui_extra_networks_lora.cpython-310.pyc
ADDED
Binary file (3.1 kB)
extensions-builtin/Lora/extra_networks_lora.py
ADDED
@@ -0,0 +1,67 @@
from modules import extra_networks, shared
import networks


class ExtraNetworkLora(extra_networks.ExtraNetwork):
    def __init__(self):
        super().__init__('lora')

        self.errors = {}
        """mapping of network names to the number of errors the network had during operation"""

    def activate(self, p, params_list):
        additional = shared.opts.sd_lora

        self.errors.clear()

        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

        names = []
        te_multipliers = []
        unet_multipliers = []
        dyn_dims = []
        for params in params_list:
            assert params.items

            names.append(params.positional[0])

            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
            te_multiplier = float(params.named.get("te", te_multiplier))

            unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
            unet_multiplier = float(params.named.get("unet", unet_multiplier))

            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim

            te_multipliers.append(te_multiplier)
            unet_multipliers.append(unet_multiplier)
            dyn_dims.append(dyn_dim)

        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)

        if shared.opts.lora_add_hashes_to_infotext:
            network_hashes = []
            for item in networks.loaded_networks:
                shorthash = item.network_on_disk.shorthash
                if not shorthash:
                    continue

                alias = item.mentioned_name
                if not alias:
                    continue

                alias = alias.replace(":", "").replace(",", "")

                network_hashes.append(f"{alias}: {shorthash}")

            if network_hashes:
                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)

    def deactivate(self, p):
        if self.errors:
            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))

            self.errors.clear()
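# For reference, the prompt-tag grammar the activate() parser above implements
# ("my_lora" is a hypothetical network name; numbers are illustrative):
#   <lora:my_lora>                te = unet = 1.0
#   <lora:my_lora:0.8>            te = unet = 0.8
#   <lora:my_lora:0.8:0.5>        te = 0.8, unet = 0.5
#   <lora:my_lora:0.8:0.5:16>     additionally sets dyn_dim = 16
#   named forms te=, unet=, dyn=  override the positional values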
extensions-builtin/Lora/lora.py
ADDED
@@ -0,0 +1,9 @@
import networks

list_available_loras = networks.list_available_networks

available_loras = networks.available_networks
available_lora_aliases = networks.available_network_aliases
available_lora_hash_lookup = networks.available_network_hash_lookup
forbidden_lora_aliases = networks.forbidden_network_aliases
loaded_loras = networks.loaded_networks
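lora.py above is a backwards-compatibility shim: every old lora.* name is an alias into the newer networks module, so pre-rename third-party code keeps working. A sketch of such a caller (the snippet itself is hypothetical):

import lora  # resolves to the shim above

# identical objects to networks.available_networks and networks.loaded_networks
print(len(lora.available_loras), len(lora.loaded_loras))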
extensions-builtin/Lora/lora_logger.py
ADDED
@@ -0,0 +1,33 @@
import sys
import copy
import logging


class ColoredFormatter(logging.Formatter):
    COLORS = {
        "DEBUG": "\033[0;36m",  # CYAN
        "INFO": "\033[0;32m",  # GREEN
        "WARNING": "\033[0;33m",  # YELLOW
        "ERROR": "\033[0;31m",  # RED
        "CRITICAL": "\033[0;37;41m",  # WHITE ON RED
        "RESET": "\033[0m",  # RESET COLOR
    }

    def format(self, record):
        colored_record = copy.copy(record)
        levelname = colored_record.levelname
        seq = self.COLORS.get(levelname, self.COLORS["RESET"])
        colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
        return super().format(colored_record)


logger = logging.getLogger("lora")
logger.propagate = False


if not logger.handlers:
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s")
    )
    logger.addHandler(handler)
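A minimal usage sketch for the logger above (message text illustrative):

from lora_logger import logger

logger.warning("falling back to CPU")
# prints "[lora]-WARNING: falling back to CPU" with the level name colored yellow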
extensions-builtin/Lora/lora_patches.py
ADDED
@@ -0,0 +1,31 @@
import torch

import networks
from modules import patches


class LoraPatches:
    def __init__(self):
        self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
        self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
        self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
        self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
        self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
        self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
        self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
        self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
        self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
        self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)

    def undo(self):
        self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
        self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
        self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
        self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
        self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
        self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
        self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
        self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
        self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
        self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
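For orientation, a sketch of the patch/undo contract the class above relies on; this is a simplified stand-in, not the actual modules.patches implementation:

_originals = {}

def patch(key, obj, field, replacement):
    # remember the original attribute, install the replacement, return the original
    _originals[(key, obj, field)] = getattr(obj, field)
    setattr(obj, field, replacement)
    return _originals[(key, obj, field)]

def undo(key, obj, field):
    # restore the saved attribute; returning None clears the stored reference
    setattr(obj, field, _originals.pop((key, obj, field)))
    return None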