Upload lora-scripts/sd-scripts/library/model_util.py with huggingface_hub
lora-scripts/sd-scripts/library/model_util.py
ADDED
@@ -0,0 +1,1356 @@
# v1: split from train_db_fixed.py.
# v2: support safetensors

import math
import os

import torch
from library.device_utils import init_ipex
init_ipex()

import diffusers
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextConfig, logging
from diffusers import AutoencoderKL, DDIMScheduler, StableDiffusionPipeline  # , UNet2DConditionModel
from safetensors.torch import load_file, save_file
from library.original_unet import UNet2DConditionModel
from library.utils import setup_logging
setup_logging()
import logging
logger = logging.getLogger(__name__)

# model parameters of the Diffusers version of Stable Diffusion
NUM_TRAIN_TIMESTEPS = 1000
BETA_START = 0.00085
BETA_END = 0.0120

UNET_PARAMS_MODEL_CHANNELS = 320
UNET_PARAMS_CHANNEL_MULT = [1, 2, 4, 4]
UNET_PARAMS_ATTENTION_RESOLUTIONS = [4, 2, 1]
UNET_PARAMS_IMAGE_SIZE = 64  # fixed from old invalid value `32`
UNET_PARAMS_IN_CHANNELS = 4
UNET_PARAMS_OUT_CHANNELS = 4
UNET_PARAMS_NUM_RES_BLOCKS = 2
UNET_PARAMS_CONTEXT_DIM = 768
UNET_PARAMS_NUM_HEADS = 8
# UNET_PARAMS_USE_LINEAR_PROJECTION = False

VAE_PARAMS_Z_CHANNELS = 4
VAE_PARAMS_RESOLUTION = 256
VAE_PARAMS_IN_CHANNELS = 3
VAE_PARAMS_OUT_CH = 3
VAE_PARAMS_CH = 128
VAE_PARAMS_CH_MULT = [1, 2, 4, 4]
VAE_PARAMS_NUM_RES_BLOCKS = 2

# V2
V2_UNET_PARAMS_ATTENTION_HEAD_DIM = [5, 10, 20, 20]
V2_UNET_PARAMS_CONTEXT_DIM = 1024
# V2_UNET_PARAMS_USE_LINEAR_PROJECTION = True

# reference models for loading the Diffusers configs
DIFFUSERS_REF_MODEL_ID_V1 = "runwayml/stable-diffusion-v1-5"
DIFFUSERS_REF_MODEL_ID_V2 = "stabilityai/stable-diffusion-2-1"

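# Hedged sketch (not part of the original file): the scheduler constants above
# correspond to the standard SD training schedule, which would map onto a
# Diffusers scheduler roughly like
#   scheduler = DDIMScheduler(
#       num_train_timesteps=NUM_TRAIN_TIMESTEPS,
#       beta_start=BETA_START,
#       beta_end=BETA_END,
#       beta_schedule="scaled_linear",
#   )
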
# region StableDiffusion -> Diffusers conversion code
# copied and modified from convert_original_stable_diffusion_to_diffusers (ASL 2.0)


def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])

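# Examples for shave_segments() above (illustrative, not original code):
#   shave_segments("input_blocks.3.0.op.weight", 2)   -> "0.op.weight"
#   shave_segments("encoder.mid.block_1.norm1.weight", -1)
#                                                     -> "encoder.mid.block_1.norm1"
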
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("nin_shortcut", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        # new_item = new_item.replace('norm.weight', 'group_norm.weight')
        # new_item = new_item.replace('norm.bias', 'group_norm.bias')

        # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')

        # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        if diffusers.__version__ < "0.17.0":
            new_item = new_item.replace("q.weight", "query.weight")
            new_item = new_item.replace("q.bias", "query.bias")

            new_item = new_item.replace("k.weight", "key.weight")
            new_item = new_item.replace("k.bias", "key.bias")

            new_item = new_item.replace("v.weight", "value.weight")
            new_item = new_item.replace("v.bias", "value.bias")

            new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
            new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        else:
            new_item = new_item.replace("q.weight", "to_q.weight")
            new_item = new_item.replace("q.bias", "to_q.bias")

            new_item = new_item.replace("k.weight", "to_k.weight")
            new_item = new_item.replace("k.bias", "to_k.bias")

            new_item = new_item.replace("v.weight", "to_v.weight")
            new_item = new_item.replace("v.bias", "to_v.bias")

            new_item = new_item.replace("proj_out.weight", "to_out.0.weight")
            new_item = new_item.replace("proj_out.bias", "to_out.0.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping

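# Illustration of the local renaming above (not original code):
#   renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
#   -> [{"old": "input_blocks.1.0.in_layers.0.weight",
#        "new": "input_blocks.1.0.norm1.weight"}]
# The block prefix itself is renamed later, globally, in assign_to_checkpoint().
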
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """
    This does the final conversion step: take locally converted weights and apply a global renaming
    to them. It splits attention layers, and takes into account additional replacements
    that may arise.

    Assigns the weights to the new checkpoint.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        reshaping = False
        if diffusers.__version__ < "0.17.0":
            if "proj_attn.weight" in new_path:
                reshaping = True
        else:
            if ".attentions." in new_path and ".0.to_" in new_path and old_checkpoint[path["old"]].ndim > 2:
                reshaping = True

        if reshaping:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def conv_attn_to_linear(checkpoint):
    keys = list(checkpoint.keys())
    attn_keys = ["query.weight", "key.weight", "value.weight"]
    for key in keys:
        if ".".join(key.split(".")[-2:]) in attn_keys:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0, 0]
        elif "proj_attn.weight" in key:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0]

def linear_transformer_to_conv(checkpoint):
    keys = list(checkpoint.keys())
    tf_keys = ["proj_in.weight", "proj_out.weight"]
    for key in keys:
        if ".".join(key.split(".")[-2:]) in tf_keys:
            if checkpoint[key].ndim == 2:
                checkpoint[key] = checkpoint[key].unsqueeze(2).unsqueeze(2)

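# Background sketch for the two helpers above (not original code): a 1x1 Conv2d
# and a Linear layer compute the same map, so the two directions only differ in
# weight shape:
#   linear [out, in]  <->  conv2d [out, in, 1, 1]
# e.g. a [320, 320] linear weight becomes [320, 320, 1, 1] via
# w.unsqueeze(2).unsqueeze(2), and goes back via w[:, :, 0, 0].
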
def convert_ldm_unet_checkpoint(v2, checkpoint, config):
    """
    Takes a state dict and a config, and returns a converted checkpoint.
    """

    # extract state_dict for UNet
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    keys = list(checkpoint.keys())
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)

    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
    new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
    new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key] for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}." in key] for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." in key] for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["layers_per_block"] + 1)
        layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
                f"input_blocks.{i}.0.op.weight"
            )
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(f"input_blocks.{i}.0.op.bias")

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config)

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config)

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)

    attentions_paths = renew_attention_paths(attentions)
    meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config)

    for i in range(num_output_blocks):
        block_id = i // (config["layers_per_block"] + 1)
        layer_in_block_id = i % (config["layers_per_block"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config)

            # original:
            # if ["conv.weight", "conv.bias"] in output_block_list.values():
            #     index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])

            # avoid depending on the order of bias and weight; there is probably a better way
            for l in output_block_list.values():
                l.sort()

            if ["conv.bias", "conv.weight"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config)
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = unet_state_dict[old_path]

    # in SD v2, the 1x1 conv2d layers were changed to linear;
    # the Diffusers side was mistakenly left as conv2d, so conversion is needed
    if v2 and not config.get("use_linear_projection", False):
        linear_transformer_to_conv(new_checkpoint)

    return new_checkpoint

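# Hedged usage sketch for convert_ldm_unet_checkpoint() above (not original
# code; "model.ckpt" is a placeholder path):
#   _, sd = load_checkpoint_with_text_encoder_conversion("model.ckpt")
#   cfg = create_unet_diffusers_config(v2=False)
#   unet_sd = convert_ldm_unet_checkpoint(False, sd, cfg)
#   UNet2DConditionModel(**cfg).load_state_dict(unet_sd)
# This mirrors what load_models_from_stable_diffusion_checkpoint() below does.
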
def convert_ldm_vae_checkpoint(checkpoint, config):
    # extract state dict for VAE
    vae_state_dict = {}
    vae_key = "first_stage_model."
    keys = list(checkpoint.keys())
    for key in keys:
        if key.startswith(vae_key):
            vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
    # if len(vae_state_dict) == 0:
    #     # the given checkpoint is a bare VAE state_dict, not one loaded from a .ckpt
    #     vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)}

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)}

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint

def create_unet_diffusers_config(v2, use_linear_projection_in_v2=False):
    """
    Creates a config for the diffusers based on the config of the LDM model.
    """
    # unet_params = original_config.model.params.unet_config.params

    block_out_channels = [UNET_PARAMS_MODEL_CHANNELS * mult for mult in UNET_PARAMS_CHANNEL_MULT]

    down_block_types = []
    resolution = 1
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnDownBlock2D" if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else "DownBlock2D"
        down_block_types.append(block_type)
        if i != len(block_out_channels) - 1:
            resolution *= 2

    up_block_types = []
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnUpBlock2D" if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else "UpBlock2D"
        up_block_types.append(block_type)
        resolution //= 2

    config = dict(
        sample_size=UNET_PARAMS_IMAGE_SIZE,
        in_channels=UNET_PARAMS_IN_CHANNELS,
        out_channels=UNET_PARAMS_OUT_CHANNELS,
        down_block_types=tuple(down_block_types),
        up_block_types=tuple(up_block_types),
        block_out_channels=tuple(block_out_channels),
        layers_per_block=UNET_PARAMS_NUM_RES_BLOCKS,
        cross_attention_dim=UNET_PARAMS_CONTEXT_DIM if not v2 else V2_UNET_PARAMS_CONTEXT_DIM,
        attention_head_dim=UNET_PARAMS_NUM_HEADS if not v2 else V2_UNET_PARAMS_ATTENTION_HEAD_DIM,
        # use_linear_projection=UNET_PARAMS_USE_LINEAR_PROJECTION if not v2 else V2_UNET_PARAMS_USE_LINEAR_PROJECTION,
    )
    if v2 and use_linear_projection_in_v2:
        config["use_linear_projection"] = True

    return config

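# Worked result of the loops above (illustration, not original code): with the
# module constants, they produce
#   down_block_types = ("CrossAttnDownBlock2D",) * 3 + ("DownBlock2D",)
#   up_block_types   = ("UpBlock2D",) + ("CrossAttnUpBlock2D",) * 3
# i.e. attention everywhere except the lowest-resolution down block and the
# first (lowest-resolution) up block.
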
def create_vae_diffusers_config():
    """
    Creates a config for the diffusers based on the config of the LDM model.
    """
    # vae_params = original_config.model.params.first_stage_config.params.ddconfig
    # _ = original_config.model.params.first_stage_config.params.embed_dim
    block_out_channels = [VAE_PARAMS_CH * mult for mult in VAE_PARAMS_CH_MULT]
    down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
    up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)

    config = dict(
        sample_size=VAE_PARAMS_RESOLUTION,
        in_channels=VAE_PARAMS_IN_CHANNELS,
        out_channels=VAE_PARAMS_OUT_CH,
        down_block_types=tuple(down_block_types),
        up_block_types=tuple(up_block_types),
        block_out_channels=tuple(block_out_channels),
        latent_channels=VAE_PARAMS_Z_CHANNELS,
        layers_per_block=VAE_PARAMS_NUM_RES_BLOCKS,
    )
    return config

def convert_ldm_clip_checkpoint_v1(checkpoint):
    keys = list(checkpoint.keys())
    text_model_dict = {}
    for key in keys:
        if key.startswith("cond_stage_model.transformer"):
            text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]

    # remove position_ids for newer transformers versions, where it causes an error :(
    if "text_model.embeddings.position_ids" in text_model_dict:
        text_model_dict.pop("text_model.embeddings.position_ids")

    return text_model_dict

def convert_ldm_clip_checkpoint_v2(checkpoint, max_length):
    # the layouts differ to an annoying degree!
    def convert_key(key):
        if not key.startswith("cond_stage_model"):
            return None

        # common conversion
        key = key.replace("cond_stage_model.model.transformer.", "text_model.encoder.")
        key = key.replace("cond_stage_model.model.", "text_model.")

        if "resblocks" in key:
            # resblocks conversion
            key = key.replace(".resblocks.", ".layers.")
            if ".ln_" in key:
                key = key.replace(".ln_", ".layer_norm")
            elif ".mlp." in key:
                key = key.replace(".c_fc.", ".fc1.")
                key = key.replace(".c_proj.", ".fc2.")
            elif ".attn.out_proj" in key:
                key = key.replace(".attn.out_proj.", ".self_attn.out_proj.")
            elif ".attn.in_proj" in key:
                key = None  # special case, handled separately below
            else:
                raise ValueError(f"unexpected key in SD: {key}")
        elif ".positional_embedding" in key:
            key = key.replace(".positional_embedding", ".embeddings.position_embedding.weight")
        elif ".text_projection" in key:
            key = None  # unused???
        elif ".logit_scale" in key:
            key = None  # unused???
        elif ".token_embedding" in key:
            key = key.replace(".token_embedding.weight", ".embeddings.token_embedding.weight")
        elif ".ln_final" in key:
            key = key.replace(".ln_final", ".final_layer_norm")
        return key

    keys = list(checkpoint.keys())
    new_sd = {}
    for key in keys:
        # remove resblocks 23
        if ".resblocks.23." in key:
            continue
        new_key = convert_key(key)
        if new_key is None:
            continue
        new_sd[new_key] = checkpoint[key]

    # convert the attention weights
    for key in keys:
        if ".resblocks.23." in key:
            continue
        if ".resblocks" in key and ".attn.in_proj_" in key:
            # split into three (q, k, v)
            values = torch.chunk(checkpoint[key], 3)

            key_suffix = ".weight" if "weight" in key else ".bias"
            key_pfx = key.replace("cond_stage_model.model.transformer.resblocks.", "text_model.encoder.layers.")
            key_pfx = key_pfx.replace("_weight", "")
            key_pfx = key_pfx.replace("_bias", "")
            key_pfx = key_pfx.replace(".attn.in_proj", ".self_attn.")
            new_sd[key_pfx + "q_proj" + key_suffix] = values[0]
            new_sd[key_pfx + "k_proj" + key_suffix] = values[1]
            new_sd[key_pfx + "v_proj" + key_suffix] = values[2]

    # rename or add position_ids
    ANOTHER_POSITION_IDS_KEY = "text_model.encoder.text_model.embeddings.position_ids"
    if ANOTHER_POSITION_IDS_KEY in new_sd:
        # waifu diffusion v1.4
        position_ids = new_sd[ANOTHER_POSITION_IDS_KEY]
        del new_sd[ANOTHER_POSITION_IDS_KEY]
    else:
        position_ids = torch.Tensor([list(range(max_length))]).to(torch.int64)

    new_sd["text_model.embeddings.position_ids"] = position_ids
    return new_sd

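# Shape sketch of the in_proj split above (illustrative, not original code):
# the SD2 text encoder stores q, k and v fused in one tensor, so for
# hidden_size=1024 a key like
# "cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight" has
# shape [3072, 1024]; torch.chunk(w, 3) yields three [1024, 1024] tensors that
# become q_proj.weight, k_proj.weight and v_proj.weight.
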
# endregion


# region Diffusers -> StableDiffusion conversion code
# copied and modified from convert_diffusers_to_original_stable_diffusion (ASL 2.0)

def conv_transformer_to_linear(checkpoint):
    keys = list(checkpoint.keys())
    tf_keys = ["proj_in.weight", "proj_out.weight"]
    for key in keys:
        if ".".join(key.split(".")[-2:]) in tf_keys:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0, 0]


def convert_unet_state_dict_to_sd(v2, unet_state_dict):
    unet_conversion_map = [
        # (stable-diffusion, HF Diffusers)
        ("time_embed.0.weight", "time_embedding.linear_1.weight"),
        ("time_embed.0.bias", "time_embedding.linear_1.bias"),
        ("time_embed.2.weight", "time_embedding.linear_2.weight"),
        ("time_embed.2.bias", "time_embedding.linear_2.bias"),
        ("input_blocks.0.0.weight", "conv_in.weight"),
        ("input_blocks.0.0.bias", "conv_in.bias"),
        ("out.0.weight", "conv_norm_out.weight"),
        ("out.0.bias", "conv_norm_out.bias"),
        ("out.2.weight", "conv_out.weight"),
        ("out.2.bias", "conv_out.bias"),
    ]

    unet_conversion_map_resnet = [
        # (stable-diffusion, HF Diffusers)
        ("in_layers.0", "norm1"),
        ("in_layers.2", "conv1"),
        ("out_layers.0", "norm2"),
        ("out_layers.3", "conv2"),
        ("emb_layers.1", "time_emb_proj"),
        ("skip_connection", "conv_shortcut"),
    ]

    unet_conversion_map_layer = []
    for i in range(4):
        # loop over downblocks/upblocks

        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            if i > 0:
                # no attention layers in up_blocks.0
                hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
                sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
                unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
    sd_mid_atn_prefix = "middle_block.1."
    unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}

    if v2:
        conv_transformer_to_linear(new_state_dict)

    return new_state_dict

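# Hedged round-trip sketch for convert_unet_state_dict_to_sd() above (not
# original code; the file name is a placeholder):
#   sd_unet = convert_unet_state_dict_to_sd(False, unet.state_dict())
#   save_file({"model.diffusion_model." + k: v for k, v in sd_unet.items()},
#             "unet_sd_format.safetensors")
# The "model.diffusion_model." prefix restores the LDM namespace that
# convert_ldm_unet_checkpoint() strips on the way in.
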
def controlnet_conversion_map():
    unet_conversion_map = [
        ("time_embed.0.weight", "time_embedding.linear_1.weight"),
        ("time_embed.0.bias", "time_embedding.linear_1.bias"),
        ("time_embed.2.weight", "time_embedding.linear_2.weight"),
        ("time_embed.2.bias", "time_embedding.linear_2.bias"),
        ("input_blocks.0.0.weight", "conv_in.weight"),
        ("input_blocks.0.0.bias", "conv_in.bias"),
        ("middle_block_out.0.weight", "controlnet_mid_block.weight"),
        ("middle_block_out.0.bias", "controlnet_mid_block.bias"),
    ]

    unet_conversion_map_resnet = [
        ("in_layers.0", "norm1"),
        ("in_layers.2", "conv1"),
        ("out_layers.0", "norm2"),
        ("out_layers.3", "conv2"),
        ("emb_layers.1", "time_emb_proj"),
        ("skip_connection", "conv_shortcut"),
    ]

    unet_conversion_map_layer = []
    for i in range(4):
        for j in range(2):
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        if i < 3:
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
    sd_mid_atn_prefix = "middle_block.1."
    unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    controlnet_cond_embedding_names = ["conv_in"] + [f"blocks.{i}" for i in range(6)] + ["conv_out"]
    for i, hf_prefix in enumerate(controlnet_cond_embedding_names):
        hf_prefix = f"controlnet_cond_embedding.{hf_prefix}."
        sd_prefix = f"input_hint_block.{i*2}."
        unet_conversion_map_layer.append((sd_prefix, hf_prefix))

    for i in range(12):
        hf_prefix = f"controlnet_down_blocks.{i}."
        sd_prefix = f"zero_convs.{i}.0."
        unet_conversion_map_layer.append((sd_prefix, hf_prefix))

    return unet_conversion_map, unet_conversion_map_resnet, unet_conversion_map_layer


def convert_controlnet_state_dict_to_sd(controlnet_state_dict):
    unet_conversion_map, unet_conversion_map_resnet, unet_conversion_map_layer = controlnet_conversion_map()

    mapping = {k: k for k in controlnet_state_dict.keys()}
    for sd_name, diffusers_name in unet_conversion_map:
        mapping[diffusers_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, diffusers_part in unet_conversion_map_resnet:
                v = v.replace(diffusers_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, diffusers_part in unet_conversion_map_layer:
            v = v.replace(diffusers_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: controlnet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


def convert_controlnet_state_dict_to_diffusers(controlnet_state_dict):
    unet_conversion_map, unet_conversion_map_resnet, unet_conversion_map_layer = controlnet_conversion_map()

    mapping = {k: k for k in controlnet_state_dict.keys()}
    for sd_name, diffusers_name in unet_conversion_map:
        mapping[sd_name] = diffusers_name
    for k, v in mapping.items():
        for sd_part, diffusers_part in unet_conversion_map_layer:
            v = v.replace(sd_part, diffusers_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "resnets" in v:
            for sd_part, diffusers_part in unet_conversion_map_resnet:
                v = v.replace(sd_part, diffusers_part)
            mapping[k] = v
    new_state_dict = {v: controlnet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict

# ================#
# VAE Conversion  #
# ================#


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)

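# Shape sketch for reshape_weight_for_sd() above (illustrative, not original
# code): a linear weight of shape [512, 512] becomes a 1x1-conv weight of shape
# [512, 512, 1, 1]; it is the inverse of the [:, :, 0, 0] slicing used by
# conv_attn_to_linear().
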
def convert_vae_state_dict(vae_state_dict):
    vae_conversion_map = [
        # (stable-diffusion, HF Diffusers)
        ("nin_shortcut", "conv_shortcut"),
        ("norm_out", "conv_norm_out"),
        ("mid.attn_1.", "mid_block.attentions.0."),
    ]

    for i in range(4):
        # down_blocks have two resnets
        for j in range(2):
            hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
            sd_down_prefix = f"encoder.down.{i}.block.{j}."
            vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

        if i < 3:
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
            sd_downsample_prefix = f"down.{i}.downsample."
            vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"up.{3-i}.upsample."
            vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

        # up_blocks have three resnets
        # also, up blocks in hf are numbered in reverse from sd
        for j in range(3):
            hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
            sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
            vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

    # this part accounts for mid blocks in both the encoder and the decoder
    for i in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{i}."
        sd_mid_res_prefix = f"mid.block_{i+1}."
        vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

    if diffusers.__version__ < "0.17.0":
        vae_conversion_map_attn = [
            # (stable-diffusion, HF Diffusers)
            ("norm.", "group_norm."),
            ("q.", "query."),
            ("k.", "key."),
            ("v.", "value."),
            ("proj_out.", "proj_attn."),
        ]
    else:
        vae_conversion_map_attn = [
            # (stable-diffusion, HF Diffusers)
            ("norm.", "group_norm."),
            ("q.", "to_q."),
            ("k.", "to_k."),
            ("v.", "to_v."),
            ("proj_out.", "to_out.0."),
        ]

    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                # logger.info(f"Reshaping {k} for SD format: shape {v.shape} -> {v.shape} x 1 x 1")
                new_state_dict[k] = reshape_weight_for_sd(v)

    return new_state_dict


# endregion

# region custom model loading/saving, etc.


def is_safetensors(path):
    return os.path.splitext(path)[1].lower() == ".safetensors"

def load_checkpoint_with_text_encoder_conversion(ckpt_path, device="cpu"):
    # support models that store the text encoder in a different layout (without 'text_model')
    TEXT_ENCODER_KEY_REPLACEMENTS = [
        ("cond_stage_model.transformer.embeddings.", "cond_stage_model.transformer.text_model.embeddings."),
        ("cond_stage_model.transformer.encoder.", "cond_stage_model.transformer.text_model.encoder."),
        ("cond_stage_model.transformer.final_layer_norm.", "cond_stage_model.transformer.text_model.final_layer_norm."),
    ]

    if is_safetensors(ckpt_path):
        checkpoint = None
        state_dict = load_file(ckpt_path)  # load_file(ckpt_path, device) may cause an error
    else:
        checkpoint = torch.load(ckpt_path, map_location=device)
        if "state_dict" in checkpoint:
            state_dict = checkpoint["state_dict"]
        else:
            state_dict = checkpoint
            checkpoint = None

    key_reps = []
    for rep_from, rep_to in TEXT_ENCODER_KEY_REPLACEMENTS:
        for key in state_dict.keys():
            if key.startswith(rep_from):
                new_key = rep_to + key[len(rep_from) :]
                key_reps.append((key, new_key))

    for key, new_key in key_reps:
        state_dict[new_key] = state_dict[key]
        del state_dict[key]

    return checkpoint, state_dict

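# Hedged usage sketch for the loader above (not original code; the path is a
# placeholder):
#   _, state_dict = load_checkpoint_with_text_encoder_conversion("model.safetensors")
# The first return value is the raw torch checkpoint dict when a .ckpt with a
# "state_dict" entry was loaded; it is None for safetensors files and for bare
# state_dicts.
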
| 999 |
+
# TODO dtype指定の動作が怪しいので確認する text_encoderを指定形式で作れるか未確認
|
| 1000 |
+
def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, device="cpu", dtype=None, unet_use_linear_projection_in_v2=True):
|
| 1001 |
+
_, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path, device)
|
| 1002 |
+
|
| 1003 |
+
# Convert the UNet2DConditionModel model.
|
| 1004 |
+
unet_config = create_unet_diffusers_config(v2, unet_use_linear_projection_in_v2)
|
| 1005 |
+
converted_unet_checkpoint = convert_ldm_unet_checkpoint(v2, state_dict, unet_config)
|
| 1006 |
+
|
| 1007 |
+
unet = UNet2DConditionModel(**unet_config).to(device)
|
| 1008 |
+
info = unet.load_state_dict(converted_unet_checkpoint)
|
| 1009 |
+
logger.info(f"loading u-net: {info}")
|
| 1010 |
+
|
| 1011 |
+
# Convert the VAE model.
|
| 1012 |
+
vae_config = create_vae_diffusers_config()
|
| 1013 |
+
converted_vae_checkpoint = convert_ldm_vae_checkpoint(state_dict, vae_config)
|
| 1014 |
+
|
| 1015 |
+
vae = AutoencoderKL(**vae_config).to(device)
|
| 1016 |
+
info = vae.load_state_dict(converted_vae_checkpoint)
|
| 1017 |
+
logger.info(f"loading vae: {info}")
|
| 1018 |
+
|
| 1019 |
+
# convert text_model
|
| 1020 |
+
if v2:
|
| 1021 |
+
converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2(state_dict, 77)
|
| 1022 |
+
cfg = CLIPTextConfig(
|
| 1023 |
+
vocab_size=49408,
|
| 1024 |
+
hidden_size=1024,
|
| 1025 |
+
intermediate_size=4096,
|
| 1026 |
+
num_hidden_layers=23,
|
| 1027 |
+
num_attention_heads=16,
|
| 1028 |
+
max_position_embeddings=77,
|
| 1029 |
+
hidden_act="gelu",
|
| 1030 |
+
layer_norm_eps=1e-05,
|
| 1031 |
+
dropout=0.0,
|
| 1032 |
+
attention_dropout=0.0,
|
| 1033 |
+
initializer_range=0.02,
|
| 1034 |
+
initializer_factor=1.0,
|
| 1035 |
+
pad_token_id=1,
|
| 1036 |
+
bos_token_id=0,
|
| 1037 |
+
eos_token_id=2,
|
| 1038 |
+
model_type="clip_text_model",
|
| 1039 |
+
projection_dim=512,
|
| 1040 |
+
torch_dtype="float32",
|
| 1041 |
+
transformers_version="4.25.0.dev0",
|
| 1042 |
+
)
|
| 1043 |
+
text_model = CLIPTextModel._from_config(cfg)
|
| 1044 |
+
info = text_model.load_state_dict(converted_text_encoder_checkpoint)
|
| 1045 |
+
else:
|
| 1046 |
+
converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1(state_dict)
|
| 1047 |
+
|
| 1048 |
+
# logging.set_verbosity_error() # don't show annoying warning
|
| 1049 |
+
# text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
|
| 1050 |
+
# logging.set_verbosity_warning()
|
| 1051 |
+
# logger.info(f"config: {text_model.config}")
|
| 1052 |
+
cfg = CLIPTextConfig(
|
| 1053 |
+
vocab_size=49408,
|
| 1054 |
+
hidden_size=768,
|
| 1055 |
+
intermediate_size=3072,
|
| 1056 |
+
num_hidden_layers=12,
|
| 1057 |
+
num_attention_heads=12,
|
| 1058 |
+
max_position_embeddings=77,
|
| 1059 |
+
hidden_act="quick_gelu",
|
| 1060 |
+
layer_norm_eps=1e-05,
|
| 1061 |
+
dropout=0.0,
|
| 1062 |
+
attention_dropout=0.0,
|
| 1063 |
+
initializer_range=0.02,
|
| 1064 |
+
initializer_factor=1.0,
|
| 1065 |
+
pad_token_id=1,
|
| 1066 |
+
bos_token_id=0,
|
| 1067 |
+
eos_token_id=2,
|
| 1068 |
+
model_type="clip_text_model",
|
| 1069 |
+
projection_dim=768,
|
| 1070 |
+
torch_dtype="float32",
|
| 1071 |
+
)
|
| 1072 |
+
text_model = CLIPTextModel._from_config(cfg)
|
| 1073 |
+
info = text_model.load_state_dict(converted_text_encoder_checkpoint)
|
| 1074 |
+
logger.info(f"loading text encoder: {info}")
|
| 1075 |
+
|
| 1076 |
+
return text_model, vae, unet
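

# Usage sketch (illustrative; the file name is hypothetical): load an SD v1.x
# checkpoint as Diffusers-style modules. For SD 2.x, pass v2=True (and
# unet_use_linear_projection_in_v2=False to reproduce older conversion behavior).
def _example_load_models():  # hypothetical helper, not part of the original file
    text_encoder, vae, unet = load_models_from_stable_diffusion_checkpoint(
        False, "model.safetensors", device="cpu"
    )
    return text_encoder, vae, unet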


def get_model_version_str_for_sd1_sd2(v2, v_parameterization):
    # only for reference
    version_str = "sd"
    if v2:
        version_str += "_v2"
    else:
        version_str += "_v1"
    if v_parameterization:
        version_str += "_v"
    return version_str
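

# For example: get_model_version_str_for_sd1_sd2(False, False) -> "sd_v1",
# get_model_version_str_for_sd1_sd2(True, False) -> "sd_v2", and
# get_model_version_str_for_sd1_sd2(True, True) -> "sd_v2_v".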


def convert_text_encoder_state_dict_to_sd_v2(checkpoint, make_dummy_weights=False):
    def convert_key(key):
        # remove position_ids
        if ".position_ids" in key:
            return None

        # common
        key = key.replace("text_model.encoder.", "transformer.")
        key = key.replace("text_model.", "")
        if "layers" in key:
            # resblocks conversion
            key = key.replace(".layers.", ".resblocks.")
            if ".layer_norm" in key:
                key = key.replace(".layer_norm", ".ln_")
            elif ".mlp." in key:
                key = key.replace(".fc1.", ".c_fc.")
                key = key.replace(".fc2.", ".c_proj.")
            elif ".self_attn.out_proj" in key:
                key = key.replace(".self_attn.out_proj.", ".attn.out_proj.")
            elif ".self_attn." in key:
                key = None  # special case, handled separately below
            else:
                raise ValueError(f"unexpected key in Diffusers model: {key}")
        elif ".position_embedding" in key:
            key = key.replace("embeddings.position_embedding.weight", "positional_embedding")
        elif ".token_embedding" in key:
            key = key.replace("embeddings.token_embedding.weight", "token_embedding.weight")
        elif "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ln_final")
        return key

    keys = list(checkpoint.keys())
    new_sd = {}
    for key in keys:
        new_key = convert_key(key)
        if new_key is None:
            continue
        new_sd[new_key] = checkpoint[key]

    # convert the attention weights
    for key in keys:
        if "layers" in key and "q_proj" in key:
            # concatenate the three projections
            key_q = key
            key_k = key.replace("q_proj", "k_proj")
            key_v = key.replace("q_proj", "v_proj")

            value_q = checkpoint[key_q]
            value_k = checkpoint[key_k]
            value_v = checkpoint[key_v]
            value = torch.cat([value_q, value_k, value_v])

            new_key = key.replace("text_model.encoder.layers.", "transformer.resblocks.")
            new_key = new_key.replace(".self_attn.q_proj.", ".attn.in_proj_")
            new_sd[new_key] = value

    # whether to fabricate the last layer and other missing weights
    if make_dummy_weights:
        logger.info("make dummy weights for resblock.23, text_projection and logit scale.")
        keys = list(new_sd.keys())
        for key in keys:
            if key.startswith("transformer.resblocks.22."):
                new_sd[key.replace(".22.", ".23.")] = new_sd[key].clone()  # saving with safetensors fails unless the tensor is copied

        # create the weights that are not included in the Diffusers model
        new_sd["text_projection"] = torch.ones((1024, 1024), dtype=new_sd[keys[0]].dtype, device=new_sd[keys[0]].device)
        new_sd["logit_scale"] = torch.tensor(1)

    return new_sd
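

# Illustrative sketch (not part of the original file): the conversion above merges
# the three Diffusers projections q_proj/k_proj/v_proj into OpenCLIP's single
# in_proj weight by concatenating along dim 0, so three [hidden, hidden] matrices
# become one [3 * hidden, hidden] matrix. A toy-size demonstration:
def _example_qkv_concat():  # hypothetical helper, for illustration only
    hidden = 4
    q = torch.zeros(hidden, hidden)
    k = torch.ones(hidden, hidden)
    v = torch.full((hidden, hidden), 2.0)
    in_proj = torch.cat([q, k, v])  # rows 0-3 = q, 4-7 = k, 8-11 = v
    return in_proj.shape  # torch.Size([12, 4])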


def save_stable_diffusion_checkpoint(
    v2, output_file, text_encoder, unet, ckpt_path, epochs, steps, metadata, save_dtype=None, vae=None
):
    if ckpt_path is not None:
        # reuse the source checkpoint's epoch/step; also reload it, VAE included, for cases where the VAE is not in memory
        checkpoint, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path)
        if checkpoint is None:  # safetensors, or a ckpt that is a bare state_dict
            checkpoint = {}
            strict = False
        else:
            strict = True
        if "state_dict" in state_dict:
            del state_dict["state_dict"]
    else:
        # build a new checkpoint from scratch
        assert vae is not None, "VAE is required to save a checkpoint without a given checkpoint"
        checkpoint = {}
        state_dict = {}
        strict = False

    def update_sd(prefix, sd):
        for k, v in sd.items():
            key = prefix + k
            assert not strict or key in state_dict, f"Illegal key in save SD: {key}"
            if save_dtype is not None:
                v = v.detach().clone().to("cpu").to(save_dtype)
            state_dict[key] = v

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict_to_sd(v2, unet.state_dict())
    update_sd("model.diffusion_model.", unet_state_dict)

    # Convert the text encoder model
    if v2:
        make_dummy = ckpt_path is None  # with no source checkpoint, insert dummy weights (e.g. clone the last layer from the previous one)
        text_enc_dict = convert_text_encoder_state_dict_to_sd_v2(text_encoder.state_dict(), make_dummy)
        update_sd("cond_stage_model.model.", text_enc_dict)
    else:
        text_enc_dict = text_encoder.state_dict()
        update_sd("cond_stage_model.transformer.", text_enc_dict)

    # Convert the VAE
    if vae is not None:
        vae_dict = convert_vae_state_dict(vae.state_dict())
        update_sd("first_stage_model.", vae_dict)

    # Put together the new checkpoint
    key_count = len(state_dict.keys())
    new_ckpt = {"state_dict": state_dict}

    # epoch and global_step are sometimes not int
    try:
        if "epoch" in checkpoint:
            epochs += checkpoint["epoch"]
        if "global_step" in checkpoint:
            steps += checkpoint["global_step"]
    except Exception:
        pass

    new_ckpt["epoch"] = epochs
    new_ckpt["global_step"] = steps

    if is_safetensors(output_file):
        # TODO should non-Tensor values be dropped from the dict?
        save_file(state_dict, output_file, metadata)
    else:
        torch.save(new_ckpt, output_file)

    return key_count
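

# Usage sketch (illustrative; the path and counts are hypothetical): save trained
# modules back into a single SD-format file. Passing a source ckpt_path instead of
# None lets the saved file inherit that checkpoint's extra entries and accumulate
# its epoch/global_step counts.
def _example_save_checkpoint(text_encoder, unet, vae):  # hypothetical helper
    return save_stable_diffusion_checkpoint(
        False, "out.safetensors", text_encoder, unet,
        ckpt_path=None, epochs=1, steps=1000, metadata={}, save_dtype=torch.float16, vae=vae,
    )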


def save_diffusers_checkpoint(v2, output_dir, text_encoder, unet, pretrained_model_name_or_path, vae=None, use_safetensors=False):
    if pretrained_model_name_or_path is None:
        # load default settings for v1/v2
        if v2:
            pretrained_model_name_or_path = DIFFUSERS_REF_MODEL_ID_V2
        else:
            pretrained_model_name_or_path = DIFFUSERS_REF_MODEL_ID_V1

    scheduler = DDIMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder="scheduler")
    tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="tokenizer")
    if vae is None:
        vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae")

    # original U-Net cannot be saved, so we need to convert it to the Diffusers version
    # TODO this consumes a lot of memory
    diffusers_unet = diffusers.UNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder="unet")
    diffusers_unet.load_state_dict(unet.state_dict())

    pipeline = StableDiffusionPipeline(
        unet=diffusers_unet,
        text_encoder=text_encoder,
        vae=vae,
        scheduler=scheduler,
        tokenizer=tokenizer,
        safety_checker=None,
        feature_extractor=None,
        requires_safety_checker=None,
    )
    pipeline.save_pretrained(output_dir, safe_serialization=use_safetensors)
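

# Usage sketch (illustrative; "out_dir" is hypothetical): export the same modules as
# a Diffusers directory instead; with pretrained_model_name_or_path=None the
# scheduler, tokenizer, and UNet config come from the DIFFUSERS_REF_MODEL_ID_V1/V2
# defaults defined earlier in this module.
def _example_save_diffusers(text_encoder, unet, vae):  # hypothetical helper
    save_diffusers_checkpoint(
        False, "out_dir", text_encoder, unet,
        pretrained_model_name_or_path=None, vae=vae, use_safetensors=True,
    )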


VAE_PREFIX = "first_stage_model."


def load_vae(vae_id, dtype):
    logger.info(f"load VAE: {vae_id}")
    if os.path.isdir(vae_id) or not os.path.isfile(vae_id):
        # Diffusers local/remote
        try:
            vae = AutoencoderKL.from_pretrained(vae_id, subfolder=None, torch_dtype=dtype)
        except EnvironmentError as e:
            logger.error(f"exception occurred while loading the VAE: {e}")
            logger.error("retry with subfolder='vae'")
            vae = AutoencoderKL.from_pretrained(vae_id, subfolder="vae", torch_dtype=dtype)
        return vae

    # local file
    vae_config = create_vae_diffusers_config()

    if vae_id.endswith(".bin"):
        # SD 1.5 VAE on Hugging Face
        converted_vae_checkpoint = torch.load(vae_id, map_location="cpu")
    else:
        # StableDiffusion
        vae_model = load_file(vae_id, "cpu") if is_safetensors(vae_id) else torch.load(vae_id, map_location="cpu")
        vae_sd = vae_model["state_dict"] if "state_dict" in vae_model else vae_model

        # VAE only, or a full model
        full_model = False
        for vae_key in vae_sd:
            if vae_key.startswith(VAE_PREFIX):
                full_model = True
                break
        if not full_model:
            sd = {}
            for key, value in vae_sd.items():
                sd[VAE_PREFIX + key] = value
            vae_sd = sd
            del sd

        # Convert the VAE model.
        converted_vae_checkpoint = convert_ldm_vae_checkpoint(vae_sd, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    return vae
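

# Usage sketch (illustrative; the repo id is only an example): vae_id may be a
# Diffusers repo/directory, a standalone VAE file (.safetensors/.ckpt/.bin), or a
# full SD checkpoint, from which only the "first_stage_model." weights are used.
def _example_load_vae():  # hypothetical helper, not part of the original file
    return load_vae("stabilityai/sd-vae-ft-mse", torch.float16)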


# endregion


def make_bucket_resolutions(max_reso, min_size=256, max_size=1024, divisible=64):
    max_width, max_height = max_reso
    max_area = max_width * max_height

    resos = set()

    width = int(math.sqrt(max_area) // divisible) * divisible
    resos.add((width, width))

    width = min_size
    while width <= max_size:
        height = min(max_size, int((max_area // width) // divisible) * divisible)
        if height >= min_size:
            resos.add((width, height))
            resos.add((height, width))

        # # make additional resos
        # if width >= height and width - divisible >= min_size:
        #     resos.add((width - divisible, height))
        #     resos.add((height, width - divisible))
        # if height >= width and height - divisible >= min_size:
        #     resos.add((width, height - divisible))
        #     resos.add((height - divisible, width))

        width += divisible

    resos = list(resos)
    resos.sort()
    return resos
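

# Worked example (values follow from the code above): for max_reso=(512, 768) and
# the defaults, max_area = 393216, so the square bucket is (576, 576), and the loop
# produces pairs such as (256, 1024), (320, 1024), (384, 1024), (448, 832),
# (512, 768) and (576, 640), plus each transpose; every bucket has both sides
# divisible by 64 and an area no larger than 512 * 768.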


if __name__ == "__main__":
    resos = make_bucket_resolutions((512, 768))
    logger.info(f"{len(resos)}")
    logger.info(f"{resos}")
    aspect_ratios = [w / h for w, h in resos]
    logger.info(f"{aspect_ratios}")

    ars = set()
    for ar in aspect_ratios:
        if ar in ars:
            logger.error(f"error! duplicate ar: {ar}")
        ars.add(ar)