diff --git "a/pipeline.py" "b/pipeline.py" deleted file mode 100644--- "a/pipeline.py" +++ /dev/null @@ -1,10731 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - pipeline.py · AlanB/lpw_stable_diffusion_update at main - - - - - - - -
- -
Hugging Face's logo - -
- -
-
- -
-
- - - -
- - - -
-

-
- - - - -
- - -AlanB - -
/
- - -
-
- - - - -

-
- - - -
- -
- - - - -
- - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- -
- - - -
-
lpw_stable_diffusion_update - / - pipeline.py -
-
- -
-
AlanB's picture - -
Added freeu functions to unet
- cb9f0d2 - -
-
- - raw -
- history - - blame - - contribute - - delete - - -
- -
-
- -
- -
- 88.1 kB
- -
-
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
import inspect
-
import re
-
from typing import Any, Callable, Dict, List, Optional, Union
-
-
-
import numpy as np
-
import PIL
-
import torch
-
from packaging import version
-
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
import random
-
import sys
-
from tqdm.auto import tqdm
-
-
-
from diffusers import DiffusionPipeline
-
from diffusers.configuration_utils import FrozenDict
-
from diffusers.image_processor import VaeImageProcessor
-
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
-
from diffusers.models import AutoencoderKL, UNet2DConditionModel
-
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
-
from diffusers.schedulers import KarrasDiffusionSchedulers
-
from diffusers.utils import (
-
PIL_INTERPOLATION,
-
deprecate,
-
is_accelerate_available,
-
is_accelerate_version,
-
logging,
-
)
-
from diffusers.utils.torch_utils import randn_tensor
-
-
-
# ------------------------------------------------------------------------------
-
-
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-
re_attention = re.compile(
-
r"""
-
\\\(|
-
\\\)|
-
\\\[|
-
\\]|
-
\\\\|
-
\\|
-
\(|
-
\[|
-
:([+-]?[.\d]+)\)|
-
\)|
-
]|
-
[^\\()\[\]:]+|
-
:
-
""",
-
re.X,
-
)
-
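# Illustrative summary of the alternatives above, in match order (comments are
# an editorial aid, not part of the original module):
#   \\( \\) \\[ \\] \\\\   -> escaped literals, emitted without the backslash
#   ( and [                -> open a weighted (round) or de-weighted (square) span
#   :([+-]?[.\d]+)\)       -> explicit weight close, e.g. "(word:1.3)" captures "1.3"
#   ) and ]                -> close the corresponding span
#   [^\\()\[\]:]+          -> a run of plain text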
-
-
-
-
def parse_prompt_attention(text):
-
"""
-
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
-
Accepted tokens are:
-
(abc) - increases attention to abc by a multiplier of 1.1
-
(abc:3.12) - increases attention to abc by a multiplier of 3.12
-
[abc] - decreases attention to abc by a factor of 1.1 (i.e. multiplies by 1/1.1)
-
\( - literal character '('
-
\[ - literal character '['
-
\) - literal character ')'
-
\] - literal character ']'
-
\\ - literal character '\'
-
anything else - just text
-
>>> parse_prompt_attention('normal text')
-
[['normal text', 1.0]]
-
>>> parse_prompt_attention('an (important) word')
-
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
-
>>> parse_prompt_attention('(unbalanced')
-
[['unbalanced', 1.1]]
-
>>> parse_prompt_attention('\(literal\]')
-
[['(literal]', 1.0]]
-
>>> parse_prompt_attention('(unnecessary)(parens)')
-
[['unnecessaryparens', 1.1]]
-
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
-
[['a ', 1.0],
-
['house', 1.5730000000000004],
-
[' ', 1.1],
-
['on', 1.0],
-
[' a ', 1.1],
-
['hill', 0.55],
-
[', sun, ', 1.1],
-
['sky', 1.4641000000000006],
-
['.', 1.1]]
-
"""
-
-
-
res = []
-
round_brackets = []
-
square_brackets = []
-
-
-
round_bracket_multiplier = 1.1
-
square_bracket_multiplier = 1 / 1.1
-
-
-
def multiply_range(start_position, multiplier):
-
for p in range(start_position, len(res)):
-
res[p][1] *= multiplier
-
-
-
for m in re_attention.finditer(text):
-
text = m.group(0)
-
weight = m.group(1)
-
-
-
if text.startswith("\\"):
-
res.append([text[1:], 1.0])
-
elif text == "(":
-
round_brackets.append(len(res))
-
elif text == "[":
-
square_brackets.append(len(res))
-
elif weight is not None and len(round_brackets) > 0:
-
multiply_range(round_brackets.pop(), float(weight))
-
elif text == ")" and len(round_brackets) > 0:
-
multiply_range(round_brackets.pop(), round_bracket_multiplier)
-
elif text == "]" and len(square_brackets) > 0:
-
multiply_range(square_brackets.pop(), square_bracket_multiplier)
-
else:
-
res.append([text, 1.0])
-
-
-
for pos in round_brackets:
-
multiply_range(pos, round_bracket_multiplier)
-
-
-
for pos in square_brackets:
-
multiply_range(pos, square_bracket_multiplier)
-
-
-
if len(res) == 0:
-
res = [["", 1.0]]
-
-
-
# merge runs of identical weights
-
i = 0
-
while i + 1 < len(res):
-
if res[i][1] == res[i + 1][1]:
-
res[i][0] += res[i + 1][0]
-
res.pop(i + 1)
-
else:
-
i += 1
-
-
-
return res
-
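# Quick sanity check (illustrative): nesting compounds multiplicatively, so each
# extra pair of parentheses multiplies the weight by another 1.1:
#   parse_prompt_attention("a ((best)) day")
#   -> [['a ', 1.0], ['best', 1.2100000000000002], [' day', 1.0]]   # 1.1 * 1.1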
-
-
-
-
def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
-
r"""
-
Tokenize a list of prompts and return the tokens with a weight for each token.
-
-
No padding, starting or ending token is included.
-
"""
-
tokens = []
-
weights = []
-
truncated = False
-
for text in prompt:
-
texts_and_weights = parse_prompt_attention(text)
-
text_token = []
-
text_weight = []
-
for word, weight in texts_and_weights:
-
# tokenize and discard the starting and the ending token
-
token = pipe.tokenizer(word).input_ids[1:-1]
-
text_token += token
-
# copy the weight by length of token
-
text_weight += [weight] * len(token)
-
# stop if the text is too long (longer than truncation limit)
-
if len(text_token) > max_length:
-
truncated = True
-
break
-
# truncate
-
if len(text_token) > max_length:
-
truncated = True
-
text_token = text_token[:max_length]
-
text_weight = text_weight[:max_length]
-
tokens.append(text_token)
-
weights.append(text_weight)
-
if truncated:
-
logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
-
return tokens, weights
-
-
-
-
-
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
-
r"""
-
Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
-
"""
-
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
-
weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
-
for i in range(len(tokens)):
-
tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
-
if no_boseos_middle:
-
weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
-
else:
-
w = []
-
if len(weights[i]) == 0:
-
w = [1.0] * weights_length
-
else:
-
for j in range(max_embeddings_multiples):
-
w.append(1.0) # weight for starting token in this chunk
-
w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
-
w.append(1.0) # weight for ending token in this chunk
-
w += [1.0] * (weights_length - len(w))
-
weights[i] = w[:]
-
-
-
return tokens, weights
-
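# Illustrative layout for one prompt after padding, assuming max_length=8 and a
# 3-token prompt (CLIP uses bos=49406 and reuses eos=49407 as pad):
#   tokens[i]  -> [49406, t1, t2, t3, 49407, 49407, 49407, 49407]
#   weights[i] -> [1.0,   w1, w2, w3, 1.0,   1.0,   1.0,   1.0]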
-
-
-
-
def get_unweighted_text_embeddings(
-
pipe: DiffusionPipeline,
-
text_input: torch.Tensor,
-
chunk_length: int,
-
no_boseos_middle: Optional[bool] = True,
-
):
-
"""
-
When the token sequence is longer than the capacity of the text encoder,
-
it is split into chunks and each chunk is sent to the text encoder individually.
-
"""
-
max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
-
if max_embeddings_multiples > 1:
-
text_embeddings = []
-
for i in range(max_embeddings_multiples):
-
# extract the i-th chunk
-
text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
-
-
-
# cover the head and the tail by the starting and the ending tokens
-
text_input_chunk[:, 0] = text_input[0, 0]
-
text_input_chunk[:, -1] = text_input[0, -1]
-
text_embedding = pipe.text_encoder(text_input_chunk)[0]
-
-
-
if no_boseos_middle:
-
if i == 0:
-
# discard the ending token
-
text_embedding = text_embedding[:, :-1]
-
elif i == max_embeddings_multiples - 1:
-
# discard the starting token
-
text_embedding = text_embedding[:, 1:]
-
else:
-
# discard both starting and ending tokens
-
text_embedding = text_embedding[:, 1:-1]
-
-
-
text_embeddings.append(text_embedding)
-
text_embeddings = torch.cat(text_embeddings, dim=1)
-
else:
-
text_embeddings = pipe.text_encoder(text_input)[0]
-
return text_embeddings
-
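# Illustrative: with chunk_length=77 and a padded input of 152 tokens
# (2 * (77 - 2) + 2), max_embeddings_multiples is 2, so the encoder runs twice
# on overlapping 77-token windows whose first/last slots are overwritten with
# the sequence-level bos/eos ids before encoding.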
-
-
-
-
def get_weighted_text_embeddings(
-
pipe: DiffusionPipeline,
-
prompt: Union[str, List[str]],
-
uncond_prompt: Optional[Union[str, List[str]]] = None,
-
max_embeddings_multiples: Optional[int] = 3,
-
no_boseos_middle: Optional[bool] = False,
-
skip_parsing: Optional[bool] = False,
-
skip_weighting: Optional[bool] = False,
-
):
-
r"""
-
Prompts can be assigned with local weights using brackets. For example,
-
prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
-
and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
-
-
Also, to regularize the embedding, the weighted embedding is rescaled to preserve the original mean.
-
-
Args:
-
pipe (`DiffusionPipeline`):
-
Pipe to provide access to the tokenizer and the text encoder.
-
prompt (`str` or `List[str]`):
-
The prompt or prompts to guide the image generation.
-
uncond_prompt (`str` or `List[str]`):
-
The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
-
is provided, the embeddings of `prompt` and `uncond_prompt` are concatenated.
-
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
-
The max multiple length of prompt embeddings compared to the max output length of text encoder.
-
no_boseos_middle (`bool`, *optional*, defaults to `False`):
-
When the token length is a multiple of the text encoder's capacity, whether to keep the start and
-
end tokens in each of the middle chunks.
-
skip_parsing (`bool`, *optional*, defaults to `False`):
-
Skip the parsing of brackets.
-
skip_weighting (`bool`, *optional*, defaults to `False`):
-
Skip the weighting. When parsing is skipped, this is forced to `True`.
-
"""
-
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
-
if isinstance(prompt, str):
-
prompt = [prompt]
-
-
-
if not skip_parsing:
-
prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
-
if uncond_prompt is not None:
-
if isinstance(uncond_prompt, str):
-
uncond_prompt = [uncond_prompt]
-
uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
-
else:
-
prompt_tokens = [
-
token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
-
]
-
prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
-
if uncond_prompt is not None:
-
if isinstance(uncond_prompt, str):
-
uncond_prompt = [uncond_prompt]
-
uncond_tokens = [
-
token[1:-1]
-
for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
-
]
-
uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
-
-
-
# round up the longest length of tokens to a multiple of (model_max_length - 2)
-
max_length = max([len(token) for token in prompt_tokens])
-
if uncond_prompt is not None:
-
max_length = max(max_length, max([len(token) for token in uncond_tokens]))
-
-
-
max_embeddings_multiples = min(
-
max_embeddings_multiples,
-
(max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
-
)
-
max_embeddings_multiples = max(1, max_embeddings_multiples)
-
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
-
-
-
# pad the length of tokens and weights
-
bos = pipe.tokenizer.bos_token_id
-
eos = pipe.tokenizer.eos_token_id
-
pad = getattr(pipe.tokenizer, "pad_token_id", eos)
-
prompt_tokens, prompt_weights = pad_tokens_and_weights(
-
prompt_tokens,
-
prompt_weights,
-
max_length,
-
bos,
-
eos,
-
pad,
-
no_boseos_middle=no_boseos_middle,
-
chunk_length=pipe.tokenizer.model_max_length,
-
)
-
prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
-
if uncond_prompt is not None:
-
uncond_tokens, uncond_weights = pad_tokens_and_weights(
-
uncond_tokens,
-
uncond_weights,
-
max_length,
-
bos,
-
eos,
-
pad,
-
no_boseos_middle=no_boseos_middle,
-
chunk_length=pipe.tokenizer.model_max_length,
-
)
-
uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
-
-
-
# get the embeddings
-
text_embeddings = get_unweighted_text_embeddings(
-
pipe,
-
prompt_tokens,
-
pipe.tokenizer.model_max_length,
-
no_boseos_middle=no_boseos_middle,
-
)
-
prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
-
if uncond_prompt is not None:
-
uncond_embeddings = get_unweighted_text_embeddings(
-
pipe,
-
uncond_tokens,
-
pipe.tokenizer.model_max_length,
-
no_boseos_middle=no_boseos_middle,
-
)
-
uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
-
-
-
# apply the weights to the embeddings and rescale to preserve the original mean
-
# TODO: should we normalize per chunk or over the whole sequence (current implementation)?
-
if (not skip_parsing) and (not skip_weighting):
-
previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
-
text_embeddings *= prompt_weights.unsqueeze(-1)
-
current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
-
text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
-
if uncond_prompt is not None:
-
previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
-
uncond_embeddings *= uncond_weights.unsqueeze(-1)
-
current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
-
uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
-
-
-
if uncond_prompt is not None:
-
return text_embeddings, uncond_embeddings
-
return text_embeddings, None
-
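# Minimal usage sketch (assumes an already-loaded pipeline named `pipe`; the
# names and prompt text are illustrative, not part of this module):
#
#   cond, uncond = get_weighted_text_embeddings(
#       pipe,
#       prompt="a (very beautiful:1.3) landscape, (detailed)",
#       uncond_prompt="blurry, low quality",
#       max_embeddings_multiples=3,
#   )
#   # cond.shape -> (1, 75 * k + 2, hidden_dim) for some k <= 3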
-
-
-
-
def preprocess_image(image, batch_size):
-
w, h = image.size
-
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
-
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
-
image = np.array(image).astype(np.float32) / 255.0
-
image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
-
image = torch.from_numpy(image)
-
return 2.0 * image - 1.0
-
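# Illustrative: a 513x769 PIL image is snapped down to 512x768, then returned as
# a (batch_size, 3, 768, 512) float tensor rescaled from [0, 255] to [-1, 1].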
-
-
-
-
def preprocess_mask(mask, batch_size, scale_factor=8):
-
if not isinstance(mask, torch.FloatTensor):
-
mask = mask.convert("L")
-
w, h = mask.size
-
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
-
mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
-
mask = np.array(mask).astype(np.float32) / 255.0
-
mask = np.tile(mask, (4, 1, 1))
-
mask = np.vstack([mask[None]] * batch_size)
-
mask = 1 - mask # repaint white, keep black
-
mask = torch.from_numpy(mask)
-
return mask
-
-
-
else:
-
valid_mask_channel_sizes = [1, 3]
-
# if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
-
if mask.shape[3] in valid_mask_channel_sizes:
-
mask = mask.permute(0, 3, 1, 2)
-
elif mask.shape[1] not in valid_mask_channel_sizes:
-
raise ValueError(
-
f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
-
f" but received mask of shape {tuple(mask.shape)}"
-
)
-
# (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
-
mask = mask.mean(dim=1, keepdim=True)
-
h, w = mask.shape[-2:]
-
h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
-
mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
-
return mask
-
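# Convention reminder (illustrative): PIL masks are inverted via `1 - mask`
# above, so white input pixels become 0 (repainted) and black become 1 (kept).
# Tensor masks skip the inversion and are used as-is, where 1 means "keep".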
-
-
-
-
class StableDiffusionLongPromptWeightingPipeline(
-
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
-
):
-
r"""
-
Pipeline for text-to-image generation using Stable Diffusion, without a token-length limit and with support
-
for parsing per-token weighting in the prompt.
-
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-
Args:
-
vae ([`AutoencoderKL`]):
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-
text_encoder ([`CLIPTextModel`]):
-
Frozen text-encoder. Stable Diffusion uses the text portion of
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-
tokenizer (`CLIPTokenizer`):
-
Tokenizer of class
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-
scheduler ([`SchedulerMixin`]):
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-
safety_checker ([`StableDiffusionSafetyChecker`]):
-
Classification module that estimates whether generated images could be considered offensive or harmful.
-
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
-
feature_extractor ([`CLIPImageProcessor`]):
-
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-
"""
-
-
-
_optional_components = ["safety_checker", "feature_extractor"]
-
-
-
def __init__(
-
self,
-
vae: AutoencoderKL,
-
text_encoder: CLIPTextModel,
-
tokenizer: CLIPTokenizer,
-
unet: UNet2DConditionModel,
-
scheduler: KarrasDiffusionSchedulers,
-
safety_checker: StableDiffusionSafetyChecker,
-
feature_extractor: CLIPImageProcessor,
-
requires_safety_checker: bool = True,
-
):
-
super().__init__()
-
-
-
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-
deprecation_message = (
-
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
-
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-
" file"
-
)
-
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-
new_config = dict(scheduler.config)
-
new_config["steps_offset"] = 1
-
scheduler._internal_dict = FrozenDict(new_config)
-
-
-
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
-
deprecation_message = (
-
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
-
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
-
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
-
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
-
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
-
)
-
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
-
new_config = dict(scheduler.config)
-
new_config["clip_sample"] = False
-
scheduler._internal_dict = FrozenDict(new_config)
-
-
-
if safety_checker is None and requires_safety_checker:
-
logger.warning(
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-
)
-
-
-
if safety_checker is not None and feature_extractor is None:
-
raise ValueError(
-
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
-
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
-
)
-
-
-
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
-
version.parse(unet.config._diffusers_version).base_version
-
) < version.parse("0.9.0.dev0")
-
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-
deprecation_message = (
-
"The configuration file of the unet has set the default `sample_size` to smaller than"
-
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
-
" in the config might lead to incorrect results in future versions. If you have downloaded this"
-
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-
" the `unet/config.json` file"
-
)
-
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-
new_config = dict(unet.config)
-
new_config["sample_size"] = 64
-
unet._internal_dict = FrozenDict(new_config)
-
self.register_modules(
-
vae=vae,
-
text_encoder=text_encoder,
-
tokenizer=tokenizer,
-
unet=unet,
-
scheduler=scheduler,
-
safety_checker=safety_checker,
-
feature_extractor=feature_extractor,
-
)
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-
-
-
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-
self.register_to_config(
-
requires_safety_checker=requires_safety_checker,
-
)
-
-
-
def enable_vae_slicing(self):
-
r"""
-
Enable sliced VAE decoding.
-
-
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
-
steps. This is useful to save some memory and allow larger batch sizes.
-
"""
-
self.vae.enable_slicing()
-
-
-
def disable_vae_slicing(self):
-
r"""
-
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
-
computing decoding in one step.
-
"""
-
self.vae.disable_slicing()
-
-
-
def enable_vae_tiling(self):
-
r"""
-
Enable tiled VAE decoding.
-
-
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
-
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
-
"""
-
self.vae.enable_tiling()
-
-
-
def disable_vae_tiling(self):
-
r"""
-
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
-
computing decoding in one step.
-
"""
-
self.vae.disable_tiling()
-
-
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
-
def enable_sequential_cpu_offload(self, gpu_id=0):
-
r"""
-
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
-
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-
`torch.device('meta')` and loaded to the GPU only when their specific submodule has its `forward` method called.
-
Note that offloading happens on a submodule basis. Memory savings are higher than with
-
`enable_model_cpu_offload`, but performance is lower.
-
"""
-
if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
-
from accelerate import cpu_offload
-
else:
-
raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
-
-
-
device = torch.device(f"cuda:{gpu_id}")
-
-
-
if self.device.type != "cpu":
-
self.to("cpu", silence_dtype_warnings=True)
-
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
-
-
-
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-
cpu_offload(cpu_offloaded_model, device)
-
-
-
if self.safety_checker is not None:
-
cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
-
-
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
-
def enable_model_cpu_offload(self, gpu_id=0):
-
r"""
-
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
-
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
-
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
-
"""
-
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-
from accelerate import cpu_offload_with_hook
-
else:
-
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-
-
device = torch.device(f"cuda:{gpu_id}")
-
-
-
if self.device.type != "cpu":
-
self.to("cpu", silence_dtype_warnings=True)
-
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
-
-
-
hook = None
-
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
-
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-
-
if self.safety_checker is not None:
-
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
-
-
# We'll offload the last model manually.
-
self.final_offload_hook = hook
-
-
-
@property
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
-
def _execution_device(self):
-
r"""
-
Returns the device on which the pipeline's models will be executed. After calling
-
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
-
hooks.
-
"""
-
if not hasattr(self.unet, "_hf_hook"):
-
return self.device
-
for module in self.unet.modules():
-
if (
-
hasattr(module, "_hf_hook")
-
and hasattr(module._hf_hook, "execution_device")
-
and module._hf_hook.execution_device is not None
-
):
-
return torch.device(module._hf_hook.execution_device)
-
return self.device
-
-
-
def _encode_prompt(
-
self,
-
prompt,
-
device,
-
num_images_per_prompt,
-
do_classifier_free_guidance,
-
negative_prompt=None,
-
max_embeddings_multiples=3,
-
prompt_embeds: Optional[torch.FloatTensor] = None,
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-
):
-
r"""
-
Encodes the prompt into text encoder hidden states.
-
-
Args:
-
prompt (`str` or `List[str]`):
-
prompt to be encoded
-
device (`torch.device`):
-
torch device
-
num_images_per_prompt (`int`):
-
number of images that should be generated per prompt
-
do_classifier_free_guidance (`bool`):
-
whether to use classifier free guidance or not
-
negative_prompt (`str` or `List[str]`):
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-
if `guidance_scale` is less than `1`).
-
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
-
The max multiple length of prompt embeddings compared to the max output length of text encoder.
-
"""
-
if prompt is not None and isinstance(prompt, str):
-
batch_size = 1
-
elif prompt is not None and isinstance(prompt, list):
-
batch_size = len(prompt)
-
else:
-
batch_size = prompt_embeds.shape[0]
-
-
-
if negative_prompt_embeds is None:
-
if negative_prompt is None:
-
negative_prompt = [""] * batch_size
-
elif isinstance(negative_prompt, str):
-
negative_prompt = [negative_prompt] * batch_size
-
if batch_size != len(negative_prompt):
-
raise ValueError(
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-
" the batch size of `prompt`."
-
)
-
if prompt_embeds is None or negative_prompt_embeds is None:
-
if isinstance(self, TextualInversionLoaderMixin):
-
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
if do_classifier_free_guidance and negative_prompt_embeds is None:
-
negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
-
-
-
prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
-
pipe=self,
-
prompt=prompt,
-
uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
-
max_embeddings_multiples=max_embeddings_multiples,
-
)
-
if prompt_embeds is None:
-
prompt_embeds = prompt_embeds1
-
if negative_prompt_embeds is None:
-
negative_prompt_embeds = negative_prompt_embeds1
-
-
-
bs_embed, seq_len, _ = prompt_embeds.shape
-
# duplicate text embeddings for each generation per prompt, using mps friendly method
-
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-
-
if do_classifier_free_guidance:
-
bs_embed, seq_len, _ = negative_prompt_embeds.shape
-
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
-
-
return prompt_embeds
-
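# Shape note (illustrative): with classifier-free guidance the returned
# embeddings are torch.cat([negative, positive]), i.e. of shape
# (2 * batch_size * num_images_per_prompt, seq_len, hidden_dim).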
-
-
def check_inputs(
-
self,
-
prompt,
-
height,
-
width,
-
strength,
-
callback_steps,
-
negative_prompt=None,
-
prompt_embeds=None,
-
negative_prompt_embeds=None,
-
):
-
if height % 8 != 0 or width % 8 != 0:
-
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-
-
if strength < 0 or strength > 1:
-
raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
-
-
-
if (callback_steps is None) or (
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-
):
-
raise ValueError(
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-
f" {type(callback_steps)}."
-
)
-
-
-
if prompt is not None and prompt_embeds is not None:
-
raise ValueError(
-
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
-
" only forward one of the two."
-
)
-
elif prompt is None and prompt_embeds is None:
-
raise ValueError(
-
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
-
)
-
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-
-
if negative_prompt is not None and negative_prompt_embeds is not None:
-
raise ValueError(
-
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
-
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-
)
-
-
-
if prompt_embeds is not None and negative_prompt_embeds is not None:
-
if prompt_embeds.shape != negative_prompt_embeds.shape:
-
raise ValueError(
-
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-
f" {negative_prompt_embeds.shape}."
-
)
-
-
-
def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
-
if is_text2img:
-
return self.scheduler.timesteps.to(device), num_inference_steps
-
else:
-
# get the original timestep using init_timestep
-
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
-
-
t_start = max(num_inference_steps - init_timestep, 0)
-
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
-
-
-
return timesteps, num_inference_steps - t_start
-
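# Worked example (illustrative): img2img with num_inference_steps=50,
# strength=0.8 and a first-order scheduler gives
#   init_timestep = min(int(50 * 0.8), 50) = 40
#   t_start       = max(50 - 40, 0)        = 10
# so denoising skips the first 10 timesteps and runs the remaining 40.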
-
-
def run_safety_checker(self, image, device, dtype):
-
if self.safety_checker is not None:
-
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
-
image, has_nsfw_concept = self.safety_checker(
-
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
-
)
-
else:
-
has_nsfw_concept = None
-
return image, has_nsfw_concept
-
-
-
def decode_latents(self, latents):
-
latents = 1 / self.vae.config.scaling_factor * latents
-
image = self.vae.decode(latents, return_dict=False)[0]
-
image = (image / 2 + 0.5).clamp(0, 1)
-
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
return image
-
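# Note (illustrative): for SD v1.x checkpoints `vae.config.scaling_factor`
# is 0.18215, so the division above maps latents back to the VAE's scale.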
-
-
def prepare_extra_step_kwargs(self, generator, eta):
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-
# and should be between [0, 1]
-
-
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-
extra_step_kwargs = {}
-
if accepts_eta:
-
extra_step_kwargs["eta"] = eta
-
-
-
# check if the scheduler accepts generator
-
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-
if accepts_generator:
-
extra_step_kwargs["generator"] = generator
-
return extra_step_kwargs
-
-
-
def prepare_latents(
-
self,
-
image,
-
timestep,
-
num_images_per_prompt,
-
batch_size,
-
num_channels_latents,
-
height,
-
width,
-
dtype,
-
device,
-
generator,
-
latents=None,
-
):
-
if image is None:
-
batch_size = batch_size * num_images_per_prompt
-
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
-
if isinstance(generator, list) and len(generator) != batch_size:
-
raise ValueError(
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-
)
-
-
-
if latents is None:
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
else:
-
latents = latents.to(device)
-
-
-
# scale the initial noise by the standard deviation required by the scheduler
-
latents = latents * self.scheduler.init_noise_sigma
-
return latents, None, None
-
else:
-
image = image.to(device=self.device, dtype=dtype)
-
init_latent_dist = self.vae.encode(image).latent_dist
-
init_latents = init_latent_dist.sample(generator=generator)
-
init_latents = self.vae.config.scaling_factor * init_latents
-
-
-
# Expand init_latents for batch_size and num_images_per_prompt
-
init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
-
init_latents_orig = init_latents
-
-
-
# add noise to latents using the timesteps
-
noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
-
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
-
latents = init_latents
-
return latents, init_latents_orig, noise
-
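# Shape note (illustrative): for text-to-image at 512x512 with 4 latent
# channels and vae_scale_factor=8, `shape` above is
# (batch_size * num_images_per_prompt, 4, 64, 64).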
-
-
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
-
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
-
-
The suffixes after the scaling factors represent the stages where they are being applied.
-
-
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
-
that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
-
-
Args:
-
s1 (`float`):
-
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
-
mitigate "oversmoothing effect" in the enhanced denoising process.
-
s2 (`float`):
-
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
-
mitigate "oversmoothing effect" in the enhanced denoising process.
-
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
-
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
-
"""
-
if not hasattr(self, "unet"):
-
raise ValueError("The pipeline must have `unet` for using FreeU.")
-
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
-
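# Illustrative usage with values the FreeU authors report for SD1.5 (treat
# these as a starting point, not as part of this module's API):
#   pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)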
-
-
def disable_freeu(self):
-
"""Disables the FreeU mechanism if enabled."""
-
self.unet.disable_freeu()
-
-
-
@torch.no_grad()
-
def __call__(
-
self,
-
prompt: Union[str, List[str]],
-
negative_prompt: Optional[Union[str, List[str]]] = None,
-
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-
mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-
height: int = 512,
-
width: int = 512,
-
num_inference_steps: int = 50,
-
guidance_scale: float = 7.5,
-
strength: float = 0.8,
-
num_images_per_prompt: Optional[int] = 1,
-
add_predicted_noise: Optional[bool] = False,
-
eta: float = 0.0,
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-
latents: Optional[torch.FloatTensor] = None,
-
prompt_embeds: Optional[torch.FloatTensor] = None,
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-
max_embeddings_multiples: Optional[int] = 3,
-
output_type: Optional[str] = "pil",
-
return_dict: bool = True,
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-
is_cancelled_callback: Optional[Callable[[], bool]] = None,
-
callback_steps: int = 1,
-
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-
):
-
r"""
-
Function invoked when calling the pipeline for generation.
-
-
Args:
-
prompt (`str` or `List[str]`):
-
The prompt or prompts to guide the image generation.
-
negative_prompt (`str` or `List[str]`, *optional*):
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-
if `guidance_scale` is less than `1`).
-
image (`torch.FloatTensor` or `PIL.Image.Image`):
-
`Image`, or tensor representing an image batch, that will be used as the starting point for the
-
process.
-
mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
-
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
-
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
-
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
-
contain one channel (a 3-channel mask is averaged down to one), so the expected shape is `(B, H, W, 1)` or `(B, 1, H, W)`.
-
height (`int`, *optional*, defaults to 512):
-
The height in pixels of the generated image.
-
width (`int`, *optional*, defaults to 512):
-
The width in pixels of the generated image.
-
num_inference_steps (`int`, *optional*, defaults to 50):
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-
expense of slower inference.
-
guidance_scale (`float`, *optional*, defaults to 7.5):
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
-
usually at the expense of lower image quality.
-
strength (`float`, *optional*, defaults to 0.8):
-
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
-
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
-
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
-
noise will be maximum and the denoising process will run for the full number of iterations specified in
-
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
-
The number of images to generate per prompt.
-
add_predicted_noise (`bool`, *optional*, defaults to True):
-
Use predicted noise instead of random noise when constructing noisy versions of the original image in
-
the reverse diffusion process
-
eta (`float`, *optional*, defaults to 0.0):
-
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-
[`schedulers.DDIMScheduler`], will be ignored for others.
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-
to make generation deterministic.
-
latents (`torch.FloatTensor`, *optional*):
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-
tensor will ge generated by sampling using the supplied random `generator`.
-
prompt_embeds (`torch.FloatTensor`, *optional*):
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-
provided, text embeddings will be generated from `prompt` input argument.
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
-
argument.
-
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
-
The max multiple length of prompt embeddings compared to the max output length of text encoder.
-
output_type (`str`, *optional*, defaults to `"pil"`):
-
The output format of the generate image. Choose between
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-
return_dict (`bool`, *optional*, defaults to `True`):
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-
plain tuple.
-
callback (`Callable`, *optional*):
-
A function that will be called every `callback_steps` steps during inference. The function will be
-
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-
is_cancelled_callback (`Callable`, *optional*):
-
A function that will be called every `callback_steps` steps during inference. If the function returns
-
`True`, the inference will be cancelled.
-
callback_steps (`int`, *optional*, defaults to 1):
-
The frequency at which the `callback` function will be called. If not specified, the callback will be
-
called at every step.
-
cross_attention_kwargs (`dict`, *optional*):
-
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
-
`self.processor` in
-
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
-
-
Returns:
-
`None` if cancelled by `is_cancelled_callback`,
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
-
When returning a tuple, the first element is a list with the generated images, and the second element is a
-
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
-
(nsfw) content, according to the `safety_checker`.
-
"""
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            max_embeddings_multiples,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )
        dtype = prompt_embeds.dtype

        # 4. Preprocess image and mask
        if isinstance(image, PIL.Image.Image):
            image = preprocess_image(image, batch_size)
        if image is not None:
            image = image.to(device=self.device, dtype=dtype)
        if isinstance(mask_image, PIL.Image.Image):
            mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
        if mask_image is not None:
            mask = mask_image.to(device=self.device, dtype=dtype)
            mask = torch.cat([mask] * num_images_per_prompt)
        else:
            mask = None

        # 5. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # 6. Prepare latent variables
        latents, init_latents_orig, noise = self.prepare_latents(
            image,
            latent_timestep,
            num_images_per_prompt,
            batch_size,
            self.unet.config.in_channels,
            height,
            width,
            dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
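                    # Classifier-free guidance forms a weighted extrapolation:
                    #     eps = eps_uncond + w * (eps_text - eps_uncond)
                    # e.g. with w = 7.5 this is 7.5 * eps_text - 6.5 * eps_uncond,
                    # amplifying the text-conditioned direction well beyond a
                    # plain average of the two predictions.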

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if mask is not None:
                    # masking
                    if add_predicted_noise:
                        init_latents_proper = self.scheduler.add_noise(
                            init_latents_orig, noise_pred_uncond, torch.tensor([t])
                        )
                    else:
                        init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
                    latents = (init_latents_proper * mask) + (latents * (1 - mask))
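                    # Blend the re-noised original latents with the denoised
                    # latents: where mask == 1 the original content is kept and
                    # where mask == 0 the newly generated content survives
                    # (this assumes `preprocess_mask` inverts the white-repaint
                    # mask documented above, as in the legacy inpaint pipeline).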

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if i % callback_steps == 0:
                        if callback is not None:
                            callback(i, t, latents)
                        if is_cancelled_callback is not None and is_cancelled_callback():
                            return None

        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        elif output_type == "pil":
            # 9. Post-processing
            image = self.decode_latents(latents)

            # 10. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

            # 11. Convert to PIL
            image = self.numpy_to_pil(image)
        else:
            # 9. Post-processing
            image = self.decode_latents(latents)

            # 10. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return image, has_nsfw_concept

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
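
    # Combined-call sketch (illustrative; `pipe`, `init_image` and `mask` are
    # hypothetical names): passing only `prompt` runs text-to-image, adding
    # `image` runs image-to-image, and adding `mask_image` as well runs inpainting.
    #
    #   result = pipe(
    #       prompt="a portrait, oil painting",
    #       image=init_image,            # optional: enables img2img
    #       mask_image=mask,             # optional: enables inpainting
    #       strength=0.75,
    #   )
    #   images = result.images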

    def text2img(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        Function for text-to-image generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt`
                input argument.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of the text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function returns
                `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).

        Returns:
            `None` if cancelled by `is_cancelled_callback`, otherwise a
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, or a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is
            a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
            cross_attention_kwargs=cross_attention_kwargs,
        )
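
    # Sketch (hypothetical prompt text): long weighted prompts are the point of
    # this pipeline, using the "(token:weight)" syntax.
    #
    #   images = pipe.text2img(
    #       "(a beautiful garden:1.3), highly detailed",
    #       negative_prompt="lowres, blurry",
    #       max_embeddings_multiples=3,
    #   ).images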

    def img2img(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        Function for image-to-image generation.

        Args:
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, the
                added noise will be maximum and the denoising process will run for the full number of iterations
                specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter will be modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt`
                input argument.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of the text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function returns
                `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).

        Returns:
            `None` if cancelled by `is_cancelled_callback`, otherwise a
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, or a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is
            a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
            cross_attention_kwargs=cross_attention_kwargs,
        )
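
    # Sketch (hypothetical file name): a lower `strength` stays closer to the
    # input image, a higher value allows larger changes.
    #
    #   init_image = Image.open("input.png").convert("RGB").resize((512, 512))
    #   images = pipe.img2img(init_image, prompt="watercolor style", strength=0.6).images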

    def inpaint(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image],
        mask_image: Union[torch.FloatTensor, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        add_predicted_noise: Optional[bool] = False,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        Function for inpainting.

        Args:
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it
                should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
                is 1, the denoising process will be run on the masked area for the full number of iterations
                specified in `num_inference_steps`. `image` will be used as a reference for the masked area, adding
                more noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality image
                at the expense of slower inference. This parameter will be modulated by `strength`, as explained
                above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            add_predicted_noise (`bool`, *optional*, defaults to `False`):
                Use predicted noise instead of random noise when constructing noisy versions of the original image in
                the reverse diffusion process.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt`
                input argument.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of the text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function returns
                `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).

        Returns:
            `None` if cancelled by `is_cancelled_callback`, otherwise a
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, or a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is
            a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            add_predicted_noise=add_predicted_noise,
            eta=eta,
            generator=generator,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
            cross_attention_kwargs=cross_attention_kwargs,
        )
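
    # Sketch (hypothetical file names): white mask regions are repainted, black
    # regions are preserved, per the docstring above.
    #
    #   init_image = Image.open("photo.png").convert("RGB")
    #   mask = Image.open("mask.png").convert("L")
    #   images = pipe.inpaint(init_image, mask, prompt="a red sofa", strength=0.8).images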

    # Borrowed from https://github.com/csaluski/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
    def get_text_latent_space(self, prompt, guidance_scale=7.5):
        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""], padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings
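
    # Shape note: with classifier-free guidance the returned tensor stacks the
    # unconditional and conditional embeddings along the batch dimension, i.e.
    # (2, max_length, hidden_dim) for a single prompt, so one UNet forward pass
    # can serve both guidance branches.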

    def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
        """Helper function to spherically interpolate two arrays v0 and v1,
        from https://gist.github.com/karpathy/00103b0037c5aaea32fe1da1af553355;
        this should be better than lerping for moving between noise spaces."""

        inputs_are_torch = False
        if not isinstance(v0, np.ndarray):
            inputs_are_torch = True
            input_device = v0.device
            v0 = v0.cpu().numpy()
            v1 = v1.cpu().numpy()

        dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
        if np.abs(dot) > DOT_THRESHOLD:
            # vectors are nearly parallel: fall back to plain lerp
            v2 = (1 - t) * v0 + t * v1
        else:
            theta_0 = np.arccos(dot)
            sin_theta_0 = np.sin(theta_0)
            theta_t = theta_0 * t
            sin_theta_t = np.sin(theta_t)
            s0 = np.sin(theta_0 - theta_t) / sin_theta_0
            s1 = sin_theta_t / sin_theta_0
            v2 = s0 * v0 + s1 * v1

        if inputs_are_torch:
            v2 = torch.from_numpy(v2).to(input_device)

        return v2
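
    # Worked example: for unit vectors at 90 degrees (dot == 0, theta_0 == pi/2)
    # and t == 0.5, s0 == s1 == sin(pi/4) / sin(pi/2) ≈ 0.7071, so the result is
    # the point on the arc midway between the inputs rather than the shorter
    # chord midpoint that plain lerp would give.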

    def lerp_between_prompts(
        self, first_prompt, second_prompt, seed=None, length=10, save=False, guidance_scale: Optional[float] = 7.5, **kwargs
    ):
        first_embedding = self.get_text_latent_space(first_prompt)
        second_embedding = self.get_text_latent_space(second_prompt)
        if not seed:
            seed = random.randint(0, sys.maxsize)
        generator = torch.Generator(self.device)
        generator.manual_seed(seed)
        generator_state = generator.get_state()
        lerp_embed_points = []
        for i in range(length):
            weight = i / length
            tensor_lerp = torch.lerp(first_embedding, second_embedding, weight)
            lerp_embed_points.append(tensor_lerp)
        images = []
        for idx, latent_point in enumerate(lerp_embed_points):
            generator.set_state(generator_state)
            image = self.diffuse_from_inits(latent_point, **kwargs)["image"][0]
            images.append(image)
            if save:
                image.save(f"{first_prompt}-{second_prompt}-{idx:02d}.png", "PNG")
        return {"images": images, "latent_points": lerp_embed_points, "generator_state": generator_state}

    def slerp_through_seeds(
        self,
        prompt,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        save=False,
        seed=None,
        steps=10,
        **kwargs,
    ):
        if not seed:
            seed = random.randint(0, sys.maxsize)
        generator = torch.Generator(self.device)
        generator.manual_seed(seed)
        init_start = torch.randn(
            (1, self.unet.in_channels, height // 8, width // 8), generator=generator, device=self.device
        )
        init_end = torch.randn(
            (1, self.unet.in_channels, height // 8, width // 8), generator=generator, device=self.device
        )
        generator_state = generator.get_state()
        slerp_embed_points = []
        # weights run from 0 up to (steps - 2) / steps; append init_end explicitly
        # so that we end up with len(images) == steps
        for i in range(steps - 1):
            weight = i / steps
            tensor_slerp = self.slerp(weight, init_start, init_end)
            slerp_embed_points.append(tensor_slerp)
        slerp_embed_points.append(init_end)
        images = []
        embed_point = self.get_text_latent_space(prompt)
        for idx, noise_point in enumerate(slerp_embed_points):
            generator.set_state(generator_state)
            image = self.diffuse_from_inits(embed_point, init=noise_point, **kwargs)["image"][0]
            images.append(image)
            if save:
                image.save(f"{seed}-{idx:02d}.png", "PNG")
        return {"images": images, "noise_samples": slerp_embed_points, "generator_state": generator_state}

    @torch.no_grad()
    def diffuse_from_inits(
        self,
        text_embeddings,
        init=None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        **kwargs,
    ):
        from diffusers.schedulers import LMSDiscreteScheduler

        batch_size = 1

        if generator is None:
            generator = torch.Generator("cuda")
        generator_state = generator.get_state()
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get the initial random noise
        latents = init if init is not None else torch.randn(
            (batch_size, self.unet.in_channels, height // 8, width // 8),
            generator=generator,
            device=self.device,
        )

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents * self.scheduler.sigmas[0]

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            if isinstance(self.scheduler, LMSDiscreteScheduler):
                sigma = self.scheduler.sigmas[i]
                latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, return_dict=False)[0]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            if isinstance(self.scheduler, LMSDiscreteScheduler):
                latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs, return_dict=False)[0]
            else:
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
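
        # 0.18215 is the Stable Diffusion VAE latent scaling factor; dividing by
        # it undoes the scaling applied when images were encoded into latents.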
        # scale and decode the image latents with vae
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        return {"image": image, "generator_state": generator_state}

    def variation(self, text_embeddings, generator_state, variation_magnitude=100, **kwargs):
        # random vector to move in latent space
        rand_t = (torch.rand(text_embeddings.shape, device=self.device) * 2) - 1
        rand_mag = torch.sum(torch.abs(rand_t)) / variation_magnitude
        scaled_rand_t = rand_t / rand_mag
        variation_embedding = text_embeddings + scaled_rand_t

        generator = torch.Generator("cuda")
        generator.set_state(generator_state)
        result = self.diffuse_from_inits(variation_embedding, generator=generator, **kwargs)
        result.update({"latent_point": variation_embedding})
        return result
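

# Minimal end-to-end sketch (assumptions: the checkpoint id below, a CUDA
# device, and that this file is loadable as a diffusers custom pipeline).
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="lpw_stable_diffusion",
        torch_dtype=torch.float16,
    ).to("cuda")
    image = pipe.text2img("(a sunlit forest:1.2), best quality", width=512, height=512).images[0]
    image.save("forest.png")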