diff --git a/modules/__pycache__/call_queue.cpython-39.pyc b/modules/__pycache__/call_queue.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..884d3d300874301f5ee8dfa9f6dd899c10a0993c
Binary files /dev/null and b/modules/__pycache__/call_queue.cpython-39.pyc differ
diff --git a/modules/__pycache__/cmd_args.cpython-39.pyc b/modules/__pycache__/cmd_args.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6793f48ecf0a667070e65d29521f71b1d77d3fbe
Binary files /dev/null and b/modules/__pycache__/cmd_args.cpython-39.pyc differ
diff --git a/modules/__pycache__/codeformer_model.cpython-39.pyc b/modules/__pycache__/codeformer_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0eed9a0a8a3ec1997940f6eebc8c7ad11f6b3f46
Binary files /dev/null and b/modules/__pycache__/codeformer_model.cpython-39.pyc differ
diff --git a/modules/__pycache__/config_states.cpython-39.pyc b/modules/__pycache__/config_states.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd3a643aae0ac3533b98618a2ce989d7f3665170
Binary files /dev/null and b/modules/__pycache__/config_states.cpython-39.pyc differ
diff --git a/modules/__pycache__/deepbooru.cpython-39.pyc b/modules/__pycache__/deepbooru.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..560b44e9f4bac8ba32662d6c9b9d67d461877067
Binary files /dev/null and b/modules/__pycache__/deepbooru.cpython-39.pyc differ
diff --git a/modules/__pycache__/deepbooru_model.cpython-39.pyc b/modules/__pycache__/deepbooru_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27668dd834408d2b5d992cd891f3dfaf388a8329
Binary files /dev/null and b/modules/__pycache__/deepbooru_model.cpython-39.pyc differ
diff --git a/modules/__pycache__/devices.cpython-39.pyc b/modules/__pycache__/devices.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1d3bc95562c1e140f713ceaebf350f14f64181e
Binary files /dev/null and b/modules/__pycache__/devices.cpython-39.pyc differ
diff --git a/modules/__pycache__/errors.cpython-39.pyc b/modules/__pycache__/errors.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b6e0eadd84fc535f7746d18b5f5bb979e3917c8
Binary files /dev/null and b/modules/__pycache__/errors.cpython-39.pyc differ
diff --git a/modules/__pycache__/esrgan_model.cpython-39.pyc b/modules/__pycache__/esrgan_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6cb507ff0d31e58cc65cf707c761351152aca65
Binary files /dev/null and b/modules/__pycache__/esrgan_model.cpython-39.pyc differ
diff --git a/modules/__pycache__/esrgan_model_arch.cpython-39.pyc b/modules/__pycache__/esrgan_model_arch.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e0fca7c73991c423dd0bf687a4a27c8628af15c
Binary files /dev/null and b/modules/__pycache__/esrgan_model_arch.cpython-39.pyc differ
diff --git a/modules/__pycache__/extensions.cpython-39.pyc b/modules/__pycache__/extensions.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e93931acb552cb63a0a5881c1995f897824b78a
Binary files /dev/null and b/modules/__pycache__/extensions.cpython-39.pyc differ
diff --git a/modules/__pycache__/extra_networks_hypernet.cpython-39.pyc b/modules/__pycache__/extra_networks_hypernet.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ca7a2e234c213df8deedc0852e32353a672b76e
Binary files /dev/null and b/modules/__pycache__/extra_networks_hypernet.cpython-39.pyc differ
diff --git a/modules/__pycache__/extras.cpython-39.pyc b/modules/__pycache__/extras.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e50cee2e9b0ce2813fa03d89c8d23cebf541b4fd
Binary files /dev/null and b/modules/__pycache__/extras.cpython-39.pyc differ
diff --git a/modules/__pycache__/face_restoration.cpython-39.pyc b/modules/__pycache__/face_restoration.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a6c310c1f0d9888855be9b8d3a664769878518c
Binary files /dev/null and b/modules/__pycache__/face_restoration.cpython-39.pyc differ
diff --git a/modules/__pycache__/fifo_lock.cpython-39.pyc b/modules/__pycache__/fifo_lock.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9fa18b854e6c9c152f571c3581a5fad65b34126
Binary files /dev/null and b/modules/__pycache__/fifo_lock.cpython-39.pyc differ
diff --git a/modules/__pycache__/generation_parameters_copypaste.cpython-39.pyc b/modules/__pycache__/generation_parameters_copypaste.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dcfdb7bfe774d83757baddc51220d824c4a91691
Binary files /dev/null and b/modules/__pycache__/generation_parameters_copypaste.cpython-39.pyc differ
diff --git a/modules/__pycache__/gfpgan_model.cpython-39.pyc b/modules/__pycache__/gfpgan_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9963e4a7d58adf4423144beb940333512ac208b9
Binary files /dev/null and b/modules/__pycache__/gfpgan_model.cpython-39.pyc differ
diff --git a/modules/__pycache__/gitpython_hack.cpython-39.pyc b/modules/__pycache__/gitpython_hack.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8667dcd7e3c1cc49f2f5a979b1584d0f8827c4d0
Binary files /dev/null and b/modules/__pycache__/gitpython_hack.cpython-39.pyc differ
diff --git a/modules/__pycache__/gradio_extensons.cpython-39.pyc b/modules/__pycache__/gradio_extensons.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c75cdba7c44a0b84c9314f3c6022e4246d5f4ff5
Binary files /dev/null and b/modules/__pycache__/gradio_extensons.cpython-39.pyc differ
diff --git a/modules/__pycache__/hashes.cpython-39.pyc b/modules/__pycache__/hashes.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..400560459564dd8e7254bbdb62a911deadc3155a
Binary files /dev/null and b/modules/__pycache__/hashes.cpython-39.pyc differ
diff --git a/modules/__pycache__/images.cpython-39.pyc b/modules/__pycache__/images.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de696088e3c51e33081ec5c4175ebeac084d3729
Binary files /dev/null and b/modules/__pycache__/images.cpython-39.pyc differ
diff --git a/modules/__pycache__/img2img.cpython-39.pyc b/modules/__pycache__/img2img.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd5f18102e2d78db82478b8b550ac71abe27e9cd
Binary files /dev/null and b/modules/__pycache__/img2img.cpython-39.pyc differ
diff --git a/modules/__pycache__/import_hook.cpython-39.pyc b/modules/__pycache__/import_hook.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7728725de90d8d29234ff78b6bdcae334bb2f99
Binary files /dev/null and b/modules/__pycache__/import_hook.cpython-39.pyc differ
diff --git a/modules/__pycache__/initialize.cpython-39.pyc b/modules/__pycache__/initialize.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27271282c6da908f64305cce732611452fd285ac
Binary files /dev/null and b/modules/__pycache__/initialize.cpython-39.pyc differ
diff --git a/modules/__pycache__/initialize_util.cpython-39.pyc b/modules/__pycache__/initialize_util.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74d8a59f58a95f6d807379ccacd79ad155c3cdec
Binary files /dev/null and b/modules/__pycache__/initialize_util.cpython-39.pyc differ
diff --git a/modules/__pycache__/interrogate.cpython-39.pyc b/modules/__pycache__/interrogate.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..add898f475dae26cb717f8671fc4deab8aee8787
Binary files /dev/null and b/modules/__pycache__/interrogate.cpython-39.pyc differ
diff --git a/modules/__pycache__/launch_utils.cpython-39.pyc b/modules/__pycache__/launch_utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76e746798887ccf5be5bc91a920a5e8e491394e6
Binary files /dev/null and b/modules/__pycache__/launch_utils.cpython-39.pyc differ
diff --git a/modules/__pycache__/localization.cpython-39.pyc b/modules/__pycache__/localization.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..199d3db08984765ab4ad96a02bc62722c0dcc7e4
Binary files /dev/null and b/modules/__pycache__/localization.cpython-39.pyc differ
diff --git a/modules/__pycache__/logging_config.cpython-39.pyc b/modules/__pycache__/logging_config.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9a13f57775ab28171ef8ec71a324c8f6623100b
Binary files /dev/null and b/modules/__pycache__/logging_config.cpython-39.pyc differ
diff --git a/modules/__pycache__/lowvram.cpython-39.pyc b/modules/__pycache__/lowvram.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e34db7b9c83567ed9b662d62fde649c5787a5633
Binary files /dev/null and b/modules/__pycache__/lowvram.cpython-39.pyc differ
diff --git a/modules/__pycache__/masking.cpython-39.pyc b/modules/__pycache__/masking.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e13bfb550580f1be357077c77f6659d82d5bd01e
Binary files /dev/null and b/modules/__pycache__/masking.cpython-39.pyc differ
diff --git a/modules/__pycache__/memmon.cpython-39.pyc b/modules/__pycache__/memmon.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74b8027542f81f090159d6a8a660e5fe9317b96d
Binary files /dev/null and b/modules/__pycache__/memmon.cpython-39.pyc differ
diff --git a/modules/__pycache__/modelloader.cpython-39.pyc b/modules/__pycache__/modelloader.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1e2c5d85664c9a76a00fed4ba603ded5aa0a23a
Binary files /dev/null and b/modules/__pycache__/modelloader.cpython-39.pyc differ
diff --git a/modules/__pycache__/options.cpython-39.pyc b/modules/__pycache__/options.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e350bc7846bbc78a4a70a538ebfd98c5e04a1bbd
Binary files /dev/null and b/modules/__pycache__/options.cpython-39.pyc differ
diff --git a/modules/__pycache__/patches.cpython-39.pyc b/modules/__pycache__/patches.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64e0b1c23e6a232912dec06677842d0d4c9e5d74
Binary files /dev/null and b/modules/__pycache__/patches.cpython-39.pyc differ
diff --git a/modules/__pycache__/paths.cpython-39.pyc b/modules/__pycache__/paths.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c7ac6cebca6e4398924538dd495621c2ec42ba3
Binary files /dev/null and b/modules/__pycache__/paths.cpython-39.pyc differ
diff --git a/modules/__pycache__/paths_internal.cpython-39.pyc b/modules/__pycache__/paths_internal.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20819f285171bd45c4f73a652b28b35666719e28
Binary files /dev/null and b/modules/__pycache__/paths_internal.cpython-39.pyc differ
diff --git a/modules/__pycache__/postprocessing.cpython-39.pyc b/modules/__pycache__/postprocessing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e92140995903097948f6242dcbce525f1c360a6c
Binary files /dev/null and b/modules/__pycache__/postprocessing.cpython-39.pyc differ
diff --git a/modules/__pycache__/processing.cpython-39.pyc b/modules/__pycache__/processing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac42efbf3900c49e56efbfb36dd941bd00b7654a
Binary files /dev/null and b/modules/__pycache__/processing.cpython-39.pyc differ
diff --git a/modules/__pycache__/progress.cpython-39.pyc b/modules/__pycache__/progress.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d84f343c59c3c73b4a6f779da57cc1703b3559e7
Binary files /dev/null and b/modules/__pycache__/progress.cpython-39.pyc differ
diff --git a/modules/__pycache__/prompt_parser.cpython-39.pyc b/modules/__pycache__/prompt_parser.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d9830cf6d92fe3d38955c1fde6043f6995969ac
Binary files /dev/null and b/modules/__pycache__/prompt_parser.cpython-39.pyc differ
diff --git a/modules/__pycache__/realesrgan_model.cpython-39.pyc b/modules/__pycache__/realesrgan_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1aea26da22f30d7b000245d30d36103540f8b3a4
Binary files /dev/null and b/modules/__pycache__/realesrgan_model.cpython-39.pyc differ
diff --git a/modules/__pycache__/restart.cpython-39.pyc b/modules/__pycache__/restart.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a13614a8c34b198ef20071b07fdbbdc8c120093a
Binary files /dev/null and b/modules/__pycache__/restart.cpython-39.pyc differ
diff --git a/modules/__pycache__/rng.cpython-39.pyc b/modules/__pycache__/rng.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3931c35228cbdf61f0b2bc20a18e3f0f90f924f9
Binary files /dev/null and b/modules/__pycache__/rng.cpython-39.pyc differ
diff --git a/modules/__pycache__/safe.cpython-39.pyc b/modules/__pycache__/safe.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c438230fbe8689f685fc2a0ca856040473ba120f
Binary files /dev/null and b/modules/__pycache__/safe.cpython-39.pyc differ
diff --git a/modules/__pycache__/script_callbacks.cpython-39.pyc b/modules/__pycache__/script_callbacks.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f8b8cab87994e1c36df8c4f91b432f639448868
Binary files /dev/null and b/modules/__pycache__/script_callbacks.cpython-39.pyc differ
diff --git a/modules/__pycache__/scripts.cpython-39.pyc b/modules/__pycache__/scripts.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cd00fd2d2e302a1d452fa8b886b1cadbefd6b8d
Binary files /dev/null and b/modules/__pycache__/scripts.cpython-39.pyc differ
diff --git a/modules/__pycache__/scripts_postprocessing.cpython-39.pyc b/modules/__pycache__/scripts_postprocessing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..946b8d8d435cba7da71731b5cc8fd624d859cf7a
Binary files /dev/null and b/modules/__pycache__/scripts_postprocessing.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_disable_initialization.cpython-39.pyc b/modules/__pycache__/sd_disable_initialization.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09a0fd28fe02ebdfb2122c82b8f7f4e78d372de5
Binary files /dev/null and b/modules/__pycache__/sd_disable_initialization.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack.cpython-39.pyc b/modules/__pycache__/sd_hijack.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd04285311b1b7499e07923b299a4ab20e1f3e25
Binary files /dev/null and b/modules/__pycache__/sd_hijack.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_checkpoint.cpython-39.pyc b/modules/__pycache__/sd_hijack_checkpoint.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ac359fb401eaa4b28312f316054ee1ff85a31a3
Binary files /dev/null and b/modules/__pycache__/sd_hijack_checkpoint.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_clip.cpython-39.pyc b/modules/__pycache__/sd_hijack_clip.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3593652cc7497514b3442d4a30232504ba64bf5
Binary files /dev/null and b/modules/__pycache__/sd_hijack_clip.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_open_clip.cpython-39.pyc b/modules/__pycache__/sd_hijack_open_clip.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..207f73c9733769dbc2e6c4ff459e866146fb697b
Binary files /dev/null and b/modules/__pycache__/sd_hijack_open_clip.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_optimizations.cpython-39.pyc b/modules/__pycache__/sd_hijack_optimizations.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32ca37e7df77b645e47e4e96357cd80dda65ee8d
Binary files /dev/null and b/modules/__pycache__/sd_hijack_optimizations.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_unet.cpython-39.pyc b/modules/__pycache__/sd_hijack_unet.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4888f4019111a332202556f809b0dbed1264a41
Binary files /dev/null and b/modules/__pycache__/sd_hijack_unet.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_utils.cpython-39.pyc b/modules/__pycache__/sd_hijack_utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c304244ea903212f5209df23da268d074d55b7c
Binary files /dev/null and b/modules/__pycache__/sd_hijack_utils.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_hijack_xlmr.cpython-39.pyc b/modules/__pycache__/sd_hijack_xlmr.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89063591954b951f0bd731eb12e2acadc9dc3c3b
Binary files /dev/null and b/modules/__pycache__/sd_hijack_xlmr.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_models.cpython-39.pyc b/modules/__pycache__/sd_models.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38c9e8b8af6b411cfa2722843da8d0161784ec60
Binary files /dev/null and b/modules/__pycache__/sd_models.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_models_config.cpython-39.pyc b/modules/__pycache__/sd_models_config.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25f51633200c60f60b038ac598fa4bf735496057
Binary files /dev/null and b/modules/__pycache__/sd_models_config.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_models_types.cpython-39.pyc b/modules/__pycache__/sd_models_types.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..134ba8db1e2b530e12333f42e71ee8ad105906c7
Binary files /dev/null and b/modules/__pycache__/sd_models_types.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_models_xl.cpython-39.pyc b/modules/__pycache__/sd_models_xl.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9eaef561e52c4880a96faaf8e66ddf88d5e70dbf
Binary files /dev/null and b/modules/__pycache__/sd_models_xl.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_samplers.cpython-39.pyc b/modules/__pycache__/sd_samplers.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce7e0138c31067e71e91cf50f2c04d08db474ee7
Binary files /dev/null and b/modules/__pycache__/sd_samplers.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_samplers_cfg_denoiser.cpython-39.pyc b/modules/__pycache__/sd_samplers_cfg_denoiser.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf06faf4a42c9e609e462ba1ef68b15550cd65d6
Binary files /dev/null and b/modules/__pycache__/sd_samplers_cfg_denoiser.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_samplers_common.cpython-39.pyc b/modules/__pycache__/sd_samplers_common.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb25cf06a30e839fbd1ee5a401e39726a35f6700
Binary files /dev/null and b/modules/__pycache__/sd_samplers_common.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_samplers_extra.cpython-39.pyc b/modules/__pycache__/sd_samplers_extra.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1edbb67abd24934b7b207d38e746f25aacc79615
Binary files /dev/null and b/modules/__pycache__/sd_samplers_extra.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_samplers_timesteps.cpython-39.pyc b/modules/__pycache__/sd_samplers_timesteps.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab1c4b88a8b5cf67b4b20dfbc1789299b4481073
Binary files /dev/null and b/modules/__pycache__/sd_samplers_timesteps.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_samplers_timesteps_impl.cpython-39.pyc b/modules/__pycache__/sd_samplers_timesteps_impl.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fac03b24ff330370e6435c881d142a19e3ae6ed5
Binary files /dev/null and b/modules/__pycache__/sd_samplers_timesteps_impl.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_unet.cpython-39.pyc b/modules/__pycache__/sd_unet.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14f720a87dab7a341132bd66f232e78457bd85d1
Binary files /dev/null and b/modules/__pycache__/sd_unet.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_vae.cpython-39.pyc b/modules/__pycache__/sd_vae.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65092d61b607581d128846712a5f0c910575eb83
Binary files /dev/null and b/modules/__pycache__/sd_vae.cpython-39.pyc differ
diff --git a/modules/__pycache__/sd_vae_approx.cpython-39.pyc b/modules/__pycache__/sd_vae_approx.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1941816d83d81f19dd1282fe95ba40086b6cb9c
Binary files /dev/null and b/modules/__pycache__/sd_vae_approx.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared.cpython-39.pyc b/modules/__pycache__/shared.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5c81c5f056e132880506325cb21d21bb386625b
Binary files /dev/null and b/modules/__pycache__/shared.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_cmd_options.cpython-39.pyc b/modules/__pycache__/shared_cmd_options.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3043e713e007c8c1433e46f946f3ae071f660372
Binary files /dev/null and b/modules/__pycache__/shared_cmd_options.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_gradio_themes.cpython-39.pyc b/modules/__pycache__/shared_gradio_themes.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5c7cb1e0039422e282b90f70a19254aca0cc39a
Binary files /dev/null and b/modules/__pycache__/shared_gradio_themes.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_init.cpython-39.pyc b/modules/__pycache__/shared_init.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe83957f513d61e9f7cdba466d3b38ee5df0e126
Binary files /dev/null and b/modules/__pycache__/shared_init.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_items.cpython-39.pyc b/modules/__pycache__/shared_items.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb460166ced5393a17e91da8d435267739ca852b
Binary files /dev/null and b/modules/__pycache__/shared_items.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_options.cpython-39.pyc b/modules/__pycache__/shared_options.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..818c11ee91f948914352b11a99f02e4490b4cd7a
Binary files /dev/null and b/modules/__pycache__/shared_options.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_state.cpython-39.pyc b/modules/__pycache__/shared_state.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b3557e0d958010efa26fe739d0ce6124de3c945
Binary files /dev/null and b/modules/__pycache__/shared_state.cpython-39.pyc differ
diff --git a/modules/__pycache__/shared_total_tqdm.cpython-39.pyc b/modules/__pycache__/shared_total_tqdm.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d278f27a9343601074a59e654befb47444cb7eb7
Binary files /dev/null and b/modules/__pycache__/shared_total_tqdm.cpython-39.pyc differ
diff --git a/modules/__pycache__/styles.cpython-39.pyc b/modules/__pycache__/styles.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfdaa2d3b3cf7200aa76ff5b76b1f5c98e5db65c
Binary files /dev/null and b/modules/__pycache__/styles.cpython-39.pyc differ
diff --git a/modules/__pycache__/sub_quadratic_attention.cpython-39.pyc b/modules/__pycache__/sub_quadratic_attention.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9ca9962c9a3c40862c5bd9fe50dff7af37e1ed9
Binary files /dev/null and b/modules/__pycache__/sub_quadratic_attention.cpython-39.pyc differ
diff --git a/modules/__pycache__/sysinfo.cpython-39.pyc b/modules/__pycache__/sysinfo.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c5f195d069e5be2f6fd51a449b68336b64135a3
Binary files /dev/null and b/modules/__pycache__/sysinfo.cpython-39.pyc differ
diff --git a/modules/__pycache__/txt2img.cpython-39.pyc b/modules/__pycache__/txt2img.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ebf0d2b54107cfa746a6db348c5ad77840b9c8e3
Binary files /dev/null and b/modules/__pycache__/txt2img.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui.cpython-39.pyc b/modules/__pycache__/ui.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08340954bb3e833c0cbf6727c0c4e8d22688f433
Binary files /dev/null and b/modules/__pycache__/ui.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_checkpoint_merger.cpython-39.pyc b/modules/__pycache__/ui_checkpoint_merger.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2723b0caaee9919e94dcc57c87f28e19c03008a5
Binary files /dev/null and b/modules/__pycache__/ui_checkpoint_merger.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_common.cpython-39.pyc b/modules/__pycache__/ui_common.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e74fb93c037e68f5679de57eab6e454afb9fd05
Binary files /dev/null and b/modules/__pycache__/ui_common.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_components.cpython-39.pyc b/modules/__pycache__/ui_components.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e184610875e5f99fb5ad4a0b31bf8d89097e23d0
Binary files /dev/null and b/modules/__pycache__/ui_components.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_extra_networks.cpython-39.pyc b/modules/__pycache__/ui_extra_networks.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d46ea59dfbf4656cdb226f9e6ae9eb072ff98177
Binary files /dev/null and b/modules/__pycache__/ui_extra_networks.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_extra_networks_checkpoints.cpython-39.pyc b/modules/__pycache__/ui_extra_networks_checkpoints.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e839e08c226ff428c06db36fb9d8a21479117ba8
Binary files /dev/null and b/modules/__pycache__/ui_extra_networks_checkpoints.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_extra_networks_checkpoints_user_metadata.cpython-39.pyc b/modules/__pycache__/ui_extra_networks_checkpoints_user_metadata.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0bb7a1f75ee239c27ee9e47ae0e2c62cfc5456c
Binary files /dev/null and b/modules/__pycache__/ui_extra_networks_checkpoints_user_metadata.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_extra_networks_hypernets.cpython-39.pyc b/modules/__pycache__/ui_extra_networks_hypernets.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7b99e89f4bb9a317a61ed3f5661046d930bc6a3
Binary files /dev/null and b/modules/__pycache__/ui_extra_networks_hypernets.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_extra_networks_textual_inversion.cpython-39.pyc b/modules/__pycache__/ui_extra_networks_textual_inversion.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ae99aef5cff6b607c0cc59a9ef2a4e07ae35078
Binary files /dev/null and b/modules/__pycache__/ui_extra_networks_textual_inversion.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_extra_networks_user_metadata.cpython-39.pyc b/modules/__pycache__/ui_extra_networks_user_metadata.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4c48dfbc6b7ac61c20df34fe5e50bdbcf5db7c3
Binary files /dev/null and b/modules/__pycache__/ui_extra_networks_user_metadata.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_gradio_extensions.cpython-39.pyc b/modules/__pycache__/ui_gradio_extensions.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd2bfe6d9fdb7765b3ec8972f0de21984a744003
Binary files /dev/null and b/modules/__pycache__/ui_gradio_extensions.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_loadsave.cpython-39.pyc b/modules/__pycache__/ui_loadsave.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ef809c62e75ab967e97046423f116a2f478f7c5
Binary files /dev/null and b/modules/__pycache__/ui_loadsave.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_postprocessing.cpython-39.pyc b/modules/__pycache__/ui_postprocessing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3dd5575ec18230f6e333e999a2e66b5f8592877b
Binary files /dev/null and b/modules/__pycache__/ui_postprocessing.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_prompt_styles.cpython-39.pyc b/modules/__pycache__/ui_prompt_styles.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cce7d8851f33dcce46b5e1837c487db8c913153
Binary files /dev/null and b/modules/__pycache__/ui_prompt_styles.cpython-39.pyc differ
diff --git a/modules/__pycache__/ui_tempdir.cpython-39.pyc b/modules/__pycache__/ui_tempdir.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69f8bf48bfa47ac8f95d86e65ce5ce9f82a01b5a
Binary files /dev/null and b/modules/__pycache__/ui_tempdir.cpython-39.pyc differ
diff --git a/modules/__pycache__/upscaler.cpython-39.pyc b/modules/__pycache__/upscaler.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c2bd330773a0e1695ad1f00678de6b820b3ed29
Binary files /dev/null and b/modules/__pycache__/upscaler.cpython-39.pyc differ
diff --git a/modules/__pycache__/util.cpython-39.pyc b/modules/__pycache__/util.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f106af50f1302940f83c7bc4e912852801d1f70
Binary files /dev/null and b/modules/__pycache__/util.cpython-39.pyc differ
diff --git a/modules/__pycache__/xlmr.cpython-39.pyc b/modules/__pycache__/xlmr.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..104ab20bff4568360df5707913138d68a5ca9dc8
Binary files /dev/null and b/modules/__pycache__/xlmr.cpython-39.pyc differ
diff --git a/modules/cache.py b/modules/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..d23419c4e9b0eb32a5cc9c1a75492f85eac1db6e
--- /dev/null
+++ b/modules/cache.py
@@ -0,0 +1,124 @@
+import json
+import os
+import os.path
+import threading
+import time
+
+from modules.paths import data_path, script_path
+
+cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json"))
+cache_data = None
+cache_lock = threading.Lock()
+
+dump_cache_after = None
+dump_cache_thread = None
+
+
+def dump_cache():
+ """
+    Marks the cache for writing to disk. The write happens 5 seconds after the last time anything marked the cache for writing.
+ """
+
+ global dump_cache_after
+ global dump_cache_thread
+
+ def thread_func():
+ global dump_cache_after
+ global dump_cache_thread
+
+ while dump_cache_after is not None and time.time() < dump_cache_after:
+ time.sleep(1)
+
+ with cache_lock:
+ cache_filename_tmp = cache_filename + "-"
+ with open(cache_filename_tmp, "w", encoding="utf8") as file:
+ json.dump(cache_data, file, indent=4)
+
+ os.replace(cache_filename_tmp, cache_filename)
+
+ dump_cache_after = None
+ dump_cache_thread = None
+
+ with cache_lock:
+ dump_cache_after = time.time() + 5
+ if dump_cache_thread is None:
+ dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func)
+ dump_cache_thread.start()
+
+
+def cache(subsection):
+ """
+ Retrieves or initializes a cache for a specific subsection.
+
+ Parameters:
+ subsection (str): The subsection identifier for the cache.
+
+ Returns:
+ dict: The cache data for the specified subsection.
+ """
+
+ global cache_data
+
+ if cache_data is None:
+ with cache_lock:
+ if cache_data is None:
+ if not os.path.isfile(cache_filename):
+ cache_data = {}
+ else:
+ try:
+ with open(cache_filename, "r", encoding="utf8") as file:
+ cache_data = json.load(file)
+ except Exception:
+ os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
+                        print('[ERROR] issue occurred while trying to read cache.json; the current cache was moved to tmp/cache.json and a new cache was created')
+ cache_data = {}
+
+ s = cache_data.get(subsection, {})
+ cache_data[subsection] = s
+
+ return s
+
+
+def cached_data_for_file(subsection, title, filename, func):
+ """
+ Retrieves or generates data for a specific file, using a caching mechanism.
+
+ Parameters:
+ subsection (str): The subsection of the cache to use.
+ title (str): The title of the data entry in the subsection of the cache.
+ filename (str): The path to the file to be checked for modifications.
+ func (callable): A function that generates the data if it is not available in the cache.
+
+ Returns:
+ dict or None: The cached or generated data, or None if data generation fails.
+
+ The `cached_data_for_file` function implements a caching mechanism for data stored in files.
+ It checks if the data associated with the given `title` is present in the cache and compares the
+ modification time of the file with the cached modification time. If the file has been modified,
+ the cache is considered invalid and the data is regenerated using the provided `func`.
+ Otherwise, the cached data is returned.
+
+ If the data generation fails, None is returned to indicate the failure. Otherwise, the generated
+ or cached data is returned as a dictionary.
+ """
+
+ existing_cache = cache(subsection)
+ ondisk_mtime = os.path.getmtime(filename)
+
+ entry = existing_cache.get(title)
+ if entry:
+ cached_mtime = entry.get("mtime", 0)
+ if ondisk_mtime > cached_mtime:
+ entry = None
+
+ if not entry or 'value' not in entry:
+ value = func()
+ if value is None:
+ return None
+
+ entry = {'mtime': ondisk_mtime, 'value': value}
+ existing_cache[title] = entry
+
+ dump_cache()
+
+ return entry['value']
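+
+
+if __name__ == "__main__":
+    # A minimal usage sketch, not part of the original module; assumes a
+    # working webui checkout and is run from the repo root so `modules.paths`
+    # resolves. The lambda only runs when this file's mtime is newer than the
+    # cached entry; the background writer flushes the cache ~5 seconds later.
+    print(cached_data_for_file('demo', 'self-test', __file__, lambda: {'ok': True}))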
diff --git a/modules/call_queue.py b/modules/call_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..396501918884db3afd352eb4dca4febd5682f4c4
--- /dev/null
+++ b/modules/call_queue.py
@@ -0,0 +1,118 @@
+from functools import wraps
+import html
+import time
+
+from modules import shared, progress, errors, devices, fifo_lock
+
+queue_lock = fifo_lock.FIFOLock()
+
+
+def wrap_queued_call(func):
+ def f(*args, **kwargs):
+ with queue_lock:
+ res = func(*args, **kwargs)
+
+ return res
+
+ return f
+
+
+def wrap_gradio_gpu_call(func, extra_outputs=None):
+ @wraps(func)
+ def f(*args, **kwargs):
+
+ # if the first argument is a string that says "task(...)", it is treated as a job id
+        if args and isinstance(args[0], str) and args[0].startswith("task(") and args[0].endswith(")"):
+ id_task = args[0]
+ progress.add_task_to_queue(id_task)
+ else:
+ id_task = None
+
+ with queue_lock:
+ shared.state.begin(job=id_task)
+ progress.start_task(id_task)
+
+ try:
+ res = func(*args, **kwargs)
+ progress.record_results(id_task, res)
+ finally:
+ progress.finish_task(id_task)
+
+ shared.state.end()
+
+ return res
+
+ return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True)
+
+
+def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
+ @wraps(func)
+ def f(*args, extra_outputs_array=extra_outputs, **kwargs):
+ run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
+ if run_memmon:
+ shared.mem_mon.monitor()
+ t = time.perf_counter()
+
+ try:
+ res = list(func(*args, **kwargs))
+ except Exception as e:
+ # When printing out our debug argument list,
+            # do not print out more than 128 KB of text
+ max_debug_str_len = 131072
+ message = "Error completing request"
+ arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
+ if len(arg_str) > max_debug_str_len:
+ arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
+ errors.report(f"{message}\n{arg_str}", exc_info=True)
+
+ shared.state.job = ""
+ shared.state.job_count = 0
+
+ if extra_outputs_array is None:
+ extra_outputs_array = [None, '']
+
+ error_message = f'{type(e).__name__}: {e}'
+            res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]
+
+ devices.torch_gc()
+
+ shared.state.skipped = False
+ shared.state.interrupted = False
+ shared.state.job_count = 0
+
+ if not add_stats:
+ return tuple(res)
+
+ elapsed = time.perf_counter() - t
+ elapsed_m = int(elapsed // 60)
+ elapsed_s = elapsed % 60
+ elapsed_text = f"{elapsed_s:.1f} sec."
+ if elapsed_m > 0:
+ elapsed_text = f"{elapsed_m} min. "+elapsed_text
+
+ if run_memmon:
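+            # -(v // -(1024*1024)) is ceiling division: bytes -> MiB, rounded up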
+ mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
+ active_peak = mem_stats['active_peak']
+ reserved_peak = mem_stats['reserved_peak']
+ sys_peak = mem_stats['system_peak']
+ sys_total = mem_stats['total']
+ sys_pct = sys_peak/max(sys_total, 1) * 100
+
+ toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
+ toltip_r = "Reserved: total amout of video memory allocated by the Torch library "
+ toltip_sys = "System: peak amout of video memory allocated by all running programs, out of total capacity"
+
+ text_a = f"A : {active_peak/1024:.2f} GB "
+ text_r = f"R : {reserved_peak/1024:.2f} GB "
+ text_sys = f"Sys : {sys_peak/1024:.1f}/{sys_total/1024:g} GB ({sys_pct:.1f}%)"
+
+ vram_html = f"{text_a}, {text_r}, {text_sys}
"
+ else:
+ vram_html = ''
+
+ # last item is always HTML
+ res[-1] += f""
+
+ return tuple(res)
+
+ return f
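+
+
+# A minimal usage sketch, not part of the original module. `fn` here is a
+# hypothetical Gradio event handler; wrapping it serializes GPU work through
+# the FIFO queue_lock and appends the timing/VRAM HTML to the last output:
+#
+#     submit.click(fn=wrap_gradio_gpu_call(fn, extra_outputs=[None, '']), ...)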
diff --git a/modules/codeformer/__pycache__/codeformer_arch.cpython-39.pyc b/modules/codeformer/__pycache__/codeformer_arch.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f8174c1ca9818fc9cb5fe6191d70c0d08e1601c
Binary files /dev/null and b/modules/codeformer/__pycache__/codeformer_arch.cpython-39.pyc differ
diff --git a/modules/codeformer/__pycache__/vqgan_arch.cpython-39.pyc b/modules/codeformer/__pycache__/vqgan_arch.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f16a8ca3f91f532365a737b021af8aa13ddc4af
Binary files /dev/null and b/modules/codeformer/__pycache__/vqgan_arch.cpython-39.pyc differ
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..12db6814268fdba5a3025f44d1bb24e93d280a69
--- /dev/null
+++ b/modules/codeformer/codeformer_arch.py
@@ -0,0 +1,276 @@
+# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
+
+import math
+import torch
+from torch import nn, Tensor
+import torch.nn.functional as F
+from typing import Optional
+
+from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
+from basicsr.utils.registry import ARCH_REGISTRY
+
+def calc_mean_std(feat, eps=1e-5):
+ """Calculate mean and std for adaptive_instance_normalization.
+
+ Args:
+ feat (Tensor): 4D tensor.
+ eps (float): A small value added to the variance to avoid
+ divide-by-zero. Default: 1e-5.
+ """
+ size = feat.size()
+ assert len(size) == 4, 'The input feature should be 4D tensor.'
+ b, c = size[:2]
+ feat_var = feat.view(b, c, -1).var(dim=2) + eps
+ feat_std = feat_var.sqrt().view(b, c, 1, 1)
+ feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
+ return feat_mean, feat_std
+
+
+def adaptive_instance_normalization(content_feat, style_feat):
+ """Adaptive instance normalization.
+
+    Adjust the reference features to have similar color and illumination to
+    those of the degraded features.
+
+ Args:
+ content_feat (Tensor): The reference feature.
+        style_feat (Tensor): The degraded features.
+ """
+ size = content_feat.size()
+ style_mean, style_std = calc_mean_std(style_feat)
+ content_mean, content_std = calc_mean_std(content_feat)
+ normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
+ return normalized_feat * style_std.expand(size) + style_mean.expand(size)
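+
+# In symbols: AdaIN(x, s) = sigma(s) * (x - mu(x)) / sigma(x) + mu(s), with the
+# mean and std taken per sample and per channel over spatial positions.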
+
+
+class PositionEmbeddingSine(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one
+ used by the Attention is all you need paper, generalized to work on images.
+ """
+
+ def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
+ super().__init__()
+ self.num_pos_feats = num_pos_feats
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ def forward(self, x, mask=None):
+ if mask is None:
+ mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
+ not_mask = ~mask
+ y_embed = not_mask.cumsum(1, dtype=torch.float32)
+ x_embed = not_mask.cumsum(2, dtype=torch.float32)
+ if self.normalize:
+ eps = 1e-6
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack(
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
+ ).flatten(3)
+ pos_y = torch.stack(
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
+ ).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ return pos
+
+def _get_activation_fn(activation):
+ """Return an activation function given a string"""
+ if activation == "relu":
+ return F.relu
+ if activation == "gelu":
+ return F.gelu
+ if activation == "glu":
+ return F.glu
+ raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
+
+
+class TransformerSALayer(nn.Module):
+ def __init__(self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu"):
+ super().__init__()
+ self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout)
+ # Implementation of Feedforward model - MLP
+ self.linear1 = nn.Linear(embed_dim, dim_mlp)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_mlp, embed_dim)
+
+ self.norm1 = nn.LayerNorm(embed_dim)
+ self.norm2 = nn.LayerNorm(embed_dim)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+
+ self.activation = _get_activation_fn(activation)
+
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+ return tensor if pos is None else tensor + pos
+
+ def forward(self, tgt,
+ tgt_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+
+ # self attention
+ tgt2 = self.norm1(tgt)
+ q = k = self.with_pos_embed(tgt2, query_pos)
+ tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
+ key_padding_mask=tgt_key_padding_mask)[0]
+ tgt = tgt + self.dropout1(tgt2)
+
+ # ffn
+ tgt2 = self.norm2(tgt)
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+ tgt = tgt + self.dropout2(tgt2)
+ return tgt
+
+class Fuse_sft_block(nn.Module):
+ def __init__(self, in_ch, out_ch):
+ super().__init__()
+ self.encode_enc = ResBlock(2*in_ch, out_ch)
+
+ self.scale = nn.Sequential(
+ nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
+ nn.LeakyReLU(0.2, True),
+ nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
+
+ self.shift = nn.Sequential(
+ nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
+ nn.LeakyReLU(0.2, True),
+ nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
+
+ def forward(self, enc_feat, dec_feat, w=1):
+ enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1))
+ scale = self.scale(enc_feat)
+ shift = self.shift(enc_feat)
+ residual = w * (dec_feat * scale + shift)
+ out = dec_feat + residual
+ return out
+
+
+@ARCH_REGISTRY.register()
+class CodeFormer(VQAutoEncoder):
+ def __init__(self, dim_embd=512, n_head=8, n_layers=9,
+ codebook_size=1024, latent_size=256,
+ connect_list=('32', '64', '128', '256'),
+ fix_modules=('quantize', 'generator')):
+        super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest', 2, [16], codebook_size)
+
+ if fix_modules is not None:
+ for module in fix_modules:
+ for param in getattr(self, module).parameters():
+ param.requires_grad = False
+
+ self.connect_list = connect_list
+ self.n_layers = n_layers
+ self.dim_embd = dim_embd
+ self.dim_mlp = dim_embd*2
+
+ self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd))
+ self.feat_emb = nn.Linear(256, self.dim_embd)
+
+ # transformer
+ self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
+ for _ in range(self.n_layers)])
+
+ # logits_predict head
+ self.idx_pred_layer = nn.Sequential(
+ nn.LayerNorm(dim_embd),
+ nn.Linear(dim_embd, codebook_size, bias=False))
+
+ self.channels = {
+ '16': 512,
+ '32': 256,
+ '64': 256,
+ '128': 128,
+ '256': 128,
+ '512': 64,
+ }
+
+ # after second residual block for > 16, before attn layer for ==16
+ self.fuse_encoder_block = {'512':2, '256':5, '128':8, '64':11, '32':14, '16':18}
+ # after first residual block for > 16, before attn layer for ==16
+ self.fuse_generator_block = {'16':6, '32': 9, '64':12, '128':15, '256':18, '512':21}
+
+ # fuse_convs_dict
+ self.fuse_convs_dict = nn.ModuleDict()
+ for f_size in self.connect_list:
+ in_ch = self.channels[f_size]
+ self.fuse_convs_dict[f_size] = Fuse_sft_block(in_ch, in_ch)
+
+ def _init_weights(self, module):
+ if isinstance(module, (nn.Linear, nn.Embedding)):
+ module.weight.data.normal_(mean=0.0, std=0.02)
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ def forward(self, x, w=0, detach_16=True, code_only=False, adain=False):
+ # ################### Encoder #####################
+ enc_feat_dict = {}
+ out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list]
+ for i, block in enumerate(self.encoder.blocks):
+ x = block(x)
+ if i in out_list:
+ enc_feat_dict[str(x.shape[-1])] = x.clone()
+
+ lq_feat = x
+ # ################# Transformer ###################
+ # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat)
+ pos_emb = self.position_emb.unsqueeze(1).repeat(1,x.shape[0],1)
+ # BCHW -> BC(HW) -> (HW)BC
+ feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2,0,1))
+ query_emb = feat_emb
+ # Transformer encoder
+ for layer in self.ft_layers:
+ query_emb = layer(query_emb, query_pos=pos_emb)
+
+ # output logits
+ logits = self.idx_pred_layer(query_emb) # (hw)bn
+ logits = logits.permute(1,0,2) # (hw)bn -> b(hw)n
+
+ if code_only: # for training stage II
+ # logits doesn't need softmax before cross_entropy loss
+ return logits, lq_feat
+
+ # ################# Quantization ###################
+ # if self.training:
+ # quant_feat = torch.einsum('btn,nc->btc', [soft_one_hot, self.quantize.embedding.weight])
+ # # b(hw)c -> bc(hw) -> bchw
+ # quant_feat = quant_feat.permute(0,2,1).view(lq_feat.shape)
+ # ------------
+ soft_one_hot = F.softmax(logits, dim=2)
+ _, top_idx = torch.topk(soft_one_hot, 1, dim=2)
+ quant_feat = self.quantize.get_codebook_feat(top_idx, shape=[x.shape[0],16,16,256])
+ # preserve gradients
+ # quant_feat = lq_feat + (quant_feat - lq_feat).detach()
+
+ if detach_16:
+ quant_feat = quant_feat.detach() # for training stage III
+ if adain:
+ quant_feat = adaptive_instance_normalization(quant_feat, lq_feat)
+
+ # ################## Generator ####################
+ x = quant_feat
+ fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list]
+
+ for i, block in enumerate(self.generator.blocks):
+ x = block(x)
+ if i in fuse_list: # fuse after i-th block
+ f_size = str(x.shape[-1])
+ if w>0:
+ x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w)
+ out = x
+ # logits doesn't need softmax before cross_entropy loss
+ return out, logits, lq_feat
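+
+
+if __name__ == "__main__":
+    # Minimal shape-check sketch, not part of the original file; assumes torch
+    # and basicsr are installed, run from the webui root as
+    # `python -m modules.codeformer.codeformer_arch`. Weights are random, so
+    # this only exercises the forward pass, not restoration quality.
+    net = CodeFormer().eval()
+    with torch.no_grad():
+        out, logits, lq_feat = net(torch.randn(1, 3, 512, 512), w=0.5)
+    # expected: (1, 3, 512, 512) (1, 256, 1024) (1, 256, 16, 16)
+    print(tuple(out.shape), tuple(logits.shape), tuple(lq_feat.shape))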
diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..09ee6660dc537e41fb9d9c7be7196c94c04aa8c6
--- /dev/null
+++ b/modules/codeformer/vqgan_arch.py
@@ -0,0 +1,435 @@
+# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
+
+'''
+VQGAN code, adapted from the original created by the Unleashing Transformers authors:
+https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
+
+'''
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from basicsr.utils import get_root_logger
+from basicsr.utils.registry import ARCH_REGISTRY
+
+def normalize(in_channels):
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+
+
+@torch.jit.script
+def swish(x):
+ return x*torch.sigmoid(x)
+
+
+# Define VQVAE classes
+class VectorQuantizer(nn.Module):
+ def __init__(self, codebook_size, emb_dim, beta):
+ super(VectorQuantizer, self).__init__()
+ self.codebook_size = codebook_size # number of embeddings
+ self.emb_dim = emb_dim # dimension of embedding
+ self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
+ self.embedding = nn.Embedding(self.codebook_size, self.emb_dim)
+ self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size)
+
+ def forward(self, z):
+ # reshape z -> (batch, height, width, channel) and flatten
+ z = z.permute(0, 2, 3, 1).contiguous()
+ z_flattened = z.view(-1, self.emb_dim)
+
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
+ d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \
+ 2 * torch.matmul(z_flattened, self.embedding.weight.t())
+
+ mean_distance = torch.mean(d)
+ # find closest encodings
+ # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
+ min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False)
+ # [0-1], higher score, higher confidence
+ min_encoding_scores = torch.exp(-min_encoding_scores/10)
+
+ min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z)
+ min_encodings.scatter_(1, min_encoding_indices, 1)
+
+ # get quantized latent vectors
+ z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
+ # compute loss for embedding
+ loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
+ # preserve gradients
+ z_q = z + (z_q - z).detach()
+
+ # perplexity
+ e_mean = torch.mean(min_encodings, dim=0)
+ perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
+ # reshape back to match original input shape
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
+
+ return z_q, loss, {
+ "perplexity": perplexity,
+ "min_encodings": min_encodings,
+ "min_encoding_indices": min_encoding_indices,
+ "min_encoding_scores": min_encoding_scores,
+ "mean_distance": mean_distance
+ }
+
+ def get_codebook_feat(self, indices, shape):
+ # input indices: batch*token_num -> (batch*token_num)*1
+ # shape: batch, height, width, channel
+ indices = indices.view(-1,1)
+ min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices)
+ min_encodings.scatter_(1, indices, 1)
+ # get quantized latent vectors
+ z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
+
+ if shape is not None: # reshape back to match original input shape
+ z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous()
+
+ return z_q
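+
+# A quick sanity sketch, not part of the original file: quantizing a random
+# latent preserves its shape and also yields the commitment loss and stats.
+#
+#     vq = VectorQuantizer(codebook_size=1024, emb_dim=256, beta=0.25)
+#     z_q, loss, stats = vq(torch.randn(1, 256, 16, 16))
+#     assert z_q.shape == (1, 256, 16, 16)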
+
+
+class GumbelQuantizer(nn.Module):
+ def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0):
+ super().__init__()
+ self.codebook_size = codebook_size # number of embeddings
+ self.emb_dim = emb_dim # dimension of embedding
+ self.straight_through = straight_through
+ self.temperature = temp_init
+ self.kl_weight = kl_weight
+ self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits
+ self.embed = nn.Embedding(codebook_size, emb_dim)
+
+ def forward(self, z):
+ hard = self.straight_through if self.training else True
+
+ logits = self.proj(z)
+
+ soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard)
+
+ z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight)
+
+ # + kl divergence to the prior loss
+ qy = F.softmax(logits, dim=1)
+ diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean()
+ min_encoding_indices = soft_one_hot.argmax(dim=1)
+
+ return z_q, diff, {
+ "min_encoding_indices": min_encoding_indices
+ }
+
+
+class Downsample(nn.Module):
+ def __init__(self, in_channels):
+ super().__init__()
+ self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
+
+ def forward(self, x):
+ pad = (0, 1, 0, 1)
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
+ x = self.conv(x)
+ return x
+
+
+class Upsample(nn.Module):
+ def __init__(self, in_channels):
+ super().__init__()
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
+
+ def forward(self, x):
+ x = F.interpolate(x, scale_factor=2.0, mode="nearest")
+ x = self.conv(x)
+
+ return x
+
+
+class ResBlock(nn.Module):
+ def __init__(self, in_channels, out_channels=None):
+ super(ResBlock, self).__init__()
+ self.in_channels = in_channels
+ self.out_channels = in_channels if out_channels is None else out_channels
+ self.norm1 = normalize(in_channels)
+        self.conv1 = nn.Conv2d(in_channels, self.out_channels, kernel_size=3, stride=1, padding=1)
+        self.norm2 = normalize(self.out_channels)
+        self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=1)
+        if self.in_channels != self.out_channels:
+            self.conv_out = nn.Conv2d(in_channels, self.out_channels, kernel_size=1, stride=1, padding=0)
+
+ def forward(self, x_in):
+ x = x_in
+ x = self.norm1(x)
+ x = swish(x)
+ x = self.conv1(x)
+ x = self.norm2(x)
+ x = swish(x)
+ x = self.conv2(x)
+ if self.in_channels != self.out_channels:
+ x_in = self.conv_out(x_in)
+
+ return x + x_in
+
+
+class AttnBlock(nn.Module):
+ def __init__(self, in_channels):
+ super().__init__()
+ self.in_channels = in_channels
+
+ self.norm = normalize(in_channels)
+ self.q = torch.nn.Conv2d(
+ in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0
+ )
+ self.k = torch.nn.Conv2d(
+ in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0
+ )
+ self.v = torch.nn.Conv2d(
+ in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0
+ )
+ self.proj_out = torch.nn.Conv2d(
+ in_channels,
+ in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0
+ )
+
+ def forward(self, x):
+ h_ = x
+ h_ = self.norm(h_)
+ q = self.q(h_)
+ k = self.k(h_)
+ v = self.v(h_)
+
+ # compute attention
+ b, c, h, w = q.shape
+ q = q.reshape(b, c, h*w)
+ q = q.permute(0, 2, 1)
+ k = k.reshape(b, c, h*w)
+ w_ = torch.bmm(q, k)
+ w_ = w_ * (int(c)**(-0.5))
+ w_ = F.softmax(w_, dim=2)
+
+ # attend to values
+ v = v.reshape(b, c, h*w)
+ w_ = w_.permute(0, 2, 1)
+ h_ = torch.bmm(v, w_)
+ h_ = h_.reshape(b, c, h, w)
+
+ h_ = self.proj_out(h_)
+
+ return x+h_
+
+
+class Encoder(nn.Module):
+ def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions):
+ super().__init__()
+ self.nf = nf
+ self.num_resolutions = len(ch_mult)
+ self.num_res_blocks = num_res_blocks
+ self.resolution = resolution
+ self.attn_resolutions = attn_resolutions
+
+ curr_res = self.resolution
+ in_ch_mult = (1,)+tuple(ch_mult)
+
+ blocks = []
+        # initial convolution
+ blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1))
+
+ # residual and downsampling blocks, with attention on smaller res (16x16)
+ for i in range(self.num_resolutions):
+ block_in_ch = nf * in_ch_mult[i]
+ block_out_ch = nf * ch_mult[i]
+ for _ in range(self.num_res_blocks):
+ blocks.append(ResBlock(block_in_ch, block_out_ch))
+ block_in_ch = block_out_ch
+ if curr_res in attn_resolutions:
+ blocks.append(AttnBlock(block_in_ch))
+
+ if i != self.num_resolutions - 1:
+ blocks.append(Downsample(block_in_ch))
+ curr_res = curr_res // 2
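+ # e.g. with the CodeFormer 512px VQGAN settings (ch_mult of length 6), the input is halved five times, so the attention blocks fire on the 16x16 map, matching the default attn_resolutions=[16]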
+
+ # non-local attention block
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
+ blocks.append(AttnBlock(block_in_ch))
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
+
+ # normalise and convert to latent size
+ blocks.append(normalize(block_in_ch))
+ blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1))
+ self.blocks = nn.ModuleList(blocks)
+
+ def forward(self, x):
+ for block in self.blocks:
+ x = block(x)
+
+ return x
+
+
+class Generator(nn.Module):
+ def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions):
+ super().__init__()
+ self.nf = nf
+ self.ch_mult = ch_mult
+ self.num_resolutions = len(self.ch_mult)
+ self.num_res_blocks = res_blocks
+ self.resolution = img_size
+ self.attn_resolutions = attn_resolutions
+ self.in_channels = emb_dim
+ self.out_channels = 3
+ block_in_ch = self.nf * self.ch_mult[-1]
+ curr_res = self.resolution // 2 ** (self.num_resolutions-1)
+
+ blocks = []
+ # initial conv
+ blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1))
+
+ # non-local attention block
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
+ blocks.append(AttnBlock(block_in_ch))
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
+
+ for i in reversed(range(self.num_resolutions)):
+ block_out_ch = self.nf * self.ch_mult[i]
+
+ for _ in range(self.num_res_blocks):
+ blocks.append(ResBlock(block_in_ch, block_out_ch))
+ block_in_ch = block_out_ch
+
+ if curr_res in self.attn_resolutions:
+ blocks.append(AttnBlock(block_in_ch))
+
+ if i != 0:
+ blocks.append(Upsample(block_in_ch))
+ curr_res = curr_res * 2
+
+ blocks.append(normalize(block_in_ch))
+ blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1))
+
+ self.blocks = nn.ModuleList(blocks)
+
+
+ def forward(self, x):
+ for block in self.blocks:
+ x = block(x)
+
+ return x
+
+
+@ARCH_REGISTRY.register()
+class VQAutoEncoder(nn.Module):
+ def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256,
+ beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
+ super().__init__()
+ logger = get_root_logger()
+ self.in_channels = 3
+ self.nf = nf
+ self.n_blocks = res_blocks
+ self.codebook_size = codebook_size
+ self.embed_dim = emb_dim
+ self.ch_mult = ch_mult
+ self.resolution = img_size
+ self.attn_resolutions = attn_resolutions or [16]
+ self.quantizer_type = quantizer
+ self.encoder = Encoder(
+ self.in_channels,
+ self.nf,
+ self.embed_dim,
+ self.ch_mult,
+ self.n_blocks,
+ self.resolution,
+ self.attn_resolutions
+ )
+ if self.quantizer_type == "nearest":
+ self.beta = beta  # codebook commitment cost, default 0.25
+ self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta)
+ elif self.quantizer_type == "gumbel":
+ self.gumbel_num_hiddens = emb_dim
+ self.straight_through = gumbel_straight_through
+ self.kl_weight = gumbel_kl_weight
+ self.quantize = GumbelQuantizer(
+ self.codebook_size,
+ self.embed_dim,
+ self.gumbel_num_hiddens,
+ self.straight_through,
+ self.kl_weight
+ )
+ self.generator = Generator(
+ self.nf,
+ self.embed_dim,
+ self.ch_mult,
+ self.n_blocks,
+ self.resolution,
+ self.attn_resolutions
+ )
+
+ if model_path is not None:
+ chkpt = torch.load(model_path, map_location='cpu')
+ if 'params_ema' in chkpt:
+ self.load_state_dict(chkpt['params_ema'])
+ logger.info(f'vqgan is loaded from: {model_path} [params_ema]')
+ elif 'params' in chkpt:
+ self.load_state_dict(chkpt['params'])
+ logger.info(f'vqgan is loaded from: {model_path} [params]')
+ else:
+ raise ValueError(f'Unsupported checkpoint format in {model_path}: expected "params_ema" or "params" key')
+
+
+ def forward(self, x):
+ x = self.encoder(x)
+ quant, codebook_loss, quant_stats = self.quantize(x)
+ x = self.generator(quant)
+ return x, codebook_loss, quant_stats
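+
+ # Usage sketch (illustrative values only):
+ #   vqgan = VQAutoEncoder(img_size=512, nf=64, ch_mult=[1, 2, 2, 4, 4, 8])
+ #   recon, codebook_loss, stats = vqgan(x)  # x: (B, 3, 512, 512)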
+
+
+
+# patch based discriminator
+@ARCH_REGISTRY.register()
+class VQGANDiscriminator(nn.Module):
+ def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None):
+ super().__init__()
+
+ layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)]
+ ndf_mult = 1
+ ndf_mult_prev = 1
+ for n in range(1, n_layers): # gradually increase the number of filters
+ ndf_mult_prev = ndf_mult
+ ndf_mult = min(2 ** n, 8)
+ layers += [
+ nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False),
+ nn.BatchNorm2d(ndf * ndf_mult),
+ nn.LeakyReLU(0.2, True)
+ ]
+
+ ndf_mult_prev = ndf_mult
+ ndf_mult = min(2 ** n_layers, 8)
+
+ layers += [
+ nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False),
+ nn.BatchNorm2d(ndf * ndf_mult),
+ nn.LeakyReLU(0.2, True)
+ ]
+
+ layers += [
+ nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map
+ self.main = nn.Sequential(*layers)
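+ # PatchGAN-style: each value in the 1-channel output map classifies one overlapping patch of the input as real or fake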
+
+ if model_path is not None:
+ chkpt = torch.load(model_path, map_location='cpu')
+ if 'params_d' in chkpt:
+ self.load_state_dict(chkpt['params_d'])
+ elif 'params' in chkpt:
+ self.load_state_dict(chkpt['params'])
+ else:
+ raise ValueError(f'Unsupported checkpoint format in {model_path}: expected "params_d" or "params" key')
+
+ def forward(self, x):
+ return self.main(x)
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ad8a9db806d3406610d81534a6d7c85301cceb0
--- /dev/null
+++ b/modules/codeformer_model.py
@@ -0,0 +1,132 @@
+import os
+
+import cv2
+import torch
+
+import modules.face_restoration
+import modules.shared
+from modules import shared, devices, modelloader, errors
+from modules.paths import models_path
+
+# the CodeFormer authors chose to bundle a modified copy of the basicsr library with their project, which
+# makes it impossible to use alongside other libraries that also depend on basicsr, such as GFPGAN.
+# I am choosing to include some files from CodeFormer here to work around this issue.
+model_dir = "Codeformer"
+model_path = os.path.join(models_path, model_dir)
+model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
+
+codeformer = None
+
+
+def setup_model(dirname):
+ os.makedirs(model_path, exist_ok=True)
+
+ path = modules.paths.paths.get("CodeFormer", None)
+ if path is None:
+ return
+
+ try:
+ from torchvision.transforms.functional import normalize
+ from modules.codeformer.codeformer_arch import CodeFormer
+ from basicsr.utils import img2tensor, tensor2img
+ from facelib.utils.face_restoration_helper import FaceRestoreHelper
+ from facelib.detection.retinaface import retinaface
+
+ net_class = CodeFormer
+
+ class FaceRestorerCodeFormer(modules.face_restoration.FaceRestoration):
+ def name(self):
+ return "CodeFormer"
+
+ def __init__(self, dirname):
+ self.net = None
+ self.face_helper = None
+ self.cmd_dir = dirname
+
+ def create_models(self):
+
+ if self.net is not None and self.face_helper is not None:
+ self.net.to(devices.device_codeformer)
+ return self.net, self.face_helper
+ model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth', ext_filter=['.pth'])
+ if len(model_paths) != 0:
+ ckpt_path = model_paths[0]
+ else:
+ print("Unable to load codeformer model.")
+ return None, None
+ net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(devices.device_codeformer)
+ checkpoint = torch.load(ckpt_path)['params_ema']
+ net.load_state_dict(checkpoint)
+ net.eval()
+
+ if hasattr(retinaface, 'device'):
+ retinaface.device = devices.device_codeformer
+ face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=devices.device_codeformer)
+
+ self.net = net
+ self.face_helper = face_helper
+
+ return net, face_helper
+
+ def send_model_to(self, device):
+ self.net.to(device)
+ self.face_helper.face_det.to(device)
+ self.face_helper.face_parse.to(device)
+
+ def restore(self, np_image, w=None):
+ np_image = np_image[:, :, ::-1]
+
+ original_resolution = np_image.shape[0:2]
+
+ self.create_models()
+ if self.net is None or self.face_helper is None:
+ return np_image
+
+ self.send_model_to(devices.device_codeformer)
+
+ self.face_helper.clean_all()
+ self.face_helper.read_image(np_image)
+ self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
+ self.face_helper.align_warp_face()
+
+ for cropped_face in self.face_helper.cropped_faces:
+ cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
+ normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
+ cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)
+
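+ # w is CodeFormer's fidelity weight in [0, 1]: lower values favour restoration quality, higher values favour fidelity to the input face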
+ try:
+ with torch.no_grad():
+ output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
+ restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
+ del output
+ devices.torch_gc()
+ except Exception:
+ errors.report('Failed inference for CodeFormer', exc_info=True)
+ restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
+
+ restored_face = restored_face.astype('uint8')
+ self.face_helper.add_restored_face(restored_face)
+
+ self.face_helper.get_inverse_affine(None)
+
+ restored_img = self.face_helper.paste_faces_to_input_image()
+ restored_img = restored_img[:, :, ::-1]
+
+ if original_resolution != restored_img.shape[0:2]:
+ restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
+
+ self.face_helper.clean_all()
+
+ if shared.opts.face_restoration_unload:
+ self.send_model_to(devices.cpu)
+
+ return restored_img
+
+ global codeformer
+ codeformer = FaceRestorerCodeFormer(dirname)
+ shared.face_restorers.append(codeformer)
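+ # the restorer is now selectable from the UI; restore() takes an RGB uint8 array and falls back to shared.opts.code_former_weight when w is None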
+
+ except Exception:
+ errors.report("Error setting up CodeFormer", exc_info=True)
+
diff --git a/modules/config_states.py b/modules/config_states.py
new file mode 100644
index 0000000000000000000000000000000000000000..b766aef11d87a74ea4cd6fa8a580e12e830e5691
--- /dev/null
+++ b/modules/config_states.py
@@ -0,0 +1,199 @@
+"""
+Supports saving and restoring webui and extensions from a known working set of commits
+"""
+
+import os
+import json
+import time
+import tqdm
+
+from datetime import datetime
+import git
+
+from modules import shared, extensions, errors
+from modules.paths_internal import script_path, config_states_dir
+
+all_config_states = {}
+
+
+def list_config_states():
+ global all_config_states
+
+ all_config_states.clear()
+ os.makedirs(config_states_dir, exist_ok=True)
+
+ config_states = []
+ for filename in os.listdir(config_states_dir):
+ if filename.endswith(".json"):
+ path = os.path.join(config_states_dir, filename)
+ try:
+ with open(path, "r", encoding="utf-8") as f:
+ j = json.load(f)
+ assert "created_at" in j, '"created_at" does not exist'
+ j["filepath"] = path
+ config_states.append(j)
+ except Exception as e:
+ print(f'[ERROR]: Failed to load config state {path}: {e}')
+
+ config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)
+
+ for cs in config_states:
+ timestamp = time.asctime(time.gmtime(cs["created_at"]))
+ name = cs.get("name", "Config")
+ full_name = f"{name}: {timestamp}"
+ all_config_states[full_name] = cs
+
+ return all_config_states
+
+
+def get_webui_config():
+ webui_repo = None
+
+ try:
+ if os.path.exists(os.path.join(script_path, ".git")):
+ webui_repo = git.Repo(script_path)
+ except Exception:
+ errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
+
+ webui_remote = None
+ webui_commit_hash = None
+ webui_commit_date = None
+ webui_branch = None
+ if webui_repo and not webui_repo.bare:
+ try:
+ webui_remote = next(webui_repo.remote().urls, None)
+ head = webui_repo.head.commit
+ webui_commit_date = webui_repo.head.commit.committed_date
+ webui_commit_hash = head.hexsha
+ webui_branch = webui_repo.active_branch.name
+
+ except Exception:
+ webui_remote = None
+
+ return {
+ "remote": webui_remote,
+ "commit_hash": webui_commit_hash,
+ "commit_date": webui_commit_date,
+ "branch": webui_branch,
+ }
+
+
+def get_extension_config():
+ ext_config = {}
+
+ for ext in extensions.extensions:
+ ext.read_info_from_repo()
+
+ entry = {
+ "name": ext.name,
+ "path": ext.path,
+ "enabled": ext.enabled,
+ "is_builtin": ext.is_builtin,
+ "remote": ext.remote,
+ "commit_hash": ext.commit_hash,
+ "commit_date": ext.commit_date,
+ "branch": ext.branch,
+ "have_info_from_repo": ext.have_info_from_repo
+ }
+
+ ext_config[ext.name] = entry
+
+ return ext_config
+
+
+def get_config():
+ creation_time = datetime.now().timestamp()
+ webui_config = get_webui_config()
+ ext_config = get_extension_config()
+
+ return {
+ "created_at": creation_time,
+ "webui": webui_config,
+ "extensions": ext_config
+ }
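+
+# Illustrative only (a saving helper is not shown in this hunk): a state produced by get_config()
+# could be persisted along the lines of:
+#   with open(os.path.join(config_states_dir, "my-state.json"), "w", encoding="utf-8") as f:
+#       json.dump(get_config(), f, indent=4)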
+
+
+def restore_webui_config(config):
+ print("* Restoring webui state...")
+
+ if "webui" not in config:
+ print("Error: No webui data saved to config")
+ return
+
+ webui_config = config["webui"]
+
+ if "commit_hash" not in webui_config:
+ print("Error: No commit saved to webui config")
+ return
+
+ webui_commit_hash = webui_config.get("commit_hash", None)
+ webui_repo = None
+
+ try:
+ if os.path.exists(os.path.join(script_path, ".git")):
+ webui_repo = git.Repo(script_path)
+ except Exception:
+ errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
+ return
+
+ try:
+ webui_repo.git.fetch(all=True)
+ webui_repo.git.reset(webui_commit_hash, hard=True)
+ print(f"* Restored webui to commit {webui_commit_hash}.")
+ except Exception:
+ errors.report(f"Error restoring webui to commit{webui_commit_hash}")
+
+
+def restore_extension_config(config):
+ print("* Restoring extension state...")
+
+ if "extensions" not in config:
+ print("Error: No extension data saved to config")
+ return
+
+ ext_config = config["extensions"]
+
+ results = []
+ disabled = []
+
+ for ext in tqdm.tqdm(extensions.extensions):
+ if ext.is_builtin:
+ continue
+
+ ext.read_info_from_repo()
+ current_commit = ext.commit_hash
+
+ if ext.name not in ext_config:
+ ext.disabled = True
+ disabled.append(ext.name)
+ results.append((ext, current_commit[:8], False, "Saved extension state not found in config, marking as disabled"))
+ continue
+
+ entry = ext_config[ext.name]
+
+ if "commit_hash" in entry and entry["commit_hash"]:
+ try:
+ ext.fetch_and_reset_hard(entry["commit_hash"])
+ ext.read_info_from_repo()
+ if current_commit != entry["commit_hash"]:
+ results.append((ext, current_commit[:8], True, entry["commit_hash"][:8]))
+ except Exception as ex:
+ results.append((ext, current_commit[:8], False, ex))
+ else:
+ results.append((ext, current_commit[:8], False, "No commit hash found in config"))
+
+ if not entry.get("enabled", False):
+ ext.disabled = True
+ disabled.append(ext.name)
+ else:
+ ext.disabled = False
+
+ shared.opts.disabled_extensions = disabled
+ shared.opts.save(shared.config_filename)
+
+ print("* Finished restoring extensions. Results:")
+ for ext, prev_commit, success, result in results:
+ if success:
+ print(f" + {ext.name}: {prev_commit} -> {result}")
+ else:
+ print(f" ! {ext.name}: FAILURE ({result})")
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
new file mode 100644
index 0000000000000000000000000000000000000000..547e1b4c67aeb75a06c9991f957f51b0ef6fdd0f
--- /dev/null
+++ b/modules/deepbooru.py
@@ -0,0 +1,98 @@
+import os
+import re
+
+import torch
+import numpy as np
+
+from modules import modelloader, paths, deepbooru_model, devices, images, shared
+
+re_special = re.compile(r'([\\()])')
+
+
+class DeepDanbooru:
+ def __init__(self):
+ self.model = None
+
+ def load(self):
+ if self.model is not None:
+ return
+
+ files = modelloader.load_models(
+ model_path=os.path.join(paths.models_path, "torch_deepdanbooru"),
+ model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt',
+ ext_filter=[".pt"],
+ download_name='model-resnet_custom_v3.pt',
+ )
+
+ self.model = deepbooru_model.DeepDanbooruModel()
+ self.model.load_state_dict(torch.load(files[0], map_location="cpu"))
+
+ self.model.eval()
+ self.model.to(devices.cpu, devices.dtype)
+
+ def start(self):
+ self.load()
+ self.model.to(devices.device)
+
+ def stop(self):
+ if not shared.opts.interrogate_keep_models_in_memory:
+ self.model.to(devices.cpu)
+ devices.torch_gc()
+
+ def tag(self, pil_image):
+ self.start()
+ res = self.tag_multi(pil_image)
+ self.stop()
+
+ return res
+
+ def tag_multi(self, pil_image, force_disable_ranks=False):
+ threshold = shared.opts.interrogate_deepbooru_score_threshold
+ use_spaces = shared.opts.deepbooru_use_spaces
+ use_escape = shared.opts.deepbooru_escape
+ alpha_sort = shared.opts.deepbooru_sort_alpha
+ include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks
+
+ pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512)
+ a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255
+
+ with torch.no_grad(), devices.autocast():
+ x = torch.from_numpy(a).to(devices.device)
+ y = self.model(x)[0].detach().cpu().numpy()
+
+ probability_dict = {}
+
+ for tag, probability in zip(self.model.tags, y):
+ if probability < threshold:
+ continue
+
+ if tag.startswith("rating:"):
+ continue
+
+ probability_dict[tag] = probability
+
+ if alpha_sort:
+ tags = sorted(probability_dict)
+ else:
+ tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])]
+
+ res = []
+
+ filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")}
+
+ for tag in [x for x in tags if x not in filtertags]:
+ probability = probability_dict[tag]
+ tag_outformat = tag
+ if use_spaces:
+ tag_outformat = tag_outformat.replace('_', ' ')
+ if use_escape:
+ tag_outformat = re.sub(re_special, r'\\\1', tag_outformat)
+ if include_ranks:
+ tag_outformat = f"({tag_outformat}:{probability:.3f})"
+
+ res.append(tag_outformat)
+
+ return ", ".join(res)
+
+
+model = DeepDanbooru()
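+
+# Usage sketch (illustrative):
+#   from PIL import Image
+#   tags = model.tag(Image.open("example.png"))  # loads the network on demand, tags, then offloads it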
diff --git a/modules/deepbooru_model.py b/modules/deepbooru_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a53884624e96284c35214ce02b8a2891d92c3e8
--- /dev/null
+++ b/modules/deepbooru_model.py
@@ -0,0 +1,678 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from modules import devices
+
+# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more
+
+
+class DeepDanbooruModel(nn.Module):
+ def __init__(self):
+ super(DeepDanbooruModel, self).__init__()
+
+ self.tags = []
+
+ self.n_Conv_0 = nn.Conv2d(kernel_size=(7, 7), in_channels=3, out_channels=64, stride=(2, 2))
+ self.n_MaxPool_0 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
+ self.n_Conv_1 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
+ self.n_Conv_2 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=64)
+ self.n_Conv_3 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64)
+ self.n_Conv_4 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
+ self.n_Conv_5 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64)
+ self.n_Conv_6 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64)
+ self.n_Conv_7 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
+ self.n_Conv_8 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64)
+ self.n_Conv_9 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64)
+ self.n_Conv_10 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
+ self.n_Conv_11 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=512, stride=(2, 2))
+ self.n_Conv_12 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=128)
+ self.n_Conv_13 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128, stride=(2, 2))
+ self.n_Conv_14 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_15 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_16 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_17 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_18 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_19 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_20 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_21 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_22 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_23 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_24 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_25 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_26 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_27 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_28 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_29 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_30 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_31 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_32 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_33 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
+ self.n_Conv_34 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
+ self.n_Conv_35 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
+ self.n_Conv_36 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=1024, stride=(2, 2))
+ self.n_Conv_37 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=256)
+ self.n_Conv_38 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2))
+ self.n_Conv_39 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_40 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_41 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_42 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_43 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_44 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_45 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_46 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_47 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_48 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_49 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_50 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_51 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_52 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_53 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_54 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_55 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_56 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_57 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_58 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_59 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_60 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_61 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_62 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_63 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_64 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_65 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_66 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_67 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_68 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_69 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_70 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_71 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_72 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_73 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_74 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_75 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_76 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_77 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_78 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_79 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_80 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_81 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_82 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_83 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_84 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_85 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_86 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_87 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_88 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_89 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_90 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_91 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_92 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_93 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_94 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_95 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_96 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_97 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_98 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2))
+ self.n_Conv_99 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_100 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=1024, stride=(2, 2))
+ self.n_Conv_101 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_102 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_103 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_104 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_105 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_106 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_107 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_108 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_109 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_110 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_111 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_112 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_113 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_114 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_115 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_116 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_117 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_118 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_119 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_120 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_121 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_122 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_123 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_124 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_125 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_126 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_127 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_128 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_129 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_130 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_131 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_132 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_133 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_134 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_135 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_136 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_137 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_138 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_139 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_140 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_141 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_142 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_143 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_144 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_145 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_146 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_147 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_148 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_149 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_150 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_151 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_152 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_153 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_154 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_155 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
+ self.n_Conv_156 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
+ self.n_Conv_157 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
+ self.n_Conv_158 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=2048, stride=(2, 2))
+ self.n_Conv_159 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=512)
+ self.n_Conv_160 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512, stride=(2, 2))
+ self.n_Conv_161 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048)
+ self.n_Conv_162 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512)
+ self.n_Conv_163 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512)
+ self.n_Conv_164 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048)
+ self.n_Conv_165 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512)
+ self.n_Conv_166 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512)
+ self.n_Conv_167 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048)
+ self.n_Conv_168 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=4096, stride=(2, 2))
+ self.n_Conv_169 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=1024)
+ self.n_Conv_170 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024, stride=(2, 2))
+ self.n_Conv_171 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096)
+ self.n_Conv_172 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024)
+ self.n_Conv_173 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024)
+ self.n_Conv_174 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096)
+ self.n_Conv_175 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024)
+ self.n_Conv_176 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024)
+ self.n_Conv_177 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096)
+ self.n_Conv_178 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=9176, bias=False)
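+ # final 1x1 conv emits 9176 channels - one logit per tag; forward() pools them spatially and applies a sigmoid to get per-tag probabilities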
+
+ def forward(self, *inputs):
+ t_358, = inputs
+ t_359 = t_358.permute(*[0, 3, 1, 2])
+ t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0)
+ t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded)
+ t_361 = F.relu(t_360)
+ t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf'))
+ t_362 = self.n_MaxPool_0(t_361)
+ t_363 = self.n_Conv_1(t_362)
+ t_364 = self.n_Conv_2(t_362)
+ t_365 = F.relu(t_364)
+ t_365_padded = F.pad(t_365, [1, 1, 1, 1], value=0)
+ t_366 = self.n_Conv_3(t_365_padded)
+ t_367 = F.relu(t_366)
+ t_368 = self.n_Conv_4(t_367)
+ t_369 = torch.add(t_368, t_363)
+ t_370 = F.relu(t_369)
+ t_371 = self.n_Conv_5(t_370)
+ t_372 = F.relu(t_371)
+ t_372_padded = F.pad(t_372, [1, 1, 1, 1], value=0)
+ t_373 = self.n_Conv_6(t_372_padded)
+ t_374 = F.relu(t_373)
+ t_375 = self.n_Conv_7(t_374)
+ t_376 = torch.add(t_375, t_370)
+ t_377 = F.relu(t_376)
+ t_378 = self.n_Conv_8(t_377)
+ t_379 = F.relu(t_378)
+ t_379_padded = F.pad(t_379, [1, 1, 1, 1], value=0)
+ t_380 = self.n_Conv_9(t_379_padded)
+ t_381 = F.relu(t_380)
+ t_382 = self.n_Conv_10(t_381)
+ t_383 = torch.add(t_382, t_377)
+ t_384 = F.relu(t_383)
+ t_385 = self.n_Conv_11(t_384)
+ t_386 = self.n_Conv_12(t_384)
+ t_387 = F.relu(t_386)
+ t_387_padded = F.pad(t_387, [0, 1, 0, 1], value=0)
+ t_388 = self.n_Conv_13(t_387_padded)
+ t_389 = F.relu(t_388)
+ t_390 = self.n_Conv_14(t_389)
+ t_391 = torch.add(t_390, t_385)
+ t_392 = F.relu(t_391)
+ t_393 = self.n_Conv_15(t_392)
+ t_394 = F.relu(t_393)
+ t_394_padded = F.pad(t_394, [1, 1, 1, 1], value=0)
+ t_395 = self.n_Conv_16(t_394_padded)
+ t_396 = F.relu(t_395)
+ t_397 = self.n_Conv_17(t_396)
+ t_398 = torch.add(t_397, t_392)
+ t_399 = F.relu(t_398)
+ t_400 = self.n_Conv_18(t_399)
+ t_401 = F.relu(t_400)
+ t_401_padded = F.pad(t_401, [1, 1, 1, 1], value=0)
+ t_402 = self.n_Conv_19(t_401_padded)
+ t_403 = F.relu(t_402)
+ t_404 = self.n_Conv_20(t_403)
+ t_405 = torch.add(t_404, t_399)
+ t_406 = F.relu(t_405)
+ t_407 = self.n_Conv_21(t_406)
+ t_408 = F.relu(t_407)
+ t_408_padded = F.pad(t_408, [1, 1, 1, 1], value=0)
+ t_409 = self.n_Conv_22(t_408_padded)
+ t_410 = F.relu(t_409)
+ t_411 = self.n_Conv_23(t_410)
+ t_412 = torch.add(t_411, t_406)
+ t_413 = F.relu(t_412)
+ t_414 = self.n_Conv_24(t_413)
+ t_415 = F.relu(t_414)
+ t_415_padded = F.pad(t_415, [1, 1, 1, 1], value=0)
+ t_416 = self.n_Conv_25(t_415_padded)
+ t_417 = F.relu(t_416)
+ t_418 = self.n_Conv_26(t_417)
+ t_419 = torch.add(t_418, t_413)
+ t_420 = F.relu(t_419)
+ t_421 = self.n_Conv_27(t_420)
+ t_422 = F.relu(t_421)
+ t_422_padded = F.pad(t_422, [1, 1, 1, 1], value=0)
+ t_423 = self.n_Conv_28(t_422_padded)
+ t_424 = F.relu(t_423)
+ t_425 = self.n_Conv_29(t_424)
+ t_426 = torch.add(t_425, t_420)
+ t_427 = F.relu(t_426)
+ t_428 = self.n_Conv_30(t_427)
+ t_429 = F.relu(t_428)
+ t_429_padded = F.pad(t_429, [1, 1, 1, 1], value=0)
+ t_430 = self.n_Conv_31(t_429_padded)
+ t_431 = F.relu(t_430)
+ t_432 = self.n_Conv_32(t_431)
+ t_433 = torch.add(t_432, t_427)
+ t_434 = F.relu(t_433)
+ t_435 = self.n_Conv_33(t_434)
+ t_436 = F.relu(t_435)
+ t_436_padded = F.pad(t_436, [1, 1, 1, 1], value=0)
+ t_437 = self.n_Conv_34(t_436_padded)
+ t_438 = F.relu(t_437)
+ t_439 = self.n_Conv_35(t_438)
+ t_440 = torch.add(t_439, t_434)
+ t_441 = F.relu(t_440)
+ t_442 = self.n_Conv_36(t_441)
+ t_443 = self.n_Conv_37(t_441)
+ t_444 = F.relu(t_443)
+ t_444_padded = F.pad(t_444, [0, 1, 0, 1], value=0)
+ t_445 = self.n_Conv_38(t_444_padded)
+ t_446 = F.relu(t_445)
+ t_447 = self.n_Conv_39(t_446)
+ t_448 = torch.add(t_447, t_442)
+ t_449 = F.relu(t_448)
+ t_450 = self.n_Conv_40(t_449)
+ t_451 = F.relu(t_450)
+ t_451_padded = F.pad(t_451, [1, 1, 1, 1], value=0)
+ t_452 = self.n_Conv_41(t_451_padded)
+ t_453 = F.relu(t_452)
+ t_454 = self.n_Conv_42(t_453)
+ t_455 = torch.add(t_454, t_449)
+ t_456 = F.relu(t_455)
+ t_457 = self.n_Conv_43(t_456)
+ t_458 = F.relu(t_457)
+ t_458_padded = F.pad(t_458, [1, 1, 1, 1], value=0)
+ t_459 = self.n_Conv_44(t_458_padded)
+ t_460 = F.relu(t_459)
+ t_461 = self.n_Conv_45(t_460)
+ t_462 = torch.add(t_461, t_456)
+ t_463 = F.relu(t_462)
+ t_464 = self.n_Conv_46(t_463)
+ t_465 = F.relu(t_464)
+ t_465_padded = F.pad(t_465, [1, 1, 1, 1], value=0)
+ t_466 = self.n_Conv_47(t_465_padded)
+ t_467 = F.relu(t_466)
+ t_468 = self.n_Conv_48(t_467)
+ t_469 = torch.add(t_468, t_463)
+ t_470 = F.relu(t_469)
+ t_471 = self.n_Conv_49(t_470)
+ t_472 = F.relu(t_471)
+ t_472_padded = F.pad(t_472, [1, 1, 1, 1], value=0)
+ t_473 = self.n_Conv_50(t_472_padded)
+ t_474 = F.relu(t_473)
+ t_475 = self.n_Conv_51(t_474)
+ t_476 = torch.add(t_475, t_470)
+ t_477 = F.relu(t_476)
+ t_478 = self.n_Conv_52(t_477)
+ t_479 = F.relu(t_478)
+ t_479_padded = F.pad(t_479, [1, 1, 1, 1], value=0)
+ t_480 = self.n_Conv_53(t_479_padded)
+ t_481 = F.relu(t_480)
+ t_482 = self.n_Conv_54(t_481)
+ t_483 = torch.add(t_482, t_477)
+ t_484 = F.relu(t_483)
+ t_485 = self.n_Conv_55(t_484)
+ t_486 = F.relu(t_485)
+ t_486_padded = F.pad(t_486, [1, 1, 1, 1], value=0)
+ t_487 = self.n_Conv_56(t_486_padded)
+ t_488 = F.relu(t_487)
+ t_489 = self.n_Conv_57(t_488)
+ t_490 = torch.add(t_489, t_484)
+ t_491 = F.relu(t_490)
+ t_492 = self.n_Conv_58(t_491)
+ t_493 = F.relu(t_492)
+ t_493_padded = F.pad(t_493, [1, 1, 1, 1], value=0)
+ t_494 = self.n_Conv_59(t_493_padded)
+ t_495 = F.relu(t_494)
+ t_496 = self.n_Conv_60(t_495)
+ t_497 = torch.add(t_496, t_491)
+ t_498 = F.relu(t_497)
+ t_499 = self.n_Conv_61(t_498)
+ t_500 = F.relu(t_499)
+ t_500_padded = F.pad(t_500, [1, 1, 1, 1], value=0)
+ t_501 = self.n_Conv_62(t_500_padded)
+ t_502 = F.relu(t_501)
+ t_503 = self.n_Conv_63(t_502)
+ t_504 = torch.add(t_503, t_498)
+ t_505 = F.relu(t_504)
+ t_506 = self.n_Conv_64(t_505)
+ t_507 = F.relu(t_506)
+ t_507_padded = F.pad(t_507, [1, 1, 1, 1], value=0)
+ t_508 = self.n_Conv_65(t_507_padded)
+ t_509 = F.relu(t_508)
+ t_510 = self.n_Conv_66(t_509)
+ t_511 = torch.add(t_510, t_505)
+ t_512 = F.relu(t_511)
+ t_513 = self.n_Conv_67(t_512)
+ t_514 = F.relu(t_513)
+ t_514_padded = F.pad(t_514, [1, 1, 1, 1], value=0)
+ t_515 = self.n_Conv_68(t_514_padded)
+ t_516 = F.relu(t_515)
+ t_517 = self.n_Conv_69(t_516)
+ t_518 = torch.add(t_517, t_512)
+ t_519 = F.relu(t_518)
+ t_520 = self.n_Conv_70(t_519)
+ t_521 = F.relu(t_520)
+ t_521_padded = F.pad(t_521, [1, 1, 1, 1], value=0)
+ t_522 = self.n_Conv_71(t_521_padded)
+ t_523 = F.relu(t_522)
+ t_524 = self.n_Conv_72(t_523)
+ t_525 = torch.add(t_524, t_519)
+ t_526 = F.relu(t_525)
+ t_527 = self.n_Conv_73(t_526)
+ t_528 = F.relu(t_527)
+ t_528_padded = F.pad(t_528, [1, 1, 1, 1], value=0)
+ t_529 = self.n_Conv_74(t_528_padded)
+ t_530 = F.relu(t_529)
+ t_531 = self.n_Conv_75(t_530)
+ t_532 = torch.add(t_531, t_526)
+ t_533 = F.relu(t_532)
+ t_534 = self.n_Conv_76(t_533)
+ t_535 = F.relu(t_534)
+ t_535_padded = F.pad(t_535, [1, 1, 1, 1], value=0)
+ t_536 = self.n_Conv_77(t_535_padded)
+ t_537 = F.relu(t_536)
+ t_538 = self.n_Conv_78(t_537)
+ t_539 = torch.add(t_538, t_533)
+ t_540 = F.relu(t_539)
+ t_541 = self.n_Conv_79(t_540)
+ t_542 = F.relu(t_541)
+ t_542_padded = F.pad(t_542, [1, 1, 1, 1], value=0)
+ t_543 = self.n_Conv_80(t_542_padded)
+ t_544 = F.relu(t_543)
+ t_545 = self.n_Conv_81(t_544)
+ t_546 = torch.add(t_545, t_540)
+ t_547 = F.relu(t_546)
+ t_548 = self.n_Conv_82(t_547)
+ t_549 = F.relu(t_548)
+ t_549_padded = F.pad(t_549, [1, 1, 1, 1], value=0)
+ t_550 = self.n_Conv_83(t_549_padded)
+ t_551 = F.relu(t_550)
+ t_552 = self.n_Conv_84(t_551)
+ t_553 = torch.add(t_552, t_547)
+ t_554 = F.relu(t_553)
+ t_555 = self.n_Conv_85(t_554)
+ t_556 = F.relu(t_555)
+ t_556_padded = F.pad(t_556, [1, 1, 1, 1], value=0)
+ t_557 = self.n_Conv_86(t_556_padded)
+ t_558 = F.relu(t_557)
+ t_559 = self.n_Conv_87(t_558)
+ t_560 = torch.add(t_559, t_554)
+ t_561 = F.relu(t_560)
+ t_562 = self.n_Conv_88(t_561)
+ t_563 = F.relu(t_562)
+ t_563_padded = F.pad(t_563, [1, 1, 1, 1], value=0)
+ t_564 = self.n_Conv_89(t_563_padded)
+ t_565 = F.relu(t_564)
+ t_566 = self.n_Conv_90(t_565)
+ t_567 = torch.add(t_566, t_561)
+ t_568 = F.relu(t_567)
+ t_569 = self.n_Conv_91(t_568)
+ t_570 = F.relu(t_569)
+ t_570_padded = F.pad(t_570, [1, 1, 1, 1], value=0)
+ t_571 = self.n_Conv_92(t_570_padded)
+ t_572 = F.relu(t_571)
+ t_573 = self.n_Conv_93(t_572)
+ t_574 = torch.add(t_573, t_568)
+ t_575 = F.relu(t_574)
+ t_576 = self.n_Conv_94(t_575)
+ t_577 = F.relu(t_576)
+ t_577_padded = F.pad(t_577, [1, 1, 1, 1], value=0)
+ t_578 = self.n_Conv_95(t_577_padded)
+ t_579 = F.relu(t_578)
+ t_580 = self.n_Conv_96(t_579)
+ t_581 = torch.add(t_580, t_575)
+ t_582 = F.relu(t_581)
+ t_583 = self.n_Conv_97(t_582)
+ t_584 = F.relu(t_583)
+ t_584_padded = F.pad(t_584, [0, 1, 0, 1], value=0)
+ t_585 = self.n_Conv_98(t_584_padded)
+ t_586 = F.relu(t_585)
+ t_587 = self.n_Conv_99(t_586)
+ t_588 = self.n_Conv_100(t_582)
+ t_589 = torch.add(t_587, t_588)
+ t_590 = F.relu(t_589)
+ t_591 = self.n_Conv_101(t_590)
+ t_592 = F.relu(t_591)
+ t_592_padded = F.pad(t_592, [1, 1, 1, 1], value=0)
+ t_593 = self.n_Conv_102(t_592_padded)
+ t_594 = F.relu(t_593)
+ t_595 = self.n_Conv_103(t_594)
+ t_596 = torch.add(t_595, t_590)
+ t_597 = F.relu(t_596)
+ t_598 = self.n_Conv_104(t_597)
+ t_599 = F.relu(t_598)
+ t_599_padded = F.pad(t_599, [1, 1, 1, 1], value=0)
+ t_600 = self.n_Conv_105(t_599_padded)
+ t_601 = F.relu(t_600)
+ t_602 = self.n_Conv_106(t_601)
+ t_603 = torch.add(t_602, t_597)
+ t_604 = F.relu(t_603)
+ t_605 = self.n_Conv_107(t_604)
+ t_606 = F.relu(t_605)
+ t_606_padded = F.pad(t_606, [1, 1, 1, 1], value=0)
+ t_607 = self.n_Conv_108(t_606_padded)
+ t_608 = F.relu(t_607)
+ t_609 = self.n_Conv_109(t_608)
+ t_610 = torch.add(t_609, t_604)
+ t_611 = F.relu(t_610)
+ t_612 = self.n_Conv_110(t_611)
+ t_613 = F.relu(t_612)
+ t_613_padded = F.pad(t_613, [1, 1, 1, 1], value=0)
+ t_614 = self.n_Conv_111(t_613_padded)
+ t_615 = F.relu(t_614)
+ t_616 = self.n_Conv_112(t_615)
+ t_617 = torch.add(t_616, t_611)
+ t_618 = F.relu(t_617)
+ t_619 = self.n_Conv_113(t_618)
+ t_620 = F.relu(t_619)
+ t_620_padded = F.pad(t_620, [1, 1, 1, 1], value=0)
+ t_621 = self.n_Conv_114(t_620_padded)
+ t_622 = F.relu(t_621)
+ t_623 = self.n_Conv_115(t_622)
+ t_624 = torch.add(t_623, t_618)
+ t_625 = F.relu(t_624)
+ t_626 = self.n_Conv_116(t_625)
+ t_627 = F.relu(t_626)
+ t_627_padded = F.pad(t_627, [1, 1, 1, 1], value=0)
+ t_628 = self.n_Conv_117(t_627_padded)
+ t_629 = F.relu(t_628)
+ t_630 = self.n_Conv_118(t_629)
+ t_631 = torch.add(t_630, t_625)
+ t_632 = F.relu(t_631)
+ t_633 = self.n_Conv_119(t_632)
+ t_634 = F.relu(t_633)
+ t_634_padded = F.pad(t_634, [1, 1, 1, 1], value=0)
+ t_635 = self.n_Conv_120(t_634_padded)
+ t_636 = F.relu(t_635)
+ t_637 = self.n_Conv_121(t_636)
+ t_638 = torch.add(t_637, t_632)
+ t_639 = F.relu(t_638)
+ t_640 = self.n_Conv_122(t_639)
+ t_641 = F.relu(t_640)
+ t_641_padded = F.pad(t_641, [1, 1, 1, 1], value=0)
+ t_642 = self.n_Conv_123(t_641_padded)
+ t_643 = F.relu(t_642)
+ t_644 = self.n_Conv_124(t_643)
+ t_645 = torch.add(t_644, t_639)
+ t_646 = F.relu(t_645)
+ t_647 = self.n_Conv_125(t_646)
+ t_648 = F.relu(t_647)
+ t_648_padded = F.pad(t_648, [1, 1, 1, 1], value=0)
+ t_649 = self.n_Conv_126(t_648_padded)
+ t_650 = F.relu(t_649)
+ t_651 = self.n_Conv_127(t_650)
+ t_652 = torch.add(t_651, t_646)
+ t_653 = F.relu(t_652)
+ t_654 = self.n_Conv_128(t_653)
+ t_655 = F.relu(t_654)
+ t_655_padded = F.pad(t_655, [1, 1, 1, 1], value=0)
+ t_656 = self.n_Conv_129(t_655_padded)
+ t_657 = F.relu(t_656)
+ t_658 = self.n_Conv_130(t_657)
+ t_659 = torch.add(t_658, t_653)
+ t_660 = F.relu(t_659)
+ t_661 = self.n_Conv_131(t_660)
+ t_662 = F.relu(t_661)
+ t_662_padded = F.pad(t_662, [1, 1, 1, 1], value=0)
+ t_663 = self.n_Conv_132(t_662_padded)
+ t_664 = F.relu(t_663)
+ t_665 = self.n_Conv_133(t_664)
+ t_666 = torch.add(t_665, t_660)
+ t_667 = F.relu(t_666)
+ t_668 = self.n_Conv_134(t_667)
+ t_669 = F.relu(t_668)
+ t_669_padded = F.pad(t_669, [1, 1, 1, 1], value=0)
+ t_670 = self.n_Conv_135(t_669_padded)
+ t_671 = F.relu(t_670)
+ t_672 = self.n_Conv_136(t_671)
+ t_673 = torch.add(t_672, t_667)
+ t_674 = F.relu(t_673)
+ t_675 = self.n_Conv_137(t_674)
+ t_676 = F.relu(t_675)
+ t_676_padded = F.pad(t_676, [1, 1, 1, 1], value=0)
+ t_677 = self.n_Conv_138(t_676_padded)
+ t_678 = F.relu(t_677)
+ t_679 = self.n_Conv_139(t_678)
+ t_680 = torch.add(t_679, t_674)
+ t_681 = F.relu(t_680)
+ t_682 = self.n_Conv_140(t_681)
+ t_683 = F.relu(t_682)
+ t_683_padded = F.pad(t_683, [1, 1, 1, 1], value=0)
+ t_684 = self.n_Conv_141(t_683_padded)
+ t_685 = F.relu(t_684)
+ t_686 = self.n_Conv_142(t_685)
+ t_687 = torch.add(t_686, t_681)
+ t_688 = F.relu(t_687)
+ t_689 = self.n_Conv_143(t_688)
+ t_690 = F.relu(t_689)
+ t_690_padded = F.pad(t_690, [1, 1, 1, 1], value=0)
+ t_691 = self.n_Conv_144(t_690_padded)
+ t_692 = F.relu(t_691)
+ t_693 = self.n_Conv_145(t_692)
+ t_694 = torch.add(t_693, t_688)
+ t_695 = F.relu(t_694)
+ t_696 = self.n_Conv_146(t_695)
+ t_697 = F.relu(t_696)
+ t_697_padded = F.pad(t_697, [1, 1, 1, 1], value=0)
+ t_698 = self.n_Conv_147(t_697_padded)
+ t_699 = F.relu(t_698)
+ t_700 = self.n_Conv_148(t_699)
+ t_701 = torch.add(t_700, t_695)
+ t_702 = F.relu(t_701)
+ t_703 = self.n_Conv_149(t_702)
+ t_704 = F.relu(t_703)
+ t_704_padded = F.pad(t_704, [1, 1, 1, 1], value=0)
+ t_705 = self.n_Conv_150(t_704_padded)
+ t_706 = F.relu(t_705)
+ t_707 = self.n_Conv_151(t_706)
+ t_708 = torch.add(t_707, t_702)
+ t_709 = F.relu(t_708)
+ t_710 = self.n_Conv_152(t_709)
+ t_711 = F.relu(t_710)
+ t_711_padded = F.pad(t_711, [1, 1, 1, 1], value=0)
+ t_712 = self.n_Conv_153(t_711_padded)
+ t_713 = F.relu(t_712)
+ t_714 = self.n_Conv_154(t_713)
+ t_715 = torch.add(t_714, t_709)
+ t_716 = F.relu(t_715)
+ t_717 = self.n_Conv_155(t_716)
+ t_718 = F.relu(t_717)
+ t_718_padded = F.pad(t_718, [1, 1, 1, 1], value=0)
+ t_719 = self.n_Conv_156(t_718_padded)
+ t_720 = F.relu(t_719)
+ t_721 = self.n_Conv_157(t_720)
+ t_722 = torch.add(t_721, t_716)
+ t_723 = F.relu(t_722)
+ t_724 = self.n_Conv_158(t_723)
+ t_725 = self.n_Conv_159(t_723)
+ t_726 = F.relu(t_725)
+ t_726_padded = F.pad(t_726, [0, 1, 0, 1], value=0)
+ t_727 = self.n_Conv_160(t_726_padded)
+ t_728 = F.relu(t_727)
+ t_729 = self.n_Conv_161(t_728)
+ t_730 = torch.add(t_729, t_724)
+ t_731 = F.relu(t_730)
+ t_732 = self.n_Conv_162(t_731)
+ t_733 = F.relu(t_732)
+ t_733_padded = F.pad(t_733, [1, 1, 1, 1], value=0)
+ t_734 = self.n_Conv_163(t_733_padded)
+ t_735 = F.relu(t_734)
+ t_736 = self.n_Conv_164(t_735)
+ t_737 = torch.add(t_736, t_731)
+ t_738 = F.relu(t_737)
+ t_739 = self.n_Conv_165(t_738)
+ t_740 = F.relu(t_739)
+ t_740_padded = F.pad(t_740, [1, 1, 1, 1], value=0)
+ t_741 = self.n_Conv_166(t_740_padded)
+ t_742 = F.relu(t_741)
+ t_743 = self.n_Conv_167(t_742)
+ t_744 = torch.add(t_743, t_738)
+ t_745 = F.relu(t_744)
+ t_746 = self.n_Conv_168(t_745)
+ t_747 = self.n_Conv_169(t_745)
+ t_748 = F.relu(t_747)
+ t_748_padded = F.pad(t_748, [0, 1, 0, 1], value=0)
+ t_749 = self.n_Conv_170(t_748_padded)
+ t_750 = F.relu(t_749)
+ t_751 = self.n_Conv_171(t_750)
+ t_752 = torch.add(t_751, t_746)
+ t_753 = F.relu(t_752)
+ t_754 = self.n_Conv_172(t_753)
+ t_755 = F.relu(t_754)
+ t_755_padded = F.pad(t_755, [1, 1, 1, 1], value=0)
+ t_756 = self.n_Conv_173(t_755_padded)
+ t_757 = F.relu(t_756)
+ t_758 = self.n_Conv_174(t_757)
+ t_759 = torch.add(t_758, t_753)
+ t_760 = F.relu(t_759)
+ t_761 = self.n_Conv_175(t_760)
+ t_762 = F.relu(t_761)
+ t_762_padded = F.pad(t_762, [1, 1, 1, 1], value=0)
+ t_763 = self.n_Conv_176(t_762_padded)
+ t_764 = F.relu(t_763)
+ t_765 = self.n_Conv_177(t_764)
+ t_766 = torch.add(t_765, t_760)
+ t_767 = F.relu(t_766)
+ t_768 = self.n_Conv_178(t_767)
+ t_769 = F.avg_pool2d(t_768, kernel_size=t_768.shape[-2:])
+ t_770 = torch.squeeze(t_769, 3)
+ t_770 = torch.squeeze(t_770, 2)
+ t_771 = torch.sigmoid(t_770)
+ return t_771
+
+ def load_state_dict(self, state_dict, **kwargs):
+ self.tags = state_dict.get('tags', [])
+
+ super(DeepDanbooruModel, self).load_state_dict({k: v for k, v in state_dict.items() if k != 'tags'})
+
diff --git a/modules/devices.py b/modules/devices.py
new file mode 100644
index 0000000000000000000000000000000000000000..c01f06024b4cffd4a44f97b6f7699397e27abdb2
--- /dev/null
+++ b/modules/devices.py
@@ -0,0 +1,153 @@
+import sys
+import contextlib
+from functools import lru_cache
+
+import torch
+from modules import errors, shared
+
+if sys.platform == "darwin":
+ from modules import mac_specific
+
+
+def has_mps() -> bool:
+ if sys.platform != "darwin":
+ return False
+ else:
+ return mac_specific.has_mps
+
+
+def get_cuda_device_string():
+ if shared.cmd_opts.device_id is not None:
+ return f"cuda:{shared.cmd_opts.device_id}"
+
+ return "cuda"
+
+
+def get_optimal_device_name():
+ if torch.cuda.is_available():
+ return get_cuda_device_string()
+
+ if has_mps():
+ return "mps"
+
+ return "cpu"
+
+
+def get_optimal_device():
+ return torch.device(get_optimal_device_name())
+
+
+def get_device_for(task):
+ if task in shared.cmd_opts.use_cpu:
+ return cpu
+
+ return get_optimal_device()
+
+
+def torch_gc():
+
+ if torch.cuda.is_available():
+ with torch.cuda.device(get_cuda_device_string()):
+ torch.cuda.empty_cache()
+ torch.cuda.ipc_collect()
+
+ if has_mps():
+ mac_specific.torch_mps_gc()
+
+
+def enable_tf32():
+ if torch.cuda.is_available():
+
+ # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
+ # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
+ if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
+ torch.backends.cudnn.benchmark = True
+
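+ # TF32 trades a little matmul/conv precision for a large speedup on Ampere and newer GPUs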
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+
+
+errors.run(enable_tf32, "Enabling TF32")
+
+cpu: torch.device = torch.device("cpu")
+device: torch.device = None
+device_interrogate: torch.device = None
+device_gfpgan: torch.device = None
+device_esrgan: torch.device = None
+device_codeformer: torch.device = None
+dtype: torch.dtype = torch.float16
+dtype_vae: torch.dtype = torch.float16
+dtype_unet: torch.dtype = torch.float16
+unet_needs_upcast = False
+
+
+def cond_cast_unet(input):
+ return input.to(dtype_unet) if unet_needs_upcast else input
+
+
+def cond_cast_float(input):
+ return input.float() if unet_needs_upcast else input
+
+
+nv_rng = None
+
+
+def autocast(disable=False):
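+ # returns a context manager; callers write e.g. "with devices.autocast():" around model calls (as in deepbooru.tag_multi)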
+ if disable:
+ return contextlib.nullcontext()
+
+ if dtype == torch.float32 or shared.cmd_opts.precision == "full":
+ return contextlib.nullcontext()
+
+ return torch.autocast("cuda")
+
+
+def without_autocast(disable=False):
+ return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
+
+
+class NansException(Exception):
+ pass
+
+
+def test_for_nans(x, where):
+ if shared.cmd_opts.disable_nan_check:
+ return
+
+ if not torch.all(torch.isnan(x)).item():
+ return
+
+ if where == "unet":
+ message = "A tensor with all NaNs was produced in Unet."
+
+ if not shared.cmd_opts.no_half:
+ message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
+
+ elif where == "vae":
+ message = "A tensor with all NaNs was produced in VAE."
+
+ if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
+ message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
+ else:
+ message = "A tensor with all NaNs was produced."
+
+ message += " Use --disable-nan-check commandline argument to disable this check."
+
+ raise NansException(message)
+
+
+@lru_cache
+def first_time_calculation():
+ """
+ just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and
+ spends about 2.7 seconds doing that, at least with NVidia.
+ """
+
+ x = torch.zeros((1, 1)).to(device, dtype)
+ linear = torch.nn.Linear(1, 1).to(device, dtype)
+ linear(x)
+
+ x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
+ conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
+ conv2d(x)
+
diff --git a/modules/errors.py b/modules/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..973ebc10b212836e2baa203659d88d1440f4ba04
--- /dev/null
+++ b/modules/errors.py
@@ -0,0 +1,136 @@
+import sys
+import textwrap
+import traceback
+
+
+exception_records = []
+
+
+def record_exception():
+ _, e, tb = sys.exc_info()
+ if e is None:
+ return
+
+ if exception_records and exception_records[-1] == e:
+ return
+
+ from modules import sysinfo
+ exception_records.append(sysinfo.format_exception(e, tb))
+
+ if len(exception_records) > 5:
+ exception_records.pop(0)
+
+
+def report(message: str, *, exc_info: bool = False) -> None:
+ """
+ Print an error message to stderr, with optional traceback.
+ """
+
+ record_exception()
+
+ for line in message.splitlines():
+ print("***", line, file=sys.stderr)
+ if exc_info:
+ print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr)
+ print("---", file=sys.stderr)
+
+
+def print_error_explanation(message):
+ record_exception()
+
+ lines = message.strip().split("\n")
+ max_len = max([len(x) for x in lines])
+
+ print('=' * max_len, file=sys.stderr)
+ for line in lines:
+ print(line, file=sys.stderr)
+ print('=' * max_len, file=sys.stderr)
+
+
+def display(e: Exception, task, *, full_traceback=False):
+ record_exception()
+
+ print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
+ te = traceback.TracebackException.from_exception(e)
+ if full_traceback:
+ # include frames leading up to the try-catch block
+ te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
+ print(*te.format(), sep="", file=sys.stderr)
+
+ message = str(e)
+ if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
+ print_error_explanation("""
+The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its config file.
+See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
+ """)
+
+
+already_displayed = {}
+
+
+def display_once(e: Exception, task):
+ record_exception()
+
+ if task in already_displayed:
+ return
+
+ display(e, task)
+
+ already_displayed[task] = 1
+
+
+def run(code, task):
+ try:
+ code()
+ except Exception as e:
+ display(e, task)
+
+
+def check_versions():
+ from packaging import version
+ from modules import shared
+
+ import torch
+ import gradio
+
+ expected_torch_version = "2.0.0"
+ expected_xformers_version = "0.0.20"
+ expected_gradio_version = "3.41.2"
+
+ if version.parse(torch.__version__) < version.parse(expected_torch_version):
+ print_error_explanation(f"""
+You are running torch {torch.__version__}.
+The program is tested to work with torch {expected_torch_version}.
+To reinstall the desired version, run with commandline flag --reinstall-torch.
+Beware that this will cause a lot of large files to be downloaded, and that
+there are reports of issues with the training tab on the latest version.
+
+Use the --skip-version-check commandline argument to disable this check.
+ """.strip())
+
+ if shared.xformers_available:
+ import xformers
+
+ if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
+ print_error_explanation(f"""
+You are running xformers {xformers.__version__}.
+The program is tested to work with xformers {expected_xformers_version}.
+To reinstall the desired version, run with commandline flag --reinstall-xformers.
+
+Use the --skip-version-check commandline argument to disable this check.
+ """.strip())
+
+ if gradio.__version__ != expected_gradio_version:
+ print_error_explanation(f"""
+You are running gradio {gradio.__version__}.
+The program is designed to work with gradio {expected_gradio_version}.
+Using a different version of gradio is extremely likely to break the program.
+
+Possible reasons for the mismatched gradio version:
+ - you use the --skip-install flag.
+ - you use webui.py to start the program instead of launch.py.
+ - an extension installs an incompatible gradio version.
+
+Use the --skip-version-check commandline argument to disable this check.
+ """.strip())
+
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e4260e2c62dbb14387e90e369dc109f435867b0
--- /dev/null
+++ b/modules/esrgan_model.py
@@ -0,0 +1,229 @@
+import sys
+
+import numpy as np
+import torch
+from PIL import Image
+
+import modules.esrgan_model_arch as arch
+from modules import modelloader, images, devices
+from modules.shared import opts
+from modules.upscaler import Upscaler, UpscalerData
+
+
+def mod2normal(state_dict):
+ # this code is copied from https://github.com/victorca25/iNNfer
+ if 'conv_first.weight' in state_dict:
+ crt_net = {}
+ items = list(state_dict)
+
+ crt_net['model.0.weight'] = state_dict['conv_first.weight']
+ crt_net['model.0.bias'] = state_dict['conv_first.bias']
+
+ for k in items.copy():
+ if 'RDB' in k:
+ ori_k = k.replace('RRDB_trunk.', 'model.1.sub.')
+ if '.weight' in k:
+ ori_k = ori_k.replace('.weight', '.0.weight')
+ elif '.bias' in k:
+ ori_k = ori_k.replace('.bias', '.0.bias')
+ crt_net[ori_k] = state_dict[k]
+ items.remove(k)
+
+ crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight']
+ crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias']
+ crt_net['model.3.weight'] = state_dict['upconv1.weight']
+ crt_net['model.3.bias'] = state_dict['upconv1.bias']
+ crt_net['model.6.weight'] = state_dict['upconv2.weight']
+ crt_net['model.6.bias'] = state_dict['upconv2.bias']
+ crt_net['model.8.weight'] = state_dict['HRconv.weight']
+ crt_net['model.8.bias'] = state_dict['HRconv.bias']
+ crt_net['model.10.weight'] = state_dict['conv_last.weight']
+ crt_net['model.10.bias'] = state_dict['conv_last.bias']
+ state_dict = crt_net
+ return state_dict
+
+
+def resrgan2normal(state_dict, nb=23):
+ # this code is copied from https://github.com/victorca25/iNNfer
+ if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
+ re8x = 0
+ crt_net = {}
+ items = list(state_dict)
+
+ crt_net['model.0.weight'] = state_dict['conv_first.weight']
+ crt_net['model.0.bias'] = state_dict['conv_first.bias']
+
+ for k in items.copy():
+ if "rdb" in k:
+ ori_k = k.replace('body.', 'model.1.sub.')
+ ori_k = ori_k.replace('.rdb', '.RDB')
+ if '.weight' in k:
+ ori_k = ori_k.replace('.weight', '.0.weight')
+ elif '.bias' in k:
+ ori_k = ori_k.replace('.bias', '.0.bias')
+ crt_net[ori_k] = state_dict[k]
+ items.remove(k)
+
+ crt_net[f'model.1.sub.{nb}.weight'] = state_dict['conv_body.weight']
+ crt_net[f'model.1.sub.{nb}.bias'] = state_dict['conv_body.bias']
+ crt_net['model.3.weight'] = state_dict['conv_up1.weight']
+ crt_net['model.3.bias'] = state_dict['conv_up1.bias']
+ crt_net['model.6.weight'] = state_dict['conv_up2.weight']
+ crt_net['model.6.bias'] = state_dict['conv_up2.bias']
+
+ if 'conv_up3.weight' in state_dict:
+ # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
+ re8x = 3
+ crt_net['model.9.weight'] = state_dict['conv_up3.weight']
+ crt_net['model.9.bias'] = state_dict['conv_up3.bias']
+
+ crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
+ crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
+ crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
+ crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
+
+ state_dict = crt_net
+ return state_dict
+
+
+def infer_params(state_dict):
+ # this code is copied from https://github.com/victorca25/iNNfer
+ scale2x = 0
+ scalemin = 6
+ n_uplayer = 0
+ plus = False
+
+ for block in list(state_dict):
+ parts = block.split(".")
+ n_parts = len(parts)
+ if n_parts == 5 and parts[2] == "sub":
+ nb = int(parts[3])
+ elif n_parts == 3:
+ part_num = int(parts[1])
+ if (part_num > scalemin
+ and parts[0] == "model"
+ and parts[2] == "weight"):
+ scale2x += 1
+ if part_num > n_uplayer:
+ n_uplayer = part_num
+ out_nc = state_dict[block].shape[0]
+ if not plus and "conv1x1" in block:
+ plus = True
+
+ nf = state_dict["model.0.weight"].shape[0]
+ in_nc = state_dict["model.0.weight"].shape[1]
+ scale = 2 ** scale2x
+
+ return in_nc, out_nc, nf, nb, plus, scale
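+
+# Reading the result: scale2x counts "model.<n>.weight" entries with n > 6; a standard
+# 4x ESRGAN checkpoint has model.8.weight and model.10.weight, so scale = 2**2 = 4.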
+
+
+class UpscalerESRGAN(Upscaler):
+ def __init__(self, dirname):
+ self.name = "ESRGAN"
+ self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
+ self.model_name = "ESRGAN_4x"
+ self.scalers = []
+ self.user_path = dirname
+ super().__init__()
+ model_paths = self.find_models(ext_filter=[".pt", ".pth"])
+ if len(model_paths) == 0:
+ scaler_data = UpscalerData(self.model_name, self.model_url, self, 4)
+ self.scalers.append(scaler_data)
+ for file in model_paths:
+ if file.startswith("http"):
+ name = self.model_name
+ else:
+ name = modelloader.friendly_name(file)
+
+ scaler_data = UpscalerData(name, file, self, 4)
+ self.scalers.append(scaler_data)
+
+ def do_upscale(self, img, selected_model):
+ try:
+ model = self.load_model(selected_model)
+ except Exception as e:
+ print(f"Unable to load ESRGAN model {selected_model}: {e}", file=sys.stderr)
+ return img
+ model.to(devices.device_esrgan)
+ img = esrgan_upscale(model, img)
+ return img
+
+ def load_model(self, path: str):
+ if path.startswith("http"):
+ # TODO: this doesn't use `path` at all?
+ filename = modelloader.load_file_from_url(
+ url=self.model_url,
+ model_dir=self.model_download_path,
+ file_name=f"{self.model_name}.pth",
+ )
+ else:
+ filename = path
+
+ state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None)
+
+ if "params_ema" in state_dict:
+ state_dict = state_dict["params_ema"]
+ elif "params" in state_dict:
+ state_dict = state_dict["params"]
+ num_conv = 16 if "realesr-animevideov3" in filename else 32
+ model = arch.SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=num_conv, upscale=4, act_type='prelu')
+ model.load_state_dict(state_dict)
+ model.eval()
+ return model
+
+ if "body.0.rdb1.conv1.weight" in state_dict and "conv_first.weight" in state_dict:
+ nb = 6 if "RealESRGAN_x4plus_anime_6B" in filename else 23
+ state_dict = resrgan2normal(state_dict, nb)
+ elif "conv_first.weight" in state_dict:
+ state_dict = mod2normal(state_dict)
+ elif "model.0.weight" not in state_dict:
+ raise Exception("The file is not a recognized ESRGAN model.")
+
+ in_nc, out_nc, nf, nb, plus, mscale = infer_params(state_dict)
+
+ model = arch.RRDBNet(in_nc=in_nc, out_nc=out_nc, nf=nf, nb=nb, upscale=mscale, plus=plus)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ return model
+
+
+def upscale_without_tiling(model, img):
+ img = np.array(img)
+ img = img[:, :, ::-1]
+ img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255
+ img = torch.from_numpy(img).float()
+ img = img.unsqueeze(0).to(devices.device_esrgan)
+ with torch.no_grad():
+ output = model(img)
+ output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
+ output = 255. * np.moveaxis(output, 0, 2)
+ output = output.astype(np.uint8)
+ output = output[:, :, ::-1]
+ return Image.fromarray(output, 'RGB')
+
+
+def esrgan_upscale(model, img):
+ if opts.ESRGAN_tile == 0:
+ return upscale_without_tiling(model, img)
+
+ grid = images.split_grid(img, opts.ESRGAN_tile, opts.ESRGAN_tile, opts.ESRGAN_tile_overlap)
+ newtiles = []
+ scale_factor = 1
+
+ for y, h, row in grid.tiles:
+ newrow = []
+ for tiledata in row:
+ x, w, tile = tiledata
+
+ output = upscale_without_tiling(model, tile)
+ scale_factor = output.width // tile.width
+
+ newrow.append([x * scale_factor, w * scale_factor, output])
+ newtiles.append([y * scale_factor, h * scale_factor, newrow])
+
+ newgrid = images.Grid(newtiles, grid.tile_w * scale_factor, grid.tile_h * scale_factor, grid.image_w * scale_factor, grid.image_h * scale_factor, grid.overlap * scale_factor)
+ output = images.combine_grid(newgrid)
+ return output
diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..353c70dd867cb894a0ac208f39394280175e4e14
--- /dev/null
+++ b/modules/esrgan_model_arch.py
@@ -0,0 +1,465 @@
+# this file is adapted from https://github.com/victorca25/iNNfer
+
+from collections import OrderedDict
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+####################
+# RRDBNet Generator
+####################
+
+class RRDBNet(nn.Module):
+ def __init__(self, in_nc, out_nc, nf, nb, nr=3, gc=32, upscale=4, norm_type=None,
+ act_type='leakyrelu', mode='CNA', upsample_mode='upconv', convtype='Conv2D',
+ finalact=None, gaussian_noise=False, plus=False):
+ super(RRDBNet, self).__init__()
+ n_upscale = int(math.log(upscale, 2))
+ if upscale == 3:
+ n_upscale = 1
+
+ self.resrgan_scale = 0
+ if in_nc % 16 == 0:
+ self.resrgan_scale = 1
+ elif in_nc != 4 and in_nc % 4 == 0:
+ self.resrgan_scale = 2
+
+ fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None, convtype=convtype)
+ rb_blocks = [RRDB(nf, nr, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
+ norm_type=norm_type, act_type=act_type, mode='CNA', convtype=convtype,
+ gaussian_noise=gaussian_noise, plus=plus) for _ in range(nb)]
+ LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode, convtype=convtype)
+
+ if upsample_mode == 'upconv':
+ upsample_block = upconv_block
+ elif upsample_mode == 'pixelshuffle':
+ upsample_block = pixelshuffle_block
+ else:
+ raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found')
+ if upscale == 3:
+ upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype)
+ else:
+ upsampler = [upsample_block(nf, nf, act_type=act_type, convtype=convtype) for _ in range(n_upscale)]
+ HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type, convtype=convtype)
+ HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None, convtype=convtype)
+
+ outact = act(finalact) if finalact else None
+
+ self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)),
+ *upsampler, HR_conv0, HR_conv1, outact)
+
+ def forward(self, x, outm=None):
+ if self.resrgan_scale == 1:
+ feat = pixel_unshuffle(x, scale=4)
+ elif self.resrgan_scale == 2:
+ feat = pixel_unshuffle(x, scale=2)
+ else:
+ feat = x
+
+ return self.model(feat)
+
+
+class RRDB(nn.Module):
+ """
+ Residual in Residual Dense Block
+ (ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks)
+ """
+
+ def __init__(self, nf, nr=3, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
+ norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D',
+ spectral_norm=False, gaussian_noise=False, plus=False):
+ super(RRDB, self).__init__()
+ # This is for backwards compatibility with existing models
+ if nr == 3:
+ self.RDB1 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
+ gaussian_noise=gaussian_noise, plus=plus)
+ self.RDB2 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
+ gaussian_noise=gaussian_noise, plus=plus)
+ self.RDB3 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
+ gaussian_noise=gaussian_noise, plus=plus)
+ else:
+ RDB_list = [ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
+ gaussian_noise=gaussian_noise, plus=plus) for _ in range(nr)]
+ self.RDBs = nn.Sequential(*RDB_list)
+
+ def forward(self, x):
+ if hasattr(self, 'RDB1'):
+ out = self.RDB1(x)
+ out = self.RDB2(out)
+ out = self.RDB3(out)
+ else:
+ out = self.RDBs(x)
+ return out * 0.2 + x
+
+
+class ResidualDenseBlock_5C(nn.Module):
+ """
+ Residual Dense Block
+ The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18)
+ Modified options that can be used:
+ - "Partial Convolution based Padding" arXiv:1811.11718
+ - "Spectral normalization" arXiv:1802.05957
+ - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C.
+ {Rakotonirina} and A. {Rasoanaivo}
+ """
+
+ def __init__(self, nf=64, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
+ norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D',
+ spectral_norm=False, gaussian_noise=False, plus=False):
+ super(ResidualDenseBlock_5C, self).__init__()
+
+ self.noise = GaussianNoise() if gaussian_noise else None
+ self.conv1x1 = conv1x1(nf, gc) if plus else None
+
+ self.conv1 = conv_block(nf, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
+ spectral_norm=spectral_norm)
+ self.conv2 = conv_block(nf+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
+ spectral_norm=spectral_norm)
+ self.conv3 = conv_block(nf+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
+ spectral_norm=spectral_norm)
+ self.conv4 = conv_block(nf+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
+ spectral_norm=spectral_norm)
+ if mode == 'CNA':
+ last_act = None
+ else:
+ last_act = act_type
+ self.conv5 = conv_block(nf+4*gc, nf, 3, stride, bias=bias, pad_type=pad_type,
+ norm_type=norm_type, act_type=last_act, mode=mode, convtype=convtype,
+ spectral_norm=spectral_norm)
+
+ def forward(self, x):
+ x1 = self.conv1(x)
+ x2 = self.conv2(torch.cat((x, x1), 1))
+ if self.conv1x1:
+ x2 = x2 + self.conv1x1(x)
+ x3 = self.conv3(torch.cat((x, x1, x2), 1))
+ x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
+ if self.conv1x1:
+ x4 = x4 + x2
+ x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
+ if self.noise:
+ return self.noise(x5.mul(0.2) + x)
+ else:
+ return x5 * 0.2 + x
+
+
+####################
+# ESRGANplus
+####################
+
+class GaussianNoise(nn.Module):
+ def __init__(self, sigma=0.1, is_relative_detach=False):
+ super().__init__()
+ self.sigma = sigma
+ self.is_relative_detach = is_relative_detach
+ self.noise = torch.tensor(0, dtype=torch.float)
+
+ def forward(self, x):
+ if self.training and self.sigma != 0:
+ self.noise = self.noise.to(x.device)
+ scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
+ sampled_noise = self.noise.repeat(*x.size()).normal_() * scale
+ x = x + sampled_noise
+ return x
+
+def conv1x1(in_planes, out_planes, stride=1):
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+
+
+####################
+# SRVGGNetCompact
+####################
+
+class SRVGGNetCompact(nn.Module):
+ """A compact VGG-style network structure for super-resolution.
+ This class is copied from https://github.com/xinntao/Real-ESRGAN
+ """
+
+ def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
+ super(SRVGGNetCompact, self).__init__()
+ self.num_in_ch = num_in_ch
+ self.num_out_ch = num_out_ch
+ self.num_feat = num_feat
+ self.num_conv = num_conv
+ self.upscale = upscale
+ self.act_type = act_type
+
+ self.body = nn.ModuleList()
+ # the first conv
+ self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
+ # the first activation
+ if act_type == 'relu':
+ activation = nn.ReLU(inplace=True)
+ elif act_type == 'prelu':
+ activation = nn.PReLU(num_parameters=num_feat)
+ elif act_type == 'leakyrelu':
+ activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
+ self.body.append(activation)
+
+ # the body structure
+ for _ in range(num_conv):
+ self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
+ # activation
+ if act_type == 'relu':
+ activation = nn.ReLU(inplace=True)
+ elif act_type == 'prelu':
+ activation = nn.PReLU(num_parameters=num_feat)
+ elif act_type == 'leakyrelu':
+ activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
+ self.body.append(activation)
+
+ # the last conv
+ self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
+ # upsample
+ self.upsampler = nn.PixelShuffle(upscale)
+
+ def forward(self, x):
+ out = x
+ for layer in self.body:
+ out = layer(out)
+
+ out = self.upsampler(out)
+ # add the nearest upsampled image, so that the network learns the residual
+ base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
+ out += base
+ return out
+
+
+####################
+# Upsampler
+####################
+
+class Upsample(nn.Module):
+ r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
+ The input data is assumed to be of the form
+ `minibatch x channels x [optional depth] x [optional height] x width`.
+ """
+
+ def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None):
+ super(Upsample, self).__init__()
+ if isinstance(scale_factor, tuple):
+ self.scale_factor = tuple(float(factor) for factor in scale_factor)
+ else:
+ self.scale_factor = float(scale_factor) if scale_factor else None
+ self.mode = mode
+ self.size = size
+ self.align_corners = align_corners
+
+ def forward(self, x):
+ return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners)
+
+ def extra_repr(self):
+ if self.scale_factor is not None:
+ info = f'scale_factor={self.scale_factor}'
+ else:
+ info = f'size={self.size}'
+ info += f', mode={self.mode}'
+ return info
+
+
+def pixel_unshuffle(x, scale):
+ """ Pixel unshuffle.
+ Args:
+ x (Tensor): Input feature with shape (b, c, hh, hw).
+ scale (int): Downsample ratio.
+ Returns:
+ Tensor: the pixel unshuffled feature.
+ """
+ b, c, hh, hw = x.size()
+ out_channel = c * (scale**2)
+ assert hh % scale == 0 and hw % scale == 0
+ h = hh // scale
+ w = hw // scale
+ x_view = x.view(b, c, h, scale, w, scale)
+ return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
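+
+# Shape sketch: with scale=2, an input of shape (1, 3, 8, 8) comes out as
+# (1, 12, 4, 4) - channels grow by scale**2 while height and width shrink by scale.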
+
+
+def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
+ pad_type='zero', norm_type=None, act_type='relu', convtype='Conv2D'):
+ """
+ Pixel shuffle layer
+ (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
+ Neural Network, CVPR17)
+ """
+ conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias,
+ pad_type=pad_type, norm_type=None, act_type=None, convtype=convtype)
+ pixel_shuffle = nn.PixelShuffle(upscale_factor)
+
+ n = norm(norm_type, out_nc) if norm_type else None
+ a = act(act_type) if act_type else None
+ return sequential(conv, pixel_shuffle, n, a)
+
+
+def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
+ pad_type='zero', norm_type=None, act_type='relu', mode='nearest', convtype='Conv2D'):
+ """ Upconv layer """
+ upscale_factor = (1, upscale_factor, upscale_factor) if convtype == 'Conv3D' else upscale_factor
+ upsample = Upsample(scale_factor=upscale_factor, mode=mode)
+ conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
+ pad_type=pad_type, norm_type=norm_type, act_type=act_type, convtype=convtype)
+ return sequential(upsample, conv)
+
+
+####################
+# Basic blocks
+####################
+
+
+def make_layer(basic_block, num_basic_block, **kwarg):
+ """Make layers by stacking the same blocks.
+ Args:
+ basic_block (nn.module): nn.module class for basic block. (block)
+ num_basic_block (int): number of blocks. (n_layers)
+ Returns:
+ nn.Sequential: Stacked blocks in nn.Sequential.
+ """
+ layers = []
+ for _ in range(num_basic_block):
+ layers.append(basic_block(**kwarg))
+ return nn.Sequential(*layers)
+
+
+def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0):
+ """ activation helper """
+ act_type = act_type.lower()
+ if act_type == 'relu':
+ layer = nn.ReLU(inplace)
+ elif act_type in ('leakyrelu', 'lrelu'):
+ layer = nn.LeakyReLU(neg_slope, inplace)
+ elif act_type == 'prelu':
+ layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
+ elif act_type == 'tanh': # [-1, 1] range output
+ layer = nn.Tanh()
+ elif act_type == 'sigmoid': # [0, 1] range output
+ layer = nn.Sigmoid()
+ else:
+ raise NotImplementedError(f'activation layer [{act_type}] is not found')
+ return layer
+
+
+class Identity(nn.Module):
+ def __init__(self, *args):
+ super(Identity, self).__init__()
+
+ def forward(self, x, *args):
+ return x
+
+
+def norm(norm_type, nc):
+ """ Return a normalization layer """
+ norm_type = norm_type.lower()
+ if norm_type == 'batch':
+ layer = nn.BatchNorm2d(nc, affine=True)
+ elif norm_type == 'instance':
+ layer = nn.InstanceNorm2d(nc, affine=False)
+ elif norm_type == 'none':
+ layer = Identity()
+ else:
+ raise NotImplementedError(f'normalization layer [{norm_type}] is not found')
+ return layer
+
+
+def pad(pad_type, padding):
+ """ padding layer helper """
+ pad_type = pad_type.lower()
+ if padding == 0:
+ return None
+ if pad_type == 'reflect':
+ layer = nn.ReflectionPad2d(padding)
+ elif pad_type == 'replicate':
+ layer = nn.ReplicationPad2d(padding)
+ elif pad_type == 'zero':
+ layer = nn.ZeroPad2d(padding)
+ else:
+ raise NotImplementedError(f'padding layer [{pad_type}] is not implemented')
+ return layer
+
+
+def get_valid_padding(kernel_size, dilation):
+ kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
+ padding = (kernel_size - 1) // 2
+ return padding
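+
+# e.g. kernel_size=3, dilation=1 -> padding=1; kernel_size=3, dilation=2 gives an
+# effective kernel of 5 -> padding=2, preserving spatial size at stride 1.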
+
+
+class ShortcutBlock(nn.Module):
+ """ Elementwise sum the output of a submodule to its input """
+ def __init__(self, submodule):
+ super(ShortcutBlock, self).__init__()
+ self.sub = submodule
+
+ def forward(self, x):
+ output = x + self.sub(x)
+ return output
+
+ def __repr__(self):
+ return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|')
+
+
+def sequential(*args):
+ """ Flatten Sequential. It unwraps nn.Sequential. """
+ if len(args) == 1:
+ if isinstance(args[0], OrderedDict):
+ raise NotImplementedError('sequential does not support OrderedDict input.')
+ return args[0] # No sequential is needed.
+ modules = []
+ for module in args:
+ if isinstance(module, nn.Sequential):
+ for submodule in module.children():
+ modules.append(submodule)
+ elif isinstance(module, nn.Module):
+ modules.append(module)
+ return nn.Sequential(*modules)
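+
+# Illustrative flattening (None entries are dropped, nested Sequentials unwrapped):
+#
+#     sequential(conv, nn.Sequential(norm, act), None) -> nn.Sequential(conv, norm, act)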
+
+
+def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True,
+ pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D',
+ spectral_norm=False):
+ """ Conv layer with padding, normalization, activation """
+ assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]'
+ padding = get_valid_padding(kernel_size, dilation)
+ p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
+ padding = padding if pad_type == 'zero' else 0
+
+ if convtype=='PartialConv2D':
+ from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer
+ c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
+ dilation=dilation, bias=bias, groups=groups)
+ elif convtype=='DeformConv2D':
+ from torchvision.ops import DeformConv2d # not tested
+ c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
+ dilation=dilation, bias=bias, groups=groups)
+ elif convtype=='Conv3D':
+ c = nn.Conv3d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
+ dilation=dilation, bias=bias, groups=groups)
+ else:
+ c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
+ dilation=dilation, bias=bias, groups=groups)
+
+ if spectral_norm:
+ c = nn.utils.spectral_norm(c)
+
+ a = act(act_type) if act_type else None
+ if 'CNA' in mode:
+ n = norm(norm_type, out_nc) if norm_type else None
+ return sequential(p, c, n, a)
+ elif mode == 'NAC':
+ if norm_type is None and act_type is not None:
+ a = act(act_type, inplace=False)
+ n = norm(norm_type, in_nc) if norm_type else None
+ return sequential(n, a, p, c)
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ebe72260488f31b30532b94d0f6fff1ea2e1660
--- /dev/null
+++ b/modules/extra_networks.py
@@ -0,0 +1,224 @@
+import json
+import os
+import re
+import logging
+from collections import defaultdict
+
+from modules import errors
+
+extra_network_registry = {}
+extra_network_aliases = {}
+
+
+def initialize():
+ extra_network_registry.clear()
+ extra_network_aliases.clear()
+
+
+def register_extra_network(extra_network):
+ extra_network_registry[extra_network.name] = extra_network
+
+
+def register_extra_network_alias(extra_network, alias):
+ extra_network_aliases[alias] = extra_network
+
+
+def register_default_extra_networks():
+ from modules.extra_networks_hypernet import ExtraNetworkHypernet
+ register_extra_network(ExtraNetworkHypernet())
+
+
+class ExtraNetworkParams:
+ def __init__(self, items=None):
+ self.items = items or []
+ self.positional = []
+ self.named = {}
+
+ for item in self.items:
+ # maxsplit=1 so that values may themselves contain '='
+ parts = item.split('=', 1) if isinstance(item, str) else [item]
+ if len(parts) == 2:
+ self.named[parts[0]] = parts[1]
+ else:
+ self.positional.append(item)
+
+ def __eq__(self, other):
+ return self.items == other.items
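+
+# Illustrative parses (items are the colon-separated chunks of one <name:...> tag):
+#
+#     ExtraNetworkParams(["agm", "1.1"]) -> positional == ["agm", "1.1"], named == {}
+#     ExtraNetworkParams(["te=0.5"])     -> positional == [], named == {"te": "0.5"}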
+
+
+class ExtraNetwork:
+ def __init__(self, name):
+ self.name = name
+
+ def activate(self, p, params_list):
+ """
+ Called by processing on every run. Whatever the extra network is meant to do should be activated here.
+ Passes arguments related to this extra network in params_list.
+ The user passes arguments by specifying this in their prompt:
+
+ <name:arg1:arg2:arg3>
+
+ Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments
+ separated by colons.
+
+ Even if the user does not mention this ExtraNetwork in the prompt, the call will still be made, with an empty params_list -
+ in this case, all effects of this extra network should be disabled.
+
+ Can be called multiple times before deactivate() - each new call should override the previous call completely.
+
+ For example, if this ExtraNetwork's name is 'hypernet' and the user's prompt is:
+
+ > "1girl, <hypernet:agm:1.1>, <hypernet:ray>"
+
+ params_list will be:
+
+ [
+ ExtraNetworkParams(items=["agm", "1.1"]),
+ ExtraNetworkParams(items=["ray"])
+ ]
+
+ """
+ raise NotImplementedError
+
+ def deactivate(self, p):
+ """
+ Called at the end of processing for housekeeping. If there is nothing to clean up, the override may simply pass.
+ """
+
+ raise NotImplementedError
+
+
+def lookup_extra_networks(extra_network_data):
+ """returns a dict mapping ExtraNetwork objects to lists of arguments for those extra networks.
+
+ Example input:
+ {
+ 'lora': [<ExtraNetworkParams object>],
+ 'lyco': [<ExtraNetworkParams object>],
+ 'hypernet': [<ExtraNetworkParams object>]
+ }
+
+ Example output:
+
+ {
+ <ExtraNetworkLora object>: [<ExtraNetworkParams object>, <ExtraNetworkParams object>],
+ <ExtraNetworkHypernet object>: [<ExtraNetworkParams object>]
+ }
+ """
+
+ res = {}
+
+ for extra_network_name, extra_network_args in list(extra_network_data.items()):
+ extra_network = extra_network_registry.get(extra_network_name, None)
+ alias = extra_network_aliases.get(extra_network_name, None)
+
+ if alias is not None and extra_network is None:
+ extra_network = alias
+
+ if extra_network is None:
+ logging.info(f"Skipping unknown extra network: {extra_network_name}")
+ continue
+
+ res.setdefault(extra_network, []).extend(extra_network_args)
+
+ return res
+
+
+def activate(p, extra_network_data):
+ """call activate for extra networks in extra_network_data in specified order, then call
+ activate for all remaining registered networks with an empty argument list"""
+
+ activated = []
+
+ for extra_network, extra_network_args in lookup_extra_networks(extra_network_data).items():
+
+ try:
+ extra_network.activate(p, extra_network_args)
+ activated.append(extra_network)
+ except Exception as e:
+ errors.display(e, f"activating extra network {extra_network.name} with arguments {extra_network_args}")
+
+ for extra_network_name, extra_network in extra_network_registry.items():
+ if extra_network in activated:
+ continue
+
+ try:
+ extra_network.activate(p, [])
+ except Exception as e:
+ errors.display(e, f"activating extra network {extra_network_name}")
+
+ if p.scripts is not None:
+ p.scripts.after_extra_networks_activate(p, batch_number=p.iteration, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds, extra_network_data=extra_network_data)
+
+
+def deactivate(p, extra_network_data):
+ """call deactivate for extra networks in extra_network_data in specified order, then call
+ deactivate for all remaining registered networks"""
+
+ data = lookup_extra_networks(extra_network_data)
+
+ for extra_network in data:
+ try:
+ extra_network.deactivate(p)
+ except Exception as e:
+ errors.display(e, f"deactivating extra network {extra_network.name}")
+
+ for extra_network_name, extra_network in extra_network_registry.items():
+ if extra_network in data:
+ continue
+
+ try:
+ extra_network.deactivate(p)
+ except Exception as e:
+ errors.display(e, f"deactivating unmentioned extra network {extra_network_name}")
+
+
+re_extra_net = re.compile(r"<(\w+):([^>]+)>")
+
+
+def parse_prompt(prompt):
+ res = defaultdict(list)
+
+ def found(m):
+ name = m.group(1)
+ args = m.group(2)
+
+ res[name].append(ExtraNetworkParams(items=args.split(":")))
+
+ return ""
+
+ prompt = re.sub(re_extra_net, found, prompt)
+
+ return prompt, res
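+
+# Illustrative round trip:
+#
+#     parse_prompt("1girl, <hypernet:agm:1.1>")
+#     -> ("1girl, ", {"hypernet": [ExtraNetworkParams(items=["agm", "1.1"])]})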
+
+
+def parse_prompts(prompts):
+ res = []
+ extra_data = None
+
+ for prompt in prompts:
+ updated_prompt, parsed_extra_data = parse_prompt(prompt)
+
+ if extra_data is None:
+ extra_data = parsed_extra_data
+
+ res.append(updated_prompt)
+
+ return res, extra_data
+
+
+def get_user_metadata(filename):
+ if filename is None:
+ return {}
+
+ basename, ext = os.path.splitext(filename)
+ metadata_filename = basename + '.json'
+
+ metadata = {}
+ try:
+ if os.path.isfile(metadata_filename):
+ with open(metadata_filename, "r", encoding="utf8") as file:
+ metadata = json.load(file)
+ except Exception as e:
+ errors.display(e, f"reading extra network user metadata from {metadata_filename}")
+
+ return metadata
diff --git a/modules/face_restoration.py b/modules/face_restoration.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c86c6ccce338a1411f4367a0bc6e4046ad67cae
--- /dev/null
+++ b/modules/face_restoration.py
@@ -0,0 +1,19 @@
+from modules import shared
+
+
+class FaceRestoration:
+ def name(self):
+ return "None"
+
+ def restore(self, np_image):
+ return np_image
+
+
+def restore_faces(np_image):
+ face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None]
+ if len(face_restorers) == 0:
+ return np_image
+
+ face_restorer = face_restorers[0]
+
+ return face_restorer.restore(np_image)
diff --git a/modules/fifo_lock.py b/modules/fifo_lock.py
new file mode 100644
index 0000000000000000000000000000000000000000..c35b3ae25a3cf383c8beae04db3e0a3d66785135
--- /dev/null
+++ b/modules/fifo_lock.py
@@ -0,0 +1,37 @@
+import threading
+import collections
+
+
+# reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a
+class FIFOLock(object):
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._inner_lock = threading.Lock()
+ self._pending_threads = collections.deque()
+
+ def acquire(self, blocking=True):
+ with self._inner_lock:
+ lock_acquired = self._lock.acquire(False)
+ if lock_acquired:
+ return True
+ elif not blocking:
+ return False
+
+ release_event = threading.Event()
+ self._pending_threads.append(release_event)
+
+ release_event.wait()
+ return self._lock.acquire()
+
+ def release(self):
+ with self._inner_lock:
+ if self._pending_threads:
+ release_event = self._pending_threads.popleft()
+ release_event.set()
+
+ self._lock.release()
+
+ __enter__ = acquire
+
+ def __exit__(self, t, v, tb):
+ self.release()
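+
+
+# Usage sketch: the class implements the context-manager protocol via __enter__/__exit__,
+# so callers can serialize work in strict arrival order:
+#
+#     queue_lock = FIFOLock()
+#     with queue_lock:
+#         ...  # one thread at a time, woken FIFO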
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ef9de05a7fa7a57ea6adf955dfb43a20f5e8b58
--- /dev/null
+++ b/modules/generation_parameters_copypaste.py
@@ -0,0 +1,445 @@
+import base64
+import io
+import json
+import os
+import re
+
+import gradio as gr
+from modules.paths import data_path
+from modules import shared, ui_tempdir, script_callbacks, processing
+from PIL import Image
+
+re_param_code = r'\s*([\w ]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
+re_param = re.compile(re_param_code)
+re_imagesize = re.compile(r"^(\d+)x(\d+)$")
+re_hypernet_hash = re.compile(r"\(([0-9a-f]+)\)$")
+type_of_gr_update = type(gr.update())
+
+paste_fields = {}
+registered_param_bindings = []
+
+
+class ParamBinding:
+ def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None):
+ self.paste_button = paste_button
+ self.tabname = tabname
+ self.source_text_component = source_text_component
+ self.source_image_component = source_image_component
+ self.source_tabname = source_tabname
+ self.override_settings_component = override_settings_component
+ self.paste_field_names = paste_field_names or []
+
+
+def reset():
+ paste_fields.clear()
+ registered_param_bindings.clear()
+
+
+def quote(text):
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
+ return text
+
+ return json.dumps(text, ensure_ascii=False)
+
+
+def unquote(text):
+ if len(text) == 0 or text[0] != '"' or text[-1] != '"':
+ return text
+
+ try:
+ return json.loads(text)
+ except Exception:
+ return text
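+
+# Round-trip sketch:
+#
+#     quote("Euler a") -> "Euler a" (no comma/newline/colon, returned as-is)
+#     quote("a, b")    -> '"a, b"' (JSON-quoted because of the comma)
+#     unquote('"a, b"') -> "a, b"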
+
+
+def image_from_url_text(filedata):
+ if filedata is None:
+ return None
+
+ if type(filedata) == list and filedata and type(filedata[0]) == dict and filedata[0].get("is_file", False):
+ filedata = filedata[0]
+
+ if type(filedata) == dict and filedata.get("is_file", False):
+ filename = filedata["name"]
+ is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
+ assert is_in_right_dir, 'trying to open image file outside of allowed directories'
+
+ filename = filename.rsplit('?', 1)[0]
+ return Image.open(filename)
+
+ if type(filedata) == list:
+ if len(filedata) == 0:
+ return None
+
+ filedata = filedata[0]
+
+ if filedata.startswith("data:image/png;base64,"):
+ filedata = filedata[len("data:image/png;base64,"):]
+
+ filedata = base64.decodebytes(filedata.encode('utf-8'))
+ image = Image.open(io.BytesIO(filedata))
+ return image
+
+
+def add_paste_fields(tabname, init_img, fields, override_settings_component=None):
+ paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component}
+
+ # backwards compatibility for existing extensions
+ import modules.ui
+ if tabname == 'txt2img':
+ modules.ui.txt2img_paste_fields = fields
+ elif tabname == 'img2img':
+ modules.ui.img2img_paste_fields = fields
+
+
+def create_buttons(tabs_list):
+ buttons = {}
+ for tab in tabs_list:
+ buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab")
+ return buttons
+
+
+def bind_buttons(buttons, send_image, send_generate_info):
+ """old function for backwards compatibility; do not use this, use register_paste_params_button"""
+ for tabname, button in buttons.items():
+ source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None
+ source_tabname = send_generate_info if isinstance(send_generate_info, str) else None
+
+ register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname))
+
+
+def register_paste_params_button(binding: ParamBinding):
+ registered_param_bindings.append(binding)
+
+
+def connect_paste_params_buttons():
+ binding: ParamBinding
+ for binding in registered_param_bindings:
+ destination_image_component = paste_fields[binding.tabname]["init_img"]
+ fields = paste_fields[binding.tabname]["fields"]
+ override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"]
+
+ destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
+ destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
+
+ if binding.source_image_component and destination_image_component:
+ if isinstance(binding.source_image_component, gr.Gallery):
+ func = send_image_and_dimensions if destination_width_component else image_from_url_text
+ jsfunc = "extract_image_from_gallery"
+ else:
+ func = send_image_and_dimensions if destination_width_component else lambda x: x
+ jsfunc = None
+
+ binding.paste_button.click(
+ fn=func,
+ _js=jsfunc,
+ inputs=[binding.source_image_component],
+ outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
+ show_progress=False,
+ )
+
+ if binding.source_text_component is not None and fields is not None:
+ connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname)
+
+ if binding.source_tabname is not None and fields is not None:
+ paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names
+ binding.paste_button.click(
+ fn=lambda *x: x,
+ inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names],
+ outputs=[field for field, name in fields if name in paste_field_names],
+ show_progress=False,
+ )
+
+ binding.paste_button.click(
+ fn=None,
+ _js=f"switch_to_{binding.tabname}",
+ inputs=None,
+ outputs=None,
+ show_progress=False,
+ )
+
+
+def send_image_and_dimensions(x):
+ if isinstance(x, Image.Image):
+ img = x
+ else:
+ img = image_from_url_text(x)
+
+ if shared.opts.send_size and isinstance(img, Image.Image):
+ w = img.width
+ h = img.height
+ else:
+ w = gr.update()
+ h = gr.update()
+
+ return img, w, h
+
+
+def restore_old_hires_fix_params(res):
+ """for infotexts that specify old First pass size parameter, convert it into
+ width, height, and hr scale"""
+
+ firstpass_width = res.get('First pass size-1', None)
+ firstpass_height = res.get('First pass size-2', None)
+
+ if shared.opts.use_old_hires_fix_width_height:
+ hires_width = int(res.get("Hires resize-1", 0))
+ hires_height = int(res.get("Hires resize-2", 0))
+
+ if hires_width and hires_height:
+ res['Size-1'] = hires_width
+ res['Size-2'] = hires_height
+ return
+
+ if firstpass_width is None or firstpass_height is None:
+ return
+
+ firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height)
+ width = int(res.get("Size-1", 512))
+ height = int(res.get("Size-2", 512))
+
+ if firstpass_width == 0 or firstpass_height == 0:
+ firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height)
+
+ res['Size-1'] = firstpass_width
+ res['Size-2'] = firstpass_height
+ res['Hires resize-1'] = width
+ res['Hires resize-2'] = height
+
+
+def parse_generation_parameters(x: str):
+ """parses generation parameters string, the one you see in text field under the picture in UI:
+```
+girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate
+Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing
+Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b
+```
+
+ returns a dict with field values
+ """
+
+ res = {}
+
+ prompt = ""
+ negative_prompt = ""
+
+ done_with_prompt = False
+
+ *lines, lastline = x.strip().split("\n")
+ if len(re_param.findall(lastline)) < 3:
+ lines.append(lastline)
+ lastline = ''
+
+ for line in lines:
+ line = line.strip()
+ if line.startswith("Negative prompt:"):
+ done_with_prompt = True
+ line = line[16:].strip()
+ if done_with_prompt:
+ negative_prompt += ("" if negative_prompt == "" else "\n") + line
+ else:
+ prompt += ("" if prompt == "" else "\n") + line
+
+ if shared.opts.infotext_styles != "Ignore":
+ found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
+
+ if shared.opts.infotext_styles == "Apply":
+ res["Styles array"] = found_styles
+ elif shared.opts.infotext_styles == "Apply if any" and found_styles:
+ res["Styles array"] = found_styles
+
+ res["Prompt"] = prompt
+ res["Negative prompt"] = negative_prompt
+
+ for k, v in re_param.findall(lastline):
+ try:
+ if v[0] == '"' and v[-1] == '"':
+ v = unquote(v)
+
+ m = re_imagesize.match(v)
+ if m is not None:
+ res[f"{k}-1"] = m.group(1)
+ res[f"{k}-2"] = m.group(2)
+ else:
+ res[k] = v
+ except Exception:
+ print(f"Error parsing \"{k}: {v}\"")
+
+ # Missing CLIP skip means it was set to 1 (the default)
+ if "Clip skip" not in res:
+ res["Clip skip"] = "1"
+
+ hypernet = res.get("Hypernet", None)
+ if hypernet is not None:
+ res["Prompt"] += f""""""
+
+ if "Hires resize-1" not in res:
+ res["Hires resize-1"] = 0
+ res["Hires resize-2"] = 0
+
+ if "Hires sampler" not in res:
+ res["Hires sampler"] = "Use same sampler"
+
+ if "Hires checkpoint" not in res:
+ res["Hires checkpoint"] = "Use same checkpoint"
+
+ if "Hires prompt" not in res:
+ res["Hires prompt"] = ""
+
+ if "Hires negative prompt" not in res:
+ res["Hires negative prompt"] = ""
+
+ restore_old_hires_fix_params(res)
+
+ # Missing RNG means the default was set, which is GPU RNG
+ if "RNG" not in res:
+ res["RNG"] = "GPU"
+
+ if "Schedule type" not in res:
+ res["Schedule type"] = "Automatic"
+
+ if "Schedule max sigma" not in res:
+ res["Schedule max sigma"] = 0
+
+ if "Schedule min sigma" not in res:
+ res["Schedule min sigma"] = 0
+
+ if "Schedule rho" not in res:
+ res["Schedule rho"] = 0
+
+ if "VAE Encoder" not in res:
+ res["VAE Encoder"] = "Full"
+
+ if "VAE Decoder" not in res:
+ res["VAE Decoder"] = "Full"
+
+ return res
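+
+
+# Illustrative call (hypothetical infotext, following the format in the docstring):
+#
+#     d = parse_generation_parameters("1girl\nNegative prompt: lowres\nSteps: 20, Size: 512x768, Seed: 1")
+#     d["Prompt"] -> "1girl"; d["Negative prompt"] -> "lowres"
+#     d["Size-1"] -> "512"; d["Size-2"] -> "768"; d["Clip skip"] -> "1" (filled-in default)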
+
+
+infotext_to_setting_name_mapping = []
+"""Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead.
+Example content:
+
+infotext_to_setting_name_mapping = [
+ ('Conditional mask weight', 'inpainting_mask_weight'),
+ ('Model hash', 'sd_model_checkpoint'),
+ ('ENSD', 'eta_noise_seed_delta'),
+ ('Schedule type', 'k_sched_type'),
+]
+"""
+
+
+def create_override_settings_dict(text_pairs):
+ """creates processing's override_settings parameters from gradio's multiselect
+
+ Example input:
+ ['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337']
+
+ Example output:
+ {'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337}
+ """
+
+ res = {}
+
+ params = {}
+ for pair in text_pairs:
+ k, v = pair.split(":", maxsplit=1)
+
+ params[k] = v.strip()
+
+ mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
+ for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
+ value = params.get(param_name, None)
+
+ if value is None:
+ continue
+
+ res[setting_name] = shared.opts.cast_value(setting_name, value)
+
+ return res
+
+
+def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname):
+ def paste_func(prompt):
+ if not prompt and not shared.cmd_opts.hide_ui_dir_config:
+ filename = os.path.join(data_path, "params.txt")
+ if os.path.exists(filename):
+ with open(filename, "r", encoding="utf8") as file:
+ prompt = file.read()
+
+ params = parse_generation_parameters(prompt)
+ script_callbacks.infotext_pasted_callback(prompt, params)
+ res = []
+
+ for output, key in paste_fields:
+ if callable(key):
+ v = key(params)
+ else:
+ v = params.get(key, None)
+
+ if v is None:
+ res.append(gr.update())
+ elif isinstance(v, type_of_gr_update):
+ res.append(v)
+ else:
+ try:
+ valtype = type(output.value)
+
+ if valtype == bool and v == "False":
+ val = False
+ else:
+ val = valtype(v)
+
+ res.append(gr.update(value=val))
+ except Exception:
+ res.append(gr.update())
+
+ return res
+
+ if override_settings_component is not None:
+ already_handled_fields = {key: 1 for _, key in paste_fields}
+
+ def paste_settings(params):
+ vals = {}
+
+ mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
+ for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
+ if param_name in already_handled_fields:
+ continue
+
+ v = params.get(param_name, None)
+ if v is None:
+ continue
+
+ if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
+ continue
+
+ v = shared.opts.cast_value(setting_name, v)
+ current_value = getattr(shared.opts, setting_name, None)
+
+ if v == current_value:
+ continue
+
+ vals[param_name] = v
+
+ vals_pairs = [f"{k}: {v}" for k, v in vals.items()]
+
+ return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=bool(vals_pairs))
+
+ paste_fields = paste_fields + [(override_settings_component, paste_settings)]
+
+ button.click(
+ fn=paste_func,
+ inputs=[input_comp],
+ outputs=[x[0] for x in paste_fields],
+ show_progress=False,
+ )
+ button.click(
+ fn=None,
+ _js=f"recalculate_prompts_{tabname}",
+ inputs=[],
+ outputs=[],
+ show_progress=False,
+ )
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2b58f0b4a864977b602d513423120ad9b29d65d
--- /dev/null
+++ b/modules/gfpgan_model.py
@@ -0,0 +1,110 @@
+import os
+
+import facexlib
+import gfpgan
+
+import modules.face_restoration
+from modules import paths, shared, devices, modelloader, errors
+
+model_dir = "GFPGAN"
+user_path = None
+model_path = os.path.join(paths.models_path, model_dir)
+model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
+have_gfpgan = False
+loaded_gfpgan_model = None
+
+
+def gfpgann():
+ global loaded_gfpgan_model
+ global model_path
+ if loaded_gfpgan_model is not None:
+ loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan)
+ return loaded_gfpgan_model
+
+ if gfpgan_constructor is None:
+ return None
+
+ models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN")
+ if len(models) == 1 and models[0].startswith("http"):
+ model_file = models[0]
+ elif len(models) != 0:
+ latest_file = max(models, key=os.path.getctime)
+ model_file = latest_file
+ else:
+ print("Unable to load gfpgan model!")
+ return None
+ if hasattr(facexlib.detection.retinaface, 'device'):
+ facexlib.detection.retinaface.device = devices.device_gfpgan
+ model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan)
+ loaded_gfpgan_model = model
+
+ return model
+
+
+def send_model_to(model, device):
+ model.gfpgan.to(device)
+ model.face_helper.face_det.to(device)
+ model.face_helper.face_parse.to(device)
+
+
+def gfpgan_fix_faces(np_image):
+ model = gfpgann()
+ if model is None:
+ return np_image
+
+ send_model_to(model, devices.device_gfpgan)
+
+ np_image_bgr = np_image[:, :, ::-1]
+ cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
+ np_image = gfpgan_output_bgr[:, :, ::-1]
+
+ model.face_helper.clean_all()
+
+ if shared.opts.face_restoration_unload:
+ send_model_to(model, devices.cpu)
+
+ return np_image
+
+
+gfpgan_constructor = None
+
+
+def setup_model(dirname):
+ try:
+ os.makedirs(model_path, exist_ok=True)
+ from gfpgan import GFPGANer
+ from facexlib import detection, parsing # noqa: F401
+ global user_path
+ global have_gfpgan
+ global gfpgan_constructor
+
+ load_file_from_url_orig = gfpgan.utils.load_file_from_url
+ facex_load_file_from_url_orig = facexlib.detection.load_file_from_url
+ facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url
+
+ def my_load_file_from_url(**kwargs):
+ return load_file_from_url_orig(**dict(kwargs, model_dir=model_path))
+
+ def facex_load_file_from_url(**kwargs):
+ return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None))
+
+ def facex_load_file_from_url2(**kwargs):
+ return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None))
+
+ gfpgan.utils.load_file_from_url = my_load_file_from_url
+ facexlib.detection.load_file_from_url = facex_load_file_from_url
+ facexlib.parsing.load_file_from_url = facex_load_file_from_url2
+ user_path = dirname
+ have_gfpgan = True
+ gfpgan_constructor = GFPGANer
+
+ class FaceRestorerGFPGAN(modules.face_restoration.FaceRestoration):
+ def name(self):
+ return "GFPGAN"
+
+ def restore(self, np_image):
+ return gfpgan_fix_faces(np_image)
+
+ shared.face_restorers.append(FaceRestorerGFPGAN())
+ except Exception:
+ errors.report("Error setting up GFPGAN", exc_info=True)
diff --git a/modules/gitpython_hack.py b/modules/gitpython_hack.py
new file mode 100644
index 0000000000000000000000000000000000000000..e537c1df93e15679d90e9eea3337035a8d50da89
--- /dev/null
+++ b/modules/gitpython_hack.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+import io
+import subprocess
+
+import git
+
+
+class Git(git.Git):
+ """
+ Git subclassed to never use persistent processes.
+ """
+
+ def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
+ raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})")
+
+ def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
+ ret = subprocess.check_output(
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"],
+ input=self._prepare_ref(ref),
+ cwd=self._working_dir,
+ timeout=2,
+ )
+ return self._parse_object_header(ret)
+
+ def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
+ # Not really streaming, per se; this buffers the entire object in memory.
+ # Shouldn't be a problem for our use case, since we're only using this for
+ # object headers (commit objects).
+ ret = subprocess.check_output(
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"],
+ input=self._prepare_ref(ref),
+ cwd=self._working_dir,
+ timeout=30,
+ )
+ bio = io.BytesIO(ret)
+ hexsha, typename, size = self._parse_object_header(bio.readline())
+ return (hexsha, typename, size, self.CatFileContentStream(size, bio))
+
+
+class Repo(git.Repo):
+ GitCommandWrapperType = Git
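+
+
+# Usage sketch: substituting this Repo for git.Repo makes object lookups go through
+# the one-shot subprocess calls above instead of GitPython's persistent cat-file
+# processes:
+#
+#     repo = Repo(extension_dir)  # extension_dir is a hypothetical path
+#     commit_hash = repo.head.commit.hexsha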
diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py
new file mode 100644
index 0000000000000000000000000000000000000000..aac742ef0c17cdde7e470d7a068cdbbcec09b57e
--- /dev/null
+++ b/modules/gradio_extensons.py
@@ -0,0 +1,73 @@
+import gradio as gr
+
+from modules import scripts, ui_tempdir, patches
+
+
+def add_classes_to_gradio_component(comp):
+ """
+ this adds a gradio-* class to the component for css styling (e.g. gradio-button for gr.Button), as well as some others
+ """
+
+ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
+
+ if getattr(comp, 'multiselect', False):
+ comp.elem_classes.append('multiselect')
+
+
+def IOComponent_init(self, *args, **kwargs):
+ self.webui_tooltip = kwargs.pop('tooltip', None)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.before_component(self, **kwargs)
+
+ scripts.script_callbacks.before_component_callback(self, **kwargs)
+
+ res = original_IOComponent_init(self, *args, **kwargs)
+
+ add_classes_to_gradio_component(self)
+
+ scripts.script_callbacks.after_component_callback(self, **kwargs)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.after_component(self, **kwargs)
+
+ return res
+
+
+def Block_get_config(self):
+ config = original_Block_get_config(self)
+
+ webui_tooltip = getattr(self, 'webui_tooltip', None)
+ if webui_tooltip:
+ config["webui_tooltip"] = webui_tooltip
+
+ config.pop('example_inputs', None)
+
+ return config
+
+
+def BlockContext_init(self, *args, **kwargs):
+ res = original_BlockContext_init(self, *args, **kwargs)
+
+ add_classes_to_gradio_component(self)
+
+ return res
+
+
+def Blocks_get_config_file(self, *args, **kwargs):
+ config = original_Blocks_get_config_file(self, *args, **kwargs)
+
+ for comp_config in config["components"]:
+ if "example_inputs" in comp_config:
+ comp_config["example_inputs"] = {"serialized": []}
+
+ return config
+
+
+original_IOComponent_init = patches.patch(__name__, obj=gr.components.IOComponent, field="__init__", replacement=IOComponent_init)
+original_Block_get_config = patches.patch(__name__, obj=gr.blocks.Block, field="get_config", replacement=Block_get_config)
+original_BlockContext_init = patches.patch(__name__, obj=gr.blocks.BlockContext, field="__init__", replacement=BlockContext_init)
+original_Blocks_get_config_file = patches.patch(__name__, obj=gr.blocks.Blocks, field="get_config_file", replacement=Blocks_get_config_file)
+
+
+ui_tempdir.install_ui_tempdir_override()
diff --git a/modules/hypernetworks/__pycache__/hypernetwork.cpython-39.pyc b/modules/hypernetworks/__pycache__/hypernetwork.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c29694384b0a4ee10ffaad4f8fc63ecda6d86323
Binary files /dev/null and b/modules/hypernetworks/__pycache__/hypernetwork.cpython-39.pyc differ
diff --git a/modules/hypernetworks/__pycache__/ui.cpython-39.pyc b/modules/hypernetworks/__pycache__/ui.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c402ed19631968f1225528581feeddb79fe0abd6
Binary files /dev/null and b/modules/hypernetworks/__pycache__/ui.cpython-39.pyc differ
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e1fb91c399beb2712f49dca9e9e0380c00ef79
--- /dev/null
+++ b/modules/hypernetworks/hypernetwork.py
@@ -0,0 +1,782 @@
+import datetime
+import glob
+import html
+import os
+import inspect
+from contextlib import closing
+
+import modules.textual_inversion.dataset
+import torch
+import tqdm
+from einops import rearrange, repeat
+from ldm.util import default
+from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
+from modules.textual_inversion import textual_inversion, logging
+from modules.textual_inversion.learn_schedule import LearnRateScheduler
+from torch import einsum
+from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
+
+from collections import deque
+from statistics import stdev, mean
+
+
+optimizer_dict = {optim_name: cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
+
+
+class HypernetworkModule(torch.nn.Module):
+ activation_dict = {
+ "linear": torch.nn.Identity,
+ "relu": torch.nn.ReLU,
+ "leakyrelu": torch.nn.LeakyReLU,
+ "elu": torch.nn.ELU,
+ "swish": torch.nn.Hardswish,
+ "tanh": torch.nn.Tanh,
+ "sigmoid": torch.nn.Sigmoid,
+ }
+ activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
+
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
+ add_layer_norm=False, activate_output=False, dropout_structure=None):
+ super().__init__()
+
+ self.multiplier = 1.0
+
+ assert layer_structure is not None, "layer_structure must not be None"
+ assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
+ assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
+
+ linears = []
+ for i in range(len(layer_structure) - 1):
+
+ # Add a fully-connected layer
+ linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
+
+ # Add an activation func except last layer
+ if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
+ pass
+ elif activation_func in self.activation_dict:
+ linears.append(self.activation_dict[activation_func]())
+ else:
+ raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')
+
+ # Add layer normalization
+ if add_layer_norm:
+ linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
+
+            # By now everything should have been parsed into dropout_structure, which is applied here.
+            # Since dropout only ever follows a layer, the structure must start and end with 0.
+ if dropout_structure is not None and dropout_structure[i+1] > 0:
+                assert 0 < dropout_structure[i+1] < 1, "Dropout probability must be a float strictly between 0 and 1!"
+ linears.append(torch.nn.Dropout(p=dropout_structure[i+1]))
+        # Example: for layer structure [1, 2, 2, 1], dropout_structure is [0, 0.3, 0, 0] when last_layer_dropout is False and [0, 0.3, 0.3, 0] when it is True; [1, 2, 1] gets no dropout at all unless last_layer_dropout is True.
+
+ self.linear = torch.nn.Sequential(*linears)
+
+ if state_dict is not None:
+ self.fix_old_state_dict(state_dict)
+ self.load_state_dict(state_dict)
+ else:
+ for layer in self.linear:
+ if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
+ w, b = layer.weight.data, layer.bias.data
+ if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
+ normal_(w, mean=0.0, std=0.01)
+ normal_(b, mean=0.0, std=0)
+ elif weight_init == 'XavierUniform':
+ xavier_uniform_(w)
+ zeros_(b)
+ elif weight_init == 'XavierNormal':
+ xavier_normal_(w)
+ zeros_(b)
+ elif weight_init == 'KaimingUniform':
+ kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
+ zeros_(b)
+ elif weight_init == 'KaimingNormal':
+ kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
+ zeros_(b)
+ else:
+ raise KeyError(f"Key {weight_init} is not defined as initialization!")
+ self.to(devices.device)
+
+ def fix_old_state_dict(self, state_dict):
+ changes = {
+ 'linear1.bias': 'linear.0.bias',
+ 'linear1.weight': 'linear.0.weight',
+ 'linear2.bias': 'linear.1.bias',
+ 'linear2.weight': 'linear.1.weight',
+ }
+
+ for fr, to in changes.items():
+ x = state_dict.get(fr, None)
+ if x is None:
+ continue
+
+ del state_dict[fr]
+ state_dict[to] = x
+
+ def forward(self, x):
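+        # Residual formulation: the module learns an offset on top of its input; the
+        # multiplier scales that offset at inference time (training always uses 1).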
+ return x + self.linear(x) * (self.multiplier if not self.training else 1)
+
+ def trainables(self):
+ layer_structure = []
+ for layer in self.linear:
+ if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
+ layer_structure += [layer.weight, layer.bias]
+ return layer_structure
+
+
+# layer_structure: sequence whose length determines the dropout structure; use_dropout: master switch; last_layer_dropout: kept for compatibility with older hypernetworks.
+def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout):
+ if layer_structure is None:
+ layer_structure = [1, 2, 1]
+ if not use_dropout:
+ return [0] * len(layer_structure)
+ dropout_values = [0]
+ dropout_values.extend([0.3] * (len(layer_structure) - 3))
+ if last_layer_dropout:
+ dropout_values.append(0.3)
+ else:
+ dropout_values.append(0)
+ dropout_values.append(0)
+ return dropout_values
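+
+# For example:
+#   parse_dropout_structure([1, 2, 2, 1], True, False) -> [0, 0.3, 0, 0]
+#   parse_dropout_structure([1, 2, 2, 1], True, True)  -> [0, 0.3, 0.3, 0]
+#   parse_dropout_structure([1, 2, 1], False, True)    -> [0, 0, 0]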
+
+
+class Hypernetwork:
+ filename = None
+ name = None
+
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
+ self.filename = None
+ self.name = name
+ self.layers = {}
+ self.step = 0
+ self.sd_checkpoint = None
+ self.sd_checkpoint_name = None
+ self.layer_structure = layer_structure
+ self.activation_func = activation_func
+ self.weight_init = weight_init
+ self.add_layer_norm = add_layer_norm
+ self.use_dropout = use_dropout
+ self.activate_output = activate_output
+ self.last_layer_dropout = kwargs.get('last_layer_dropout', True)
+ self.dropout_structure = kwargs.get('dropout_structure', None)
+ if self.dropout_structure is None:
+ self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
+ self.optimizer_name = None
+ self.optimizer_state_dict = None
+ self.optional_info = None
+
+ for size in enable_sizes or []:
+ self.layers[size] = (
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
+ )
+ self.eval()
+
+ def weights(self):
+ res = []
+ for layers in self.layers.values():
+ for layer in layers:
+ res += layer.parameters()
+ return res
+
+ def train(self, mode=True):
+ for layers in self.layers.values():
+ for layer in layers:
+ layer.train(mode=mode)
+ for param in layer.parameters():
+ param.requires_grad = mode
+
+ def to(self, device):
+ for layers in self.layers.values():
+ for layer in layers:
+ layer.to(device)
+
+ return self
+
+ def set_multiplier(self, multiplier):
+ for layers in self.layers.values():
+ for layer in layers:
+ layer.multiplier = multiplier
+
+ return self
+
+ def eval(self):
+ for layers in self.layers.values():
+ for layer in layers:
+ layer.eval()
+ for param in layer.parameters():
+ param.requires_grad = False
+
+ def save(self, filename):
+ state_dict = {}
+ optimizer_saved_dict = {}
+
+ for k, v in self.layers.items():
+ state_dict[k] = (v[0].state_dict(), v[1].state_dict())
+
+ state_dict['step'] = self.step
+ state_dict['name'] = self.name
+ state_dict['layer_structure'] = self.layer_structure
+ state_dict['activation_func'] = self.activation_func
+ state_dict['is_layer_norm'] = self.add_layer_norm
+ state_dict['weight_initialization'] = self.weight_init
+ state_dict['sd_checkpoint'] = self.sd_checkpoint
+ state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
+ state_dict['activate_output'] = self.activate_output
+ state_dict['use_dropout'] = self.use_dropout
+ state_dict['dropout_structure'] = self.dropout_structure
+ state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout
+ state_dict['optional_info'] = self.optional_info if self.optional_info else None
+
+ if self.optimizer_name is not None:
+ optimizer_saved_dict['optimizer_name'] = self.optimizer_name
+
+ torch.save(state_dict, filename)
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict:
+ optimizer_saved_dict['hash'] = self.shorthash()
+ optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+ torch.save(optimizer_saved_dict, filename + '.optim')
+
+ def load(self, filename):
+ self.filename = filename
+ if self.name is None:
+ self.name = os.path.splitext(os.path.basename(filename))[0]
+
+ state_dict = torch.load(filename, map_location='cpu')
+
+ self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
+ self.optional_info = state_dict.get('optional_info', None)
+ self.activation_func = state_dict.get('activation_func', None)
+ self.weight_init = state_dict.get('weight_initialization', 'Normal')
+ self.add_layer_norm = state_dict.get('is_layer_norm', False)
+ self.dropout_structure = state_dict.get('dropout_structure', None)
+ self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False)
+ self.activate_output = state_dict.get('activate_output', True)
+ self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
+        # The dropout structure must have the same length as the layer structure; every value must be in [0, 1), and the last value must be 0.
+ if self.dropout_structure is None:
+ self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
+
+ if shared.opts.print_hypernet_extra:
+ if self.optional_info is not None:
+ print(f" INFO:\n {self.optional_info}\n")
+
+ print(f" Layer structure: {self.layer_structure}")
+ print(f" Activation function: {self.activation_func}")
+ print(f" Weight initialization: {self.weight_init}")
+ print(f" Layer norm: {self.add_layer_norm}")
+ print(f" Dropout usage: {self.use_dropout}" )
+ print(f" Activate last layer: {self.activate_output}")
+ print(f" Dropout structure: {self.dropout_structure}")
+
+ optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {}
+
+ if self.shorthash() == optimizer_saved_dict.get('hash', None):
+ self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+ else:
+ self.optimizer_state_dict = None
+ if self.optimizer_state_dict:
+ self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
+ if shared.opts.print_hypernet_extra:
+ print("Loaded existing optimizer from checkpoint")
+ print(f"Optimizer name is {self.optimizer_name}")
+ else:
+ self.optimizer_name = "AdamW"
+ if shared.opts.print_hypernet_extra:
+ print("No saved optimizer exists in checkpoint")
+
+ for size, sd in state_dict.items():
+ if type(size) == int:
+ self.layers[size] = (
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.activate_output, self.dropout_structure),
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.activate_output, self.dropout_structure),
+ )
+
+ self.name = state_dict.get('name', self.name)
+ self.step = state_dict.get('step', 0)
+ self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
+ self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
+ self.eval()
+
+ def shorthash(self):
+ sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}')
+
+ return sha256[0:10] if sha256 else None
+
+
+def list_hypernetworks(path):
+ res = {}
+ for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True), key=str.lower):
+ name = os.path.splitext(os.path.basename(filename))[0]
+ # Prevent a hypothetical "None.pt" from being listed.
+ if name != "None":
+ res[name] = filename
+ return res
+
+
+def load_hypernetwork(name):
+ path = shared.hypernetworks.get(name, None)
+
+ if path is None:
+ return None
+
+ try:
+ hypernetwork = Hypernetwork()
+ hypernetwork.load(path)
+ return hypernetwork
+ except Exception:
+ errors.report(f"Error loading hypernetwork {path}", exc_info=True)
+ return None
+
+
+def load_hypernetworks(names, multipliers=None):
+ already_loaded = {}
+
+ for hypernetwork in shared.loaded_hypernetworks:
+ if hypernetwork.name in names:
+ already_loaded[hypernetwork.name] = hypernetwork
+
+ shared.loaded_hypernetworks.clear()
+
+ for i, name in enumerate(names):
+ hypernetwork = already_loaded.get(name, None)
+ if hypernetwork is None:
+ hypernetwork = load_hypernetwork(name)
+
+ if hypernetwork is None:
+ continue
+
+ hypernetwork.set_multiplier(multipliers[i] if multipliers else 1.0)
+ shared.loaded_hypernetworks.append(hypernetwork)
+
+
+def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
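+    # Hypernetwork layers are keyed by the context's embedding width (context_k.shape[2],
+    # e.g. 768 or 1024), so networks trained for a different text encoder are skipped.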
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None)
+
+ if hypernetwork_layers is None:
+ return context_k, context_v
+
+ if layer is not None:
+ layer.hyper_k = hypernetwork_layers[0]
+ layer.hyper_v = hypernetwork_layers[1]
+
+ context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k)))
+ context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v)))
+ return context_k, context_v
+
+
+def apply_hypernetworks(hypernetworks, context, layer=None):
+ context_k = context
+ context_v = context
+ for hypernetwork in hypernetworks:
+ context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer)
+
+ return context_k, context_v
+
+
+def attention_CrossAttention_forward(self, x, context=None, mask=None, **kwargs):
+ h = self.heads
+
+ q = self.to_q(x)
+ context = default(context, x)
+
+ context_k, context_v = apply_hypernetworks(shared.loaded_hypernetworks, context, self)
+ k = self.to_k(context_k)
+ v = self.to_v(context_v)
+
+ q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
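+    # q, k, v now have shape (batch * heads, tokens, head_dim)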
+
+ sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+ if mask is not None:
+ mask = rearrange(mask, 'b ... -> b (...)')
+ max_neg_value = -torch.finfo(sim.dtype).max
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
+ sim.masked_fill_(~mask, max_neg_value)
+
+ # attention, what we cannot get enough of
+ attn = sim.softmax(dim=-1)
+
+ out = einsum('b i j, b j d -> b i d', attn, v)
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+ return self.to_out(out)
+
+
+def stack_conds(conds):
+ if len(conds) == 1:
+ return torch.stack(conds)
+
+ # same as in reconstruct_multicond_batch
+ token_count = max([x.shape[0] for x in conds])
+ for i in range(len(conds)):
+ if conds[i].shape[0] != token_count:
+ last_vector = conds[i][-1:]
+ last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
+ conds[i] = torch.vstack([conds[i], last_vector_repeated])
+
+ return torch.stack(conds)
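+
+# Shape sketch (hypothetical sizes): stack_conds([torch.ones(77, 768), torch.ones(154, 768)])
+# returns a (2, 154, 768) tensor; the shorter cond is padded by repeating its last row.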
+
+
+def statistics(data):
+ if len(data) < 2:
+ std = 0
+ else:
+ std = stdev(data)
+    total_information = f"loss:{mean(data):.3f}±({std / len(data) ** 0.5:.3f})"
+ recent_data = data[-32:]
+ if len(recent_data) < 2:
+ std = 0
+ else:
+ std = stdev(recent_data)
+    recent_information = f"recent 32 loss:{mean(recent_data):.3f}±({std / len(recent_data) ** 0.5:.3f})"
+ return total_information, recent_information
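+
+# The value after ± is the standard error of the mean. For example:
+#   statistics([0.1, 0.2, 0.3]) -> ("loss:0.200±(0.058)", "recent 32 loss:0.200±(0.058)")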
+
+
+def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
+ # Remove illegal characters from name.
+ name = "".join( x for x in name if (x.isalnum() or x in "._- "))
+ assert name, "Name cannot be empty!"
+
+ fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
+ if not overwrite_old:
+ assert not os.path.exists(fn), f"file {fn} already exists"
+
+ if type(layer_structure) == str:
+ layer_structure = [float(x.strip()) for x in layer_structure.split(",")]
+
+ if use_dropout and dropout_structure and type(dropout_structure) == str:
+ dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")]
+ else:
+ dropout_structure = [0] * len(layer_structure)
+
+ hypernet = modules.hypernetworks.hypernetwork.Hypernetwork(
+ name=name,
+ enable_sizes=[int(x) for x in enable_sizes],
+ layer_structure=layer_structure,
+ activation_func=activation_func,
+ weight_init=weight_init,
+ add_layer_norm=add_layer_norm,
+ use_dropout=use_dropout,
+ dropout_structure=dropout_structure
+ )
+ hypernet.save(fn)
+
+    shared.reload_hypernetworks()
+
+    return fn
+
+
+def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ from modules import images, processing
+
+ save_hypernetwork_every = save_hypernetwork_every or 0
+ create_image_every = create_image_every or 0
+ template_file = textual_inversion.textual_inversion_templates.get(template_filename, None)
+ textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
+ template_file = template_file.path
+
+ path = shared.hypernetworks.get(hypernetwork_name, None)
+ hypernetwork = Hypernetwork()
+ hypernetwork.load(path)
+ shared.loaded_hypernetworks = [hypernetwork]
+
+ shared.state.job = "train-hypernetwork"
+ shared.state.textinfo = "Initializing hypernetwork training..."
+ shared.state.job_count = steps
+
+ hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
+ filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
+ unload = shared.opts.unload_models_when_training
+
+ if save_hypernetwork_every > 0:
+ hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
+ os.makedirs(hypernetwork_dir, exist_ok=True)
+ else:
+ hypernetwork_dir = None
+
+ if create_image_every > 0:
+ images_dir = os.path.join(log_directory, "images")
+ os.makedirs(images_dir, exist_ok=True)
+ else:
+ images_dir = None
+
+ checkpoint = sd_models.select_checkpoint()
+
+ initial_step = hypernetwork.step or 0
+ if initial_step >= steps:
+ shared.state.textinfo = "Model has already been trained beyond specified max steps"
+ return hypernetwork, filename
+
+ scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
+
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None
+ if clip_grad:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
+
+ if shared.opts.training_enable_tensorboard:
+ tensorboard_writer = textual_inversion.tensorboard_setup(log_directory)
+
+ # dataset loading may take a while, so input validations and early returns should be done before this
+ shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
+
+ pin_memory = shared.opts.pin_memory
+
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
+
+ if shared.opts.save_training_settings_to_txt:
+ saved_params = dict(
+ model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
+ **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
+ )
+ logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
+
+ latent_sampling_method = ds.latent_sampling_method
+
+ dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
+
+ old_parallel_processing_allowed = shared.parallel_processing_allowed
+
+ if unload:
+ shared.parallel_processing_allowed = False
+ shared.sd_model.cond_stage_model.to(devices.cpu)
+ shared.sd_model.first_stage_model.to(devices.cpu)
+
+ weights = hypernetwork.weights()
+ hypernetwork.train()
+
+    # Use the optimizer type stored in the saved hypernetwork; this could also be exposed as a UI option.
+ if hypernetwork.optimizer_name in optimizer_dict:
+ optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+ optimizer_name = hypernetwork.optimizer_name
+ else:
+ print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
+ optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+ optimizer_name = 'AdamW'
+
+    if hypernetwork.optimizer_state_dict:  # this check must change if the optimizer type can differ from the saved one
+ try:
+ optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+ except RuntimeError as e:
+ print("Cannot resume from saved optimizer!")
+ print(e)
+
+ scaler = torch.cuda.amp.GradScaler()
+
+ batch_size = ds.batch_size
+ gradient_step = ds.gradient_step
+    # one optimizer step consumes batch_size * gradient_step images
+ steps_per_epoch = len(ds) // batch_size // gradient_step
+ max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
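+    # e.g. 100 images, batch_size 3, gradient_step 4 -> 33 batches per epoch, 8 optimizer
+    # steps per epoch, and max_steps_per_epoch = 32 (the trailing partial accumulation is dropped)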
+ loss_step = 0
+    _loss_step = 0  # internal accumulator for the current gradient-accumulation window
+    loss_logging = deque(maxlen=len(ds) * 3)  # holds three epochs' worth of losses; arguably this should be a configurable parameter
+
+ steps_without_grad = 0
+
+ last_saved_file = ""
+ last_saved_image = ""
+ forced_filename = ""
+
+ pbar = tqdm.tqdm(total=steps - initial_step)
+ try:
+ sd_hijack_checkpoint.add()
+
+ for _ in range((steps-initial_step) * gradient_step):
+ if scheduler.finished:
+ break
+ if shared.state.interrupted:
+ break
+ for j, batch in enumerate(dl):
+ # works as a drop_last=True for gradient accumulation
+ if j == max_steps_per_epoch:
+ break
+ scheduler.apply(optimizer, hypernetwork.step)
+ if scheduler.finished:
+ break
+ if shared.state.interrupted:
+ break
+
+ if clip_grad:
+ clip_grad_sched.step(hypernetwork.step)
+
+ with devices.autocast():
+ x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+ if use_weight:
+ w = batch.weight.to(devices.device, non_blocking=pin_memory)
+ if tag_drop_out != 0 or shuffle_tags:
+ shared.sd_model.cond_stage_model.to(devices.device)
+ c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
+ shared.sd_model.cond_stage_model.to(devices.cpu)
+ else:
+ c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
+ if use_weight:
+ loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
+ del w
+ else:
+ loss = shared.sd_model.forward(x, c)[0] / gradient_step
+ del x
+ del c
+
+ _loss_step += loss.item()
+ scaler.scale(loss).backward()
+
+                # keep accumulating gradients until a full gradient_step window has been processed
+ if (j + 1) % gradient_step != 0:
+ continue
+ loss_logging.append(_loss_step)
+ if clip_grad:
+ clip_grad(weights, clip_grad_sched.learn_rate)
+
+ scaler.step(optimizer)
+ scaler.update()
+ hypernetwork.step += 1
+ pbar.update()
+ optimizer.zero_grad(set_to_none=True)
+ loss_step = _loss_step
+ _loss_step = 0
+
+ steps_done = hypernetwork.step + 1
+
+ epoch_num = hypernetwork.step // steps_per_epoch
+ epoch_step = hypernetwork.step % steps_per_epoch
+
+ description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
+ pbar.set_description(description)
+ if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
+                    # Before saving, suffix the name with the current step count.
+ hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
+ last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+ hypernetwork.optimizer_name = optimizer_name
+ if shared.opts.save_optimizer_state:
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
+ save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
+
+ if shared.opts.training_enable_tensorboard:
+ epoch_num = hypernetwork.step // len(ds)
+ epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1
+ mean_loss = sum(loss_logging) / len(loss_logging)
+ textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num)
+
+ textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
+ "loss": f"{loss_step:.7f}",
+ "learn_rate": scheduler.learn_rate
+ })
+
+ if images_dir is not None and steps_done % create_image_every == 0:
+ forced_filename = f'{hypernetwork_name}-{steps_done}'
+ last_saved_image = os.path.join(images_dir, forced_filename)
+ hypernetwork.eval()
+ rng_state = torch.get_rng_state()
+ cuda_rng_state = None
+ if torch.cuda.is_available():
+ cuda_rng_state = torch.cuda.get_rng_state_all()
+ shared.sd_model.cond_stage_model.to(devices.device)
+ shared.sd_model.first_stage_model.to(devices.device)
+
+ p = processing.StableDiffusionProcessingTxt2Img(
+ sd_model=shared.sd_model,
+ do_not_save_grid=True,
+ do_not_save_samples=True,
+ )
+
+ p.disable_extra_networks = True
+
+ if preview_from_txt2img:
+ p.prompt = preview_prompt
+ p.negative_prompt = preview_negative_prompt
+ p.steps = preview_steps
+ p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
+ p.cfg_scale = preview_cfg_scale
+ p.seed = preview_seed
+ p.width = preview_width
+ p.height = preview_height
+ else:
+ p.prompt = batch.cond_text[0]
+ p.steps = 20
+ p.width = training_width
+ p.height = training_height
+
+ preview_text = p.prompt
+
+ with closing(p):
+ processed = processing.process_images(p)
+ image = processed.images[0] if len(processed.images) > 0 else None
+
+ if unload:
+ shared.sd_model.cond_stage_model.to(devices.cpu)
+ shared.sd_model.first_stage_model.to(devices.cpu)
+ torch.set_rng_state(rng_state)
+ if torch.cuda.is_available():
+ torch.cuda.set_rng_state_all(cuda_rng_state)
+ hypernetwork.train()
+ if image is not None:
+ shared.state.assign_current_image(image)
+ if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+ textual_inversion.tensorboard_add_image(tensorboard_writer,
+ f"Validation at epoch {epoch_num}", image,
+ hypernetwork.step)
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+ last_saved_image += f", prompt: {preview_text}"
+
+ shared.state.job_no = hypernetwork.step
+
+                shared.state.textinfo = f"""
+<p>
+Loss: {loss_step:.7f}<br/>
+Step: {steps_done}<br/>
+Last prompt: {html.escape(batch.cond_text[0])}<br/>
+Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>
+"""
+ except Exception:
+ errors.report("Exception in training hypernetwork", exc_info=True)
+ finally:
+ pbar.leave = False
+ pbar.close()
+ hypernetwork.eval()
+ sd_hijack_checkpoint.remove()
+
+ filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+ hypernetwork.optimizer_name = optimizer_name
+ if shared.opts.save_optimizer_state:
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
+ save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
+
+ del optimizer
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
+ shared.sd_model.cond_stage_model.to(devices.device)
+ shared.sd_model.first_stage_model.to(devices.device)
+ shared.parallel_processing_allowed = old_parallel_processing_allowed
+
+ return hypernetwork, filename
+
+
+def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
+ old_hypernetwork_name = hypernetwork.name
+ old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
+ old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
+ try:
+ hypernetwork.sd_checkpoint = checkpoint.shorthash
+ hypernetwork.sd_checkpoint_name = checkpoint.model_name
+ hypernetwork.name = hypernetwork_name
+ hypernetwork.save(filename)
+ except:
+ hypernetwork.sd_checkpoint = old_sd_checkpoint
+ hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
+ hypernetwork.name = old_hypernetwork_name
+ raise
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..351910461dadbf3bfe027e542e0fddf896352d17
--- /dev/null
+++ b/modules/hypernetworks/ui.py
@@ -0,0 +1,38 @@
+import html
+
+import gradio as gr
+import modules.hypernetworks.hypernetwork
+from modules import devices, sd_hijack, shared
+
+not_available = ["hardswish", "multiheadattention"]
+keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]
+
+
+def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
+ filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
+
+ return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""
+
+
+def train_hypernetwork(*args):
+ shared.loaded_hypernetworks = []
+
+ assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible'
+
+ try:
+ sd_hijack.undo_optimizations()
+
+ hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args)
+
+ res = f"""
+Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps.
+Hypernetwork saved to {html.escape(filename)}
+"""
+ return res, ""
+ finally:
+ shared.sd_model.cond_stage_model.to(devices.device)
+ shared.sd_model.first_stage_model.to(devices.device)
+ sd_hijack.apply_optimizations()
+
diff --git a/modules/images.py b/modules/images.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b37cc3daee6591e6c59eb33546cc7944e5c8d41
--- /dev/null
+++ b/modules/images.py
@@ -0,0 +1,778 @@
+from __future__ import annotations
+
+import datetime
+
+import pytz
+import io
+import math
+import os
+from collections import namedtuple
+import re
+
+import numpy as np
+import piexif
+import piexif.helper
+from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin
+import string
+import json
+import hashlib
+
+from modules import sd_samplers, shared, script_callbacks, errors
+from modules.paths_internal import roboto_ttf_file
+from modules.shared import opts
+
+LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
+
+
+def get_font(fontsize: int):
+ try:
+ return ImageFont.truetype(opts.font or roboto_ttf_file, fontsize)
+ except Exception:
+ return ImageFont.truetype(roboto_ttf_file, fontsize)
+
+
+def image_grid(imgs, batch_size=1, rows=None):
+ if rows is None:
+ if opts.n_rows > 0:
+ rows = opts.n_rows
+ elif opts.n_rows == 0:
+ rows = batch_size
+ elif opts.grid_prevent_empty_spots:
+ rows = math.floor(math.sqrt(len(imgs)))
+ while len(imgs) % rows != 0:
+ rows -= 1
+ else:
+ rows = math.sqrt(len(imgs))
+ rows = round(rows)
+ if rows > len(imgs):
+ rows = len(imgs)
+
+ cols = math.ceil(len(imgs) / rows)
+
+ params = script_callbacks.ImageGridLoopParams(imgs, cols, rows)
+ script_callbacks.image_grid_callback(params)
+
+ w, h = imgs[0].size
+ grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black')
+
+ for i, img in enumerate(params.imgs):
+ grid.paste(img, box=(i % params.cols * w, i // params.cols * h))
+
+ return grid
+
+
+Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
+
+
+def split_grid(image, tile_w=512, tile_h=512, overlap=64):
+ w = image.width
+ h = image.height
+
+ non_overlap_width = tile_w - overlap
+ non_overlap_height = tile_h - overlap
+
+ cols = math.ceil((w - overlap) / non_overlap_width)
+ rows = math.ceil((h - overlap) / non_overlap_height)
+
+ dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
+ dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
+
+ grid = Grid([], tile_w, tile_h, w, h, overlap)
+ for row in range(rows):
+ row_images = []
+
+ y = int(row * dy)
+
+ if y + tile_h >= h:
+ y = h - tile_h
+
+ for col in range(cols):
+ x = int(col * dx)
+
+ if x + tile_w >= w:
+ x = w - tile_w
+
+ tile = image.crop((x, y, x + tile_w, y + tile_h))
+
+ row_images.append([x, tile_w, tile])
+
+ grid.tiles.append([y, tile_h, row_images])
+
+ return grid
+
+
+def combine_grid(grid):
+ def make_mask_image(r):
+ r = r * 255 / grid.overlap
+ r = r.astype(np.uint8)
+ return Image.fromarray(r, 'L')
+
+ mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
+ mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
+
+ combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
+ for y, h, row in grid.tiles:
+ combined_row = Image.new("RGB", (grid.image_w, h))
+ for x, w, tile in row:
+ if x == 0:
+ combined_row.paste(tile, (0, 0))
+ continue
+
+ combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
+ combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
+
+ if y == 0:
+ combined_image.paste(combined_row, (0, 0))
+ continue
+
+ combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
+ combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
+
+ return combined_image
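+
+# Round-trip sketch: tiles can be processed independently and recombined, with the
+# overlap regions blended by the masks above (variable names illustrative):
+#   grid = split_grid(img, tile_w=512, tile_h=512, overlap=64)
+#   ...upscale or otherwise process each tile in grid.tiles...
+#   result = combine_grid(grid)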
+
+
+class GridAnnotation:
+ def __init__(self, text='', is_active=True):
+ self.text = text
+ self.is_active = is_active
+ self.size = None
+
+
+def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
+
+ color_active = ImageColor.getcolor(opts.grid_text_active_color, 'RGB')
+ color_inactive = ImageColor.getcolor(opts.grid_text_inactive_color, 'RGB')
+ color_background = ImageColor.getcolor(opts.grid_background_color, 'RGB')
+
+ def wrap(drawing, text, font, line_length):
+ lines = ['']
+ for word in text.split():
+ line = f'{lines[-1]} {word}'.strip()
+ if drawing.textlength(line, font=font) <= line_length:
+ lines[-1] = line
+ else:
+ lines.append(word)
+ return lines
+
+ def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
+ for line in lines:
+ fnt = initial_fnt
+ fontsize = initial_fontsize
+ while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
+ fontsize -= 1
+ fnt = get_font(fontsize)
+ drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
+
+ if not line.is_active:
+ drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
+
+ draw_y += line.size[1] + line_spacing
+
+ fontsize = (width + height) // 25
+ line_spacing = fontsize // 2
+
+ fnt = get_font(fontsize)
+
+ pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
+
+ cols = im.width // width
+ rows = im.height // height
+
+ assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
+ assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
+
+ calc_img = Image.new("RGB", (1, 1), color_background)
+ calc_d = ImageDraw.Draw(calc_img)
+
+ for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
+ items = [] + texts
+ texts.clear()
+
+ for line in items:
+ wrapped = wrap(calc_d, line.text, fnt, allowed_width)
+ texts += [GridAnnotation(x, line.is_active) for x in wrapped]
+
+ for line in texts:
+ bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
+ line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
+ line.allowed_width = allowed_width
+
+ hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
+ ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
+
+ pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
+
+ result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), color_background)
+
+ for row in range(rows):
+ for col in range(cols):
+ cell = im.crop((width * col, height * row, width * (col+1), height * (row+1)))
+ result.paste(cell, (pad_left + (width + margin) * col, pad_top + (height + margin) * row))
+
+ d = ImageDraw.Draw(result)
+
+ for col in range(cols):
+ x = pad_left + (width + margin) * col + width / 2
+ y = pad_top / 2 - hor_text_heights[col] / 2
+
+ draw_texts(d, x, y, hor_texts[col], fnt, fontsize)
+
+ for row in range(rows):
+ x = pad_left / 2
+ y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2
+
+ draw_texts(d, x, y, ver_texts[row], fnt, fontsize)
+
+ return result
+
+
+def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
+ prompts = all_prompts[1:]
+ boundary = math.ceil(len(prompts) / 2)
+
+ prompts_horiz = prompts[:boundary]
+ prompts_vert = prompts[boundary:]
+
+ hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
+ ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
+
+ return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)
+
+
+def resize_image(resize_mode, im, width, height, upscaler_name=None):
+ """
+ Resizes an image with the specified resize_mode, width, and height.
+
+ Args:
+ resize_mode: The mode to use when resizing the image.
+ 0: Resize the image to the specified width and height.
+ 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
+            2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, then center it and fill the empty space with stretched copies of the image's edges.
+ im: The image to resize.
+ width: The width to resize the image to.
+ height: The height to resize the image to.
+ upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
+ """
+
+ upscaler_name = upscaler_name or opts.upscaler_for_img2img
+
+ def resize(im, w, h):
+ if upscaler_name is None or upscaler_name == "None" or im.mode == 'L':
+ return im.resize((w, h), resample=LANCZOS)
+
+ scale = max(w / im.width, h / im.height)
+
+ if scale > 1.0:
+ upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
+ if len(upscalers) == 0:
+ upscaler = shared.sd_upscalers[0]
+ print(f"could not find upscaler named {upscaler_name or ''}, using {upscaler.name} as a fallback")
+ else:
+ upscaler = upscalers[0]
+
+ im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
+
+ if im.width != w or im.height != h:
+ im = im.resize((w, h), resample=LANCZOS)
+
+ return im
+
+ if resize_mode == 0:
+ res = resize(im, width, height)
+
+ elif resize_mode == 1:
+ ratio = width / height
+ src_ratio = im.width / im.height
+
+ src_w = width if ratio > src_ratio else im.width * height // im.height
+ src_h = height if ratio <= src_ratio else im.height * width // im.width
+
+ resized = resize(im, src_w, src_h)
+ res = Image.new("RGB", (width, height))
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
+
+ else:
+ ratio = width / height
+ src_ratio = im.width / im.height
+
+ src_w = width if ratio < src_ratio else im.width * height // im.height
+ src_h = height if ratio >= src_ratio else im.height * width // im.width
+
+ resized = resize(im, src_w, src_h)
+ res = Image.new("RGB", (width, height))
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
+
+ if ratio < src_ratio:
+ fill_height = height // 2 - src_h // 2
+ if fill_height > 0:
+ res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
+ res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
+ elif ratio > src_ratio:
+ fill_width = width // 2 - src_w // 2
+ if fill_width > 0:
+ res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
+ res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
+
+ return res
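+
+# Usage sketch (filename hypothetical): mode 0 stretches, mode 1 crops to fill,
+# mode 2 letterboxes and fills the borders with stretched edge strips:
+#   thumb = resize_image(1, Image.open("input.png"), 512, 512, upscaler_name="None")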
+
+
+invalid_filename_chars = '<>:"/\\|?*\n\r\t'
+invalid_filename_prefix = ' '
+invalid_filename_postfix = ' .'
+re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
+re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
+re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
+max_filename_part_length = 128
+NOTHING_AND_SKIP_PREVIOUS_TEXT = object()
+
+
+def sanitize_filename_part(text, replace_spaces=True):
+ if text is None:
+ return None
+
+ if replace_spaces:
+ text = text.replace(' ', '_')
+
+ text = text.translate({ord(x): '_' for x in invalid_filename_chars})
+ text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
+ text = text.rstrip(invalid_filename_postfix)
+ return text
+
+
+class FilenameGenerator:
+ replacements = {
+ 'seed': lambda self: self.seed if self.seed is not None else '',
+ 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
+ 'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
+ 'steps': lambda self: self.p and self.p.steps,
+ 'cfg': lambda self: self.p and self.p.cfg_scale,
+ 'width': lambda self: self.image.width,
+ 'height': lambda self: self.image.height,
+ 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
+ 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
+ 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
+ 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
+ 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
+        'datetime': lambda self, *args: self.datetime(*args),  # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
+ 'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
+ 'prompt_hash': lambda self, *args: self.string_hash(self.prompt, *args),
+ 'negative_prompt_hash': lambda self, *args: self.string_hash(self.p.negative_prompt, *args),
+ 'full_prompt_hash': lambda self, *args: self.string_hash(f"{self.p.prompt} {self.p.negative_prompt}", *args), # a space in between to create a unique string
+ 'prompt': lambda self: sanitize_filename_part(self.prompt),
+ 'prompt_no_styles': lambda self: self.prompt_no_style(),
+ 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
+ 'prompt_words': lambda self: self.prompt_words(),
+ 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 or self.zip else self.p.batch_index + 1,
+ 'batch_size': lambda self: self.p.batch_size,
+ 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if (self.p.n_iter == 1 and self.p.batch_size == 1) or self.zip else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
+        'hasprompt': lambda self, *args: self.hasprompt(*args),  # accepts formats: [hasprompt<prompt1|default><prompt2>..]
+ 'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
+ 'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
+ 'user': lambda self: self.p.user,
+ 'vae_filename': lambda self: self.get_vae_filename(),
+ 'none': lambda self: '', # Overrides the default, so you can get just the sequence number
+        'image_hash': lambda self, *args: self.image_hash(*args)  # accepts formats: [image_hash<length>], default full hash
+ }
+ default_time_format = '%Y%m%d%H%M%S'
+
+ def __init__(self, p, seed, prompt, image, zip=False):
+ self.p = p
+ self.seed = seed
+ self.prompt = prompt
+ self.image = image
+ self.zip = zip
+
+ def get_vae_filename(self):
+ """Get the name of the VAE file."""
+
+ import modules.sd_vae as sd_vae
+
+ if sd_vae.loaded_vae_file is None:
+ return "NoneType"
+
+ file_name = os.path.basename(sd_vae.loaded_vae_file)
+ split_file_name = file_name.split('.')
+ if len(split_file_name) > 1 and split_file_name[0] == '':
+            return split_file_name[1]  # a filename starting with "." (e.g. ".vae.pt") splits to ['', 'vae', 'pt']; take the part after the dot
+ else:
+ return split_file_name[0]
+
+
+ def hasprompt(self, *args):
+        if self.p is None or self.prompt is None:
+            return None
+        lower = self.prompt.lower()
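+        # e.g. with prompt "a photo of a cat": hasprompt("cat", "dog|nodog") -> "catnodog"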
+ outres = ""
+ for arg in args:
+ if arg != "":
+ division = arg.split("|")
+ expected = division[0].lower()
+ default = division[1] if len(division) > 1 else ""
+ if lower.find(expected) >= 0:
+ outres = f'{outres}{expected}'
+ else:
+ outres = outres if default == "" else f'{outres}{default}'
+ return sanitize_filename_part(outres)
+
+ def prompt_no_style(self):
+ if self.p is None or self.prompt is None:
+ return None
+
+ prompt_no_style = self.prompt
+ for style in shared.prompt_styles.get_style_prompts(self.p.styles):
+ if style:
+ for part in style.split("{prompt}"):
+ prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
+
+ prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
+
+ return sanitize_filename_part(prompt_no_style, replace_spaces=False)
+
+ def prompt_words(self):
+ words = [x for x in re_nonletters.split(self.prompt or "") if x]
+ if len(words) == 0:
+ words = ["empty"]
+ return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
+
+ def datetime(self, *args):
+ time_datetime = datetime.datetime.now()
+
+ time_format = args[0] if (args and args[0] != "") else self.default_time_format
+ try:
+ time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
+ except pytz.exceptions.UnknownTimeZoneError:
+ time_zone = None
+
+ time_zone_time = time_datetime.astimezone(time_zone)
+ try:
+ formatted_time = time_zone_time.strftime(time_format)
+ except (ValueError, TypeError):
+ formatted_time = time_zone_time.strftime(self.default_time_format)
+
+ return sanitize_filename_part(formatted_time, replace_spaces=False)
+
+ def image_hash(self, *args):
+ length = int(args[0]) if (args and args[0] != "") else None
+ return hashlib.sha256(self.image.tobytes()).hexdigest()[0:length]
+
+ def string_hash(self, text, *args):
+ length = int(args[0]) if (args and args[0] != "") else 8
+ return hashlib.sha256(text.encode()).hexdigest()[0:length]
+
+ def apply(self, x):
+ res = ''
+
+ for m in re_pattern.finditer(x):
+ text, pattern = m.groups()
+
+ if pattern is None:
+ res += text
+ continue
+
+ pattern_args = []
+ while True:
+ m = re_pattern_arg.match(pattern)
+ if m is None:
+ break
+
+ pattern, arg = m.groups()
+ pattern_args.insert(0, arg)
+
+ fun = self.replacements.get(pattern.lower())
+ if fun is not None:
+ try:
+ replacement = fun(self, *pattern_args)
+ except Exception:
+ replacement = None
+ errors.report(f"Error adding [{pattern}] to filename", exc_info=True)
+
+ if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT:
+ continue
+ elif replacement is not None:
+ res += text + str(replacement)
+ continue
+
+ res += f'{text}[{pattern}]'
+
+ return res
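+
+    # Pattern sketch: FilenameGenerator(p, seed=123, prompt="a cat", image=img).apply("[seed]-[prompt_spaces]")
+    # returns "123-a cat"; unrecognised patterns are left verbatim as "[pattern]".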
+
+
+def get_next_sequence_number(path, basename):
+ """
+ Determines and returns the next sequence number to use when saving an image in the specified directory.
+
+ The sequence starts at 0.
+ """
+ result = -1
+ if basename != '':
+ basename = f"{basename}-"
+
+ prefix_length = len(basename)
+ for p in os.listdir(path):
+ if p.startswith(basename):
+ parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
+ try:
+ result = max(int(parts[0]), result)
+ except ValueError:
+ pass
+
+ return result + 1
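+
+# e.g. a directory containing "00007-foo.png" and "00012-bar.png" with basename ""
+# yields 13; an empty directory yields 0.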
+
+
+def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None, pnginfo_section_name='parameters'):
+ """
+ Saves image to filename, including geninfo as text information for generation info.
+ For PNG images, geninfo is added to existing pnginfo dictionary using the pnginfo_section_name argument as key.
+ For JPG images, there's no dictionary and geninfo just replaces the EXIF description.
+ """
+
+ if extension is None:
+ extension = os.path.splitext(filename)[1]
+
+ image_format = Image.registered_extensions()[extension]
+
+ if extension.lower() == '.png':
+ existing_pnginfo = existing_pnginfo or {}
+ if opts.enable_pnginfo:
+ existing_pnginfo[pnginfo_section_name] = geninfo
+
+ if opts.enable_pnginfo:
+ pnginfo_data = PngImagePlugin.PngInfo()
+ for k, v in (existing_pnginfo or {}).items():
+ pnginfo_data.add_text(k, str(v))
+ else:
+ pnginfo_data = None
+
+ image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
+
+ elif extension.lower() in (".jpg", ".jpeg", ".webp"):
+ if image.mode == 'RGBA':
+ image = image.convert("RGB")
+ elif image.mode == 'I;16':
+ image = image.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L")
+
+ image.save(filename, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless)
+
+ if opts.enable_pnginfo and geninfo is not None:
+ exif_bytes = piexif.dump({
+ "Exif": {
+ piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode")
+ },
+ })
+
+ piexif.insert(exif_bytes, filename)
+ else:
+ image.save(filename, format=image_format, quality=opts.jpeg_quality)
+
+
+def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
+ """Save an image.
+
+ Args:
+ image (`PIL.Image`):
+ The image to be saved.
+ path (`str`):
+            The directory to save the image in. Note: the option `save_to_dirs` will cause the image to be saved into a subdirectory.
+ basename (`str`):
+ The base filename which will be applied to `filename pattern`.
+        seed, prompt, short_filename: used to build the filename from the filename pattern.
+ extension (`str`):
+ Image file extension, default is `png`.
+        pnginfo_section_name (`str`):
+            Specify the name of the section in which `info` will be saved.
+ info (`str` or `PngImagePlugin.iTXt`):
+ PNG info chunks.
+ existing_info (`dict`):
+            Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
+        no_prompt:
+            If true, the image is not saved into a prompt-derived subdirectory even when `opts.save_to_dirs` is enabled.
+ p (`StableDiffusionProcessing`)
+ forced_filename (`str`):
+ If specified, `basename` and filename pattern will be ignored.
+ save_to_dirs (bool):
+ If true, the image will be saved into a subdirectory of `path`.
+
+ Returns: (fullfn, txt_fullfn)
+ fullfn (`str`):
+            The full path of the saved image.
+ txt_fullfn (`str` or None):
+ If a text file is saved for this image, this will be its full path. Otherwise None.
+ """
+ namegen = FilenameGenerator(p, seed, prompt, image)
+
+    # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively; switch to PNG, which has a much higher limit
+ if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
+ print('Image dimensions too large; saving as PNG')
+ extension = ".png"
+
+ if save_to_dirs is None:
+ save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
+
+ if save_to_dirs:
+ dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
+ path = os.path.join(path, dirname)
+
+ os.makedirs(path, exist_ok=True)
+
+ if forced_filename is None:
+ if short_filename or seed is None:
+ file_decoration = ""
+ elif opts.save_to_dirs:
+ file_decoration = opts.samples_filename_pattern or "[seed]"
+ else:
+ file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
+
+ file_decoration = namegen.apply(file_decoration) + suffix
+
+ add_number = opts.save_images_add_number or file_decoration == ''
+
+ if file_decoration != "" and add_number:
+ file_decoration = f"-{file_decoration}"
+
+ if add_number:
+ basecount = get_next_sequence_number(path, basename)
+ fullfn = None
+ for i in range(500):
+ fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
+ fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
+ if not os.path.exists(fullfn):
+ break
+ else:
+ fullfn = os.path.join(path, f"{file_decoration}.{extension}")
+ else:
+ fullfn = os.path.join(path, f"{forced_filename}.{extension}")
+
+ pnginfo = existing_info or {}
+ if info is not None:
+ pnginfo[pnginfo_section_name] = info
+
+ params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
+ script_callbacks.before_image_saved_callback(params)
+
+ image = params.image
+ fullfn = params.filename
+ info = params.pnginfo.get(pnginfo_section_name, None)
+
+ def _atomically_save_image(image_to_save, filename_without_extension, extension):
+ """
+        save the image with a .tmp extension first, then rename it, so a process watching the directory never sees a partially written file
+ """
+ temp_file_path = f"{filename_without_extension}.tmp"
+
+ save_image_with_geninfo(image_to_save, info, temp_file_path, extension, existing_pnginfo=params.pnginfo, pnginfo_section_name=pnginfo_section_name)
+
+ os.replace(temp_file_path, filename_without_extension + extension)
+
+ fullfn_without_extension, extension = os.path.splitext(params.filename)
+ if hasattr(os, 'statvfs'):
+ max_name_len = os.statvfs(path).f_namemax
+ fullfn_without_extension = fullfn_without_extension[:max_name_len - max(4, len(extension))]
+ params.filename = fullfn_without_extension + extension
+ fullfn = params.filename
+ _atomically_save_image(image, fullfn_without_extension, extension)
+
+ image.already_saved_as = fullfn
+
+ oversize = image.width > opts.target_side_length or image.height > opts.target_side_length
+ if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024):
+ ratio = image.width / image.height
+ resize_to = None
+ if oversize and ratio > 1:
+ resize_to = round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)
+ elif oversize:
+ resize_to = round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)
+
+ if resize_to is not None:
+ try:
+ # Resizing image with LANCZOS could throw an exception if e.g. image mode is I;16
+ image = image.resize(resize_to, LANCZOS)
+ except Exception:
+ image = image.resize(resize_to)
+ try:
+ _atomically_save_image(image, fullfn_without_extension, ".jpg")
+ except Exception as e:
+ errors.display(e, "saving image as downscaled JPG")
+
+ if opts.save_txt and info is not None:
+ txt_fullfn = f"{fullfn_without_extension}.txt"
+ with open(txt_fullfn, "w", encoding="utf8") as file:
+ file.write(f"{info}\n")
+ else:
+ txt_fullfn = None
+
+ script_callbacks.image_saved_callback(params)
+
+ return fullfn, txt_fullfn
+
+
+IGNORED_INFO_KEYS = {
+ 'jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
+ 'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression',
+ 'icc_profile', 'chromaticity', 'photoshop',
+}
+
+
+def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
+ items = (image.info or {}).copy()
+
+ geninfo = items.pop('parameters', None)
+
+ if "exif" in items:
+ exif = piexif.load(items["exif"])
+ exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
+ try:
+ exif_comment = piexif.helper.UserComment.load(exif_comment)
+ except ValueError:
+ exif_comment = exif_comment.decode('utf8', errors="ignore")
+
+ if exif_comment:
+ items['exif comment'] = exif_comment
+ geninfo = exif_comment
+
+ for field in IGNORED_INFO_KEYS:
+ items.pop(field, None)
+
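+ # NovelAI images store their generation parameters as JSON in the "Comment" field; convert them to this webui's plain-text format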
+ if items.get("Software", None) == "NovelAI":
+ try:
+ json_info = json.loads(items["Comment"])
+ sampler = sd_samplers.samplers_map.get(json_info["sampler"], "Euler a")
+
+ geninfo = f"""{items["Description"]}
+Negative prompt: {json_info["uc"]}
+Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
+ except Exception:
+ errors.report("Error parsing NovelAI image generation parameters", exc_info=True)
+
+ return geninfo, items
+
+
+def image_data(data):
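+ """Try to interpret dropped file data as an image with embedded generation info, then as plain text; otherwise leave the UI unchanged."""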
+ import gradio as gr
+
+ try:
+ image = Image.open(io.BytesIO(data))
+ textinfo, _ = read_info_from_image(image)
+ return textinfo, None
+ except Exception:
+ pass
+
+ try:
+ text = data.decode('utf8')
+ assert len(text) < 10000
+ return text, None
+
+ except Exception:
+ pass
+
+ return gr.update(), None
+
+
+def flatten(img, bgcolor):
+ """replaces transparency with bgcolor (example: "#ffffff"), returning an RGB mode image with no transparency"""
+
+ if img.mode == "RGBA":
+ background = Image.new('RGBA', img.size, bgcolor)
+ background.paste(img, mask=img)
+ img = background
+
+ return img.convert('RGB')
diff --git a/modules/img2img.py b/modules/img2img.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f300606b5161f3d8e7c58a9df3e889c91f38eca
--- /dev/null
+++ b/modules/img2img.py
@@ -0,0 +1,219 @@
+import os
+from contextlib import closing
+from pathlib import Path
+
+import numpy as np
+from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError
+import gradio as gr
+
+from modules import images as imgutil
+from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
+from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
+from modules.shared import opts, state
+import modules.shared as shared
+import modules.processing as processing
+from modules.ui import plaintext_to_html
+import modules.scripts
+
+
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
+ output_dir = output_dir.strip()
+ processing.fix_seed(p)
+
+ images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
+
+ is_inpaint_batch = False
+ if inpaint_mask_dir:
+ inpaint_masks = shared.listfiles(inpaint_mask_dir)
+ is_inpaint_batch = bool(inpaint_masks)
+
+ if is_inpaint_batch:
+ print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
+
+ print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
+
+ state.job_count = len(images) * p.n_iter
+
+ # extract "default" params to use in case getting png info fails
+ prompt = p.prompt
+ negative_prompt = p.negative_prompt
+ seed = p.seed
+ cfg_scale = p.cfg_scale
+ sampler_name = p.sampler_name
+ steps = p.steps
+
+ for i, image in enumerate(images):
+ state.job = f"{i+1} out of {len(images)}"
+ if state.skipped:
+ state.skipped = False
+
+ if state.interrupted:
+ break
+
+ try:
+ img = Image.open(image)
+ except UnidentifiedImageError as e:
+ print(e)
+ continue
+ # Use the EXIF orientation of photos taken by smartphones.
+ img = ImageOps.exif_transpose(img)
+
+ if to_scale:
+ p.width = int(img.width * scale_by)
+ p.height = int(img.height * scale_by)
+
+ p.init_images = [img] * p.batch_size
+
+ image_path = Path(image)
+ if is_inpaint_batch:
+ # try to find corresponding mask for an image using simple filename matching
+ if len(inpaint_masks) == 1:
+ mask_image_path = inpaint_masks[0]
+ else:
+ mask_image_dir = Path(inpaint_mask_dir)
+ masks_found = list(mask_image_dir.glob(f"{image_path.stem}.*"))
+
+ if len(masks_found) == 0:
+ print(f"Warning: mask is not found for {image_path} in {mask_image_dir}. Skipping it.")
+ continue
+
+ # there should be only 1 matching mask;
+ # more than one means the user has several masks with the same name but different extensions
+ mask_image_path = masks_found[0]
+
+ mask_image = Image.open(mask_image_path)
+ p.image_mask = mask_image
+
+ if use_png_info:
+ try:
+ info_img = img
+ if png_info_dir:
+ info_img_path = os.path.join(png_info_dir, os.path.basename(image))
+ info_img = Image.open(info_img_path)
+ geninfo, _ = imgutil.read_info_from_image(info_img)
+ parsed_parameters = parse_generation_parameters(geninfo)
+ parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
+ except Exception:
+ parsed_parameters = {}
+
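+ # merge parameters recovered from the PNG info with the defaults captured from the UI above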
+ p.prompt = prompt + (" " + parsed_parameters["Prompt"] if "Prompt" in parsed_parameters else "")
+ p.negative_prompt = negative_prompt + (" " + parsed_parameters["Negative prompt"] if "Negative prompt" in parsed_parameters else "")
+ p.seed = int(parsed_parameters.get("Seed", seed))
+ p.cfg_scale = float(parsed_parameters.get("CFG scale", cfg_scale))
+ p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
+ p.steps = int(parsed_parameters.get("Steps", steps))
+
+ proc = modules.scripts.scripts_img2img.run(p, *args)
+ if proc is None:
+ if output_dir:
+ p.outpath_samples = output_dir
+ p.override_settings['save_to_dirs'] = False
+ if p.n_iter > 1 or p.batch_size > 1:
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
+ else:
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
+ process_images(p)
+
+
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
+ override_settings = create_override_settings_dict(override_settings_texts)
+
+ is_batch = mode == 5
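+ # mode mirrors the order of the img2img UI tabs: 0=img2img, 1=sketch, 2=inpaint, 3=inpaint sketch, 4=inpaint upload mask, 5=batch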
+
+ if mode == 0: # img2img
+ image = init_img
+ mask = None
+ elif mode == 1: # img2img sketch
+ image = sketch
+ mask = None
+ elif mode == 2: # inpaint
+ image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
+ mask = processing.create_binary_mask(mask)
+ elif mode == 3: # inpaint sketch
+ image = inpaint_color_sketch
+ orig = inpaint_color_sketch_orig or inpaint_color_sketch
+ pred = np.any(np.array(image) != np.array(orig), axis=-1)
+ mask = Image.fromarray(pred.astype(np.uint8) * 255, "L")
+ mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
+ blur = ImageFilter.GaussianBlur(mask_blur)
+ image = Image.composite(image.filter(blur), orig, mask.filter(blur))
+ elif mode == 4: # inpaint upload mask
+ image = init_img_inpaint
+ mask = init_mask_inpaint
+ else:
+ image = None
+ mask = None
+
+ # Use the EXIF orientation of photos taken by smartphones.
+ if image is not None:
+ image = ImageOps.exif_transpose(image)
+
+ if selected_scale_tab == 1 and not is_batch:
+ assert image, "Can't scale: no image is selected"
+
+ width = int(image.width * scale_by)
+ height = int(image.height * scale_by)
+
+ assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
+
+ p = StableDiffusionProcessingImg2Img(
+ sd_model=shared.sd_model,
+ outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
+ outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ styles=prompt_styles,
+ sampler_name=sampler_name,
+ batch_size=batch_size,
+ n_iter=n_iter,
+ steps=steps,
+ cfg_scale=cfg_scale,
+ width=width,
+ height=height,
+ init_images=[image],
+ mask=mask,
+ mask_blur=mask_blur,
+ inpainting_fill=inpainting_fill,
+ resize_mode=resize_mode,
+ denoising_strength=denoising_strength,
+ image_cfg_scale=image_cfg_scale,
+ inpaint_full_res=inpaint_full_res,
+ inpaint_full_res_padding=inpaint_full_res_padding,
+ inpainting_mask_invert=inpainting_mask_invert,
+ override_settings=override_settings,
+ )
+
+ p.scripts = modules.scripts.scripts_img2img
+ p.script_args = args
+
+ p.user = request.username
+
+ if shared.cmd_opts.enable_console_prompts:
+ print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
+
+ if mask:
+ p.extra_generation_params["Mask blur"] = mask_blur
+
+ with closing(p):
+ if is_batch:
+ assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+
+ process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
+
+ processed = Processed(p, [], p.seed, "")
+ else:
+ processed = modules.scripts.scripts_img2img.run(p, *args)
+ if processed is None:
+ processed = process_images(p)
+
+ shared.total_tqdm.clear()
+
+ generation_info_js = processed.js()
+ if opts.samples_log_stdout:
+ print(generation_info_js)
+
+ if opts.do_not_show_images:
+ processed.images = []
+
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
diff --git a/modules/import_hook.py b/modules/import_hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..28c67dfa897abec5eeb4cfac3da79458d6fee278
--- /dev/null
+++ b/modules/import_hook.py
@@ -0,0 +1,5 @@
+import sys
+
+# this breaks any attempt to import xformers, which prevents the Stable Diffusion repo from trying to use it
+if "--xformers" not in "".join(sys.argv):
+ sys.modules["xformers"] = None
diff --git a/modules/initialize.py b/modules/initialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c163c758468e23be44a9d8bb24588b4ad356246
--- /dev/null
+++ b/modules/initialize.py
@@ -0,0 +1,168 @@
+import importlib
+import logging
+import sys
+import warnings
+from threading import Thread
+
+from modules.timer import startup_timer
+
+
+def imports():
+ logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
+ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
+
+ import torch # noqa: F401
+ startup_timer.record("import torch")
+ import pytorch_lightning # noqa: F401
+ startup_timer.record("import torch")
+ warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
+
+ import gradio # noqa: F401
+ startup_timer.record("import gradio")
+
+ from modules import paths, timer, import_hook, errors # noqa: F401
+ startup_timer.record("setup paths")
+
+ import ldm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import ldm")
+
+ import sgm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import sgm")
+
+ from modules import shared_init
+ shared_init.initialize()
+ startup_timer.record("initialize shared")
+
+ from modules import processing, gradio_extensons, ui # noqa: F401
+ startup_timer.record("other imports")
+
+
+def check_versions():
+ from modules.shared_cmd_options import cmd_opts
+
+ if not cmd_opts.skip_version_check:
+ from modules import errors
+ errors.check_versions()
+
+
+def initialize():
+ from modules import initialize_util
+ initialize_util.fix_torch_version()
+ initialize_util.fix_asyncio_event_loop_policy()
+ initialize_util.validate_tls_options()
+ initialize_util.configure_sigint_handler()
+ initialize_util.configure_opts_onchange()
+
+ from modules import modelloader
+ modelloader.cleanup_models()
+
+ from modules import sd_models
+ sd_models.setup_model()
+ startup_timer.record("setup SD model")
+
+ from modules.shared_cmd_options import cmd_opts
+
+ from modules import codeformer_model
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor")
+ codeformer_model.setup_model(cmd_opts.codeformer_models_path)
+ startup_timer.record("setup codeformer")
+
+ from modules import gfpgan_model
+ gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
+ startup_timer.record("setup gfpgan")
+
+ initialize_rest(reload_script_modules=False)
+
+
+def initialize_rest(*, reload_script_modules=False):
+ """
+ Called both from initialize() and when reloading the webui.
+ """
+ from modules.shared_cmd_options import cmd_opts
+
+ from modules import sd_samplers
+ sd_samplers.set_samplers()
+ startup_timer.record("set samplers")
+
+ from modules import extensions
+ extensions.list_extensions()
+ startup_timer.record("list extensions")
+
+ from modules import initialize_util
+ initialize_util.restore_config_state_file()
+ startup_timer.record("restore config state file")
+
+ from modules import shared, upscaler, scripts
+ if cmd_opts.ui_debug_mode:
+ shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
+ scripts.load_scripts()
+ return
+
+ from modules import sd_models
+ sd_models.list_models()
+ startup_timer.record("list SD models")
+
+ from modules import localization
+ localization.list_localizations(cmd_opts.localizations_dir)
+ startup_timer.record("list localizations")
+
+ with startup_timer.subcategory("load scripts"):
+ scripts.load_scripts()
+
+ if reload_script_modules:
+ for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+ importlib.reload(module)
+ startup_timer.record("reload script modules")
+
+ from modules import modelloader
+ modelloader.load_upscalers()
+ startup_timer.record("load upscalers")
+
+ from modules import sd_vae
+ sd_vae.refresh_vae_list()
+ startup_timer.record("refresh VAE")
+
+ from modules import textual_inversion
+ textual_inversion.textual_inversion.list_textual_inversion_templates()
+ startup_timer.record("refresh textual inversion templates")
+
+ from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
+ script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
+ sd_hijack.list_optimizers()
+ startup_timer.record("scripts list_optimizers")
+
+ from modules import sd_unet
+ sd_unet.list_unets()
+ startup_timer.record("scripts list_unets")
+
+ def load_model():
+ """
+ Accesses the shared.sd_model property to load the model.
+ If some extension has already loaded the model before this access, its
+ optimization may be None because the list of optimizers had not been
+ filled by that time, so we apply the optimization again.
+ """
+
+ shared.sd_model # noqa: B018
+
+ if sd_hijack.current_optimizer is None:
+ sd_hijack.apply_optimizations()
+
+ from modules import devices
+ devices.first_time_calculation()
+
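+ # load the model in a background thread so the rest of startup does not have to wait for it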
+ Thread(target=load_model).start()
+
+ from modules import shared_items
+ shared_items.reload_hypernetworks()
+ startup_timer.record("reload hypernetworks")
+
+ from modules import ui_extra_networks
+ ui_extra_networks.initialize()
+ ui_extra_networks.register_default_pages()
+
+ from modules import extra_networks
+ extra_networks.initialize()
+ extra_networks.register_default_extra_networks()
+ startup_timer.record("initialize extra networks")
diff --git a/modules/initialize_util.py b/modules/initialize_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1803487125730ca4a84dd2316ccb60dc612dcf6
--- /dev/null
+++ b/modules/initialize_util.py
@@ -0,0 +1,202 @@
+import json
+import os
+import signal
+import sys
+import re
+
+from modules.timer import startup_timer
+
+
+def gradio_server_name():
+ from modules.shared_cmd_options import cmd_opts
+
+ if cmd_opts.server_name:
+ return cmd_opts.server_name
+ else:
+ return "0.0.0.0" if cmd_opts.listen else None
+
+
+def fix_torch_version():
+ import torch
+
+ # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
+ if ".dev" in torch.__version__ or "+git" in torch.__version__:
+ torch.__long_version__ = torch.__version__
+ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
+
+
+def fix_asyncio_event_loop_policy():
+ """
+ The default `asyncio` event loop policy only automatically creates
+ event loops in the main threads. Other threads must create event
+ loops explicitly or `asyncio.get_event_loop` (and therefore
+ `.IOLoop.current`) will fail. Installing this policy allows event
+ loops to be created automatically on any thread, matching the
+ behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
+ """
+
+ import asyncio
+
+ if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
+ # "Any thread" and "selector" should be orthogonal, but there's not a clean
+ # interface for composing policies so pick the right base.
+ _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
+ else:
+ _BasePolicy = asyncio.DefaultEventLoopPolicy
+
+ class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
+ """Event loop policy that allows loop creation on any thread.
+ Usage::
+
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+ """
+
+ def get_event_loop(self) -> asyncio.AbstractEventLoop:
+ try:
+ return super().get_event_loop()
+ except (RuntimeError, AssertionError):
+ # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
+ # and changed to a RuntimeError in 3.4.3.
+ # "There is no current event loop in thread %r"
+ loop = self.new_event_loop()
+ self.set_event_loop(loop)
+ return loop
+
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+
+
+def restore_config_state_file():
+ from modules import shared, config_states
+
+ config_state_file = shared.opts.restore_config_state_file
+ if config_state_file == "":
+ return
+
+ shared.opts.restore_config_state_file = ""
+ shared.opts.save(shared.config_filename)
+
+ if os.path.isfile(config_state_file):
+ print(f"*** About to restore extension state from file: {config_state_file}")
+ with open(config_state_file, "r", encoding="utf-8") as f:
+ config_state = json.load(f)
+ config_states.restore_extension_config(config_state)
+ startup_timer.record("restore extension config")
+ elif config_state_file:
+ print(f"!!! Config state backup not found: {config_state_file}")
+
+
+def validate_tls_options():
+ from modules.shared_cmd_options import cmd_opts
+
+ if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
+ return
+
+ try:
+ if not os.path.exists(cmd_opts.tls_keyfile):
+ print("Invalid path to TLS keyfile given")
+ if not os.path.exists(cmd_opts.tls_certfile):
+ print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
+ except TypeError:
+ cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
+ print("TLS setup invalid, running webui without TLS")
+ else:
+ print("Running with TLS")
+ startup_timer.record("TLS")
+
+
+def get_gradio_auth_creds():
+ """
+ Convert the gradio_auth and gradio_auth_path commandline arguments into
+ an iterable of (username, password) tuples.
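+
+ Each credential has the form username:password; multiple credentials can be
+ separated with commas, both on the command line and within the file.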
+ """
+ from modules.shared_cmd_options import cmd_opts
+
+ def process_credential_line(s):
+ s = s.strip()
+ if not s:
+ return None
+ return tuple(s.split(':', 1))
+
+ if cmd_opts.gradio_auth:
+ for cred in cmd_opts.gradio_auth.split(','):
+ cred = process_credential_line(cred)
+ if cred:
+ yield cred
+
+ if cmd_opts.gradio_auth_path:
+ with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
+ for line in file.readlines():
+ for cred in line.strip().split(','):
+ cred = process_credential_line(cred)
+ if cred:
+ yield cred
+
+
+def dumpstacks():
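+ """Print the current stack of every live thread; used by the SIGINT handler to help debug hangs."""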
+ import threading
+ import traceback
+
+ id2name = {th.ident: th.name for th in threading.enumerate()}
+ code = []
+ for threadId, stack in sys._current_frames().items():
+ code.append(f"\n# Thread: {id2name.get(threadId, '')}({threadId})")
+ for filename, lineno, name, line in traceback.extract_stack(stack):
+ code.append(f"""File: "{filename}", line {lineno}, in {name}""")
+ if line:
+ code.append(" " + line.strip())
+
+ print("\n".join(code))
+
+
+def configure_sigint_handler():
+ # make the program just exit at ctrl+c without waiting for anything
+ def sigint_handler(sig, frame):
+ print(f'Interrupted with signal {sig} in {frame}')
+
+ dumpstacks()
+
+ os._exit(0)
+
+ if not os.environ.get("COVERAGE_RUN"):
+ # Don't install the immediate-quit handler when running under coverage,
+ # as then the coverage report won't be generated.
+ signal.signal(signal.SIGINT, sigint_handler)
+
+
+def configure_opts_onchange():
+ from modules import shared, sd_models, sd_vae, ui_tempdir, sd_hijack
+ from modules.call_queue import wrap_queued_call
+
+ shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
+ shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
+ shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
+ shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
+ shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
+ shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
+ startup_timer.record("opts onchange")
+
+
+def setup_middleware(app):
+ from starlette.middleware.gzip import GZipMiddleware
+
+ app.middleware_stack = None # reset current middleware to allow modifying user provided list
+ app.add_middleware(GZipMiddleware, minimum_size=1000)
+ configure_cors_middleware(app)
+ app.build_middleware_stack() # rebuild middleware stack on-the-fly
+
+
+def configure_cors_middleware(app):
+ from starlette.middleware.cors import CORSMiddleware
+ from modules.shared_cmd_options import cmd_opts
+
+ cors_options = {
+ "allow_methods": ["*"],
+ "allow_headers": ["*"],
+ "allow_credentials": True,
+ }
+ if cmd_opts.cors_allow_origins:
+ cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
+ if cmd_opts.cors_allow_origins_regex:
+ cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
+ app.add_middleware(CORSMiddleware, **cors_options)
diff --git a/modules/interrogate.py b/modules/interrogate.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd4c291e361ae36b9ffa9f3ef3830dd56dff19d5
--- /dev/null
+++ b/modules/interrogate.py
@@ -0,0 +1,222 @@
+import os
+import sys
+from collections import namedtuple
+from pathlib import Path
+import re
+
+import torch
+import torch.hub
+
+from torchvision import transforms
+from torchvision.transforms.functional import InterpolationMode
+
+from modules import devices, paths, shared, lowvram, modelloader, errors
+
+blip_image_eval_size = 384
+clip_model_name = 'ViT-L/14'
+
+Category = namedtuple("Category", ["name", "topn", "items"])
+
+re_topn = re.compile(r"\.top(\d+)\.")
+
+
+def category_types():
+ return [f.stem for f in Path(shared.interrogator.content_dir).glob('*.txt')]
+
+
+def download_default_clip_interrogate_categories(content_dir):
+ print("Downloading CLIP categories...")
+
+ tmpdir = f"{content_dir}_tmp"
+ category_types = ["artists", "flavors", "mediums", "movements"]
+
+ try:
+ os.makedirs(tmpdir, exist_ok=True)
+ for category_type in category_types:
+ torch.hub.download_url_to_file(f"https://raw.githubusercontent.com/pharmapsychotic/clip-interrogator/main/clip_interrogator/data/{category_type}.txt", os.path.join(tmpdir, f"{category_type}.txt"))
+ os.rename(tmpdir, content_dir)
+
+ except Exception as e:
+ errors.display(e, "downloading default CLIP interrogate categories")
+ finally:
+ if os.path.exists(tmpdir):
+ os.removedirs(tmpdir)
+
+
+class InterrogateModels:
+ blip_model = None
+ clip_model = None
+ clip_preprocess = None
+ dtype = None
+ running_on_cpu = None
+
+ def __init__(self, content_dir):
+ self.loaded_categories = None
+ self.skip_categories = []
+ self.content_dir = content_dir
+ self.running_on_cpu = devices.device_interrogate == torch.device("cpu")
+
+ def categories(self):
+ if not os.path.exists(self.content_dir):
+ download_default_clip_interrogate_categories(self.content_dir)
+
+ if self.loaded_categories is not None and self.skip_categories == shared.opts.interrogate_clip_skip_categories:
+ return self.loaded_categories
+
+ self.loaded_categories = []
+
+ if os.path.exists(self.content_dir):
+ self.skip_categories = shared.opts.interrogate_clip_skip_categories
+ category_types = []
+ for filename in Path(self.content_dir).glob('*.txt'):
+ category_types.append(filename.stem)
+ if filename.stem in self.skip_categories:
+ continue
+ m = re_topn.search(filename.stem)
+ topn = 1 if m is None else int(m.group(1))
+ with open(filename, "r", encoding="utf8") as file:
+ lines = [x.strip() for x in file.readlines()]
+
+ self.loaded_categories.append(Category(name=filename.stem, topn=topn, items=lines))
+
+ return self.loaded_categories
+
+ def create_fake_fairscale(self):
+ class FakeFairscale:
+ def checkpoint_wrapper(self):
+ pass
+
+ sys.modules["fairscale.nn.checkpoint.checkpoint_activations"] = FakeFairscale
+
+ def load_blip_model(self):
+ self.create_fake_fairscale()
+ import models.blip
+
+ files = modelloader.load_models(
+ model_path=os.path.join(paths.models_path, "BLIP"),
+ model_url='https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth',
+ ext_filter=[".pth"],
+ download_name='model_base_caption_capfilt_large.pth',
+ )
+
+ blip_model = models.blip.blip_decoder(pretrained=files[0], image_size=blip_image_eval_size, vit='base', med_config=os.path.join(paths.paths["BLIP"], "configs", "med_config.json"))
+ blip_model.eval()
+
+ return blip_model
+
+ def load_clip_model(self):
+ import clip
+
+ if self.running_on_cpu:
+ model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
+ else:
+ model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
+
+ model.eval()
+ model = model.to(devices.device_interrogate)
+
+ return model, preprocess
+
+ def load(self):
+ if self.blip_model is None:
+ self.blip_model = self.load_blip_model()
+ if not shared.cmd_opts.no_half and not self.running_on_cpu:
+ self.blip_model = self.blip_model.half()
+
+ self.blip_model = self.blip_model.to(devices.device_interrogate)
+
+ if self.clip_model is None:
+ self.clip_model, self.clip_preprocess = self.load_clip_model()
+ if not shared.cmd_opts.no_half and not self.running_on_cpu:
+ self.clip_model = self.clip_model.half()
+
+ self.clip_model = self.clip_model.to(devices.device_interrogate)
+
+ self.dtype = next(self.clip_model.parameters()).dtype
+
+ def send_clip_to_ram(self):
+ if not shared.opts.interrogate_keep_models_in_memory:
+ if self.clip_model is not None:
+ self.clip_model = self.clip_model.to(devices.cpu)
+
+ def send_blip_to_ram(self):
+ if not shared.opts.interrogate_keep_models_in_memory:
+ if self.blip_model is not None:
+ self.blip_model = self.blip_model.to(devices.cpu)
+
+ def unload(self):
+ self.send_clip_to_ram()
+ self.send_blip_to_ram()
+
+ devices.torch_gc()
+
+ def rank(self, image_features, text_array, top_count=1):
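+ """Rank entries of text_array by CLIP similarity to image_features, returning the top_count best (text, score) pairs."""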
+ import clip
+
+ devices.torch_gc()
+
+ if shared.opts.interrogate_clip_dict_limit != 0:
+ text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
+
+ top_count = min(top_count, len(text_array))
+ text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
+ text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
+ text_features /= text_features.norm(dim=-1, keepdim=True)
+
+ similarity = torch.zeros((1, len(text_array))).to(devices.device_interrogate)
+ for i in range(image_features.shape[0]):
+ similarity += (100.0 * image_features[i].unsqueeze(0) @ text_features.T).softmax(dim=-1)
+ similarity /= image_features.shape[0]
+
+ top_probs, top_labels = similarity.cpu().topk(top_count, dim=-1)
+ return [(text_array[top_labels[0][i].numpy()], (top_probs[0][i].numpy()*100)) for i in range(top_count)]
+
+ def generate_caption(self, pil_image):
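+ """Generate a BLIP caption for the image, resized to the model's evaluation size, using beam search with the configured length limits."""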
+ gpu_image = transforms.Compose([
+ transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
+ transforms.ToTensor(),
+ transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
+ ])(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
+
+ with torch.no_grad():
+ caption = self.blip_model.generate(gpu_image, sample=False, num_beams=shared.opts.interrogate_clip_num_beams, min_length=shared.opts.interrogate_clip_min_length, max_length=shared.opts.interrogate_clip_max_length)
+
+ return caption[0]
+
+ def interrogate(self, pil_image):
+ res = ""
+ shared.state.begin(job="interrogate")
+ try:
+ lowvram.send_everything_to_cpu()
+ devices.torch_gc()
+
+ self.load()
+
+ caption = self.generate_caption(pil_image)
+ self.send_blip_to_ram()
+ devices.torch_gc()
+
+ res = caption
+
+ clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
+
+ with torch.no_grad(), devices.autocast():
+ image_features = self.clip_model.encode_image(clip_image).type(self.dtype)
+
+ image_features /= image_features.norm(dim=-1, keepdim=True)
+
+ for cat in self.categories():
+ matches = self.rank(image_features, cat.items, top_count=cat.topn)
+ for match, score in matches:
+ if shared.opts.interrogate_return_ranks:
+ res += f", ({match}:{score/100:.3f})"
+ else:
+ res += f", {match}"
+
+ except Exception:
+ errors.report("Error interrogating", exc_info=True)
+ res += ""
+
+ self.unload()
+ shared.state.end()
+
+ return res
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e8a80af022424fa2ae65241522bec3b7c298095
--- /dev/null
+++ b/modules/launch_utils.py
@@ -0,0 +1,449 @@
+# this script installs necessary requirements and launches the main program in webui.py
+import logging
+import re
+import subprocess
+import os
+import shutil
+import sys
+import importlib.util
+import platform
+import json
+from functools import lru_cache
+
+from modules import cmd_args, errors
+from modules.paths_internal import script_path, extensions_dir
+from modules.timer import startup_timer
+from modules import logging_config
+
+args, _ = cmd_args.parser.parse_known_args()
+logging_config.setup_logging(args.loglevel)
+
+python = sys.executable
+git = os.environ.get('GIT', "git")
+index_url = os.environ.get('INDEX_URL', "")
+dir_repos = "repositories"
+
+# Whether to default to printing command output
+default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1")
+
+if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
+ os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
+
+
+def check_python_version():
+ is_windows = platform.system() == "Windows"
+ major = sys.version_info.major
+ minor = sys.version_info.minor
+ micro = sys.version_info.micro
+
+ if is_windows:
+ supported_minors = [10]
+ else:
+ supported_minors = [7, 8, 9, 10, 11]
+
+ if not (major == 3 and minor in supported_minors):
+ import modules.errors
+
+ modules.errors.print_error_explanation(f"""
+INCOMPATIBLE PYTHON VERSION
+
+This program is tested with Python 3.10.6, but you have {major}.{minor}.{micro}.
+If you encounter an error with a "RuntimeError: Couldn't install torch." message,
+or any other error regarding an unsuccessful package (library) installation,
+please downgrade (or upgrade) to the latest version of Python 3.10,
+and delete the current Python and "venv" folder in WebUI's directory.
+
+You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/
+
+{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
+
+Use --skip-python-version-check to suppress this warning.
+""")
+
+
+@lru_cache()
+def commit_hash():
+ try:
+ return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
+ except Exception:
+ return ""
+
+
+@lru_cache()
+def git_tag():
+ try:
+ return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip()
+ except Exception:
+ try:
+
+ changelog_md = os.path.join(os.path.dirname(os.path.dirname(__file__)), "CHANGELOG.md")
+ with open(changelog_md, "r", encoding="utf-8") as file:
+ line = next((line.strip() for line in file if line.strip()), "")
+ line = line.replace("## ", "")
+ return line
+ except Exception:
+ return ""
+
+
+def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str:
+ if desc is not None:
+ print(desc)
+
+ run_kwargs = {
+ "args": command,
+ "shell": True,
+ "env": os.environ if custom_env is None else custom_env,
+ "encoding": 'utf8',
+ "errors": 'ignore',
+ }
+
+ if not live:
+ run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE
+
+ result = subprocess.run(**run_kwargs)
+
+ if result.returncode != 0:
+ error_bits = [
+ f"{errdesc or 'Error running command'}.",
+ f"Command: {command}",
+ f"Error code: {result.returncode}",
+ ]
+ if result.stdout:
+ error_bits.append(f"stdout: {result.stdout}")
+ if result.stderr:
+ error_bits.append(f"stderr: {result.stderr}")
+ raise RuntimeError("\n".join(error_bits))
+
+ return (result.stdout or "")
+
+
+def is_installed(package):
+ try:
+ spec = importlib.util.find_spec(package)
+ except ModuleNotFoundError:
+ return False
+
+ return spec is not None
+
+
+def repo_dir(name):
+ return os.path.join(script_path, dir_repos, name)
+
+
+def run_pip(command, desc=None, live=default_command_live):
+ if args.skip_install:
+ return
+
+ index_url_line = f' --index-url {index_url}' if index_url != '' else ''
+ return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live)
+
+
+def check_run_python(code: str) -> bool:
+ result = subprocess.run([python, "-c", code], capture_output=True, shell=False)
+ return result.returncode == 0
+
+
+def git_fix_workspace(dir, name):
+ run(f'"{git}" -C "{dir}" fetch --refetch --no-auto-gc', f"Fetching all contents for {name}", f"Couldn't fetch {name}", live=True)
+ run(f'"{git}" -C "{dir}" gc --aggressive --prune=now', f"Pruning {name}", f"Couldn't prune {name}", live=True)
+ return
+
+
+def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live, autofix=True):
+ try:
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
+ except RuntimeError:
+ if not autofix:
+ raise
+
+ print(f"{errdesc}, attempting autofix...")
+ git_fix_workspace(dir, name)
+
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
+
+
+def git_clone(url, dir, name, commithash=None):
+ # TODO clone into temporary dir and move if successful
+
+ if os.path.exists(dir):
+ if commithash is None:
+ return
+
+ current_hash = run_git(dir, name, 'rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
+ if current_hash == commithash:
+ return
+
+ if run_git(dir, name, 'config --get remote.origin.url', None, f"Couldn't determine {name}'s origin URL", live=False).strip() != url:
+ run_git(dir, name, f'remote set-url origin "{url}"', None, f"Failed to set {name}'s origin URL", live=False)
+
+ run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False)
+
+ run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
+
+ return
+
+ try:
+ run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True)
+ except RuntimeError:
+ shutil.rmtree(dir, ignore_errors=True)
+ raise
+
+ if commithash is not None:
+ run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
+
+
+def git_pull_recursive(dir):
+ for subdir, _, _ in os.walk(dir):
+ if os.path.exists(os.path.join(subdir, '.git')):
+ try:
+ output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash'])
+ print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n")
+ except subprocess.CalledProcessError as e:
+ print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n")
+
+
+def version_check(commit):
+ try:
+ import requests
+ commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
+ if commit != "" and commits['commit']['sha'] != commit:
+ print("--------------------------------------------------------")
+ print("| You are not up to date with the most recent release. |")
+ print("| Consider running `git pull` to update. |")
+ print("--------------------------------------------------------")
+ elif commits['commit']['sha'] == commit:
+ print("You are up to date with the most recent release.")
+ else:
+ print("Not a git clone, can't perform version check.")
+ except Exception as e:
+ print("version check failed", e)
+
+
+def run_extension_installer(extension_dir):
+ path_installer = os.path.join(extension_dir, "install.py")
+ if not os.path.isfile(path_installer):
+ return
+
+ try:
+ env = os.environ.copy()
+ env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
+
+ stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip()
+ if stdout:
+ print(stdout)
+ except Exception as e:
+ errors.report(str(e))
+
+
+def list_extensions(settings_file):
+ settings = {}
+
+ try:
+ if os.path.isfile(settings_file):
+ with open(settings_file, "r", encoding="utf8") as file:
+ settings = json.load(file)
+ except Exception:
+ errors.report("Could not load settings", exc_info=True)
+
+ disabled_extensions = set(settings.get('disabled_extensions', []))
+ disable_all_extensions = settings.get('disable_all_extensions', 'none')
+
+ if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_dir):
+ return []
+
+ return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
+
+
+def run_extensions_installers(settings_file):
+ if not os.path.isdir(extensions_dir):
+ return
+
+ with startup_timer.subcategory("run extensions installers"):
+ for dirname_extension in list_extensions(settings_file):
+ logging.debug(f"Installing {dirname_extension}")
+
+ path = os.path.join(extensions_dir, dirname_extension)
+
+ if os.path.isdir(path):
+ run_extension_installer(path)
+ startup_timer.record(dirname_extension)
+
+
+re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
+
+
+def requirements_met(requirements_file):
+ """
+ Does a simple parse of a requirements.txt file to determine if all requirements in it
+ are already installed. Returns True if so, False if not installed or parsing fails.
+ """
+
+ import importlib.metadata
+ import packaging.version
+
+ with open(requirements_file, "r", encoding="utf8") as file:
+ for line in file:
+ if line.strip() == "":
+ continue
+
+ m = re.match(re_requirement, line)
+ if m is None:
+ return False
+
+ package = m.group(1).strip()
+ version_required = (m.group(2) or "").strip()
+
+ if version_required == "":
+ continue
+
+ try:
+ version_installed = importlib.metadata.version(package)
+ except Exception:
+ return False
+
+ if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
+ return False
+
+ return True
+
+
+def prepare_environment():
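+ # every package version and repository below can be overridden through environment variables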
+ torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
+ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
+
+ xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
+ clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
+ openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
+
+ stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
+ stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
+ k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
+ codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
+ blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
+
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
+ stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "45c443b316737a4ab6e40413d7794a7f5657c19f")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "ab527a9a6d347f364e3d185ba6d714e22d80cb3c")
+ codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
+ blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
+
+ try:
+ # the existence of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
+ os.remove(os.path.join(script_path, "tmp", "restart"))
+ os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
+ except OSError:
+ pass
+
+ if not args.skip_python_version_check:
+ check_python_version()
+
+ startup_timer.record("checks")
+
+ commit = commit_hash()
+ tag = git_tag()
+ startup_timer.record("git version info")
+
+ print(f"Python {sys.version}")
+ print(f"Version: {tag}")
+ print(f"Commit hash: {commit}")
+
+ if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
+ startup_timer.record("install torch")
+
+ if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
+ raise RuntimeError(
+ 'Torch is not able to use GPU; '
+ 'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'
+ )
+ startup_timer.record("torch GPU test")
+
+ if not is_installed("clip"):
+ run_pip(f"install {clip_package}", "clip")
+ startup_timer.record("install clip")
+
+ if not is_installed("open_clip"):
+ run_pip(f"install {openclip_package}", "open_clip")
+ startup_timer.record("install open_clip")
+
+ if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
+ run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
+ startup_timer.record("install xformers")
+
+ if not is_installed("ngrok") and args.ngrok:
+ run_pip("install ngrok", "ngrok")
+ startup_timer.record("install ngrok")
+
+ os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
+
+ git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
+ git_clone(stable_diffusion_xl_repo, repo_dir('generative-models'), "Stable Diffusion XL", stable_diffusion_xl_commit_hash)
+ git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+ git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+ git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
+
+ startup_timer.record("clone repositores")
+
+ if not is_installed("lpips"):
+ run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
+ startup_timer.record("install CodeFormer requirements")
+
+ if not os.path.isfile(requirements_file):
+ requirements_file = os.path.join(script_path, requirements_file)
+
+ if not requirements_met(requirements_file):
+ run_pip(f"install -r \"{requirements_file}\"", "requirements")
+ startup_timer.record("install requirements")
+
+ if not args.skip_install:
+ run_extensions_installers(settings_file=args.ui_settings_file)
+
+ if args.update_check:
+ version_check(commit)
+ startup_timer.record("check version")
+
+ if args.update_all_extensions:
+ git_pull_recursive(extensions_dir)
+ startup_timer.record("update extensions")
+
+ if "--exit" in sys.argv:
+ print("Exiting because of --exit argument")
+ exit(0)
+
+
+def configure_for_tests():
+ if "--api" not in sys.argv:
+ sys.argv.append("--api")
+ if "--ckpt" not in sys.argv:
+ sys.argv.append("--ckpt")
+ sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt"))
+ if "--skip-torch-cuda-test" not in sys.argv:
+ sys.argv.append("--skip-torch-cuda-test")
+ if "--disable-nan-check" not in sys.argv:
+ sys.argv.append("--disable-nan-check")
+
+ os.environ['COMMANDLINE_ARGS'] = ""
+
+
+def start():
+ print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
+ import webui
+ if '--nowebui' in sys.argv:
+ webui.api_only()
+ else:
+ webui.webui()
+
+
+def dump_sysinfo():
+ from modules import sysinfo
+ import datetime
+
+ text = sysinfo.get()
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+
+ with open(filename, "w", encoding="utf8") as file:
+ file.write(text)
+
+ return filename
diff --git a/modules/localization.py b/modules/localization.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f4db67b69c7951fe47d36e4224f3c8408a21646
--- /dev/null
+++ b/modules/localization.py
@@ -0,0 +1,34 @@
+import json
+import os
+
+from modules import errors, scripts
+
+localizations = {}
+
+
+def list_localizations(dirname):
+ localizations.clear()
+
+ for file in os.listdir(dirname):
+ fn, ext = os.path.splitext(file)
+ if ext.lower() != ".json":
+ continue
+
+ localizations[fn] = os.path.join(dirname, file)
+
+ for file in scripts.list_scripts("localizations", ".json"):
+ fn, ext = os.path.splitext(file.filename)
+ localizations[fn] = file.path
+
+
+def localization_js(current_localization_name: str) -> str:
+ fn = localizations.get(current_localization_name, None)
+ data = {}
+ if fn is not None:
+ try:
+ with open(fn, "r", encoding="utf8") as file:
+ data = json.load(file)
+ except Exception:
+ errors.report(f"Error loading localization from {fn}", exc_info=True)
+
+ return f"window.localization = {json.dumps(data)}"
diff --git a/modules/lowvram.py b/modules/lowvram.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7b85e4ae17f7fb811e608fbd1c22ae2f52d3e3c
--- /dev/null
+++ b/modules/lowvram.py
@@ -0,0 +1,147 @@
+import torch
+from modules import devices, shared
+
+module_in_gpu = None
+cpu = torch.device("cpu")
+
+
+def send_everything_to_cpu():
+ global module_in_gpu
+
+ if module_in_gpu is not None:
+ module_in_gpu.to(cpu)
+
+ module_in_gpu = None
+
+
+def is_needed(sd_model):
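+ # offloading is needed with --lowvram/--medvram, or with --medvram-sdxl when the loaded model is SDXL (detected via its 'conditioner' attribute)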
+ return shared.cmd_opts.lowvram or shared.cmd_opts.medvram or (shared.cmd_opts.medvram_sdxl and hasattr(sd_model, 'conditioner'))
+
+
+def apply(sd_model):
+ enable = is_needed(sd_model)
+ shared.parallel_processing_allowed = not enable
+
+ if enable:
+ setup_for_low_vram(sd_model, not shared.cmd_opts.lowvram)
+ else:
+ sd_model.lowvram = False
+
+
+def setup_for_low_vram(sd_model, use_medvram):
+ if getattr(sd_model, 'lowvram', False):
+ return
+
+ sd_model.lowvram = True
+
+ parents = {}
+
+ def send_me_to_gpu(module, _):
+ """send this module to GPU; send whatever tracked module was previous in GPU to CPU;
+ we add this as forward_pre_hook to a lot of modules and this way all but one of them will
+ be in CPU
+ """
+ global module_in_gpu
+
+ module = parents.get(module, module)
+
+ if module_in_gpu == module:
+ return
+
+ if module_in_gpu is not None:
+ module_in_gpu.to(cpu)
+
+ module.to(devices.device)
+ module_in_gpu = module
+
+ # see below for register_forward_pre_hook;
+ # first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
+ # useless here, and we just replace those methods
+
+ first_stage_model = sd_model.first_stage_model
+ first_stage_model_encode = sd_model.first_stage_model.encode
+ first_stage_model_decode = sd_model.first_stage_model.decode
+
+ def first_stage_model_encode_wrap(x):
+ send_me_to_gpu(first_stage_model, None)
+ return first_stage_model_encode(x)
+
+ def first_stage_model_decode_wrap(z):
+ send_me_to_gpu(first_stage_model, None)
+ return first_stage_model_decode(z)
+
+ to_remain_in_cpu = [
+ (sd_model, 'first_stage_model'),
+ (sd_model, 'depth_model'),
+ (sd_model, 'embedder'),
+ (sd_model, 'model'),
+ ]
+
+ is_sdxl = hasattr(sd_model, 'conditioner')
+ is_sd2 = not is_sdxl and hasattr(sd_model.cond_stage_model, 'model')
+
+ if is_sdxl:
+ to_remain_in_cpu.append((sd_model, 'conditioner'))
+ elif is_sd2:
+ to_remain_in_cpu.append((sd_model.cond_stage_model, 'model'))
+ else:
+ to_remain_in_cpu.append((sd_model.cond_stage_model, 'transformer'))
+
+ # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model
+ stored = []
+ for obj, field in to_remain_in_cpu:
+ module = getattr(obj, field, None)
+ stored.append(module)
+ setattr(obj, field, None)
+
+ # send the model to GPU.
+ sd_model.to(devices.device)
+
+ # put modules back. the modules will be in CPU.
+ for (obj, field), module in zip(to_remain_in_cpu, stored):
+ setattr(obj, field, module)
+
+ # register hooks for the first three models
+ if is_sdxl:
+ sd_model.conditioner.register_forward_pre_hook(send_me_to_gpu)
+ elif is_sd2:
+ sd_model.cond_stage_model.model.register_forward_pre_hook(send_me_to_gpu)
+ sd_model.cond_stage_model.model.token_embedding.register_forward_pre_hook(send_me_to_gpu)
+ parents[sd_model.cond_stage_model.model] = sd_model.cond_stage_model
+ parents[sd_model.cond_stage_model.model.token_embedding] = sd_model.cond_stage_model
+ else:
+ sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
+ parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
+
+ sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
+ sd_model.first_stage_model.encode = first_stage_model_encode_wrap
+ sd_model.first_stage_model.decode = first_stage_model_decode_wrap
+ if sd_model.depth_model:
+ sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
+ if sd_model.embedder:
+ sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
+
+ if use_medvram:
+ sd_model.model.register_forward_pre_hook(send_me_to_gpu)
+ else:
+ diff_model = sd_model.model.diffusion_model
+
+ # the third remaining model is still too big for 4 GB, so we also do the same for its submodules
+ # so that only one of them is in GPU at a time
+ stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
+ diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
+ sd_model.model.to(devices.device)
+ diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored
+
+ # install hooks for bits of third model
+ diff_model.time_embed.register_forward_pre_hook(send_me_to_gpu)
+ for block in diff_model.input_blocks:
+ block.register_forward_pre_hook(send_me_to_gpu)
+ diff_model.middle_block.register_forward_pre_hook(send_me_to_gpu)
+ for block in diff_model.output_blocks:
+ block.register_forward_pre_hook(send_me_to_gpu)
+
+
+def is_enabled(sd_model):
+ return sd_model.lowvram
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
new file mode 100644
index 0000000000000000000000000000000000000000..89256c5b06073c38a903d71a10d3be31079085df
--- /dev/null
+++ b/modules/mac_specific.py
@@ -0,0 +1,83 @@
+import logging
+
+import torch
+import platform
+from modules.sd_hijack_utils import CondFunc
+from packaging import version
+from modules import shared
+
+log = logging.getLogger(__name__)
+
+
+# before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+,
+# so use `getattr` plus a try/except for compatibility.
+# torch version 1.13 introduced backends.mps.is_available() and backends.mps.is_built() to check MPS availability;
+# since the torch 2.0.1+ nightly builds, getattr(torch, 'has_mps', False) is deprecated, see https://github.com/pytorch/pytorch/pull/103279
+def check_for_mps() -> bool:
+ if version.parse(torch.__version__) <= version.parse("2.0.1"):
+ if not getattr(torch, 'has_mps', False):
+ return False
+ try:
+ torch.zeros(1).to(torch.device("mps"))
+ return True
+ except Exception:
+ return False
+ else:
+ return torch.backends.mps.is_available() and torch.backends.mps.is_built()
+
+
+has_mps = check_for_mps()
+
+
+def torch_mps_gc() -> None:
+ try:
+ if shared.state.current_latent is not None:
+ log.debug("`current_latent` is set, skipping MPS garbage collection")
+ return
+ from torch.mps import empty_cache
+ empty_cache()
+ except Exception:
+ log.warning("MPS garbage collection failed", exc_info=True)
+
+
+# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
+def cumsum_fix(input, cumsum_func, *args, **kwargs):
+ if input.device.type == 'mps':
+ output_dtype = kwargs.get('dtype', input.dtype)
+ if output_dtype == torch.int64:
+ return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
+ elif output_dtype == torch.bool or (cumsum_needs_int_fix and output_dtype in (torch.int8, torch.int16)):
+ return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
+ return cumsum_func(input, *args, **kwargs)
+
+
+if has_mps:
+ if platform.mac_ver()[0].startswith("13.2."):
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
+ CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
+
+ if version.parse(torch.__version__) < version.parse("1.13"):
+ # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
+
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
+ CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
+ lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
+ CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
+ lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
+ CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
+ elif version.parse(torch.__version__) > version.parse("1.13.1"):
+ cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
+ cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs)
+ CondFunc('torch.cumsum', cumsum_fix_func, None)
+ CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
+ CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
+
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
+ CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')
+
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/92311
+ if platform.processor() == 'i386':
+ for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
+ CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
diff --git a/modules/memmon.py b/modules/memmon.py
new file mode 100644
index 0000000000000000000000000000000000000000..4018edcc7ef1c36d6f71fddc43f773aba6c078e4
--- /dev/null
+++ b/modules/memmon.py
@@ -0,0 +1,92 @@
+import threading
+import time
+from collections import defaultdict
+
+import torch
+
+
+class MemUsageMonitor(threading.Thread):
+ run_flag = None
+ device = None
+ disabled = False
+ opts = None
+ data = None
+
+ def __init__(self, name, device, opts):
+ threading.Thread.__init__(self)
+ self.name = name
+ self.device = device
+ self.opts = opts
+
+ self.daemon = True
+ self.run_flag = threading.Event()
+ self.data = defaultdict(int)
+
+ try:
+ self.cuda_mem_get_info()
+ torch.cuda.memory_stats(self.device)
+ except Exception as e: # AMD or whatever
+ print(f"Warning: caught exception '{e}', memory monitor disabled")
+ self.disabled = True
+
+ def cuda_mem_get_info(self):
+ index = self.device.index if self.device.index is not None else torch.cuda.current_device()
+ return torch.cuda.mem_get_info(index)
+
+ def run(self):
+ if self.disabled:
+ return
+
+ while True:
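+ # Block until monitor() sets the flag; each wake-up corresponds to one monitored job, which ends when stop() clears the flag.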
+ self.run_flag.wait()
+
+ torch.cuda.reset_peak_memory_stats()
+ self.data.clear()
+
+ if self.opts.memmon_poll_rate <= 0:
+ self.run_flag.clear()
+ continue
+
+ self.data["min_free"] = self.cuda_mem_get_info()[0]
+
+ while self.run_flag.is_set():
+ free, total = self.cuda_mem_get_info()
+ self.data["min_free"] = min(self.data["min_free"], free)
+
+ time.sleep(1 / self.opts.memmon_poll_rate)
+
+ def dump_debug(self):
+ print(self, 'recorded data:')
+ for k, v in self.read().items():
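+ # -(v // -(1024 ** 2)) is ceiling division: bytes to MiB, rounded up.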
+ print(k, -(v // -(1024 ** 2)))
+
+ print(self, 'raw torch memory stats:')
+ tm = torch.cuda.memory_stats(self.device)
+ for k, v in tm.items():
+ if 'bytes' not in k:
+ continue
+ print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2)))
+
+ print(torch.cuda.memory_summary())
+
+ def monitor(self):
+ self.run_flag.set()
+
+ def read(self):
+ if not self.disabled:
+ free, total = self.cuda_mem_get_info()
+ self.data["free"] = free
+ self.data["total"] = total
+
+ torch_stats = torch.cuda.memory_stats(self.device)
+ self.data["active"] = torch_stats["active.all.current"]
+ self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
+ self.data["reserved"] = torch_stats["reserved_bytes.all.current"]
+ self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
+ self.data["system_peak"] = total - self.data["min_free"]
+
+ return self.data
+
+ def stop(self):
+ self.run_flag.clear()
+ return self.read()
diff --git a/modules/modelloader.py b/modules/modelloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..098bcb7933632c8f9cb798dcc038f1834fdb128f
--- /dev/null
+++ b/modules/modelloader.py
@@ -0,0 +1,179 @@
+from __future__ import annotations
+
+import os
+import shutil
+import importlib
+from urllib.parse import urlparse
+
+from modules import shared
+from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
+from modules.paths import script_path, models_path
+
+
+def load_file_from_url(
+ url: str,
+ *,
+ model_dir: str,
+ progress: bool = True,
+ file_name: str | None = None,
+) -> str:
+ """Download a file from `url` into `model_dir`, using the file present if possible.
+
+ Returns the path to the downloaded file.
+ """
+ os.makedirs(model_dir, exist_ok=True)
+ if not file_name:
+ parts = urlparse(url)
+ file_name = os.path.basename(parts.path)
+ cached_file = os.path.abspath(os.path.join(model_dir, file_name))
+ if not os.path.exists(cached_file):
+ print(f'Downloading: "{url}" to {cached_file}\n')
+ from torch.hub import download_url_to_file
+ download_url_to_file(url, cached_file, progress=progress)
+ return cached_file
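+
+# Hypothetical usage sketch (URL and directory are illustrative, not shipped defaults):
+#   load_file_from_url("https://example.com/4x_foo.pth", model_dir="models/ESRGAN")
+# downloads on the first call and afterwards just returns the cached absolute path.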
+
+
+def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
+ """
+ A one-and done loader to try finding the desired models in specified directories.
+
+ @param download_name: Specify to download from model_url immediately.
+ @param model_url: If no other models are found, this will be downloaded on upscale.
+ @param model_path: The location to store/find models in.
+ @param command_path: A command-line argument to search for models in first.
+ @param ext_filter: An optional list of filename extensions to filter by
+ @return: A list of paths containing the desired model(s)
+ """
+ output = []
+
+ try:
+ places = []
+
+ if command_path is not None and command_path != model_path:
+ pretrained_path = os.path.join(command_path, 'experiments/pretrained_models')
+ if os.path.exists(pretrained_path):
+ print(f"Appending path: {pretrained_path}")
+ places.append(pretrained_path)
+ elif os.path.exists(command_path):
+ places.append(command_path)
+
+ places.append(model_path)
+
+ for place in places:
+ for full_path in shared.walk_files(place, allowed_extensions=ext_filter):
+ if os.path.islink(full_path) and not os.path.exists(full_path):
+ print(f"Skipping broken symlink: {full_path}")
+ continue
+ if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
+ continue
+ if full_path not in output:
+ output.append(full_path)
+
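+ # Nothing found locally: either download now (when download_name is given) or return the URL itself for deferred download by the caller.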
+ if model_url is not None and len(output) == 0:
+ if download_name is not None:
+ output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name))
+ else:
+ output.append(model_url)
+
+ except Exception:
+ pass
+
+ return output
+
+
+def friendly_name(file: str):
+ if file.startswith("http"):
+ file = urlparse(file).path
+
+ file = os.path.basename(file)
+ model_name, extension = os.path.splitext(file)
+ return model_name
+
+
+def cleanup_models():
+ # This code could probably be more efficient if we used a tuple list or something to store the src/destinations
+ # and then enumerate that, but this works for now. In the future, it'd be nice to just have every "model" scaler
+ # somehow auto-register and just do these things...
+ root_path = script_path
+ src_path = models_path
+ dest_path = os.path.join(models_path, "Stable-diffusion")
+ move_files(src_path, dest_path, ".ckpt")
+ move_files(src_path, dest_path, ".safetensors")
+ src_path = os.path.join(root_path, "ESRGAN")
+ dest_path = os.path.join(models_path, "ESRGAN")
+ move_files(src_path, dest_path)
+ src_path = os.path.join(models_path, "BSRGAN")
+ dest_path = os.path.join(models_path, "ESRGAN")
+ move_files(src_path, dest_path, ".pth")
+ src_path = os.path.join(root_path, "gfpgan")
+ dest_path = os.path.join(models_path, "GFPGAN")
+ move_files(src_path, dest_path)
+ src_path = os.path.join(root_path, "SwinIR")
+ dest_path = os.path.join(models_path, "SwinIR")
+ move_files(src_path, dest_path)
+ src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/")
+ dest_path = os.path.join(models_path, "LDSR")
+ move_files(src_path, dest_path)
+
+
+def move_files(src_path: str, dest_path: str, ext_filter: str = None):
+ try:
+ os.makedirs(dest_path, exist_ok=True)
+ if os.path.exists(src_path):
+ for file in os.listdir(src_path):
+ fullpath = os.path.join(src_path, file)
+ if os.path.isfile(fullpath):
+ if ext_filter is not None:
+ if ext_filter not in file:
+ continue
+ print(f"Moving {file} from {src_path} to {dest_path}.")
+ try:
+ shutil.move(fullpath, dest_path)
+ except Exception:
+ pass
+ if len(os.listdir(src_path)) == 0:
+ print(f"Removing empty folder: {src_path}")
+ shutil.rmtree(src_path, True)
+ except Exception:
+ pass
+
+
+def load_upscalers():
+ # Upscalers are discovered via Upscaler.__subclasses__(), which only sees classes that have
+ # been imported, so try importing every *_model.py file in modules/ first.
+ modules_dir = os.path.join(shared.script_path, "modules")
+ for file in os.listdir(modules_dir):
+ if "_model.py" in file:
+ model_name = file.replace("_model.py", "")
+ full_model = f"modules.{model_name}_model"
+ try:
+ importlib.import_module(full_model)
+ except Exception:
+ pass
+
+ datas = []
+ commandline_options = vars(shared.cmd_opts)
+
+ # Some upscaler classes will not go away after their modules are reloaded, leaving two
+ # copies of those classes. The newest copy is always last in the list, so we iterate from
+ # the end and keep only the first occurrence of each class name.
+ used_classes = {}
+ for cls in reversed(Upscaler.__subclasses__()):
+ classname = str(cls)
+ if classname not in used_classes:
+ used_classes[classname] = cls
+
+ for cls in reversed(used_classes.values()):
+ name = cls.__name__
+ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
+ commandline_model_path = commandline_options.get(cmd_name, None)
+ scaler = cls(commandline_model_path)
+ scaler.user_path = commandline_model_path
+ scaler.model_download_path = commandline_model_path or scaler.model_path
+ datas += scaler.scalers
+
+ shared.sd_upscalers = sorted(
+ datas,
+ # Special case for UpscalerNone keeps it at the beginning of the list.
+ key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else ""
+ )
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b892d5fc7b04755a3de6c17cf8787605df0faed3
--- /dev/null
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -0,0 +1,1455 @@
+"""
+wild mixture of
+https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
+https://github.com/CompVis/taming-transformers
+-- merci
+"""
+
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# See more details in LICENSE.
+
+import torch
+import torch.nn as nn
+import numpy as np
+import pytorch_lightning as pl
+from torch.optim.lr_scheduler import LambdaLR
+from einops import rearrange, repeat
+from contextlib import contextmanager
+from functools import partial
+from tqdm import tqdm
+from torchvision.utils import make_grid
+from pytorch_lightning.utilities.distributed import rank_zero_only
+
+from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
+from ldm.modules.ema import LitEma
+from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
+from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
+from ldm.models.diffusion.ddim import DDIMSampler
+
+
+__conditioning_keys__ = {'concat': 'c_concat',
+ 'crossattn': 'c_crossattn',
+ 'adm': 'y'}
+
+
+def disabled_train(self, mode=True):
+ """Overwrite model.train with this function to make sure train/eval mode
+ does not change anymore."""
+ return self
+
+
+def uniform_on_device(r1, r2, shape, device):
+ return (r1 - r2) * torch.rand(*shape, device=device) + r2
+
+
+class DDPM(pl.LightningModule):
+ # classic DDPM with Gaussian diffusion, in image space
+ def __init__(self,
+ unet_config,
+ timesteps=1000,
+ beta_schedule="linear",
+ loss_type="l2",
+ ckpt_path=None,
+ ignore_keys=None,
+ load_only_unet=False,
+ monitor="val/loss",
+ use_ema=True,
+ first_stage_key="image",
+ image_size=256,
+ channels=3,
+ log_every_t=100,
+ clip_denoised=True,
+ linear_start=1e-4,
+ linear_end=2e-2,
+ cosine_s=8e-3,
+ given_betas=None,
+ original_elbo_weight=0.,
+ v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
+ l_simple_weight=1.,
+ conditioning_key=None,
+ parameterization="eps", # all assuming fixed variance schedules
+ scheduler_config=None,
+ use_positional_encodings=False,
+ learn_logvar=False,
+ logvar_init=0.,
+ load_ema=True,
+ ):
+ super().__init__()
+ assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
+ self.parameterization = parameterization
+ print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+ self.cond_stage_model = None
+ self.clip_denoised = clip_denoised
+ self.log_every_t = log_every_t
+ self.first_stage_key = first_stage_key
+ self.image_size = image_size # try conv?
+ self.channels = channels
+ self.use_positional_encodings = use_positional_encodings
+ self.model = DiffusionWrapper(unet_config, conditioning_key)
+ count_params(self.model, verbose=True)
+ self.use_ema = use_ema
+
+ self.use_scheduler = scheduler_config is not None
+ if self.use_scheduler:
+ self.scheduler_config = scheduler_config
+
+ self.v_posterior = v_posterior
+ self.original_elbo_weight = original_elbo_weight
+ self.l_simple_weight = l_simple_weight
+
+ if monitor is not None:
+ self.monitor = monitor
+
+ if self.use_ema and load_ema:
+ self.model_ema = LitEma(self.model)
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+ if ckpt_path is not None:
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)
+
+ # If initializing from an EMA-only checkpoint, create the EMA model after loading.
+ if self.use_ema and not load_ema:
+ self.model_ema = LitEma(self.model)
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+ self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
+ linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
+
+ self.loss_type = loss_type
+
+ self.learn_logvar = learn_logvar
+ self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
+ if self.learn_logvar:
+ self.logvar = nn.Parameter(self.logvar, requires_grad=True)
+
+
+ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+ if exists(given_betas):
+ betas = given_betas
+ else:
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
+ cosine_s=cosine_s)
+ alphas = 1. - betas
+ alphas_cumprod = np.cumprod(alphas, axis=0)
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+
+ timesteps, = betas.shape
+ self.num_timesteps = int(timesteps)
+ self.linear_start = linear_start
+ self.linear_end = linear_end
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
+
+ to_torch = partial(torch.tensor, dtype=torch.float32)
+
+ self.register_buffer('betas', to_torch(betas))
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+
+ # calculations for diffusion q(x_t | x_{t-1}) and others
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
+
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
+ 1. - alphas_cumprod) + self.v_posterior * betas
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
+ self.register_buffer('posterior_mean_coef1', to_torch(
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
+ self.register_buffer('posterior_mean_coef2', to_torch(
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
+
+ if self.parameterization == "eps":
+ lvlb_weights = self.betas ** 2 / (
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
+ elif self.parameterization == "x0":
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
+ else:
+ raise NotImplementedError("mu not supported")
+ # TODO how to choose this term
+ lvlb_weights[0] = lvlb_weights[1]
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
+ assert not torch.isnan(self.lvlb_weights).any()
+
+ @contextmanager
+ def ema_scope(self, context=None):
+ if self.use_ema:
+ self.model_ema.store(self.model.parameters())
+ self.model_ema.copy_to(self.model)
+ if context is not None:
+ print(f"{context}: Switched to EMA weights")
+ try:
+ yield None
+ finally:
+ if self.use_ema:
+ self.model_ema.restore(self.model.parameters())
+ if context is not None:
+ print(f"{context}: Restored training weights")
+
+ def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
+ ignore_keys = ignore_keys or []
+
+ sd = torch.load(path, map_location="cpu")
+ if "state_dict" in list(sd.keys()):
+ sd = sd["state_dict"]
+ keys = list(sd.keys())
+
+ # Our model adds additional channels to the first layer to condition on an input image.
+ # For the first layer, copy existing channel weights and initialize new channel weights to zero.
+ input_keys = [
+ "model.diffusion_model.input_blocks.0.0.weight",
+ "model_ema.diffusion_modelinput_blocks00weight",
+ ]
+
+ self_sd = self.state_dict()
+ for input_key in input_keys:
+ if input_key not in sd or input_key not in self_sd:
+ continue
+
+ input_weight = self_sd[input_key]
+
+ if input_weight.size() != sd[input_key].size():
+ print(f"Manual init: {input_key}")
+ input_weight.zero_()
+ input_weight[:, :4, :, :].copy_(sd[input_key])
+ ignore_keys.append(input_key)
+
+ for k in keys:
+ for ik in ignore_keys:
+ if k.startswith(ik):
+ print(f"Deleting key {k} from state_dict.")
+ del sd[k]
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
+ sd, strict=False)
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+ if missing:
+ print(f"Missing Keys: {missing}")
+ if unexpected:
+ print(f"Unexpected Keys: {unexpected}")
+
+ def q_mean_variance(self, x_start, t):
+ """
+ Get the distribution q(x_t | x_0).
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
+ """
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
+ return mean, variance, log_variance
+
+ def predict_start_from_noise(self, x_t, t, noise):
+ return (
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
+ )
+
+ def q_posterior(self, x_start, x_t, t):
+ posterior_mean = (
+ extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
+ extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
+ )
+ posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
+ posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+ def p_mean_variance(self, x, t, clip_denoised: bool):
+ model_out = self.model(x, t)
+ if self.parameterization == "eps":
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
+ elif self.parameterization == "x0":
+ x_recon = model_out
+ if clip_denoised:
+ x_recon.clamp_(-1., 1.)
+
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+ return model_mean, posterior_variance, posterior_log_variance
+
+ @torch.no_grad()
+ def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
+ b, *_, device = *x.shape, x.device
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
+ noise = noise_like(x.shape, device, repeat_noise)
+ # no noise when t == 0
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+ @torch.no_grad()
+ def p_sample_loop(self, shape, return_intermediates=False):
+ device = self.betas.device
+ b = shape[0]
+ img = torch.randn(shape, device=device)
+ intermediates = [img]
+ for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
+ img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
+ clip_denoised=self.clip_denoised)
+ if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
+ intermediates.append(img)
+ if return_intermediates:
+ return img, intermediates
+ return img
+
+ @torch.no_grad()
+ def sample(self, batch_size=16, return_intermediates=False):
+ image_size = self.image_size
+ channels = self.channels
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
+ return_intermediates=return_intermediates)
+
+ def q_sample(self, x_start, t, noise=None):
+ noise = default(noise, lambda: torch.randn_like(x_start))
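+ # closed form of q(x_t | x_0): x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise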
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
+
+ def get_loss(self, pred, target, mean=True):
+ if self.loss_type == 'l1':
+ loss = (target - pred).abs()
+ if mean:
+ loss = loss.mean()
+ elif self.loss_type == 'l2':
+ if mean:
+ loss = torch.nn.functional.mse_loss(target, pred)
+ else:
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
+ else:
+ raise NotImplementedError("unknown loss type '{loss_type}'")
+
+ return loss
+
+ def p_losses(self, x_start, t, noise=None):
+ noise = default(noise, lambda: torch.randn_like(x_start))
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+ model_out = self.model(x_noisy, t)
+
+ loss_dict = {}
+ if self.parameterization == "eps":
+ target = noise
+ elif self.parameterization == "x0":
+ target = x_start
+ else:
+ raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
+
+ loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
+
+ log_prefix = 'train' if self.training else 'val'
+
+ loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
+ loss_simple = loss.mean() * self.l_simple_weight
+
+ loss_vlb = (self.lvlb_weights[t] * loss).mean()
+ loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
+
+ loss = loss_simple + self.original_elbo_weight * loss_vlb
+
+ loss_dict.update({f'{log_prefix}/loss': loss})
+
+ return loss, loss_dict
+
+ def forward(self, x, *args, **kwargs):
+ # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
+ # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+ return self.p_losses(x, t, *args, **kwargs)
+
+ def get_input(self, batch, k):
+ return batch[k]
+
+ def shared_step(self, batch):
+ x = self.get_input(batch, self.first_stage_key)
+ loss, loss_dict = self(x)
+ return loss, loss_dict
+
+ def training_step(self, batch, batch_idx):
+ loss, loss_dict = self.shared_step(batch)
+
+ self.log_dict(loss_dict, prog_bar=True,
+ logger=True, on_step=True, on_epoch=True)
+
+ self.log("global_step", self.global_step,
+ prog_bar=True, logger=True, on_step=True, on_epoch=False)
+
+ if self.use_scheduler:
+ lr = self.optimizers().param_groups[0]['lr']
+ self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
+
+ return loss
+
+ @torch.no_grad()
+ def validation_step(self, batch, batch_idx):
+ _, loss_dict_no_ema = self.shared_step(batch)
+ with self.ema_scope():
+ _, loss_dict_ema = self.shared_step(batch)
+ loss_dict_ema = {f"{key}_ema": loss_dict_ema[key] for key in loss_dict_ema}
+ self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+ self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+
+ def on_train_batch_end(self, *args, **kwargs):
+ if self.use_ema:
+ self.model_ema(self.model)
+
+ def _get_rows_from_list(self, samples):
+ n_imgs_per_row = len(samples)
+ denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
+ return denoise_grid
+
+ @torch.no_grad()
+ def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
+ log = {}
+ x = self.get_input(batch, self.first_stage_key)
+ N = min(x.shape[0], N)
+ n_row = min(x.shape[0], n_row)
+ x = x.to(self.device)[:N]
+ log["inputs"] = x
+
+ # get diffusion row
+ diffusion_row = []
+ x_start = x[:n_row]
+
+ for t in range(self.num_timesteps):
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
+ t = t.to(self.device).long()
+ noise = torch.randn_like(x_start)
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+ diffusion_row.append(x_noisy)
+
+ log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
+
+ if sample:
+ # get denoise row
+ with self.ema_scope("Plotting"):
+ samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
+
+ log["samples"] = samples
+ log["denoise_row"] = self._get_rows_from_list(denoise_row)
+
+ if return_keys:
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
+ return log
+ else:
+ return {key: log[key] for key in return_keys}
+ return log
+
+ def configure_optimizers(self):
+ lr = self.learning_rate
+ params = list(self.model.parameters())
+ if self.learn_logvar:
+ params = params + [self.logvar]
+ opt = torch.optim.AdamW(params, lr=lr)
+ return opt
+
+
+class LatentDiffusion(DDPM):
+ """main class"""
+ def __init__(self,
+ first_stage_config,
+ cond_stage_config,
+ num_timesteps_cond=None,
+ cond_stage_key="image",
+ cond_stage_trainable=False,
+ concat_mode=True,
+ cond_stage_forward=None,
+ conditioning_key=None,
+ scale_factor=1.0,
+ scale_by_std=False,
+ load_ema=True,
+ *args, **kwargs):
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
+ self.scale_by_std = scale_by_std
+ assert self.num_timesteps_cond <= kwargs['timesteps']
+ # for backwards compatibility after implementation of DiffusionWrapper
+ if conditioning_key is None:
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
+ if cond_stage_config == '__is_unconditional__':
+ conditioning_key = None
+ ckpt_path = kwargs.pop("ckpt_path", None)
+ ignore_keys = kwargs.pop("ignore_keys", [])
+ super().__init__(*args, conditioning_key=conditioning_key, load_ema=load_ema, **kwargs)
+ self.concat_mode = concat_mode
+ self.cond_stage_trainable = cond_stage_trainable
+ self.cond_stage_key = cond_stage_key
+ try:
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
+ except Exception:
+ self.num_downs = 0
+ if not scale_by_std:
+ self.scale_factor = scale_factor
+ else:
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
+ self.instantiate_first_stage(first_stage_config)
+ self.instantiate_cond_stage(cond_stage_config)
+ self.cond_stage_forward = cond_stage_forward
+ self.clip_denoised = False
+ self.bbox_tokenizer = None
+
+ self.restarted_from_ckpt = False
+ if ckpt_path is not None:
+ self.init_from_ckpt(ckpt_path, ignore_keys)
+ self.restarted_from_ckpt = True
+
+ if self.use_ema and not load_ema:
+ self.model_ema = LitEma(self.model)
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+ def make_cond_schedule(self):
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
+ self.cond_ids[:self.num_timesteps_cond] = ids
+
+ @rank_zero_only
+ @torch.no_grad()
+ def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
+ # only for very first batch
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
+ assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
+ # set rescale weight to 1./std of encodings
+ print("### USING STD-RESCALING ###")
+ x = super().get_input(batch, self.first_stage_key)
+ x = x.to(self.device)
+ encoder_posterior = self.encode_first_stage(x)
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
+ del self.scale_factor
+ self.register_buffer('scale_factor', 1. / z.flatten().std())
+ print(f"setting self.scale_factor to {self.scale_factor}")
+ print("### USING STD-RESCALING ###")
+
+ def register_schedule(self,
+ given_betas=None, beta_schedule="linear", timesteps=1000,
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+ super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
+
+ self.shorten_cond_schedule = self.num_timesteps_cond > 1
+ if self.shorten_cond_schedule:
+ self.make_cond_schedule()
+
+ def instantiate_first_stage(self, config):
+ model = instantiate_from_config(config)
+ self.first_stage_model = model.eval()
+ self.first_stage_model.train = disabled_train
+ for param in self.first_stage_model.parameters():
+ param.requires_grad = False
+
+ def instantiate_cond_stage(self, config):
+ if not self.cond_stage_trainable:
+ if config == "__is_first_stage__":
+ print("Using first stage also as cond stage.")
+ self.cond_stage_model = self.first_stage_model
+ elif config == "__is_unconditional__":
+ print(f"Training {self.__class__.__name__} as an unconditional model.")
+ self.cond_stage_model = None
+ # self.be_unconditional = True
+ else:
+ model = instantiate_from_config(config)
+ self.cond_stage_model = model.eval()
+ self.cond_stage_model.train = disabled_train
+ for param in self.cond_stage_model.parameters():
+ param.requires_grad = False
+ else:
+ assert config != '__is_first_stage__'
+ assert config != '__is_unconditional__'
+ model = instantiate_from_config(config)
+ self.cond_stage_model = model
+
+ def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
+ denoise_row = []
+ for zd in tqdm(samples, desc=desc):
+ denoise_row.append(self.decode_first_stage(zd.to(self.device),
+ force_not_quantize=force_no_decoder_quantization))
+ n_imgs_per_row = len(denoise_row)
+ denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
+ return denoise_grid
+
+ def get_first_stage_encoding(self, encoder_posterior):
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
+ z = encoder_posterior.sample()
+ elif isinstance(encoder_posterior, torch.Tensor):
+ z = encoder_posterior
+ else:
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
+ return self.scale_factor * z
+
+ def get_learned_conditioning(self, c):
+ if self.cond_stage_forward is None:
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
+ c = self.cond_stage_model.encode(c)
+ if isinstance(c, DiagonalGaussianDistribution):
+ c = c.mode()
+ else:
+ c = self.cond_stage_model(c)
+ else:
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
+ return c
+
+ def meshgrid(self, h, w):
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
+
+ arr = torch.cat([y, x], dim=-1)
+ return arr
+
+ def delta_border(self, h, w):
+ """
+ :param h: height
+ :param w: width
+ :return: normalized distance to image border,
+ with min distance = 0 at the border and max distance = 0.5 at the image center
+ """
+ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
+ arr = self.meshgrid(h, w) / lower_right_corner
+ dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
+ dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
+ edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
+ return edge_dist
+
+ def get_weighting(self, h, w, Ly, Lx, device):
+ weighting = self.delta_border(h, w)
+ weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
+ self.split_input_params["clip_max_weight"], )
+ weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
+
+ if self.split_input_params["tie_braker"]:
+ L_weighting = self.delta_border(Ly, Lx)
+ L_weighting = torch.clip(L_weighting,
+ self.split_input_params["clip_min_tie_weight"],
+ self.split_input_params["clip_max_tie_weight"])
+
+ L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
+ weighting = weighting * L_weighting
+ return weighting
+
+ def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
+ """
+ :param x: img of size (bs, c, h, w)
+ :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
+ """
+ bs, nc, h, w = x.shape
+
+ # number of crops in image
+ Ly = (h - kernel_size[0]) // stride[0] + 1
+ Lx = (w - kernel_size[1]) // stride[1] + 1
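+ # Ly, Lx: number of sliding-window crops along height and width; uf/df up- or down-scale the folded output grid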
+
+ if uf == 1 and df == 1:
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+ unfold = torch.nn.Unfold(**fold_params)
+
+ fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
+
+ weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
+ normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
+ weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
+
+ elif uf > 1 and df == 1:
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+ unfold = torch.nn.Unfold(**fold_params)
+
+ fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
+ dilation=1, padding=0,
+ stride=(stride[0] * uf, stride[1] * uf))
+ fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
+
+ weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
+ normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
+ weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
+
+ elif df > 1 and uf == 1:
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+ unfold = torch.nn.Unfold(**fold_params)
+
+ fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
+ dilation=1, padding=0,
+ stride=(stride[0] // df, stride[1] // df))
+ fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
+
+ weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
+ normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
+ weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
+
+ else:
+ raise NotImplementedError
+
+ return fold, unfold, normalization, weighting
+
+ @torch.no_grad()
+ def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
+ cond_key=None, return_original_cond=False, bs=None, uncond=0.05):
+ x = super().get_input(batch, k)
+ if bs is not None:
+ x = x[:bs]
+ x = x.to(self.device)
+ encoder_posterior = self.encode_first_stage(x)
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
+ cond_key = cond_key or self.cond_stage_key
+ xc = super().get_input(batch, cond_key)
+ if bs is not None:
+ xc["c_crossattn"] = xc["c_crossattn"][:bs]
+ xc["c_concat"] = xc["c_concat"][:bs]
+ cond = {}
+
+ # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.
+ random = torch.rand(x.size(0), device=x.device)
+ prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
+ input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1")
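+ # With the default uncond=0.05: the text prompt is nulled for random < 0.10, the image conditioning is zeroed for 0.05 <= random < 0.15; the overlap [0.05, 0.10) drops both, giving roughly 5% each of text-only, image-only, and joint dropout.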
+
+ null_prompt = self.get_learned_conditioning([""])
+ cond["c_crossattn"] = [torch.where(prompt_mask, null_prompt, self.get_learned_conditioning(xc["c_crossattn"]).detach())]
+ cond["c_concat"] = [input_mask * self.encode_first_stage((xc["c_concat"].to(self.device))).mode().detach()]
+
+ out = [z, cond]
+ if return_first_stage_outputs:
+ xrec = self.decode_first_stage(z)
+ out.extend([x, xrec])
+ if return_original_cond:
+ out.append(xc)
+ return out
+
+ @torch.no_grad()
+ def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
+ if predict_cids:
+ if z.dim() == 4:
+ z = torch.argmax(z.exp(), dim=1).long()
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
+
+ z = 1. / self.scale_factor * z
+
+ if hasattr(self, "split_input_params"):
+ if self.split_input_params["patch_distributed_vq"]:
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+ uf = self.split_input_params["vqf"]
+ bs, nc, h, w = z.shape
+ if ks[0] > h or ks[1] > w:
+ ks = (min(ks[0], h), min(ks[1], w))
+ print("reducing Kernel")
+
+ if stride[0] > h or stride[1] > w:
+ stride = (min(stride[0], h), min(stride[1], w))
+ print("reducing stride")
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
+
+ z = unfold(z) # (bn, nc * prod(**ks), L)
+ # 1. Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ # 2. apply model loop over last dim
+ if isinstance(self.first_stage_model, VQModelInterface):
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
+ force_not_quantize=predict_cids or force_not_quantize)
+ for i in range(z.shape[-1])]
+ else:
+
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
+ for i in range(z.shape[-1])]
+
+ o = torch.stack(output_list, axis=-1) # (bn, nc, ks[0], ks[1], L)
+ o = o * weighting
+ # Reverse 1. reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ decoded = fold(o)
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
+ return decoded
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ # same as above but without decorator
+ def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
+ if predict_cids:
+ if z.dim() == 4:
+ z = torch.argmax(z.exp(), dim=1).long()
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
+
+ z = 1. / self.scale_factor * z
+
+ if hasattr(self, "split_input_params"):
+ if self.split_input_params["patch_distributed_vq"]:
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+ uf = self.split_input_params["vqf"]
+ bs, nc, h, w = z.shape
+ if ks[0] > h or ks[1] > w:
+ ks = (min(ks[0], h), min(ks[1], w))
+ print("reducing Kernel")
+
+ if stride[0] > h or stride[1] > w:
+ stride = (min(stride[0], h), min(stride[1], w))
+ print("reducing stride")
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
+
+ z = unfold(z) # (bn, nc * prod(**ks), L)
+ # 1. Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ # 2. apply model loop over last dim
+ if isinstance(self.first_stage_model, VQModelInterface):
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
+ force_not_quantize=predict_cids or force_not_quantize)
+ for i in range(z.shape[-1])]
+ else:
+
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
+ for i in range(z.shape[-1])]
+
+ o = torch.stack(output_list, axis=-1) # (bn, nc, ks[0], ks[1], L)
+ o = o * weighting
+ # Reverse 1. reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ decoded = fold(o)
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
+ return decoded
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ @torch.no_grad()
+ def encode_first_stage(self, x):
+ if hasattr(self, "split_input_params"):
+ if self.split_input_params["patch_distributed_vq"]:
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+ df = self.split_input_params["vqf"]
+ self.split_input_params['original_image_size'] = x.shape[-2:]
+ bs, nc, h, w = x.shape
+ if ks[0] > h or ks[1] > w:
+ ks = (min(ks[0], h), min(ks[1], w))
+ print("reducing Kernel")
+
+ if stride[0] > h or stride[1] > w:
+ stride = (min(stride[0], h), min(stride[1], w))
+ print("reducing stride")
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
+ z = unfold(x) # (bn, nc * prod(**ks), L)
+ # Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
+ for i in range(z.shape[-1])]
+
+ o = torch.stack(output_list, axis=-1)
+ o = o * weighting
+
+ # Reverse reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ decoded = fold(o)
+ decoded = decoded / normalization
+ return decoded
+
+ else:
+ return self.first_stage_model.encode(x)
+ else:
+ return self.first_stage_model.encode(x)
+
+ def shared_step(self, batch, **kwargs):
+ x, c = self.get_input(batch, self.first_stage_key)
+ loss = self(x, c)
+ return loss
+
+ def forward(self, x, c, *args, **kwargs):
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+ if self.model.conditioning_key is not None:
+ assert c is not None
+ if self.cond_stage_trainable:
+ c = self.get_learned_conditioning(c)
+ if self.shorten_cond_schedule: # TODO: drop this option
+ tc = self.cond_ids[t].to(self.device)
+ c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
+ return self.p_losses(x, c, t, *args, **kwargs)
+
+ def apply_model(self, x_noisy, t, cond, return_ids=False):
+
+ if isinstance(cond, dict):
+ # hybrid case, cond is expected to be a dict
+ pass
+ else:
+ if not isinstance(cond, list):
+ cond = [cond]
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
+ cond = {key: cond}
+
+ if hasattr(self, "split_input_params"):
+ assert len(cond) == 1 # todo can only deal with one conditioning atm
+ assert not return_ids
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+
+ h, w = x_noisy.shape[-2:]
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
+
+ z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
+ # Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+ z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
+
+ if self.cond_stage_key in ["image", "LR_image", "segmentation",
+ 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
+ c_key = next(iter(cond.keys())) # get key
+ c = next(iter(cond.values())) # get value
+ assert (len(c) == 1) # todo extend to list with more than one elem
+ c = c[0] # get element
+
+ c = unfold(c)
+ c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
+
+ elif self.cond_stage_key == 'coordinates_bbox':
+ assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
+
+ # assuming padding of unfold is always 0 and its dilation is always 1
+ n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
+ full_img_h, full_img_w = self.split_input_params['original_image_size']
+ # as we are operating on latents, we need the factor from the original image size to the
+ # spatial latent size to properly rescale the crops for regenerating the bbox annotations
+ num_downs = self.first_stage_model.encoder.num_resolutions - 1
+ rescale_latent = 2 ** (num_downs)
+
+ # get the top-left positions of the patches, conforming to the bbox tokenizer; we therefore
+ # rescale the top-left patch coordinates to lie within (0, 1)
+ tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
+ rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
+ for patch_nr in range(z.shape[-1])]
+
+ # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
+ patch_limits = [(x_tl, y_tl,
+ rescale_latent * ks[0] / full_img_w,
+ rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
+ # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
+
+ # tokenize crop coordinates for the bounding boxes of the respective patches
+ patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
+ for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
+ print(patch_limits_tknzd[0].shape)
+ # cut tknzd crop position from conditioning
+ assert isinstance(cond, dict), 'cond must be dict to be fed into model'
+ cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
+ print(cut_cond.shape)
+
+ adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
+ adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
+ print(adapted_cond.shape)
+ adapted_cond = self.get_learned_conditioning(adapted_cond)
+ print(adapted_cond.shape)
+ adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
+ print(adapted_cond.shape)
+
+ cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
+
+ else:
+ cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
+
+ # apply model by loop over crops
+ output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
+ assert not isinstance(output_list[0],
+ tuple) # todo: can't deal with multiple model outputs; check this never happens
+
+ o = torch.stack(output_list, axis=-1)
+ o = o * weighting
+ # Reverse reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ x_recon = fold(o) / normalization
+
+ else:
+ x_recon = self.model(x_noisy, t, **cond)
+
+ if isinstance(x_recon, tuple) and not return_ids:
+ return x_recon[0]
+ else:
+ return x_recon
+
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
+
+ def _prior_bpd(self, x_start):
+ """
+ Get the prior KL term for the variational lower-bound, measured in
+ bits-per-dim.
+ This term can't be optimized, as it only depends on the encoder.
+ :param x_start: the [N x C x ...] tensor of inputs.
+ :return: a batch of [N] KL values (in bits), one per batch element.
+ """
+ batch_size = x_start.shape[0]
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
+ return mean_flat(kl_prior) / np.log(2.0)
+
+ def p_losses(self, x_start, cond, t, noise=None):
+ noise = default(noise, lambda: torch.randn_like(x_start))
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+ model_output = self.apply_model(x_noisy, t, cond)
+
+ loss_dict = {}
+ prefix = 'train' if self.training else 'val'
+
+ if self.parameterization == "x0":
+ target = x_start
+ elif self.parameterization == "eps":
+ target = noise
+ else:
+ raise NotImplementedError()
+
+ loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
+ loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
+
+ logvar_t = self.logvar[t].to(self.device)
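+ # NLL-style uncertainty weighting: dividing by exp(logvar_t) discounts noisy timesteps, while the added logvar_t term keeps the predicted variance from growing unboundedly.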
+ loss = loss_simple / torch.exp(logvar_t) + logvar_t
+ # loss = loss_simple / torch.exp(self.logvar) + self.logvar
+ if self.learn_logvar:
+ loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
+ loss_dict.update({'logvar': self.logvar.data.mean()})
+
+ loss = self.l_simple_weight * loss.mean()
+
+ loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
+ loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
+ loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
+ loss += (self.original_elbo_weight * loss_vlb)
+ loss_dict.update({f'{prefix}/loss': loss})
+
+ return loss, loss_dict
+
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
+ return_x0=False, score_corrector=None, corrector_kwargs=None):
+ t_in = t
+ model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
+
+ if score_corrector is not None:
+ assert self.parameterization == "eps"
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
+
+ if return_codebook_ids:
+ model_out, logits = model_out
+
+ if self.parameterization == "eps":
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
+ elif self.parameterization == "x0":
+ x_recon = model_out
+ else:
+ raise NotImplementedError()
+
+ if clip_denoised:
+ x_recon.clamp_(-1., 1.)
+ if quantize_denoised:
+ x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+ if return_codebook_ids:
+ return model_mean, posterior_variance, posterior_log_variance, logits
+ elif return_x0:
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
+ else:
+ return model_mean, posterior_variance, posterior_log_variance
+
+ @torch.no_grad()
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
+ return_codebook_ids=False, quantize_denoised=False, return_x0=False,
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
+ b, *_, device = *x.shape, x.device
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
+ return_codebook_ids=return_codebook_ids,
+ quantize_denoised=quantize_denoised,
+ return_x0=return_x0,
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
+ if return_codebook_ids:
+ raise DeprecationWarning("Support dropped.")
+ model_mean, _, model_log_variance, logits = outputs
+ elif return_x0:
+ model_mean, _, model_log_variance, x0 = outputs
+ else:
+ model_mean, _, model_log_variance = outputs
+
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
+ if noise_dropout > 0.:
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+ # no noise when t == 0
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+
+ if return_codebook_ids:
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
+ if return_x0:
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
+ else:
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+ @torch.no_grad()
+ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
+ img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
+ score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
+ log_every_t=None):
+ if not log_every_t:
+ log_every_t = self.log_every_t
+ timesteps = self.num_timesteps
+ if batch_size is not None:
+ b = batch_size
+ shape = [batch_size] + list(shape)
+ else:
+ b = batch_size = shape[0]
+ if x_T is None:
+ img = torch.randn(shape, device=self.device)
+ else:
+ img = x_T
+ intermediates = []
+ if cond is not None:
+ if isinstance(cond, dict):
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
+ [x[:batch_size] for x in cond[key]] for key in cond}
+ else:
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
+
+ if start_T is not None:
+ timesteps = min(timesteps, start_T)
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
+ total=timesteps) if verbose else reversed(
+ range(0, timesteps))
+ if type(temperature) == float:
+ temperature = [temperature] * timesteps
+
+ for i in iterator:
+ ts = torch.full((b,), i, device=self.device, dtype=torch.long)
+ if self.shorten_cond_schedule:
+ assert self.model.conditioning_key != 'hybrid'
+ tc = self.cond_ids[ts].to(cond.device)
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
+
+ img, x0_partial = self.p_sample(img, cond, ts,
+ clip_denoised=self.clip_denoised,
+ quantize_denoised=quantize_denoised, return_x0=True,
+ temperature=temperature[i], noise_dropout=noise_dropout,
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
+ if mask is not None:
+ assert x0 is not None
+ img_orig = self.q_sample(x0, ts)
+ img = img_orig * mask + (1. - mask) * img
+
+ if i % log_every_t == 0 or i == timesteps - 1:
+ intermediates.append(x0_partial)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
+ return img, intermediates
+
+ @torch.no_grad()
+ def p_sample_loop(self, cond, shape, return_intermediates=False,
+ x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
+ mask=None, x0=None, img_callback=None, start_T=None,
+ log_every_t=None):
+
+ if not log_every_t:
+ log_every_t = self.log_every_t
+ device = self.betas.device
+ b = shape[0]
+ if x_T is None:
+ img = torch.randn(shape, device=device)
+ else:
+ img = x_T
+
+ intermediates = [img]
+ if timesteps is None:
+ timesteps = self.num_timesteps
+
+ if start_T is not None:
+ timesteps = min(timesteps, start_T)
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
+ range(0, timesteps))
+
+ if mask is not None:
+ assert x0 is not None
+ assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
+
+ for i in iterator:
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
+ if self.shorten_cond_schedule:
+ assert self.model.conditioning_key != 'hybrid'
+ tc = self.cond_ids[ts].to(cond.device)
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
+
+ img = self.p_sample(img, cond, ts,
+ clip_denoised=self.clip_denoised,
+ quantize_denoised=quantize_denoised)
+ if mask is not None:
+ img_orig = self.q_sample(x0, ts)
+ img = img_orig * mask + (1. - mask) * img
+
+ if i % log_every_t == 0 or i == timesteps - 1:
+ intermediates.append(img)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
+
+ if return_intermediates:
+ return img, intermediates
+ return img
+
+ @torch.no_grad()
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
+ verbose=True, timesteps=None, quantize_denoised=False,
+ mask=None, x0=None, shape=None, **kwargs):
+ if shape is None:
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
+ if cond is not None:
+ if isinstance(cond, dict):
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
+ [x[:batch_size] for x in cond[key]] for key in cond}
+ else:
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
+ return self.p_sample_loop(cond,
+ shape,
+ return_intermediates=return_intermediates, x_T=x_T,
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
+ mask=mask, x0=x0)
+
+ @torch.no_grad()
+ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
+
+ if ddim:
+ ddim_sampler = DDIMSampler(self)
+ shape = (self.channels, self.image_size, self.image_size)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
+ shape, cond, verbose=False, **kwargs)
+
+ else:
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
+ return_intermediates=True, **kwargs)
+
+ return samples, intermediates
+
+
+ @torch.no_grad()
+ def log_images(self, batch, N=4, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
+ quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False,
+ plot_diffusion_rows=False, **kwargs):
+
+ use_ddim = False
+
+ log = {}
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
+ return_first_stage_outputs=True,
+ force_c_encode=True,
+ return_original_cond=True,
+ bs=N, uncond=0)
+ N = min(x.shape[0], N)
+ n_row = min(x.shape[0], n_row)
+ log["inputs"] = x
+ log["reals"] = xc["c_concat"]
+ log["reconstruction"] = xrec
+ if self.model.conditioning_key is not None:
+ if hasattr(self.cond_stage_model, "decode"):
+ xc = self.cond_stage_model.decode(c)
+ log["conditioning"] = xc
+ elif self.cond_stage_key in ["caption"]:
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
+ log["conditioning"] = xc
+ elif self.cond_stage_key == 'class_label':
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
+ log['conditioning'] = xc
+ elif isimage(xc):
+ log["conditioning"] = xc
+ if ismap(xc):
+ log["original_conditioning"] = self.to_rgb(xc)
+
+ if plot_diffusion_rows:
+ # get diffusion row
+ diffusion_row = []
+ z_start = z[:n_row]
+ for t in range(self.num_timesteps):
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
+ t = t.to(self.device).long()
+ noise = torch.randn_like(z_start)
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
+ diffusion_row.append(self.decode_first_stage(z_noisy))
+
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
+ log["diffusion_row"] = diffusion_grid
+
+ if sample:
+ # get denoise row
+ with self.ema_scope("Plotting"):
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
+ ddim_steps=ddim_steps, eta=ddim_eta)
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
+ x_samples = self.decode_first_stage(samples)
+ log["samples"] = x_samples
+ if plot_denoise_rows:
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
+ log["denoise_row"] = denoise_grid
+
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
+ self.first_stage_model, IdentityFirstStage):
+ # also display when quantizing x0 while sampling
+ with self.ema_scope("Plotting Quantized Denoised"):
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
+ ddim_steps=ddim_steps, eta=ddim_eta,
+ quantize_denoised=True)
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
+ # quantize_denoised=True)
+ x_samples = self.decode_first_stage(samples.to(self.device))
+ log["samples_x0_quantized"] = x_samples
+
+ if inpaint:
+ # make a simple center square
+ h, w = z.shape[2], z.shape[3]
+ mask = torch.ones(N, h, w).to(self.device)
+ # zeros will be filled in
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
+ mask = mask[:, None, ...]
+ with self.ema_scope("Plotting Inpaint"):
+
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
+ x_samples = self.decode_first_stage(samples.to(self.device))
+ log["samples_inpainting"] = x_samples
+ log["mask"] = mask
+
+ # outpaint
+ with self.ema_scope("Plotting Outpaint"):
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
+ x_samples = self.decode_first_stage(samples.to(self.device))
+ log["samples_outpainting"] = x_samples
+
+ if plot_progressive_rows:
+ with self.ema_scope("Plotting Progressives"):
+ img, progressives = self.progressive_denoising(c,
+ shape=(self.channels, self.image_size, self.image_size),
+ batch_size=N)
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
+ log["progressive_row"] = prog_row
+
+ if return_keys:
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
+ return log
+ else:
+ return {key: log[key] for key in return_keys}
+ return log
+
+ def configure_optimizers(self):
+ lr = self.learning_rate
+ params = list(self.model.parameters())
+ if self.cond_stage_trainable:
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
+ params = params + list(self.cond_stage_model.parameters())
+ if self.learn_logvar:
+ print('Diffusion model optimizing logvar')
+ params.append(self.logvar)
+ opt = torch.optim.AdamW(params, lr=lr)
+ if self.use_scheduler:
+ assert 'target' in self.scheduler_config
+ scheduler = instantiate_from_config(self.scheduler_config)
+
+ print("Setting up LambdaLR scheduler...")
+ scheduler = [
+ {
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
+ 'interval': 'step',
+ 'frequency': 1
+ }]
+ return [opt], scheduler
+ return opt
+
+ @torch.no_grad()
+ def to_rgb(self, x):
+ x = x.float()
+ if not hasattr(self, "colorize"):
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
+ x = nn.functional.conv2d(x, weight=self.colorize)
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
+ return x
+
+
+class DiffusionWrapper(pl.LightningModule):
+ def __init__(self, diff_model_config, conditioning_key):
+ super().__init__()
+ self.diffusion_model = instantiate_from_config(diff_model_config)
+ self.conditioning_key = conditioning_key
+ assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
+
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
+ if self.conditioning_key is None:
+ out = self.diffusion_model(x, t)
+ elif self.conditioning_key == 'concat':
+ xc = torch.cat([x] + c_concat, dim=1)
+ out = self.diffusion_model(xc, t)
+ elif self.conditioning_key == 'crossattn':
+ cc = torch.cat(c_crossattn, 1)
+ out = self.diffusion_model(x, t, context=cc)
+ elif self.conditioning_key == 'hybrid':
+ xc = torch.cat([x] + c_concat, dim=1)
+ cc = torch.cat(c_crossattn, 1)
+ out = self.diffusion_model(xc, t, context=cc)
+ elif self.conditioning_key == 'adm':
+ cc = c_crossattn[0]
+ out = self.diffusion_model(x, t, y=cc)
+ else:
+ raise NotImplementedError()
+
+ return out
+
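+# Illustrative call for the 'crossattn' path (shapes are assumptions for SD-style
+# latents and CLIP context, not fixed by this class): wrapper(x, t, c_crossattn=[ctx])
+# with x: (B, 4, H, W), t: (B,) and ctx: (B, 77, 768) concatenates the context list
+# along dim 1 and passes it to the diffusion model as `context`.
+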
+
+class Layout2ImgDiffusion(LatentDiffusion):
+ # TODO: move all layout-specific hacks to this class
+ def __init__(self, cond_stage_key, *args, **kwargs):
+ assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
+ super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)
+
+ def log_images(self, batch, N=8, *args, **kwargs):
+ logs = super().log_images(*args, batch=batch, N=N, **kwargs)
+
+ key = 'train' if self.training else 'validation'
+ dset = self.trainer.datamodule.datasets[key]
+ mapper = dset.conditional_builders[self.cond_stage_key]
+
+ bbox_imgs = []
+ map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
+ for tknzd_bbox in batch[self.cond_stage_key][:N]:
+ bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
+ bbox_imgs.append(bboximg)
+
+ cond_img = torch.stack(bbox_imgs, dim=0)
+ logs['bbox_image'] = cond_img
+ return logs
diff --git a/modules/models/diffusion/uni_pc/__init__.py b/modules/models/diffusion/uni_pc/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbb35964ce08f4d3e1b2f77d2a5359c87bf6f3fa
--- /dev/null
+++ b/modules/models/diffusion/uni_pc/__init__.py
@@ -0,0 +1 @@
+from .sampler import UniPCSampler # noqa: F401
diff --git a/modules/models/diffusion/uni_pc/__pycache__/__init__.cpython-39.pyc b/modules/models/diffusion/uni_pc/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98d205551bed13915fcb3a991e210d1923ba5dc3
Binary files /dev/null and b/modules/models/diffusion/uni_pc/__pycache__/__init__.cpython-39.pyc differ
diff --git a/modules/models/diffusion/uni_pc/__pycache__/sampler.cpython-39.pyc b/modules/models/diffusion/uni_pc/__pycache__/sampler.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d4188a837c4644daa2c8ee4af102261ada1fffc
Binary files /dev/null and b/modules/models/diffusion/uni_pc/__pycache__/sampler.cpython-39.pyc differ
diff --git a/modules/models/diffusion/uni_pc/__pycache__/uni_pc.cpython-39.pyc b/modules/models/diffusion/uni_pc/__pycache__/uni_pc.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..513d48b89f1a09c4329ff847a2fd6b30fec12ab8
Binary files /dev/null and b/modules/models/diffusion/uni_pc/__pycache__/uni_pc.cpython-39.pyc differ
diff --git a/modules/models/diffusion/uni_pc/sampler.py b/modules/models/diffusion/uni_pc/sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a9defa108b8e61adb8665b031c9770158654714
--- /dev/null
+++ b/modules/models/diffusion/uni_pc/sampler.py
@@ -0,0 +1,101 @@
+"""SAMPLING ONLY."""
+
+import torch
+
+from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC
+from modules import shared, devices
+
+
+class UniPCSampler(object):
+ def __init__(self, model, **kwargs):
+ super().__init__()
+ self.model = model
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
+ self.before_sample = None
+ self.after_sample = None
+ self.after_update = None
+ self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
+
+ def register_buffer(self, name, attr):
+ if isinstance(attr, torch.Tensor):
+ if attr.device != devices.device:
+ attr = attr.to(devices.device)
+ setattr(self, name, attr)
+
+ def set_hooks(self, before_sample, after_sample, after_update):
+ self.before_sample = before_sample
+ self.after_sample = after_sample
+ self.after_update = after_update
+
+ @torch.no_grad()
+ def sample(self,
+ S,
+ batch_size,
+ shape,
+ conditioning=None,
+ callback=None,
+ normals_sequence=None,
+ img_callback=None,
+ quantize_x0=False,
+ eta=0.,
+ mask=None,
+ x0=None,
+ temperature=1.,
+ noise_dropout=0.,
+ score_corrector=None,
+ corrector_kwargs=None,
+ verbose=True,
+ x_T=None,
+ log_every_t=100,
+ unconditional_guidance_scale=1.,
+ unconditional_conditioning=None,
+ # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
+ **kwargs
+ ):
+ if conditioning is not None:
+ if isinstance(conditioning, dict):
+ ctmp = conditioning[list(conditioning.keys())[0]]
+ while isinstance(ctmp, list):
+ ctmp = ctmp[0]
+ cbs = ctmp.shape[0]
+ if cbs != batch_size:
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
+
+ elif isinstance(conditioning, list):
+ for ctmp in conditioning:
+ if ctmp.shape[0] != batch_size:
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
+
+ else:
+ if conditioning.shape[0] != batch_size:
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
+
+ # sampling
+ C, H, W = shape
+ size = (batch_size, C, H, W)
+ # print(f'Data shape for UniPC sampling is {size}')
+
+ device = self.model.betas.device
+ if x_T is None:
+ img = torch.randn(size, device=device)
+ else:
+ img = x_T
+
+ ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
+
+ # SD 1.x models predict noise ("noise"); SD 2.x v-prediction models use "v"
+ model_type = "v" if self.model.parameterization == "v" else "noise"
+
+ model_fn = model_wrapper(
+ lambda x, t, c: self.model.apply_model(x, t, c),
+ ns,
+ model_type=model_type,
+ guidance_type="classifier-free",
+ #condition=conditioning,
+ #unconditional_condition=unconditional_conditioning,
+ guidance_scale=unconditional_guidance_scale,
+ )
+
+ uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False,
+ variant=shared.opts.uni_pc_variant,
+ condition=conditioning, unconditional_condition=unconditional_conditioning,
+ before_sample=self.before_sample, after_sample=self.after_sample,
+ after_update=self.after_update)
+ x = uni_pc.sample(img, steps=S, skip_type=shared.opts.uni_pc_skip_type,
+ method="multistep", order=shared.opts.uni_pc_order,
+ lower_order_final=shared.opts.uni_pc_lower_order_final)
+
+ return x.to(device), None
diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d257a7286fc6be7da4612041287313a3290cf45e
--- /dev/null
+++ b/modules/models/diffusion/uni_pc/uni_pc.py
@@ -0,0 +1,863 @@
+import torch
+import math
+import tqdm
+
+
+class NoiseScheduleVP:
+ def __init__(
+ self,
+ schedule='discrete',
+ betas=None,
+ alphas_cumprod=None,
+ continuous_beta_0=0.1,
+ continuous_beta_1=20.,
+ ):
+ """Create a wrapper class for the forward SDE (VP type).
+
+ ***
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
+ ***
+
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
+
+ log_alpha_t = self.marginal_log_mean_coeff(t)
+ sigma_t = self.marginal_std(t)
+ lambda_t = self.marginal_lambda(t)
+
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
+
+ t = self.inverse_lambda(lambda_t)
+
+ ===============================================================
+
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
+
+ 1. For discrete-time DPMs:
+
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
+ t_i = (i + 1) / N
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
+
+ Args:
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
+
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
+
+ **Important**: Please pay special attention to the argument `alphas_cumprod`:
+ The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
+ and
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
+
+
+ 2. For continuous-time DPMs:
+
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
+ schedule are the default settings in DDPM and improved-DDPM:
+
+ Args:
+ beta_min: A `float` number. The smallest beta for the linear schedule.
+ beta_max: A `float` number. The largest beta for the linear schedule.
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
+ T: A `float` number. The ending time of the forward process.
+
+ ===============================================================
+
+ Args:
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
+ 'linear' or 'cosine' for continuous-time DPMs.
+ Returns:
+ A wrapper object of the forward SDE (VP type).
+
+ ===============================================================
+
+ Example:
+
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
+
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+
+ # For continuous-time DPMs (VPSDE), linear schedule:
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
+
+ """
+
+ if schedule not in ['discrete', 'linear', 'cosine']:
+ raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'")
+
+ self.schedule = schedule
+ if schedule == 'discrete':
+ if betas is not None:
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
+ else:
+ assert alphas_cumprod is not None
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
+ self.total_N = len(log_alphas)
+ self.T = 1.
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
+ self.log_alpha_array = log_alphas.reshape((1, -1,))
+ else:
+ self.total_N = 1000
+ self.beta_0 = continuous_beta_0
+ self.beta_1 = continuous_beta_1
+ self.cosine_s = 0.008
+ self.cosine_beta_max = 999.
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
+ self.schedule = schedule
+ if schedule == 'cosine':
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
+ self.T = 0.9946
+ else:
+ self.T = 1.
+
+ def marginal_log_mean_coeff(self, t):
+ """
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
+ """
+ if self.schedule == 'discrete':
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
+ elif self.schedule == 'linear':
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
+ elif self.schedule == 'cosine':
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
+ return log_alpha_t
+
+ def marginal_alpha(self, t):
+ """
+ Compute alpha_t of a given continuous-time label t in [0, T].
+ """
+ return torch.exp(self.marginal_log_mean_coeff(t))
+
+ def marginal_std(self, t):
+ """
+ Compute sigma_t of a given continuous-time label t in [0, T].
+ """
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
+
+ def marginal_lambda(self, t):
+ """
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
+ """
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
+ return log_mean_coeff - log_std
+
+ def inverse_lambda(self, lamb):
+ """
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
+ """
+ if self.schedule == 'linear':
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
+ Delta = self.beta_0**2 + tmp
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
+ elif self.schedule == 'discrete':
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
+ return t.reshape((-1,))
+ else:
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
+ t = t_fn(log_alpha)
+ return t
+
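+# Illustrative consistency check (assumed linear-beta discrete schedule; not used
+# by the samplers): for any VP schedule, alpha_t^2 + sigma_t^2 = 1 by construction.
+#
+# >>> betas = torch.linspace(1e-4, 2e-2, 1000)
+# >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
+# >>> t = torch.tensor([0.5])
+# >>> bool(torch.allclose(ns.marginal_alpha(t) ** 2 + ns.marginal_std(t) ** 2, torch.ones(1)))
+# True
+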
+
+def model_wrapper(
+ model,
+ noise_schedule,
+ model_type="noise",
+ model_kwargs=None,
+ guidance_type="uncond",
+ #condition=None,
+ #unconditional_condition=None,
+ guidance_scale=1.,
+ classifier_fn=None,
+ classifier_kwargs=None,
+):
+ """Create a wrapper function for the noise prediction model.
+
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
+ firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
+
+ We support four types of the diffusion model by setting `model_type`:
+
+ 1. "noise": noise prediction model. (Trained by predicting noise).
+
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
+
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
+ The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
+
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
+ arXiv preprint arXiv:2202.00512 (2022).
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
+ arXiv preprint arXiv:2210.02303 (2022).
+
+ 4. "score": marginal score function. (Trained by denoising score matching).
+ Note that the score function and the noise prediction model follow a simple relationship:
+ ```
+ noise(x_t, t) = -sigma_t * score(x_t, t)
+ ```
+
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
+ 1. "uncond": unconditional sampling by DPMs.
+ The input `model` has the following format:
+ ``
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+ ``
+
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
+ The input `model` has the following format:
+ ``
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+ ``
+
+ The input `classifier_fn` has the following format:
+ ``
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
+ ``
+
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
+
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
+ The input `model` has the following format:
+ ``
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
+ ``
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
+
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
+ arXiv preprint arXiv:2207.12598 (2022).
+
+
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
+ or continuous-time labels (i.e. epsilon to T).
+
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
+ ``
+ def model_fn(x, t_continuous) -> noise:
+ t_input = get_model_input_time(t_continuous)
+ return noise_pred(model, x, t_input, **model_kwargs)
+ ``
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
+
+ ===============================================================
+
+ Args:
+ model: A diffusion model with the corresponding format described above.
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
+ model_type: A `str`. The parameterization type of the diffusion model.
+ "noise" or "x_start" or "v" or "score".
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
+ guidance_type: A `str`. The type of the guidance for sampling.
+ "uncond" or "classifier" or "classifier-free".
+ condition: A pytorch tensor. The condition for the guided sampling.
+ Only used for "classifier" or "classifier-free" guidance type.
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
+ Only used for "classifier-free" guidance type.
+ guidance_scale: A `float`. The scale for the guided sampling.
+ classifier_fn: A classifier function. Only used for the classifier guidance.
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
+ Returns:
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
+ """
+
+ model_kwargs = model_kwargs or {}
+ classifier_kwargs = classifier_kwargs or {}
+
+ def get_model_input_time(t_continuous):
+ """
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
+ For continuous-time DPMs, we just use `t_continuous`.
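+ For example, with N = 1000: t_continuous = 1.0 maps to t_input = 999.0, and t_continuous = 1 / N maps to 0.0.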
+ """
+ if noise_schedule.schedule == 'discrete':
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
+ else:
+ return t_continuous
+
+ def noise_pred_fn(x, t_continuous, cond=None):
+ if t_continuous.reshape((-1,)).shape[0] == 1:
+ t_continuous = t_continuous.expand((x.shape[0]))
+ t_input = get_model_input_time(t_continuous)
+ if cond is None:
+ output = model(x, t_input, None, **model_kwargs)
+ else:
+ output = model(x, t_input, cond, **model_kwargs)
+ if model_type == "noise":
+ return output
+ elif model_type == "x_start":
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+ dims = x.dim()
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
+ elif model_type == "v":
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+ dims = x.dim()
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
+ elif model_type == "score":
+ sigma_t = noise_schedule.marginal_std(t_continuous)
+ dims = x.dim()
+ return -expand_dims(sigma_t, dims) * output
+
+ def cond_grad_fn(x, t_input, condition):
+ """
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
+ """
+ with torch.enable_grad():
+ x_in = x.detach().requires_grad_(True)
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
+
+ def model_fn(x, t_continuous, condition, unconditional_condition):
+ """
+ The noise prediction model function that is used for DPM-Solver.
+ """
+ if t_continuous.reshape((-1,)).shape[0] == 1:
+ t_continuous = t_continuous.expand((x.shape[0]))
+ if guidance_type == "uncond":
+ return noise_pred_fn(x, t_continuous)
+ elif guidance_type == "classifier":
+ assert classifier_fn is not None
+ t_input = get_model_input_time(t_continuous)
+ cond_grad = cond_grad_fn(x, t_input, condition)
+ sigma_t = noise_schedule.marginal_std(t_continuous)
+ noise = noise_pred_fn(x, t_continuous)
+ return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
+ elif guidance_type == "classifier-free":
+ if guidance_scale == 1. or unconditional_condition is None:
+ return noise_pred_fn(x, t_continuous, cond=condition)
+ else:
+ x_in = torch.cat([x] * 2)
+ t_in = torch.cat([t_continuous] * 2)
+ if isinstance(condition, dict):
+ assert isinstance(unconditional_condition, dict)
+ c_in = {}
+ for k in condition:
+ if isinstance(condition[k], list):
+ c_in[k] = [torch.cat([
+ unconditional_condition[k][i],
+ condition[k][i]]) for i in range(len(condition[k]))]
+ else:
+ c_in[k] = torch.cat([
+ unconditional_condition[k],
+ condition[k]])
+ elif isinstance(condition, list):
+ c_in = []
+ assert isinstance(unconditional_condition, list)
+ for i in range(len(condition)):
+ c_in.append(torch.cat([unconditional_condition[i], condition[i]]))
+ else:
+ c_in = torch.cat([unconditional_condition, condition])
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
+
+ assert model_type in ["noise", "x_start", "v"]
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
+ return model_fn
+
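+# Illustrative wiring (mirrors sampler.py above; the lambda signature is an
+# assumption about the wrapped U-Net, not fixed by this module):
+#
+# >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+# >>> fn = model_wrapper(lambda x, t, c: unet(x, t, c), ns, model_type="noise",
+# ... guidance_type="classifier-free", guidance_scale=7.5)
+# >>> eps = fn(x_t, t_cont, cond, uncond) # CFG: eps_uncond + 7.5 * (eps_cond - eps_uncond)
+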
+
+class UniPC:
+ def __init__(
+ self,
+ model_fn,
+ noise_schedule,
+ predict_x0=True,
+ thresholding=False,
+ max_val=1.,
+ variant='bh1',
+ condition=None,
+ unconditional_condition=None,
+ before_sample=None,
+ after_sample=None,
+ after_update=None
+ ):
+ """Construct a UniPC.
+
+ We support both data_prediction and noise_prediction.
+ """
+ self.model_fn_ = model_fn
+ self.noise_schedule = noise_schedule
+ self.variant = variant
+ self.predict_x0 = predict_x0
+ self.thresholding = thresholding
+ self.max_val = max_val
+ self.condition = condition
+ self.unconditional_condition = unconditional_condition
+ self.before_sample = before_sample
+ self.after_sample = after_sample
+ self.after_update = after_update
+
+ def dynamic_thresholding_fn(self, x0, t=None):
+ """
+ The dynamic thresholding method.
+ """
+ dims = x0.dim()
+ p = self.dynamic_thresholding_ratio
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
+ s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
+ x0 = torch.clamp(x0, -s, s) / s
+ return x0
+
+ def model(self, x, t):
+ cond = self.condition
+ uncond = self.unconditional_condition
+ if self.before_sample is not None:
+ x, t, cond, uncond = self.before_sample(x, t, cond, uncond)
+ res = self.model_fn_(x, t, cond, uncond)
+ if self.after_sample is not None:
+ x, t, cond, uncond, res = self.after_sample(x, t, cond, uncond, res)
+
+ if isinstance(res, tuple):
+ # (None, pred_x0)
+ res = res[1]
+
+ return res
+
+ def noise_prediction_fn(self, x, t):
+ """
+ Return the noise prediction model.
+ """
+ return self.model(x, t)
+
+ def data_prediction_fn(self, x, t):
+ """
+ Return the data prediction model (with thresholding).
+ """
+ noise = self.noise_prediction_fn(x, t)
+ dims = x.dim()
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
+ if self.thresholding:
+ p = 0.995 # A hyperparameter from the Imagen paper [1].
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
+ x0 = torch.clamp(x0, -s, s) / s
+ return x0
+
+ def model_fn(self, x, t):
+ """
+ Convert the model to the noise prediction model or the data prediction model.
+ """
+ if self.predict_x0:
+ return self.data_prediction_fn(x, t)
+ else:
+ return self.noise_prediction_fn(x, t)
+
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
+ """Compute the intermediate time steps for sampling.
+ """
+ if skip_type == 'logSNR':
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
+ elif skip_type == 'time_uniform':
+ return torch.linspace(t_T, t_0, N + 1).to(device)
+ elif skip_type == 'time_quadratic':
+ t_order = 2
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
+ return t
+ else:
+ raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'")
+
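+ # e.g. self.get_time_steps('time_uniform', t_T=1.0, t_0=1e-3, N=5, device='cpu')
+ # -> tensor([1.0000, 0.8002, 0.6004, 0.4006, 0.2008, 0.0010]) (6 boundaries for 5 steps)
+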
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
+ """
+ Get the order of each step for sampling by the singlestep DPM-Solver.
+ """
+ if order == 3:
+ K = steps // 3 + 1
+ if steps % 3 == 0:
+ orders = [3,] * (K - 2) + [2, 1]
+ elif steps % 3 == 1:
+ orders = [3,] * (K - 1) + [1]
+ else:
+ orders = [3,] * (K - 1) + [2]
+ elif order == 2:
+ if steps % 2 == 0:
+ K = steps // 2
+ orders = [2,] * K
+ else:
+ K = steps // 2 + 1
+ orders = [2,] * (K - 1) + [1]
+ elif order == 1:
+ K = steps
+ orders = [1,] * steps
+ else:
+ raise ValueError("'order' must be '1' or '2' or '3'.")
+ if skip_type == 'logSNR':
+ # To reproduce the results in DPM-Solver paper
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
+ else:
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
+ return timesteps_outer, orders
+
+ def denoise_to_zero_fn(self, x, s):
+ """
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
+ """
+ return self.data_prediction_fn(x, s)
+
+ def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs):
+ if len(t.shape) == 0:
+ t = t.view(-1)
+ if 'bh' in self.variant:
+ return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
+ else:
+ assert self.variant == 'vary_coeff'
+ return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
+
+ def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
+ #print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
+ ns = self.noise_schedule
+ assert order <= len(model_prev_list)
+
+ # first compute rks
+ t_prev_0 = t_prev_list[-1]
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
+ lambda_t = ns.marginal_lambda(t)
+ model_prev_0 = model_prev_list[-1]
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
+ log_alpha_t = ns.marginal_log_mean_coeff(t)
+ alpha_t = torch.exp(log_alpha_t)
+
+ h = lambda_t - lambda_prev_0
+
+ rks = []
+ D1s = []
+ for i in range(1, order):
+ t_prev_i = t_prev_list[-(i + 1)]
+ model_prev_i = model_prev_list[-(i + 1)]
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
+ rk = (lambda_prev_i - lambda_prev_0) / h
+ rks.append(rk)
+ D1s.append((model_prev_i - model_prev_0) / rk)
+
+ rks.append(1.)
+ rks = torch.tensor(rks, device=x.device)
+
+ K = len(rks)
+ # build C matrix
+ C = []
+
+ col = torch.ones_like(rks)
+ for k in range(1, K + 1):
+ C.append(col)
+ col = col * rks / (k + 1)
+ C = torch.stack(C, dim=1)
+
+ if len(D1s) > 0:
+ D1s = torch.stack(D1s, dim=1) # (B, K, C, H, W)
+ C_inv_p = torch.linalg.inv(C[:-1, :-1])
+ A_p = C_inv_p
+
+ if use_corrector:
+ #print('using corrector')
+ C_inv = torch.linalg.inv(C)
+ A_c = C_inv
+
+ hh = -h if self.predict_x0 else h
+ h_phi_1 = torch.expm1(hh)
+ h_phi_ks = []
+ factorial_k = 1
+ h_phi_k = h_phi_1
+ for k in range(1, K + 2):
+ h_phi_ks.append(h_phi_k)
+ h_phi_k = h_phi_k / hh - 1 / factorial_k
+ factorial_k *= (k + 1)
+
+ model_t = None
+ if self.predict_x0:
+ x_t_ = (
+ sigma_t / sigma_prev_0 * x
+ - alpha_t * h_phi_1 * model_prev_0
+ )
+ # now predictor
+ x_t = x_t_
+ if len(D1s) > 0:
+ # compute the residuals for predictor
+ for k in range(K - 1):
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
+ # now corrector
+ if use_corrector:
+ model_t = self.model_fn(x_t, t)
+ D1_t = (model_t - model_prev_0)
+ x_t = x_t_
+ k = 0
+ for k in range(K - 1):
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
+ x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
+ else:
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
+ x_t_ = (
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
+ - (sigma_t * h_phi_1) * model_prev_0
+ )
+ # now predictor
+ x_t = x_t_
+ if len(D1s) > 0:
+ # compute the residuals for predictor
+ for k in range(K - 1):
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
+ # now corrector
+ if use_corrector:
+ model_t = self.model_fn(x_t, t)
+ D1_t = (model_t - model_prev_0)
+ x_t = x_t_
+ k = 0
+ for k in range(K - 1):
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
+ x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
+ return x_t, model_t
+
+ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
+ #print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
+ ns = self.noise_schedule
+ assert order <= len(model_prev_list)
+ dims = x.dim()
+
+ # first compute rks
+ t_prev_0 = t_prev_list[-1]
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
+ lambda_t = ns.marginal_lambda(t)
+ model_prev_0 = model_prev_list[-1]
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
+ alpha_t = torch.exp(log_alpha_t)
+
+ h = lambda_t - lambda_prev_0
+
+ rks = []
+ D1s = []
+ for i in range(1, order):
+ t_prev_i = t_prev_list[-(i + 1)]
+ model_prev_i = model_prev_list[-(i + 1)]
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
+ rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
+ rks.append(rk)
+ D1s.append((model_prev_i - model_prev_0) / rk)
+
+ rks.append(1.)
+ rks = torch.tensor(rks, device=x.device)
+
+ R = []
+ b = []
+
+ hh = -h[0] if self.predict_x0 else h[0]
+ h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
+ h_phi_k = h_phi_1 / hh - 1
+
+ factorial_i = 1
+
+ if self.variant == 'bh1':
+ B_h = hh
+ elif self.variant == 'bh2':
+ B_h = torch.expm1(hh)
+ else:
+ raise NotImplementedError()
+
+ for i in range(1, order + 1):
+ R.append(torch.pow(rks, i - 1))
+ b.append(h_phi_k * factorial_i / B_h)
+ factorial_i *= (i + 1)
+ h_phi_k = h_phi_k / hh - 1 / factorial_i
+
+ R = torch.stack(R)
+ b = torch.tensor(b, device=x.device)
+
+ # now predictor
+ use_predictor = len(D1s) > 0 and x_t is None
+ if len(D1s) > 0:
+ D1s = torch.stack(D1s, dim=1) # (B, K, C, H, W)
+ if x_t is None:
+ # for order 2, we use a simplified version
+ if order == 2:
+ rhos_p = torch.tensor([0.5], device=b.device)
+ else:
+ rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
+ else:
+ D1s = None
+
+ if use_corrector:
+ #print('using corrector')
+ # for order 1, we use a simplified version
+ if order == 1:
+ rhos_c = torch.tensor([0.5], device=b.device)
+ else:
+ rhos_c = torch.linalg.solve(R, b)
+
+ model_t = None
+ if self.predict_x0:
+ x_t_ = (
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
+ - expand_dims(alpha_t * h_phi_1, dims)* model_prev_0
+ )
+
+ if x_t is None:
+ if use_predictor:
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
+ else:
+ pred_res = 0
+ x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res
+
+ if use_corrector:
+ model_t = self.model_fn(x_t, t)
+ if D1s is not None:
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
+ else:
+ corr_res = 0
+ D1_t = (model_t - model_prev_0)
+ x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
+ else:
+ x_t_ = (
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
+ - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
+ )
+ if x_t is None:
+ if use_predictor:
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
+ else:
+ pred_res = 0
+ x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * pred_res
+
+ if use_corrector:
+ model_t = self.model_fn(x_t, t)
+ if D1s is not None:
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
+ else:
+ corr_res = 0
+ D1_t = (model_t - model_prev_0)
+ x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
+ return x_t, model_t
+
+
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
+ method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
+ atol=0.0078, rtol=0.05, corrector=False,
+ ):
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
+ t_T = self.noise_schedule.T if t_start is None else t_start
+ device = x.device
+ if method == 'multistep':
+ assert steps >= order, "UniPC order must be <= the number of sampling steps"
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
+ #print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}")
+ assert timesteps.shape[0] - 1 == steps
+ with torch.no_grad():
+ vec_t = timesteps[0].expand((x.shape[0]))
+ model_prev_list = [self.model_fn(x, vec_t)]
+ t_prev_list = [vec_t]
+ with tqdm.tqdm(total=steps) as pbar:
+ # Init the first `order` values by lower order multistep DPM-Solver.
+ for init_order in range(1, order):
+ vec_t = timesteps[init_order].expand(x.shape[0])
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True)
+ if model_x is None:
+ model_x = self.model_fn(x, vec_t)
+ if self.after_update is not None:
+ self.after_update(x, model_x)
+ model_prev_list.append(model_x)
+ t_prev_list.append(vec_t)
+ pbar.update()
+
+ for step in range(order, steps + 1):
+ vec_t = timesteps[step].expand(x.shape[0])
+ if lower_order_final:
+ step_order = min(order, steps + 1 - step)
+ else:
+ step_order = order
+ #print('this step order:', step_order)
+ if step == steps:
+ #print('do not run corrector at the last step')
+ use_corrector = False
+ else:
+ use_corrector = True
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector)
+ if self.after_update is not None:
+ self.after_update(x, model_x)
+ for i in range(order - 1):
+ t_prev_list[i] = t_prev_list[i + 1]
+ model_prev_list[i] = model_prev_list[i + 1]
+ t_prev_list[-1] = vec_t
+ # We do not need to evaluate the final model value.
+ if step < steps:
+ if model_x is None:
+ model_x = self.model_fn(x, vec_t)
+ model_prev_list[-1] = model_x
+ pbar.update()
+ else:
+ raise NotImplementedError()
+ if denoise_to_zero:
+ x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
+ return x
+
+
+#############################################################
+# other utility functions
+#############################################################
+
+def interpolate_fn(x, xp, yp):
+ """
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
+ The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
+
+ Args:
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
+ yp: PyTorch tensor with shape [C, K].
+ Returns:
+ The function values f(x), with shape [N, C].
+ """
+ N, K = x.shape[0], xp.shape[1]
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
+ x_idx = torch.argmin(x_indices, dim=2)
+ cand_start_idx = x_idx - 1
+ start_idx = torch.where(
+ torch.eq(x_idx, 0),
+ torch.tensor(1, device=x.device),
+ torch.where(
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
+ ),
+ )
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
+ start_idx2 = torch.where(
+ torch.eq(x_idx, 0),
+ torch.tensor(0, device=x.device),
+ torch.where(
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
+ ),
+ )
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
+ return cand
+
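+# Worked example: with keypoints (1, 10) and (2, 20), x = 1.5 interpolates halfway:
+#
+# >>> interpolate_fn(torch.tensor([[1.5]]), torch.tensor([[1., 2.]]), torch.tensor([[10., 20.]]))
+# tensor([[15.]])
+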
+
+def expand_dims(v, dims):
+ """
+ Expand the tensor `v` of shape [N] to `dims` dimensions.
+
+ Args:
+ `v`: a PyTorch tensor with shape [N].
+ `dims`: an `int`.
+ Returns:
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] whose total number of dimensions is `dims`.
+ """
+ return v[(...,) + (None,)*(dims - 1)]
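+
+# e.g. expand_dims(torch.ones(4), 4).shape == torch.Size([4, 1, 1, 1]),
+# which broadcasts against a (4, C, H, W) batch.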
diff --git a/modules/ngrok.py b/modules/ngrok.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c713e2765d293186313661474e21bd8779199ec
--- /dev/null
+++ b/modules/ngrok.py
@@ -0,0 +1,30 @@
+import ngrok
+
+# Connect to ngrok for ingress
+def connect(token, port, options):
+ account = None
+ if token is None:
+ token = 'None'
+ else:
+ if ':' in token:
+ # token = authtoken:username:password
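+ # e.g. '2AbCdEf...:alice:secret' (made-up values) -> authtoken '2AbCdEf...', basic auth 'alice:secret'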
+ token, username, password = token.split(':', 2)
+ account = f"{username}:{password}"
+
+ # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py
+ if not options.get('authtoken_from_env'):
+ options['authtoken'] = token
+ if account:
+ options['basic_auth'] = account
+ if not options.get('session_metadata'):
+ options['session_metadata'] = 'stable-diffusion-webui'
+
+
+ try:
+ public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url()
+ except Exception as e:
+ print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n'
+ f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
+ else:
+ print(f'ngrok connected to localhost:{port}! URL: {public_url}\n'
+ 'You can use this link after the launch is complete.')
diff --git a/modules/options.py b/modules/options.py
new file mode 100644
index 0000000000000000000000000000000000000000..0616ee95b05038c8c5659b49aacd9e5fcf2b7f61
--- /dev/null
+++ b/modules/options.py
@@ -0,0 +1,245 @@
+import json
+import sys
+
+import gradio as gr
+
+from modules import errors
+from modules.shared_cmd_options import cmd_opts
+
+
+class OptionInfo:
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False):
+ self.default = default
+ self.label = label
+ self.component = component
+ self.component_args = component_args
+ self.onchange = onchange
+ self.section = section
+ self.refresh = refresh
+ self.do_not_save = False
+
+ self.comment_before = comment_before
+ """HTML text that will be added after label in UI"""
+
+ self.comment_after = comment_after
+ """HTML text that will be added before label in UI"""
+
+ self.infotext = infotext
+
+ self.restrict_api = restrict_api
+ """If True, the setting will not be accessible via API"""
+
+ def link(self, label, url):
+ self.comment_before += f"[{label} ]"
+ return self
+
+ def js(self, label, js_func):
+ self.comment_before += f"[{label} ]"
+ return self
+
+ def info(self, info):
+ self.comment_after += f"({info}) "
+ return self
+
+ def html(self, html):
+ self.comment_after += html
+ return self
+
+ def needs_restart(self):
+ self.comment_after += " (requires restart) "
+ return self
+
+ def needs_reload_ui(self):
+ self.comment_after += " (requires Reload UI) "
+ return self
+
+
+class OptionHTML(OptionInfo):
+ def __init__(self, text):
+ super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))
+
+ self.do_not_save = True
+
+
+def options_section(section_identifier, options_dict):
+ for v in options_dict.values():
+ v.section = section_identifier
+
+ return options_dict
+
+
+options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}
+
+
+class Options:
+ typemap = {int: float}
+
+ def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
+ self.data_labels = data_labels
+ self.data = {k: v.default for k, v in self.data_labels.items()}
+ self.restricted_opts = restricted_opts
+
+ def __setattr__(self, key, value):
+ if key in options_builtin_fields:
+ return super(Options, self).__setattr__(key, value)
+
+ if self.data is not None:
+ if key in self.data or key in self.data_labels:
+ assert not cmd_opts.freeze_settings, "changing settings is disabled"
+
+ info = self.data_labels.get(key, None)
+ if info and info.do_not_save:
+ return
+
+ comp_args = info.component_args if info else None
+ if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ if cmd_opts.hide_ui_dir_config and key in self.restricted_opts:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ self.data[key] = value
+ return
+
+ return super(Options, self).__setattr__(key, value)
+
+ def __getattr__(self, item):
+ if item in options_builtin_fields:
+ return super(Options, self).__getattribute__(item)
+
+ if self.data is not None:
+ if item in self.data:
+ return self.data[item]
+
+ if item in self.data_labels:
+ return self.data_labels[item].default
+
+ return super(Options, self).__getattribute__(item)
+
+ def set(self, key, value, is_api=False, run_callbacks=True):
+ """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
+
+ oldval = self.data.get(key, None)
+ if oldval == value:
+ return False
+
+ option = self.data_labels[key]
+ if option.do_not_save:
+ return False
+
+ if is_api and option.restrict_api:
+ return False
+
+ try:
+ setattr(self, key, value)
+ except RuntimeError:
+ return False
+
+ if run_callbacks and option.onchange is not None:
+ try:
+ option.onchange()
+ except Exception as e:
+ errors.display(e, f"changing setting {key} to {value}")
+ setattr(self, key, oldval)
+ return False
+
+ return True
+
+ def get_default(self, key):
+ """returns the default value for the key"""
+
+ data_label = self.data_labels.get(key)
+ if data_label is None:
+ return None
+
+ return data_label.default
+
+ def save(self, filename):
+ assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
+ with open(filename, "w", encoding="utf8") as file:
+ json.dump(self.data, file, indent=4)
+
+ def same_type(self, x, y):
+ if x is None or y is None:
+ return True
+
+ type_x = self.typemap.get(type(x), type(x))
+ type_y = self.typemap.get(type(y), type(y))
+
+ return type_x == type_y
+
+ def load(self, filename):
+ with open(filename, "r", encoding="utf8") as file:
+ self.data = json.load(file)
+
+ # 1.6.0 VAE defaults
+ if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
+ self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
+
+ # 1.1.1 quicksettings list migration
+ if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
+ self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+
+ # 1.4.0 ui_reorder
+ if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
+ self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
+ bad_settings = 0
+ for k, v in self.data.items():
+ info = self.data_labels.get(k, None)
+ if info is not None and not self.same_type(info.default, v):
+ print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
+ bad_settings += 1
+
+ if bad_settings > 0:
+ print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
+
+ def onchange(self, key, func, call=True):
+ item = self.data_labels.get(key)
+ item.onchange = func
+
+ if call:
+ func()
+
+ def dumpjson(self):
+ d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
+ d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
+ d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+ return json.dumps(d)
+
+ def add_option(self, key, info):
+ self.data_labels[key] = info
+
+ def reorder(self):
+ """reorder settings so that all items related to section always go together"""
+
+ section_ids = {}
+ settings_items = self.data_labels.items()
+ for _, item in settings_items:
+ if item.section not in section_ids:
+ section_ids[item.section] = len(section_ids)
+
+ self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+
+ def cast_value(self, key, value):
+ """casts an arbitrary to the same type as this setting's value with key
+ Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+ """
+
+ if value is None:
+ return None
+
+ default_value = self.data_labels[key].default
+ if default_value is None:
+ default_value = getattr(self, key, None)
+ if default_value is None:
+ return None
+
+ expected_type = type(default_value)
+ if expected_type == bool and value == "False":
+ value = False
+ else:
+ value = expected_type(value)
+
+ return value
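+
+# Minimal usage sketch (assumed option name and default cmd_opts; not one of the webui's real settings):
+#
+# >>> opts = Options({"example_steps": OptionInfo(20, "Example steps")}, restricted_opts=set())
+# >>> opts.example_steps
+# 20
+# >>> opts.set("example_steps", 30)  # runs the option's onchange callback, if any
+# True
+# >>> opts.cast_value("example_steps", "40")
+# 40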
diff --git a/modules/patches.py b/modules/patches.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d36194a3ccddfd852c22cc234601ce6b50a1297
--- /dev/null
+++ b/modules/patches.py
@@ -0,0 +1,64 @@
+from collections import defaultdict
+
+
+def patch(key, obj, field, replacement):
+ """Replaces a function in a module or a class.
+
+ Also stores the original function in this module, which can be retrieved via original(key, obj, field).
+ If the function is already replaced by this caller (key), an exception is raised -- call undo() first.
+
+ Arguments:
+ key: identifying information for who is doing the replacement. You can use __name__.
+ obj: the module or the class
+ field: name of the function as a string
+ replacement: the new function
+
+ Returns:
+ the original function
+ """
+
+ patch_key = (obj, field)
+ if patch_key in originals[key]:
+ raise RuntimeError(f"patch for {field} is already applied")
+
+ original_func = getattr(obj, field)
+ originals[key][patch_key] = original_func
+
+ setattr(obj, field, replacement)
+
+ return original_func
+
+
+def undo(key, obj, field):
+ """Undoes the peplacement by the patch().
+
+ If the function is not replaced, raises an exception.
+
+ Arguments:
+ key: identifying information for who is doing the replacement. You can use __name__.
+ obj: the module or the class
+ field: name of the function as a string
+
+ Returns:
+ Always None
+ """
+
+ patch_key = (obj, field)
+
+ if patch_key not in originals[key]:
+ raise RuntimeError(f"there is no patch for {field} to undo")
+
+ original_func = originals[key].pop(patch_key)
+ setattr(obj, field, original_func)
+
+ return None
+
+
+def original(key, obj, field):
+ """Returns the original function for the patch created by the patch() function"""
+ patch_key = (obj, field)
+
+ return originals[key].get(patch_key, None)
+
+
+originals = defaultdict(dict)
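+
+# Illustrative round trip (math.sqrt stands in for any patched function):
+#
+# >>> import math
+# >>> original_sqrt = patch(__name__, math, 'sqrt', lambda x: 0.0)
+# >>> math.sqrt(4.0)
+# 0.0
+# >>> original(__name__, math, 'sqrt') is original_sqrt
+# True
+# >>> undo(__name__, math, 'sqrt')
+# >>> math.sqrt(4.0)
+# 2.0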
diff --git a/modules/paths.py b/modules/paths.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fecc5eb01daeedece36c469e80ac6c89fa95623
--- /dev/null
+++ b/modules/paths.py
@@ -0,0 +1,65 @@
+import os
+import sys
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401
+
+import modules.safe # noqa: F401
+
+
+def mute_sdxl_imports():
+ """create fake modules that SDXL wants to import but doesn't actually use for our purposes"""
+
+ class Dummy:
+ pass
+
+ module = Dummy()
+ module.LPIPS = None
+ sys.modules['taming.modules.losses.lpips'] = module
+
+ module = Dummy()
+ module.StableDataModuleFromConfig = None
+ sys.modules['sgm.data'] = module
+
+
+# data_path = cmd_opts_pre.data
+sys.path.insert(0, script_path)
+
+# search for directory of stable diffusion in following places
+sd_path = None
+possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion-stability-ai'), '.', os.path.dirname(script_path)]
+for possible_sd_path in possible_sd_paths:
+ if os.path.exists(os.path.join(possible_sd_path, 'ldm/models/diffusion/ddpm.py')):
+ sd_path = os.path.abspath(possible_sd_path)
+ break
+
+assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}"
+
+mute_sdxl_imports()
+
+path_dirs = [
+ (sd_path, 'ldm', 'Stable Diffusion', []),
+ (os.path.join(sd_path, '../generative-models'), 'sgm', 'Stable Diffusion XL', ["sgm"]),
+ (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []),
+ (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
+ (os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
+]
+
+paths = {}
+
+for d, must_exist, what, options in path_dirs:
+ must_exist_path = os.path.abspath(os.path.join(script_path, d, must_exist))
+ if not os.path.exists(must_exist_path):
+ print(f"Warning: {what} not found at path {must_exist_path}", file=sys.stderr)
+ else:
+ d = os.path.abspath(d)
+ if "atstart" in options:
+ sys.path.insert(0, d)
+ elif "sgm" in options:
+            # the Stable Diffusion XL repo has a scripts dir with __init__.py in it, which ruins every extension's scripts dir, so we
+            # import sgm and remove it from sys.path so that when a script imports scripts.something, it doesn't use sgm's scripts dir.
+
+ sys.path.insert(0, d)
+ import sgm # noqa: F401
+ sys.path.pop(0)
+ else:
+ sys.path.append(d)
+ paths[what] = d
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..7db614b250e821ad729c426a96445bdfb74910eb
--- /dev/null
+++ b/modules/postprocessing.py
@@ -0,0 +1,108 @@
+import os
+
+from PIL import Image
+
+from modules import shared, images, devices, scripts, scripts_postprocessing, ui_common, generation_parameters_copypaste
+from modules.shared import opts
+
+
+def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
+ devices.torch_gc()
+
+ shared.state.begin(job="extras")
+
+ outputs = []
+
+ def get_images(extras_mode, image, image_folder, input_dir):
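+        # extras_mode: 1 = batch of uploaded files, 2 = batch from an input directory, otherwise a single image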
+ if extras_mode == 1:
+ for img in image_folder:
+ if isinstance(img, Image.Image):
+ image = img
+ fn = ''
+ else:
+ image = Image.open(os.path.abspath(img.name))
+ fn = os.path.splitext(img.orig_name)[0]
+ yield image, fn
+ elif extras_mode == 2:
+ assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
+ assert input_dir, 'input directory not selected'
+
+ image_list = shared.listfiles(input_dir)
+ for filename in image_list:
+ try:
+ image = Image.open(filename)
+ except Exception:
+ continue
+ yield image, filename
+ else:
+ assert image, 'image not selected'
+ yield image, None
+
+ if extras_mode == 2 and output_dir != '':
+ outpath = output_dir
+ else:
+ outpath = opts.outdir_samples or opts.outdir_extras_samples
+
+ infotext = ''
+
+ for image_data, name in get_images(extras_mode, image, image_folder, input_dir):
+ image_data: Image.Image
+
+ shared.state.textinfo = name
+
+ parameters, existing_pnginfo = images.read_info_from_image(image_data)
+ if parameters:
+ existing_pnginfo["parameters"] = parameters
+
+ pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
+
+ scripts.scripts_postproc.run(pp, args)
+
+ if opts.use_original_name_batch and name is not None:
+ basename = os.path.splitext(os.path.basename(name))[0]
+ else:
+ basename = ''
+
+ infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
+
+ if opts.enable_pnginfo:
+ pp.image.info = existing_pnginfo
+ pp.image.info["postprocessing"] = infotext
+
+ if save_output:
+ images.save_image(pp.image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None)
+
+ if extras_mode != 2 or show_extras_results:
+ outputs.append(pp.image)
+
+ image_data.close()
+
+ devices.torch_gc()
+
+ return outputs, ui_common.plaintext_to_html(infotext), ''
+
+
+def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
+ """old handler for API"""
+
+ args = scripts.scripts_postproc.create_args_for_run({
+ "Upscale": {
+ "upscale_mode": resize_mode,
+ "upscale_by": upscaling_resize,
+ "upscale_to_width": upscaling_resize_w,
+ "upscale_to_height": upscaling_resize_h,
+ "upscale_crop": upscaling_crop,
+ "upscaler_1_name": extras_upscaler_1,
+ "upscaler_2_name": extras_upscaler_2,
+ "upscaler_2_visibility": extras_upscaler_2_visibility,
+ },
+ "GFPGAN": {
+ "gfpgan_visibility": gfpgan_visibility,
+ },
+ "CodeFormer": {
+ "codeformer_visibility": codeformer_visibility,
+ "codeformer_weight": codeformer_weight,
+ },
+ })
+
+ return run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output=save_output)
diff --git a/modules/processing.py b/modules/processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc0a479515774375f203212574ab2a8c65480b85
--- /dev/null
+++ b/modules/processing.py
@@ -0,0 +1,1539 @@
+from __future__ import annotations
+import json
+import logging
+import math
+import os
+import sys
+import hashlib
+from dataclasses import dataclass, field
+
+import torch
+import numpy as np
+from PIL import Image, ImageOps
+import random
+import cv2
+from skimage import exposure
+from typing import Any
+
+import modules.sd_hijack
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
+from modules.rng import slerp # noqa: F401
+from modules.sd_hijack import model_hijack
+from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
+from modules.shared import opts, cmd_opts, state
+import modules.shared as shared
+import modules.paths as paths
+import modules.face_restoration
+import modules.images as images
+import modules.styles
+import modules.sd_models as sd_models
+import modules.sd_vae as sd_vae
+from ldm.data.util import AddMiDaS
+from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
+
+from einops import repeat, rearrange
+from blendmodes.blend import blendLayers, BlendType
+
+
+# some of those options should not be changed at all because they would break the model, so I removed them from options.
+opt_C = 4
+opt_f = 8
+
+
+def setup_color_correction(image):
+ logging.info("Calibrating color correction.")
+ correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
+ return correction_target
+
+
+def apply_color_correction(correction, original_image):
+ logging.info("Applying color correction.")
+ image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
+ cv2.cvtColor(
+ np.asarray(original_image),
+ cv2.COLOR_RGB2LAB
+ ),
+ correction,
+ channel_axis=2
+ ), cv2.COLOR_LAB2RGB).astype("uint8"))
+
+ image = blendLayers(image, original_image, BlendType.LUMINOSITY)
+
+ return image.convert('RGB')
+
+
+def apply_overlay(image, paste_loc, index, overlays):
+ if overlays is None or index >= len(overlays):
+ return image
+
+ overlay = overlays[index]
+
+ if paste_loc is not None:
+ x, y, w, h = paste_loc
+ base_image = Image.new('RGBA', (overlay.width, overlay.height))
+ image = images.resize_image(1, image, w, h)
+ base_image.paste(image, (x, y))
+ image = base_image
+
+ image = image.convert('RGBA')
+ image.alpha_composite(overlay)
+ image = image.convert('RGB')
+
+ return image
+
+
+def create_binary_mask(image):
+    # use the image's alpha channel as the mask when it is not fully opaque, thresholding at 128
+    if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
+        image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
+    else:
+        image = image.convert('L')
+    return image
+
+
+def txt2img_image_conditioning(sd_model, x, width, height):
+ if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models
+
+ # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+ image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+ image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
+
+ # Add the fake full 1s mask to the first dimension.
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+ image_conditioning = image_conditioning.to(x.dtype)
+
+ return image_conditioning
+
+ elif sd_model.model.conditioning_key == "crossattn-adm": # UnCLIP models
+
+ return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
+
+ else:
+ # Dummy zero conditioning if we're not using inpainting or unclip models.
+ # Still takes up a bit of memory, but no encoder call.
+        # Pretty sure we can just make this a 1x1 image since it's not going to be used for anything besides its batch size.
+ return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+
+
+@dataclass(repr=False)
+class StableDiffusionProcessing:
+ sd_model: object = None
+ outpath_samples: str = None
+ outpath_grids: str = None
+ prompt: str = ""
+ prompt_for_display: str = None
+ negative_prompt: str = ""
+ styles: list[str] = None
+ seed: int = -1
+ subseed: int = -1
+ subseed_strength: float = 0
+ seed_resize_from_h: int = -1
+ seed_resize_from_w: int = -1
+ seed_enable_extras: bool = True
+ sampler_name: str = None
+ batch_size: int = 1
+ n_iter: int = 1
+ steps: int = 50
+ cfg_scale: float = 7.0
+ width: int = 512
+ height: int = 512
+ restore_faces: bool = None
+ tiling: bool = None
+ do_not_save_samples: bool = False
+ do_not_save_grid: bool = False
+ extra_generation_params: dict[str, Any] = None
+ overlay_images: list = None
+ eta: float = None
+ do_not_reload_embeddings: bool = False
+ denoising_strength: float = 0
+ ddim_discretize: str = None
+ s_min_uncond: float = None
+ s_churn: float = None
+ s_tmax: float = None
+ s_tmin: float = None
+ s_noise: float = None
+ override_settings: dict[str, Any] = None
+ override_settings_restore_afterwards: bool = True
+ sampler_index: int = None
+ refiner_checkpoint: str = None
+ refiner_switch_at: float = None
+ token_merging_ratio = 0
+ token_merging_ratio_hr = 0
+ disable_extra_networks: bool = False
+
+ scripts_value: scripts.ScriptRunner = field(default=None, init=False)
+ script_args_value: list = field(default=None, init=False)
+ scripts_setup_complete: bool = field(default=False, init=False)
+
+ cached_uc = [None, None]
+ cached_c = [None, None]
+
+ comments: dict = None
+ sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
+ is_using_inpainting_conditioning: bool = field(default=False, init=False)
+ paste_to: tuple | None = field(default=None, init=False)
+
+ is_hr_pass: bool = field(default=False, init=False)
+
+ c: tuple = field(default=None, init=False)
+ uc: tuple = field(default=None, init=False)
+
+ rng: rng.ImageRNG | None = field(default=None, init=False)
+ step_multiplier: int = field(default=1, init=False)
+ color_corrections: list = field(default=None, init=False)
+
+ all_prompts: list = field(default=None, init=False)
+ all_negative_prompts: list = field(default=None, init=False)
+ all_seeds: list = field(default=None, init=False)
+ all_subseeds: list = field(default=None, init=False)
+ iteration: int = field(default=0, init=False)
+ main_prompt: str = field(default=None, init=False)
+ main_negative_prompt: str = field(default=None, init=False)
+
+ prompts: list = field(default=None, init=False)
+ negative_prompts: list = field(default=None, init=False)
+ seeds: list = field(default=None, init=False)
+ subseeds: list = field(default=None, init=False)
+ extra_network_data: dict = field(default=None, init=False)
+
+ user: str = field(default=None, init=False)
+
+ sd_model_name: str = field(default=None, init=False)
+ sd_model_hash: str = field(default=None, init=False)
+ sd_vae_name: str = field(default=None, init=False)
+ sd_vae_hash: str = field(default=None, init=False)
+
+ is_api: bool = field(default=False, init=False)
+
+ def __post_init__(self):
+ if self.sampler_index is not None:
+ print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
+
+ self.comments = {}
+
+ if self.styles is None:
+ self.styles = []
+
+ self.sampler_noise_scheduler_override = None
+ self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
+ self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
+ self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
+ self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
+ self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
+
+ self.extra_generation_params = self.extra_generation_params or {}
+ self.override_settings = self.override_settings or {}
+ self.script_args = self.script_args or {}
+
+ self.refiner_checkpoint_info = None
+
+ if not self.seed_enable_extras:
+ self.subseed = -1
+ self.subseed_strength = 0
+ self.seed_resize_from_h = 0
+ self.seed_resize_from_w = 0
+
+ self.cached_uc = StableDiffusionProcessing.cached_uc
+ self.cached_c = StableDiffusionProcessing.cached_c
+
+ @property
+ def sd_model(self):
+ return shared.sd_model
+
+ @sd_model.setter
+ def sd_model(self, value):
+ pass
+
+ @property
+ def scripts(self):
+ return self.scripts_value
+
+ @scripts.setter
+ def scripts(self, value):
+ self.scripts_value = value
+
+ if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+ self.setup_scripts()
+
+ @property
+ def script_args(self):
+ return self.script_args_value
+
+ @script_args.setter
+ def script_args(self, value):
+ self.script_args_value = value
+
+ if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+ self.setup_scripts()
+
+ def setup_scripts(self):
+ self.scripts_setup_complete = True
+
+ self.scripts.setup_scrips(self, is_ui=not self.is_api)
+
+ def comment(self, text):
+ self.comments[text] = 1
+
+ def txt2img_image_conditioning(self, x, width=None, height=None):
+ self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}
+
+ return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)
+
+ def depth2img_image_conditioning(self, source_image):
+        # Use the AddMiDaS helper to format our source image to suit the MiDaS model
+ transformer = AddMiDaS(model_type="dpt_hybrid")
+ transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
+ midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
+ midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
+
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
+ conditioning = torch.nn.functional.interpolate(
+ self.sd_model.depth_model(midas_in),
+ size=conditioning_image.shape[2:],
+ mode="bicubic",
+ align_corners=False,
+ )
+
+ (depth_min, depth_max) = torch.aminmax(conditioning)
+ conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
+ return conditioning
+
+ def edit_image_conditioning(self, source_image):
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
+
+ return conditioning_image
+
+ def unclip_image_conditioning(self, source_image):
+ c_adm = self.sd_model.embedder(source_image)
+ if self.sd_model.noise_augmentor is not None:
+ noise_level = 0 # TODO: Allow other noise levels?
+ c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
+ c_adm = torch.cat((c_adm, noise_level_emb), 1)
+ return c_adm
+
+ def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
+ self.is_using_inpainting_conditioning = True
+
+ # Handle the different mask inputs
+ if image_mask is not None:
+ if torch.is_tensor(image_mask):
+ conditioning_mask = image_mask
+ else:
+ conditioning_mask = np.array(image_mask.convert("L"))
+ conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
+ conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
+
+ # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
+ conditioning_mask = torch.round(conditioning_mask)
+ else:
+ conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])
+
+ # Create another latent image, this time with a masked version of the original input.
+ # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
+ conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
+ conditioning_image = torch.lerp(
+ source_image,
+ source_image * (1.0 - conditioning_mask),
+ getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
+ )
+
+ # Encode the new masked image using first stage of network.
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
+
+ # Create the concatenated conditioning tensor to be fed to `c_concat`
+ conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
+ conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
+ image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
+ image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)
+
+ return image_conditioning
+
+ def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
+ source_image = devices.cond_cast_float(source_image)
+
+ # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
+ # identify itself with a field common to all models. The conditioning_key is also hybrid.
+ if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
+ return self.depth2img_image_conditioning(source_image)
+
+ if self.sd_model.cond_stage_key == "edit":
+ return self.edit_image_conditioning(source_image)
+
+ if self.sampler.conditioning_key in {'hybrid', 'concat'}:
+ return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+
+ if self.sampler.conditioning_key == "crossattn-adm":
+ return self.unclip_image_conditioning(source_image)
+
+ # Dummy zero conditioning if we're not using inpainting or depth model.
+ return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
+
+ def init(self, all_prompts, all_seeds, all_subseeds):
+ pass
+
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
+ raise NotImplementedError()
+
+ def close(self):
+ self.sampler = None
+ self.c = None
+ self.uc = None
+ if not opts.persistent_cond_cache:
+ StableDiffusionProcessing.cached_c = [None, None]
+ StableDiffusionProcessing.cached_uc = [None, None]
+
+ def get_token_merging_ratio(self, for_hr=False):
+ if for_hr:
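+            # fall back through: per-job hr ratio -> global hr ratio -> per-job ratio -> global ratio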
+ return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio
+
+ return self.token_merging_ratio or opts.token_merging_ratio
+
+ def setup_prompts(self):
+        if isinstance(self.prompt, list):
+ self.all_prompts = self.prompt
+ elif isinstance(self.negative_prompt, list):
+ self.all_prompts = [self.prompt] * len(self.negative_prompt)
+ else:
+ self.all_prompts = self.batch_size * self.n_iter * [self.prompt]
+
+ if isinstance(self.negative_prompt, list):
+ self.all_negative_prompts = self.negative_prompt
+ else:
+ self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts)
+
+ if len(self.all_prompts) != len(self.all_negative_prompts):
+ raise RuntimeError(f"Received a different number of prompts ({len(self.all_prompts)}) and negative prompts ({len(self.all_negative_prompts)})")
+
+ self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
+ self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
+
+ self.main_prompt = self.all_prompts[0]
+ self.main_negative_prompt = self.all_negative_prompts[0]
+
+ def cached_params(self, required_prompts, steps, extra_network_data, hires_steps=None, use_old_scheduling=False):
+ """Returns parameters that invalidate the cond cache if changed"""
+
+ return (
+ required_prompts,
+ steps,
+ hires_steps,
+ use_old_scheduling,
+ opts.CLIP_stop_at_last_layers,
+ shared.sd_model.sd_checkpoint_info,
+ extra_network_data,
+ opts.sdxl_crop_left,
+ opts.sdxl_crop_top,
+ self.width,
+ self.height,
+ )
+
+ def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data, hires_steps=None):
+ """
+        Returns the result of calling function(shared.sd_model, required_prompts, steps, hires_steps, use_old_scheduling)
+ using a cache to store the result if the same arguments have been used before.
+
+ cache is an array containing two elements. The first element is a tuple
+ representing the previously used arguments, or None if no arguments
+ have been used before. The second element is where the previously
+ computed result is stored.
+
+ caches is a list with items described above.
+ """
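+        # Illustrative cache contents after one call (a sketch; values are placeholders):
+        #   cache[0] == the tuple returned by self.cached_params(...) for the last call
+        #   cache[1] == the conditioning computed by function(...)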
+
+ if shared.opts.use_old_scheduling:
+ old_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, False)
+ new_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, True)
+ if old_schedules != new_schedules:
+ self.extra_generation_params["Old prompt editing timelines"] = True
+
+ cached_params = self.cached_params(required_prompts, steps, extra_network_data, hires_steps, shared.opts.use_old_scheduling)
+
+ for cache in caches:
+ if cache[0] is not None and cached_params == cache[0]:
+ return cache[1]
+
+ cache = caches[0]
+
+ with devices.autocast():
+ cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
+
+ cache[0] = cached_params
+ return cache[1]
+
+ def setup_conds(self):
+ prompts = prompt_parser.SdConditioning(self.prompts, width=self.width, height=self.height)
+ negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)
+
+ sampler_config = sd_samplers.find_sampler_config(self.sampler_name)
+ total_steps = sampler_config.total_steps(self.steps) if sampler_config else self.steps
+ self.step_multiplier = total_steps // self.steps
+ self.firstpass_steps = total_steps
+
+ self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
+ self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
+
+ def get_conds(self):
+ return self.c, self.uc
+
+ def parse_extra_network_prompts(self):
+ self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts)
+
+ def save_samples(self) -> bool:
+ """Returns whether generated images need to be written to disk"""
+        return opts.samples_save and not self.do_not_save_samples and (opts.save_incomplete_images or (not state.interrupted and not state.skipped))
+
+
+class Processed:
+ def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
+ self.images = images_list
+ self.prompt = p.prompt
+ self.negative_prompt = p.negative_prompt
+ self.seed = seed
+ self.subseed = subseed
+ self.subseed_strength = p.subseed_strength
+ self.info = info
+ self.comments = "".join(f"{comment}\n" for comment in p.comments)
+ self.width = p.width
+ self.height = p.height
+ self.sampler_name = p.sampler_name
+ self.cfg_scale = p.cfg_scale
+ self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+ self.steps = p.steps
+ self.batch_size = p.batch_size
+ self.restore_faces = p.restore_faces
+ self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
+ self.sd_model_name = p.sd_model_name
+ self.sd_model_hash = p.sd_model_hash
+ self.sd_vae_name = p.sd_vae_name
+ self.sd_vae_hash = p.sd_vae_hash
+ self.seed_resize_from_w = p.seed_resize_from_w
+ self.seed_resize_from_h = p.seed_resize_from_h
+ self.denoising_strength = getattr(p, 'denoising_strength', None)
+ self.extra_generation_params = p.extra_generation_params
+ self.index_of_first_image = index_of_first_image
+ self.styles = p.styles
+ self.job_timestamp = state.job_timestamp
+ self.clip_skip = opts.CLIP_stop_at_last_layers
+ self.token_merging_ratio = p.token_merging_ratio
+ self.token_merging_ratio_hr = p.token_merging_ratio_hr
+
+ self.eta = p.eta
+ self.ddim_discretize = p.ddim_discretize
+ self.s_churn = p.s_churn
+ self.s_tmin = p.s_tmin
+ self.s_tmax = p.s_tmax
+ self.s_noise = p.s_noise
+ self.s_min_uncond = p.s_min_uncond
+ self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
+ self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0]
+ self.negative_prompt = self.negative_prompt if not isinstance(self.negative_prompt, list) else self.negative_prompt[0]
+ self.seed = int(self.seed if not isinstance(self.seed, list) else self.seed[0]) if self.seed is not None else -1
+ self.subseed = int(self.subseed if not isinstance(self.subseed, list) else self.subseed[0]) if self.subseed is not None else -1
+ self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning
+
+ self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
+ self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
+ self.all_seeds = all_seeds or p.all_seeds or [self.seed]
+ self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
+ self.infotexts = infotexts or [info]
+
+ def js(self):
+ obj = {
+ "prompt": self.all_prompts[0],
+ "all_prompts": self.all_prompts,
+ "negative_prompt": self.all_negative_prompts[0],
+ "all_negative_prompts": self.all_negative_prompts,
+ "seed": self.seed,
+ "all_seeds": self.all_seeds,
+ "subseed": self.subseed,
+ "all_subseeds": self.all_subseeds,
+ "subseed_strength": self.subseed_strength,
+ "width": self.width,
+ "height": self.height,
+ "sampler_name": self.sampler_name,
+ "cfg_scale": self.cfg_scale,
+ "steps": self.steps,
+ "batch_size": self.batch_size,
+ "restore_faces": self.restore_faces,
+ "face_restoration_model": self.face_restoration_model,
+ "sd_model_name": self.sd_model_name,
+ "sd_model_hash": self.sd_model_hash,
+ "sd_vae_name": self.sd_vae_name,
+ "sd_vae_hash": self.sd_vae_hash,
+ "seed_resize_from_w": self.seed_resize_from_w,
+ "seed_resize_from_h": self.seed_resize_from_h,
+ "denoising_strength": self.denoising_strength,
+ "extra_generation_params": self.extra_generation_params,
+ "index_of_first_image": self.index_of_first_image,
+ "infotexts": self.infotexts,
+ "styles": self.styles,
+ "job_timestamp": self.job_timestamp,
+ "clip_skip": self.clip_skip,
+ "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
+ }
+
+ return json.dumps(obj)
+
+ def infotext(self, p: StableDiffusionProcessing, index):
+ return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
+
+ def get_token_merging_ratio(self, for_hr=False):
+ return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio
+
+
+def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
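+    # a thin wrapper around rng.ImageRNG, presumably kept for backward compatibility with older callers (the p argument is unused)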
+ g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w)
+ return g.next()
+
+
+class DecodedSamples(list):
+ already_decoded = True
+
+
+def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
+ samples = DecodedSamples()
+
+ for i in range(batch.shape[0]):
+ sample = decode_first_stage(model, batch[i:i + 1])[0]
+
+ if check_for_nans:
+ try:
+ devices.test_for_nans(sample, "vae")
+            except devices.NansException:
+                if devices.dtype_vae == torch.float32 or not shared.opts.auto_vae_precision:
+                    raise
+
+ errors.print_error_explanation(
+ "A tensor with all NaNs was produced in VAE.\n"
+ "Web UI will now convert VAE into 32-bit float and retry.\n"
+ "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n"
+ "To always start with 32-bit VAE, use --no-half-vae commandline flag."
+ )
+
+ devices.dtype_vae = torch.float32
+ model.first_stage_model.to(devices.dtype_vae)
+ batch = batch.to(devices.dtype_vae)
+
+ sample = decode_first_stage(model, batch[i:i + 1])[0]
+
+ if target_device is not None:
+ sample = sample.to(target_device)
+
+ samples.append(sample)
+
+ return samples
+
+
+def get_fixed_seed(seed):
+ if seed == '' or seed is None:
+ seed = -1
+ elif isinstance(seed, str):
+ try:
+ seed = int(seed)
+ except Exception:
+ seed = -1
+
+ if seed == -1:
+ return int(random.randrange(4294967294))
+
+ return seed
+
+
+def fix_seed(p):
+ p.seed = get_fixed_seed(p.seed)
+ p.subseed = get_fixed_seed(p.subseed)
+
+
+def program_version():
+ import launch
+
+ res = launch.git_tag()
+ if res == "":
+ res = None
+
+ return res
+
+
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+ if index is None:
+ index = position_in_batch + iteration * p.batch_size
+
+ if all_negative_prompts is None:
+ all_negative_prompts = p.all_negative_prompts
+
+ clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
+ enable_hr = getattr(p, 'enable_hr', False)
+ token_merging_ratio = p.get_token_merging_ratio()
+ token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)
+
+ uses_ensd = opts.eta_noise_seed_delta != 0
+ if uses_ensd:
+ uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)
+
+ generation_params = {
+ "Steps": p.steps,
+ "Sampler": p.sampler_name,
+ "CFG scale": p.cfg_scale,
+ "Image CFG scale": getattr(p, 'image_cfg_scale', None),
+ "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
+ "Face restoration": opts.face_restoration_model if p.restore_faces else None,
+ "Size": f"{p.width}x{p.height}",
+ "Model hash": p.sd_model_hash if opts.add_model_hash_to_info else None,
+ "Model": p.sd_model_name if opts.add_model_name_to_info else None,
+ "VAE hash": p.sd_vae_hash if opts.add_model_hash_to_info else None,
+ "VAE": p.sd_vae_name if opts.add_model_name_to_info else None,
+ "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
+ "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
+ "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
+ "Denoising strength": getattr(p, 'denoising_strength', None),
+ "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
+ "Clip skip": None if clip_skip <= 1 else clip_skip,
+ "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
+ "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio,
+ "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
+ "Init image hash": getattr(p, 'init_img_hash', None),
+ "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
+ "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
+ "Tiling": "True" if p.tiling else None,
+ **p.extra_generation_params,
+ "Version": program_version() if opts.add_version_to_infotext else None,
+ "User": p.user if opts.add_user_name_to_info else None,
+ }
+
+ generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
+
+ prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
+ negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
+
+ return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
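+
+
+# Example of the infotext produced above (illustrative values only):
+#   a photo of a cat
+#   Negative prompt: blurry, low quality
+#   Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 12345, Size: 512x512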
+
+
+def process_images(p: StableDiffusionProcessing) -> Processed:
+ if p.scripts is not None:
+ p.scripts.before_process(p)
+
+ stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
+
+ try:
+        # if there is no checkpoint override, or the override checkpoint can't be found, remove the override entry and load the opts checkpoint.
+        # Also, if the refiner model was not unloaded after running, the webui swaps back to the main model here; if a model override is present, it will be reloaded afterwards.
+ if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None:
+ p.override_settings.pop('sd_model_checkpoint', None)
+ sd_models.reload_model_weights()
+
+ for k, v in p.override_settings.items():
+ opts.set(k, v, is_api=True, run_callbacks=False)
+
+ if k == 'sd_model_checkpoint':
+ sd_models.reload_model_weights()
+
+ if k == 'sd_vae':
+ sd_vae.reload_vae_weights()
+
+ sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())
+
+ res = process_images_inner(p)
+
+ finally:
+ sd_models.apply_token_merging(p.sd_model, 0)
+
+ # restore opts to original state
+ if p.override_settings_restore_afterwards:
+ for k, v in stored_opts.items():
+ setattr(opts, k, v)
+
+ if k == 'sd_vae':
+ sd_vae.reload_vae_weights()
+
+ return res
+
+
+def process_images_inner(p: StableDiffusionProcessing) -> Processed:
+    """this is the main loop that both txt2img and img2img use; it calls p.init() once inside all the scopes and p.sample() once per batch"""
+
+ if isinstance(p.prompt, list):
+        assert len(p.prompt) > 0
+ else:
+ assert p.prompt is not None
+
+ devices.torch_gc()
+
+ seed = get_fixed_seed(p.seed)
+ subseed = get_fixed_seed(p.subseed)
+
+ if p.restore_faces is None:
+ p.restore_faces = opts.face_restoration
+
+ if p.tiling is None:
+ p.tiling = opts.tiling
+
+ if p.refiner_checkpoint not in (None, "", "None", "none"):
+ p.refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(p.refiner_checkpoint)
+ if p.refiner_checkpoint_info is None:
+ raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}')
+
+ p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra
+ p.sd_model_hash = shared.sd_model.sd_model_hash
+ p.sd_vae_name = sd_vae.get_loaded_vae_name()
+ p.sd_vae_hash = sd_vae.get_loaded_vae_hash()
+
+ modules.sd_hijack.model_hijack.apply_circular(p.tiling)
+ modules.sd_hijack.model_hijack.clear_comments()
+
+ p.setup_prompts()
+
+ if isinstance(seed, list):
+ p.all_seeds = seed
+ else:
+ p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
+
+ if isinstance(subseed, list):
+ p.all_subseeds = subseed
+ else:
+ p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
+
+ if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
+ model_hijack.embedding_db.load_textual_inversion_embeddings()
+
+ if p.scripts is not None:
+ p.scripts.process(p)
+
+ infotexts = []
+ output_images = []
+
+ with torch.no_grad(), p.sd_model.ema_scope():
+ with devices.autocast():
+ p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
+
+            # on macOS, loading the model during sampling changes the generated picture, so it is loaded here
+ if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
+ sd_vae_approx.model()
+
+ sd_unet.apply_unet()
+
+ if state.job_count == -1:
+ state.job_count = p.n_iter
+
+ for n in range(p.n_iter):
+ p.iteration = n
+
+ if state.skipped:
+ state.skipped = False
+
+ if state.interrupted:
+ break
+
+ sd_models.reload_model_weights() # model can be changed for example by refiner
+
+ p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+ p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+ p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
+ p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+
+ p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
+
+ if p.scripts is not None:
+ p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
+
+ if len(p.prompts) == 0:
+ break
+
+ p.parse_extra_network_prompts()
+
+ if not p.disable_extra_networks:
+ with devices.autocast():
+ extra_networks.activate(p, p.extra_network_data)
+
+ if p.scripts is not None:
+ p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
+
+ # params.txt should be saved after scripts.process_batch, since the
+ # infotext could be modified by that callback
+ # Example: a wildcard processed by process_batch sets an extra model
+ # strength, which is saved as "Model Strength: 1.0" in the infotext
+ if n == 0:
+ with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
+ processed = Processed(p, [])
+ file.write(processed.infotext(p, 0))
+
+ p.setup_conds()
+
+ for comment in model_hijack.comments:
+ p.comment(comment)
+
+ p.extra_generation_params.update(model_hijack.extra_generation_params)
+
+ if p.n_iter > 1:
+ shared.state.job = f"Batch {n+1} out of {p.n_iter}"
+
+ with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
+ samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
+
+ if getattr(samples_ddim, 'already_decoded', False):
+ x_samples_ddim = samples_ddim
+ else:
+ if opts.sd_vae_decode_method != 'Full':
+ p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
+
+ x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+
+ x_samples_ddim = torch.stack(x_samples_ddim).float()
+ x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+
+ del samples_ddim
+
+ if lowvram.is_enabled(shared.sd_model):
+ lowvram.send_everything_to_cpu()
+
+ devices.torch_gc()
+
+ if p.scripts is not None:
+ p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
+
+ p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+ p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+
+ batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
+ p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+ x_samples_ddim = batch_params.images
+
+ def infotext(index=0, use_main_prompt=False):
+ return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
+
+ save_samples = p.save_samples()
+
+ for i, x_sample in enumerate(x_samples_ddim):
+ p.batch_index = i
+
+ x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+ x_sample = x_sample.astype(np.uint8)
+
+ if p.restore_faces:
+ if save_samples and opts.save_images_before_face_restoration:
+ images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
+
+ devices.torch_gc()
+
+ x_sample = modules.face_restoration.restore_faces(x_sample)
+ devices.torch_gc()
+
+ image = Image.fromarray(x_sample)
+
+ if p.scripts is not None:
+ pp = scripts.PostprocessImageArgs(image)
+ p.scripts.postprocess_image(p, pp)
+ image = pp.image
+ if p.color_corrections is not None and i < len(p.color_corrections):
+ if save_samples and opts.save_images_before_color_correction:
+ image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
+ images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
+ image = apply_color_correction(p.color_corrections[i], image)
+
+ image = apply_overlay(image, p.paste_to, i, p.overlay_images)
+
+ if save_samples:
+ images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
+
+ text = infotext(i)
+ infotexts.append(text)
+ if opts.enable_pnginfo:
+ image.info["parameters"] = text
+ output_images.append(image)
+ if save_samples and hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
+ image_mask = p.mask_for_overlay.convert('RGB')
+ image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
+
+ if opts.save_mask:
+ images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
+
+ if opts.save_mask_composite:
+ images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
+
+ if opts.return_mask:
+ output_images.append(image_mask)
+
+ if opts.return_mask_composite:
+ output_images.append(image_mask_composite)
+
+ del x_samples_ddim
+
+ devices.torch_gc()
+
+ state.nextjob()
+
+ p.color_corrections = None
+
+ index_of_first_image = 0
+ unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
+ if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
+ grid = images.image_grid(output_images, p.batch_size)
+
+ if opts.return_grid:
+ text = infotext(use_main_prompt=True)
+ infotexts.insert(0, text)
+ if opts.enable_pnginfo:
+ grid.info["parameters"] = text
+ output_images.insert(0, grid)
+ index_of_first_image = 1
+ if opts.grid_save:
+ images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True)
+
+ if not p.disable_extra_networks and p.extra_network_data:
+ extra_networks.deactivate(p, p.extra_network_data)
+
+ devices.torch_gc()
+
+ res = Processed(
+ p,
+ images_list=output_images,
+ seed=p.all_seeds[0],
+ info=infotexts[0],
+ subseed=p.all_subseeds[0],
+ index_of_first_image=index_of_first_image,
+ infotexts=infotexts,
+ )
+
+ if p.scripts is not None:
+ p.scripts.postprocess(p, res)
+
+ return res
+
+
+def old_hires_fix_first_pass_dimensions(width, height):
+ """old algorithm for auto-calculating first pass size"""
+
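+    # Worked example (hand-computed, illustrative): 1024x768 ->
+    #   scale = sqrt(512*512 / (1024*768)) ~= 0.577
+    #   width  = ceil(0.577 * 1024 / 64) * 64 = 640
+    #   height = ceil(0.577 * 768 / 64) * 64 = 448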
+ desired_pixel_count = 512 * 512
+ actual_pixel_count = width * height
+ scale = math.sqrt(desired_pixel_count / actual_pixel_count)
+ width = math.ceil(scale * width / 64) * 64
+ height = math.ceil(scale * height / 64) * 64
+
+ return width, height
+
+
+@dataclass(repr=False)
+class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
+ enable_hr: bool = False
+ denoising_strength: float = 0.75
+ firstphase_width: int = 0
+ firstphase_height: int = 0
+ hr_scale: float = 2.0
+ hr_upscaler: str = None
+ hr_second_pass_steps: int = 0
+ hr_resize_x: int = 0
+ hr_resize_y: int = 0
+ hr_checkpoint_name: str = None
+ hr_sampler_name: str = None
+ hr_prompt: str = ''
+ hr_negative_prompt: str = ''
+
+ cached_hr_uc = [None, None]
+ cached_hr_c = [None, None]
+
+ hr_checkpoint_info: dict = field(default=None, init=False)
+ hr_upscale_to_x: int = field(default=0, init=False)
+ hr_upscale_to_y: int = field(default=0, init=False)
+ truncate_x: int = field(default=0, init=False)
+ truncate_y: int = field(default=0, init=False)
+ applied_old_hires_behavior_to: tuple = field(default=None, init=False)
+ latent_scale_mode: dict = field(default=None, init=False)
+ hr_c: tuple | None = field(default=None, init=False)
+ hr_uc: tuple | None = field(default=None, init=False)
+ all_hr_prompts: list = field(default=None, init=False)
+ all_hr_negative_prompts: list = field(default=None, init=False)
+ hr_prompts: list = field(default=None, init=False)
+ hr_negative_prompts: list = field(default=None, init=False)
+ hr_extra_network_data: list = field(default=None, init=False)
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ if self.firstphase_width != 0 or self.firstphase_height != 0:
+ self.hr_upscale_to_x = self.width
+ self.hr_upscale_to_y = self.height
+ self.width = self.firstphase_width
+ self.height = self.firstphase_height
+
+ self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc
+ self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c
+
+ def calculate_target_resolution(self):
+ if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
+ self.hr_resize_x = self.width
+ self.hr_resize_y = self.height
+ self.hr_upscale_to_x = self.width
+ self.hr_upscale_to_y = self.height
+
+ self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
+ self.applied_old_hires_behavior_to = (self.width, self.height)
+
+ if self.hr_resize_x == 0 and self.hr_resize_y == 0:
+ self.extra_generation_params["Hires upscale"] = self.hr_scale
+ self.hr_upscale_to_x = int(self.width * self.hr_scale)
+ self.hr_upscale_to_y = int(self.height * self.hr_scale)
+ else:
+ self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"
+
+ if self.hr_resize_y == 0:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ elif self.hr_resize_x == 0:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+ else:
+ target_w = self.hr_resize_x
+ target_h = self.hr_resize_y
+ src_ratio = self.width / self.height
+ dst_ratio = self.hr_resize_x / self.hr_resize_y
+
+ if src_ratio < dst_ratio:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ else:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+
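+                # the image is first upscaled preserving its aspect ratio; truncate_x/y
+                # record the excess (in latent units of opt_f pixels) to crop off later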
+ self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
+ self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
+
+ def init(self, all_prompts, all_seeds, all_subseeds):
+ if self.enable_hr:
+ if self.hr_checkpoint_name:
+ self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
+
+ if self.hr_checkpoint_info is None:
+ raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')
+
+ self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
+
+ if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
+ self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
+
+ if tuple(self.hr_prompt) != tuple(self.prompt):
+ self.extra_generation_params["Hires prompt"] = self.hr_prompt
+
+ if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
+ self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
+
+ self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+ if self.enable_hr and self.latent_scale_mode is None:
+ if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
+ raise Exception(f"could not find upscaler named {self.hr_upscaler}")
+
+ self.calculate_target_resolution()
+
+ if not state.processing_has_refined_job_count:
+ if state.job_count == -1:
+ state.job_count = self.n_iter
+
+ shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
+ state.job_count = state.job_count * 2
+ state.processing_has_refined_job_count = True
+
+ if self.hr_second_pass_steps:
+ self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps
+
+ if self.hr_upscaler is not None:
+ self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
+
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
+ self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+
+ x = self.rng.next()
+ samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
+ del x
+
+ if not self.enable_hr:
+ return samples
+
+ if self.latent_scale_mode is None:
+ decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
+ else:
+ decoded_samples = None
+
+ with sd_models.SkipWritingToConfig():
+ sd_models.reload_model_weights(info=self.hr_checkpoint_info)
+
+ devices.torch_gc()
+
+ return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
+
+ def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
+ if shared.state.interrupted:
+ return samples
+
+ self.is_hr_pass = True
+
+ target_width = self.hr_upscale_to_x
+ target_height = self.hr_upscale_to_y
+
+ def save_intermediate(image, index):
+            """saves the image before applying hires fix, if enabled in options; takes as an argument either an image or a batch of latent space images"""
+
+ if not self.save_samples() or not opts.save_images_before_highres_fix:
+ return
+
+ if not isinstance(image, Image.Image):
+ image = sd_samplers.sample_to_image(image, index, approximation=0)
+
+ info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
+
+ img2img_sampler_name = self.hr_sampler_name or self.sampler_name
+
+ self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
+
+ if self.latent_scale_mode is not None:
+ for i in range(samples.shape[0]):
+ save_intermediate(samples, i)
+
+ samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=self.latent_scale_mode["mode"], antialias=self.latent_scale_mode["antialias"])
+
+ # Avoid making the inpainting conditioning unless necessary as
+ # this does need some extra compute to decode / encode the image again.
+ if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
+ image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
+ else:
+ image_conditioning = self.txt2img_image_conditioning(samples)
+ else:
+ lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
+
+ batch_images = []
+ for i, x_sample in enumerate(lowres_samples):
+ x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+ x_sample = x_sample.astype(np.uint8)
+ image = Image.fromarray(x_sample)
+
+ save_intermediate(image, i)
+
+ image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
+ image = np.array(image).astype(np.float32) / 255.0
+ image = np.moveaxis(image, 2, 0)
+ batch_images.append(image)
+
+ decoded_samples = torch.from_numpy(np.array(batch_images))
+ decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)
+
+ if opts.sd_vae_encode_method != 'Full':
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+ samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))
+
+ image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
+
+ shared.state.nextjob()
+
+ samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
+
+ self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w)
+ noise = self.rng.next()
+
+ # GC now before running the next img2img to prevent running out of memory
+ devices.torch_gc()
+
+ if not self.disable_extra_networks:
+ with devices.autocast():
+ extra_networks.activate(self, self.hr_extra_network_data)
+
+ with devices.autocast():
+ self.calculate_hr_conds()
+
+ sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True))
+
+ if self.scripts is not None:
+ self.scripts.before_hr(self)
+
+ samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
+
+ sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())
+
+ self.sampler = None
+ devices.torch_gc()
+
+ decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)
+
+ self.is_hr_pass = False
+
+ return decoded_samples
+
+ def close(self):
+ super().close()
+ self.hr_c = None
+ self.hr_uc = None
+ if not opts.persistent_cond_cache:
+ StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
+ StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]
+
+ def setup_prompts(self):
+ super().setup_prompts()
+
+ if not self.enable_hr:
+ return
+
+ if self.hr_prompt == '':
+ self.hr_prompt = self.prompt
+
+ if self.hr_negative_prompt == '':
+ self.hr_negative_prompt = self.negative_prompt
+
+ if isinstance(self.hr_prompt, list):
+ self.all_hr_prompts = self.hr_prompt
+ else:
+ self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]
+
+ if isinstance(self.hr_negative_prompt, list):
+ self.all_hr_negative_prompts = self.hr_negative_prompt
+ else:
+ self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
+
+ self.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_hr_prompts]
+ self.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_hr_negative_prompts]
+
+ def calculate_hr_conds(self):
+ if self.hr_c is not None:
+ return
+
+ hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y)
+ hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True)
+
+ sampler_config = sd_samplers.find_sampler_config(self.hr_sampler_name or self.sampler_name)
+ steps = self.hr_second_pass_steps or self.steps
+ total_steps = sampler_config.total_steps(steps) if sampler_config else steps
+
+ self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.firstpass_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data, total_steps)
+ self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.firstpass_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data, total_steps)
+
+ def setup_conds(self):
+ if self.is_hr_pass:
+            # if we are in the hr pass right now, the call is being made from the refiner, and we don't need to set up firstpass conds or switch the model
+ self.hr_c = None
+ self.calculate_hr_conds()
+ return
+
+ super().setup_conds()
+
+ self.hr_uc = None
+ self.hr_c = None
+
+ if self.enable_hr and self.hr_checkpoint_info is None:
+ if shared.opts.hires_fix_use_firstpass_conds:
+ self.calculate_hr_conds()
+
+ elif lowvram.is_enabled(shared.sd_model) and shared.sd_model.sd_checkpoint_info == sd_models.select_checkpoint(): # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
+ with devices.autocast():
+ extra_networks.activate(self, self.hr_extra_network_data)
+
+ self.calculate_hr_conds()
+
+ with devices.autocast():
+ extra_networks.activate(self, self.extra_network_data)
+
+ def get_conds(self):
+ if self.is_hr_pass:
+ return self.hr_c, self.hr_uc
+
+ return super().get_conds()
+
+ def parse_extra_network_prompts(self):
+ res = super().parse_extra_network_prompts()
+
+ if self.enable_hr:
+ self.hr_prompts = self.all_hr_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
+ self.hr_negative_prompts = self.all_hr_negative_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
+
+ self.hr_prompts, self.hr_extra_network_data = extra_networks.parse_prompts(self.hr_prompts)
+
+ return res
+
+
+@dataclass(repr=False)
+class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
+ init_images: list = None
+ resize_mode: int = 0
+ denoising_strength: float = 0.75
+ image_cfg_scale: float = None
+ mask: Any = None
+ mask_blur_x: int = 4
+ mask_blur_y: int = 4
+ mask_blur: int = None
+ inpainting_fill: int = 0
+ inpaint_full_res: bool = True
+ inpaint_full_res_padding: int = 0
+ inpainting_mask_invert: int = 0
+ initial_noise_multiplier: float = None
+ latent_mask: Image = None
+
+ image_mask: Any = field(default=None, init=False)
+
+ nmask: torch.Tensor = field(default=None, init=False)
+ image_conditioning: torch.Tensor = field(default=None, init=False)
+ init_img_hash: str = field(default=None, init=False)
+ mask_for_overlay: Image = field(default=None, init=False)
+ init_latent: torch.Tensor = field(default=None, init=False)
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self.image_mask = self.mask
+ self.mask = None
+ self.initial_noise_multiplier = opts.initial_noise_multiplier if self.initial_noise_multiplier is None else self.initial_noise_multiplier
+
+ @property
+ def mask_blur(self):
+ if self.mask_blur_x == self.mask_blur_y:
+ return self.mask_blur_x
+ return None
+
+ @mask_blur.setter
+ def mask_blur(self, value):
+ if isinstance(value, int):
+ self.mask_blur_x = value
+ self.mask_blur_y = value
+
+ def init(self, all_prompts, all_seeds, all_subseeds):
+ self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
+
+ self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+ crop_region = None
+
+ image_mask = self.image_mask
+
+ if image_mask is not None:
+ # image_mask is passed in as RGBA by Gradio to support alpha masks,
+ # but we still want to support binary masks.
+ image_mask = create_binary_mask(image_mask)
+
+ if self.inpainting_mask_invert:
+ image_mask = ImageOps.invert(image_mask)
+
+ if self.mask_blur_x > 0:
+ np_mask = np.array(image_mask)
+ kernel_size = 2 * int(2.5 * self.mask_blur_x + 0.5) + 1
+ np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
+ image_mask = Image.fromarray(np_mask)
+
+ if self.mask_blur_y > 0:
+ np_mask = np.array(image_mask)
+ kernel_size = 2 * int(2.5 * self.mask_blur_y + 0.5) + 1
+ np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
+ image_mask = Image.fromarray(np_mask)
+
+ if self.inpaint_full_res:
+ self.mask_for_overlay = image_mask
+ mask = image_mask.convert('L')
+ crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
+ crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
+ x1, y1, x2, y2 = crop_region
+
+ mask = mask.crop(crop_region)
+ image_mask = images.resize_image(2, mask, self.width, self.height)
+ self.paste_to = (x1, y1, x2-x1, y2-y1)
+ else:
+ image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+ np_mask = np.array(image_mask)
+ np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
+ self.mask_for_overlay = Image.fromarray(np_mask)
+
+ self.overlay_images = []
+
+ latent_mask = self.latent_mask if self.latent_mask is not None else image_mask
+
+ add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
+ if add_color_corrections:
+ self.color_corrections = []
+ imgs = []
+ for img in self.init_images:
+
+ # Save init image
+ if opts.save_init_img:
+ self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
+ images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)
+
+ image = images.flatten(img, opts.img2img_background_color)
+
+ if crop_region is None and self.resize_mode != 3:
+ image = images.resize_image(self.resize_mode, image, self.width, self.height)
+
+ if image_mask is not None:
+ image_masked = Image.new('RGBa', (image.width, image.height))
+ image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
+
+ self.overlay_images.append(image_masked.convert('RGBA'))
+
+ # crop_region is not None if we are doing inpaint full res
+ if crop_region is not None:
+ image = image.crop(crop_region)
+ image = images.resize_image(2, image, self.width, self.height)
+
+ if image_mask is not None:
+ if self.inpainting_fill != 1:
+ image = masking.fill(image, latent_mask)
+
+ if add_color_corrections:
+ self.color_corrections.append(setup_color_correction(image))
+
+ image = np.array(image).astype(np.float32) / 255.0
+ image = np.moveaxis(image, 2, 0)
+
+ imgs.append(image)
+
+ if len(imgs) == 1:
+ batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
+ if self.overlay_images is not None:
+ self.overlay_images = self.overlay_images * self.batch_size
+
+ if self.color_corrections is not None and len(self.color_corrections) == 1:
+ self.color_corrections = self.color_corrections * self.batch_size
+
+ elif len(imgs) <= self.batch_size:
+ self.batch_size = len(imgs)
+ batch_images = np.array(imgs)
+ else:
+ raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
+
+ image = torch.from_numpy(batch_images)
+ image = image.to(shared.device, dtype=devices.dtype_vae)
+
+ if opts.sd_vae_encode_method != 'Full':
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+
+ self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
+ devices.torch_gc()
+
+ if self.resize_mode == 3:
+ self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+
+ if image_mask is not None:
+ init_mask = latent_mask
+ latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
+ latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
+ latmask = latmask[0]
+ latmask = np.around(latmask)
+ latmask = np.tile(latmask[None], (4, 1, 1))
+
+ self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
+ self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
+
+ # this needs to be fixed to be done in sample() using actual seeds for batches
+ if self.inpainting_fill == 2:
+ self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
+ elif self.inpainting_fill == 3:
+ self.init_latent = self.init_latent * self.mask
+
+ self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_mask)
+
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
+ x = self.rng.next()
+
+ if self.initial_noise_multiplier != 1.0:
+ self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
+ x *= self.initial_noise_multiplier
+
+ samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
+
+ if self.mask is not None:
+ samples = samples * self.nmask + self.init_latent * self.mask
+
+ del x
+ devices.torch_gc()
+
+ return samples
+
+ def get_token_merging_ratio(self, for_hr=False):
+ return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio
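
The two `cv2.GaussianBlur` calls in `init()` above form a separable blur: the mask is blurred along x and then y independently so the two radii can differ, with the kernel width derived from sigma via `2 * int(2.5 * sigma + 0.5) + 1` (an odd kernel spanning roughly 2.5 sigma each side). A minimal standalone sketch of that step, on a toy mask:

```python
import numpy as np
import cv2

def blur_mask(np_mask: np.ndarray, blur_x: int, blur_y: int) -> np.ndarray:
    """Separable Gaussian blur as used in StableDiffusionProcessingImg2Img.init()."""
    if blur_x > 0:
        k = 2 * int(2.5 * blur_x + 0.5) + 1                    # odd kernel width
        np_mask = cv2.GaussianBlur(np_mask, (k, 1), blur_x)    # horizontal pass only
    if blur_y > 0:
        k = 2 * int(2.5 * blur_y + 0.5) + 1
        np_mask = cv2.GaussianBlur(np_mask, (1, k), blur_y)    # vertical pass only
    return np_mask

mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 255                                       # toy square mask
print(blur_mask(mask, blur_x=4, blur_y=8).shape)               # (64, 64), edges softened
```
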
diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py
new file mode 100644
index 0000000000000000000000000000000000000000..00e035053ad18c18092499a8a1a63faeab397e8e
--- /dev/null
+++ b/modules/processing_scripts/refiner.py
@@ -0,0 +1,49 @@
+import gradio as gr
+
+from modules import scripts, sd_models
+from modules.ui_common import create_refresh_button
+from modules.ui_components import InputAccordion
+
+
+class ScriptRefiner(scripts.ScriptBuiltinUI):
+ section = "accordions"
+ create_group = False
+
+ def __init__(self):
+ pass
+
+ def title(self):
+ return "Refiner"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner:
+ with gr.Row():
+ refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation")
+ create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh"))
+
+ refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
+
+ def lookup_checkpoint(title):
+ info = sd_models.get_closet_checkpoint_match(title)
+ return None if info is None else info.title
+
+ self.infotext_fields = [
+ (enable_refiner, lambda d: 'Refiner' in d),
+ (refiner_checkpoint, lambda d: lookup_checkpoint(d.get('Refiner'))),
+ (refiner_switch_at, 'Refiner switch at'),
+ ]
+
+ return enable_refiner, refiner_checkpoint, refiner_switch_at
+
+ def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at):
+ # the actual implementation is in sd_samplers_common.py, apply_refiner
+
+ if not enable_refiner or refiner_checkpoint in (None, "", "None"):
+ p.refiner_checkpoint = None
+ p.refiner_switch_at = None
+ else:
+ p.refiner_checkpoint = refiner_checkpoint
+ p.refiner_switch_at = refiner_switch_at
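
The infotext round-trip above operates on the parsed infotext as a dict of key to string: the checkbox lambda keys off the presence of `Refiner`, the dropdown lambda re-resolves the stored title through `get_closet_checkpoint_match`, and the slider maps a plain field. A sketch with a hypothetical parsed dict (values illustrative):

```python
d = {"Refiner": "example_refiner_model", "Refiner switch at": "0.8"}  # hypothetical infotext

enable_refiner = "Refiner" in d            # True -> ticks the accordion checkbox
checkpoint_title = d.get("Refiner")        # resolved via get_closet_checkpoint_match in the UI
switch_at = float(d["Refiner switch at"])  # plain field, parsed to 0.8
print(enable_refiner, checkpoint_title, switch_at)
```
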
diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
new file mode 100644
index 0000000000000000000000000000000000000000..265087fd85e48764e3fad5493afbacddabf7b2fb
--- /dev/null
+++ b/modules/processing_scripts/seed.py
@@ -0,0 +1,111 @@
+import json
+
+import gradio as gr
+
+from modules import scripts, ui, errors
+from modules.shared import cmd_opts
+from modules.ui_components import ToolButton
+
+
+class ScriptSeed(scripts.ScriptBuiltinUI):
+ section = "seed"
+ create_group = False
+
+ def __init__(self):
+ self.seed = None
+ self.reuse_seed = None
+ self.reuse_subseed = None
+
+ def title(self):
+ return "Seed"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ with gr.Row(elem_id=self.elem_id("seed_row")):
+ if cmd_opts.use_textbox_seed:
+ self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed"), min_width=100)
+ else:
+ self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), min_width=100, precision=0)
+
+ random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed')
+ reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed')
+
+ seed_checkbox = gr.Checkbox(label='Extra', elem_id=self.elem_id("subseed_show"), value=False)
+
+ with gr.Group(visible=False, elem_id=self.elem_id("seed_extras")) as seed_extras:
+ with gr.Row(elem_id=self.elem_id("subseed_row")):
+ subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), precision=0)
+ random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed"))
+ reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed"))
+ subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=self.elem_id("subseed_strength"))
+
+ with gr.Row(elem_id=self.elem_id("seed_resize_from_row")):
+ seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=self.elem_id("seed_resize_from_w"))
+ seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=self.elem_id("seed_resize_from_h"))
+
+ random_seed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("seed") + "')}", show_progress=False, inputs=[], outputs=[])
+ random_subseed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("subseed") + "')}", show_progress=False, inputs=[], outputs=[])
+
+ seed_checkbox.change(lambda x: gr.update(visible=x), show_progress=False, inputs=[seed_checkbox], outputs=[seed_extras])
+
+ self.infotext_fields = [
+ (self.seed, "Seed"),
+ (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d),
+ (subseed, "Variation seed"),
+ (subseed_strength, "Variation seed strength"),
+ (seed_resize_from_w, "Seed resize from-1"),
+ (seed_resize_from_h, "Seed resize from-2"),
+ ]
+
+ self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_seed, x.component, False), elem_id=f'generation_info_{self.tabname}')
+ self.on_after_component(lambda x: connect_reuse_seed(subseed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}')
+
+ return self.seed, seed_checkbox, subseed, subseed_strength, seed_resize_from_w, seed_resize_from_h
+
+ def setup(self, p, seed, seed_checkbox, subseed, subseed_strength, seed_resize_from_w, seed_resize_from_h):
+ p.seed = seed
+
+ if seed_checkbox and subseed_strength > 0:
+ p.subseed = subseed
+ p.subseed_strength = subseed_strength
+
+ if seed_checkbox and seed_resize_from_w > 0 and seed_resize_from_h > 0:
+ p.seed_resize_from_w = seed_resize_from_w
+ p.seed_resize_from_h = seed_resize_from_h
+
+
+
+def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, is_subseed):
+ """ Connects a 'reuse (sub)seed' button's click event so that it copies last used
+ (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
+ was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
+
+ def copy_seed(gen_info_string: str, index):
+ res = -1
+
+ try:
+ gen_info = json.loads(gen_info_string)
+ index -= gen_info.get('index_of_first_image', 0)
+
+ if is_subseed and gen_info.get('subseed_strength', 0) > 0:
+ all_subseeds = gen_info.get('all_subseeds', [-1])
+ res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
+ else:
+ all_seeds = gen_info.get('all_seeds', [-1])
+ res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
+
+ except json.decoder.JSONDecodeError:
+ if gen_info_string:
+ errors.report(f"Error parsing JSON generation info: {gen_info_string}")
+
+ return [res, gr.update()]
+
+ reuse_seed.click(
+ fn=copy_seed,
+ _js="(x, y) => [x, selected_gallery_index()]",
+ show_progress=False,
+ inputs=[generation_info, seed],
+ outputs=[seed, seed]
+ )
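
The index arithmetic in `copy_seed` accounts for the grid image that can precede the real images in the gallery. A worked example with a hypothetical generation-info payload:

```python
import json

# Hypothetical payload: image 0 is the grid, so index_of_first_image is 1.
gen_info_string = json.dumps({
    "index_of_first_image": 1,
    "all_seeds": [111, 222],
    "all_subseeds": [333, 444],
    "subseed_strength": 0.5,
})

gen_info = json.loads(gen_info_string)
index = 2 - gen_info.get("index_of_first_image", 0)   # user clicked gallery item 2
print(gen_info["all_seeds"][index])                   # 222 (reuse seed)
print(gen_info["all_subseeds"][index])                # 444 (reuse subseed, strength > 0)
```
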
diff --git a/modules/progress.py b/modules/progress.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc8bb9996d9d0969933f5f375e5b5c3847083e56
--- /dev/null
+++ b/modules/progress.py
@@ -0,0 +1,134 @@
+import base64
+import io
+import time
+
+import gradio as gr
+from pydantic import BaseModel, Field
+
+from modules.shared import opts
+
+import modules.shared as shared
+
+
+current_task = None
+pending_tasks = {}
+finished_tasks = []
+recorded_results = []
+recorded_results_limit = 2
+
+
+def start_task(id_task):
+ global current_task
+
+ current_task = id_task
+ pending_tasks.pop(id_task, None)
+
+
+def finish_task(id_task):
+ global current_task
+
+ if current_task == id_task:
+ current_task = None
+
+ finished_tasks.append(id_task)
+ if len(finished_tasks) > 16:
+ finished_tasks.pop(0)
+
+
+def record_results(id_task, res):
+ recorded_results.append((id_task, res))
+ if len(recorded_results) > recorded_results_limit:
+ recorded_results.pop(0)
+
+
+def add_task_to_queue(id_job):
+ pending_tasks[id_job] = time.time()
+
+
+class ProgressRequest(BaseModel):
+ id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for")
+    id_live_preview: int = Field(default=-1, title="Live preview image ID", description="id of the last received live preview image")
+ live_preview: bool = Field(default=True, title="Include live preview", description="boolean flag indicating whether to include the live preview image")
+
+
+class ProgressResponse(BaseModel):
+ active: bool = Field(title="Whether the task is being worked on right now")
+ queued: bool = Field(title="Whether the task is in queue")
+ completed: bool = Field(title="Whether the task has already finished")
+ progress: float = Field(default=None, title="Progress", description="The progress with a range of 0 to 1")
+ eta: float = Field(default=None, title="ETA in secs")
+ live_preview: str = Field(default=None, title="Live preview image", description="Current live preview; a data: uri")
+ id_live_preview: int = Field(default=None, title="Live preview image ID", description="Send this together with next request to prevent receiving same image")
+ textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
+
+
+def setup_progress_api(app):
+ return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse)
+
+
+def progressapi(req: ProgressRequest):
+ active = req.id_task == current_task
+ queued = req.id_task in pending_tasks
+ completed = req.id_task in finished_tasks
+
+ if not active:
+ textinfo = "Waiting..."
+ if queued:
+ sorted_queued = sorted(pending_tasks.keys(), key=lambda x: pending_tasks[x])
+ queue_index = sorted_queued.index(req.id_task)
+ textinfo = "In queue: {}/{}".format(queue_index + 1, len(sorted_queued))
+ return ProgressResponse(active=active, queued=queued, completed=completed, id_live_preview=-1, textinfo=textinfo)
+
+ progress = 0
+
+ job_count, job_no = shared.state.job_count, shared.state.job_no
+ sampling_steps, sampling_step = shared.state.sampling_steps, shared.state.sampling_step
+
+ if job_count > 0:
+ progress += job_no / job_count
+ if sampling_steps > 0 and job_count > 0:
+ progress += 1 / job_count * sampling_step / sampling_steps
+
+ progress = min(progress, 1)
+
+ elapsed_since_start = time.time() - shared.state.time_start
+ predicted_duration = elapsed_since_start / progress if progress > 0 else None
+ eta = predicted_duration - elapsed_since_start if predicted_duration is not None else None
+
+ live_preview = None
+ id_live_preview = req.id_live_preview
+
+ if opts.live_previews_enable and req.live_preview:
+ shared.state.set_current_image()
+ if shared.state.id_live_preview != req.id_live_preview:
+ image = shared.state.current_image
+ if image is not None:
+ buffered = io.BytesIO()
+
+ if opts.live_previews_image_format == "png":
+ # using optimize for large images takes an enormous amount of time
+ if max(*image.size) <= 256:
+ save_kwargs = {"optimize": True}
+ else:
+ save_kwargs = {"optimize": False, "compress_level": 1}
+
+ else:
+ save_kwargs = {}
+
+ image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
+ base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+ live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
+ id_live_preview = shared.state.id_live_preview
+
+ return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo)
+
+
+def restore_progress(id_task):
+ while id_task == current_task or id_task in pending_tasks:
+ time.sleep(0.1)
+
+ res = next(iter([x[1] for x in recorded_results if id_task == x[0]]), None)
+ if res is not None:
+ return res
+
+ return gr.update(), gr.update(), gr.update(), f"Couldn't restore progress for {id_task}: results either have been discarded or never were obtained"
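
The progress figure in `progressapi` is the fraction of completed jobs plus the in-flight job's sampling fraction, and the ETA extrapolates elapsed time linearly from that figure. A worked example with made-up numbers:

```python
job_no, job_count = 1, 4                 # second job of four
sampling_step, sampling_steps = 10, 20   # halfway through the current job

progress = job_no / job_count + (1 / job_count) * (sampling_step / sampling_steps)
print(progress)                          # 0.375

elapsed = 30.0                           # seconds since state.time_start (assumed)
predicted_duration = elapsed / progress  # 80.0 seconds predicted total
print(predicted_duration - elapsed)      # 50.0 seconds of ETA remaining
```
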
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea0421dec0fa81a3f1eced529fab32626ab2c9f2
--- /dev/null
+++ b/modules/prompt_parser.py
@@ -0,0 +1,465 @@
+from __future__ import annotations
+
+import re
+from collections import namedtuple
+from typing import List
+import lark
+
+# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
+# will be represented with prompt_schedule like this (assuming steps=100):
+# [25, 'fantasy landscape with a mountain and an oak in foreground shoddy']
+# [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy']
+# [60, 'fantasy landscape with a lake and an oak in foreground in background masterful']
+# [75, 'fantasy landscape with a lake and an oak in background masterful']
+# [100, 'fantasy landscape with a lake and a christmas tree in background masterful']
+
+schedule_parser = lark.Lark(r"""
+!start: (prompt | /[][():]/+)*
+prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)*
+!emphasized: "(" prompt ")"
+ | "(" prompt ":" prompt ")"
+ | "[" prompt "]"
+scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER [WHITESPACE] "]"
+alternate: "[" prompt ("|" [prompt])+ "]"
+WHITESPACE: /\s+/
+plain: /([^\\\[\]():|]|\\.)+/
+%import common.SIGNED_NUMBER -> NUMBER
+""")
+
+def get_learned_conditioning_prompt_schedules(prompts, base_steps, hires_steps=None, use_old_scheduling=False):
+ """
+ >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0]
+ >>> g("test")
+ [[10, 'test']]
+ >>> g("a [b:3]")
+ [[3, 'a '], [10, 'a b']]
+ >>> g("a [b: 3]")
+ [[3, 'a '], [10, 'a b']]
+ >>> g("a [[[b]]:2]")
+ [[2, 'a '], [10, 'a [[b]]']]
+ >>> g("[(a:2):3]")
+ [[3, ''], [10, '(a:2)']]
+ >>> g("a [b : c : 1] d")
+ [[1, 'a b d'], [10, 'a c d']]
+ >>> g("a[b:[c:d:2]:1]e")
+ [[1, 'abe'], [2, 'ace'], [10, 'ade']]
+ >>> g("a [unbalanced")
+ [[10, 'a [unbalanced']]
+ >>> g("a [b:.5] c")
+ [[5, 'a c'], [10, 'a b c']]
+ >>> g("a [{b|d{:.5] c") # not handling this right now
+ [[5, 'a c'], [10, 'a {b|d{ c']]
+ >>> g("((a][:b:c [d:3]")
+ [[3, '((a][:b:c '], [10, '((a][:b:c d']]
+ >>> g("[a|(b:1.1)]")
+ [[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']]
+ >>> g("[fe|]male")
+ [[1, 'female'], [2, 'male'], [3, 'female'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'female'], [8, 'male'], [9, 'female'], [10, 'male']]
+ >>> g("[fe|||]male")
+ [[1, 'female'], [2, 'male'], [3, 'male'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'male'], [8, 'male'], [9, 'female'], [10, 'male']]
+ >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10, 10)[0]
+ >>> g("a [b:.5] c")
+ [[10, 'a b c']]
+ >>> g("a [b:1.5] c")
+ [[5, 'a c'], [10, 'a b c']]
+ """
+
+ if hires_steps is None or use_old_scheduling:
+ int_offset = 0
+ flt_offset = 0
+ steps = base_steps
+ else:
+ int_offset = base_steps
+ flt_offset = 1.0
+ steps = hires_steps
+
+ def collect_steps(steps, tree):
+ res = [steps]
+
+ class CollectSteps(lark.Visitor):
+ def scheduled(self, tree):
+ s = tree.children[-2]
+ v = float(s)
+ if use_old_scheduling:
+ v = v*steps if v<1 else v
+ else:
+ if "." in s:
+ v = (v - flt_offset) * steps
+ else:
+ v = (v - int_offset)
+ tree.children[-2] = min(steps, int(v))
+ if tree.children[-2] >= 1:
+ res.append(tree.children[-2])
+
+ def alternate(self, tree):
+ res.extend(range(1, steps+1))
+
+ CollectSteps().visit(tree)
+ return sorted(set(res))
+
+ def at_step(step, tree):
+ class AtStep(lark.Transformer):
+ def scheduled(self, args):
+ before, after, _, when, _ = args
+ yield before or () if step <= when else after
+ def alternate(self, args):
+ args = ["" if not arg else arg for arg in args]
+ yield args[(step - 1) % len(args)]
+ def start(self, args):
+ def flatten(x):
+ if isinstance(x, str):
+ yield x
+ else:
+ for gen in x:
+ yield from flatten(gen)
+ return ''.join(flatten(args))
+ def plain(self, args):
+ yield args[0].value
+ def __default__(self, data, children, meta):
+ for child in children:
+ yield child
+ return AtStep().transform(tree)
+
+ def get_schedule(prompt):
+ try:
+ tree = schedule_parser.parse(prompt)
+ except lark.exceptions.LarkError:
+ if 0:
+ import traceback
+ traceback.print_exc()
+ return [[steps, prompt]]
+ return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)]
+
+ promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)}
+ return [promptdict[prompt] for prompt in prompts]
+
+
+ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
+
+
+class SdConditioning(list):
+ """
+ A list with prompts for stable diffusion's conditioner model.
+ Can also specify width and height of created image - SDXL needs it.
+ """
+ def __init__(self, prompts, is_negative_prompt=False, width=None, height=None, copy_from=None):
+ super().__init__()
+ self.extend(prompts)
+
+ if copy_from is None:
+ copy_from = prompts
+
+ self.is_negative_prompt = is_negative_prompt or getattr(copy_from, 'is_negative_prompt', False)
+ self.width = width or getattr(copy_from, 'width', None)
+ self.height = height or getattr(copy_from, 'height', None)
+
+
+
+def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps, hires_steps=None, use_old_scheduling=False):
+ """converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the comdition (cond),
+ and the sampling step at which this condition is to be replaced by the next one.
+
+ Input:
+ (model, ['a red crown', 'a [blue:green:5] jeweled crown'], 20)
+
+ Output:
+ [
+ [
+ ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0523, ..., -0.4901, -0.3066, 0.0674], ..., [ 0.3317, -0.5102, -0.4066, ..., 0.4119, -0.7647, -1.0160]], device='cuda:0'))
+ ],
+ [
+ ScheduledPromptConditioning(end_at_step=5, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.0192, 0.3867, -0.4644, ..., 0.1135, -0.3696, -0.4625]], device='cuda:0')),
+ ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.7352, -0.4356, -0.7888, ..., 0.6994, -0.4312, -1.2593]], device='cuda:0'))
+ ]
+ ]
+ """
+ res = []
+
+ prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps, hires_steps, use_old_scheduling)
+ cache = {}
+
+ for prompt, prompt_schedule in zip(prompts, prompt_schedules):
+
+ cached = cache.get(prompt, None)
+ if cached is not None:
+ res.append(cached)
+ continue
+
+ texts = SdConditioning([x[1] for x in prompt_schedule], copy_from=prompts)
+ conds = model.get_learned_conditioning(texts)
+
+ cond_schedule = []
+ for i, (end_at_step, _) in enumerate(prompt_schedule):
+ if isinstance(conds, dict):
+ cond = {k: v[i] for k, v in conds.items()}
+ else:
+ cond = conds[i]
+
+ cond_schedule.append(ScheduledPromptConditioning(end_at_step, cond))
+
+ cache[prompt] = cond_schedule
+ res.append(cond_schedule)
+
+ return res
+
+
+re_AND = re.compile(r"\bAND\b")
+re_weight = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
+
+
+def get_multicond_prompt_list(prompts: SdConditioning | list[str]):
+ res_indexes = []
+
+ prompt_indexes = {}
+ prompt_flat_list = SdConditioning(prompts)
+ prompt_flat_list.clear()
+
+ for prompt in prompts:
+ subprompts = re_AND.split(prompt)
+
+ indexes = []
+ for subprompt in subprompts:
+ match = re_weight.search(subprompt)
+
+ text, weight = match.groups() if match is not None else (subprompt, 1.0)
+
+ weight = float(weight) if weight is not None else 1.0
+
+ index = prompt_indexes.get(text, None)
+ if index is None:
+ index = len(prompt_flat_list)
+ prompt_flat_list.append(text)
+ prompt_indexes[text] = index
+
+ indexes.append((index, weight))
+
+ res_indexes.append(indexes)
+
+ return res_indexes, prompt_flat_list, prompt_indexes
+
+
+class ComposableScheduledPromptConditioning:
+ def __init__(self, schedules, weight=1.0):
+ self.schedules: List[ScheduledPromptConditioning] = schedules
+ self.weight: float = weight
+
+
+class MulticondLearnedConditioning:
+ def __init__(self, shape, batch):
+ self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
+ self.batch: List[List[ComposableScheduledPromptConditioning]] = batch
+
+
+def get_multicond_learned_conditioning(model, prompts, steps, hires_steps=None, use_old_scheduling=False) -> MulticondLearnedConditioning:
+ """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt.
+ For each prompt, the list is obtained by splitting the prompt using the AND separator.
+
+ https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/
+ """
+
+ res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts)
+
+ learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps, hires_steps, use_old_scheduling)
+
+ res = []
+ for indexes in res_indexes:
+ res.append([ComposableScheduledPromptConditioning(learned_conditioning[i], weight) for i, weight in indexes])
+
+ return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
+
+
+class DictWithShape(dict):
+ def __init__(self, x, shape):
+ super().__init__()
+ self.update(x)
+
+ @property
+ def shape(self):
+ return self["crossattn"].shape
+
+
+def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
+ param = c[0][0].cond
+ is_dict = isinstance(param, dict)
+
+ if is_dict:
+ dict_cond = param
+ res = {k: torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) for k, param in dict_cond.items()}
+ res = DictWithShape(res, (len(c),) + dict_cond['crossattn'].shape)
+ else:
+ res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
+
+ for i, cond_schedule in enumerate(c):
+ target_index = 0
+ for current, entry in enumerate(cond_schedule):
+ if current_step <= entry.end_at_step:
+ target_index = current
+ break
+
+ if is_dict:
+ for k, param in cond_schedule[target_index].cond.items():
+ res[k][i] = param
+ else:
+ res[i] = cond_schedule[target_index].cond
+
+ return res
+
+
+def stack_conds(tensors):
+ # if prompts have wildly different lengths above the limit we'll get tensors of different shapes
+ # and won't be able to torch.stack them. So this fixes that.
+ token_count = max([x.shape[0] for x in tensors])
+ for i in range(len(tensors)):
+ if tensors[i].shape[0] != token_count:
+ last_vector = tensors[i][-1:]
+ last_vector_repeated = last_vector.repeat([token_count - tensors[i].shape[0], 1])
+ tensors[i] = torch.vstack([tensors[i], last_vector_repeated])
+
+ return torch.stack(tensors)
+
+
+
+def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
+ param = c.batch[0][0].schedules[0].cond
+
+ tensors = []
+ conds_list = []
+
+ for composable_prompts in c.batch:
+ conds_for_batch = []
+
+ for composable_prompt in composable_prompts:
+ target_index = 0
+ for current, entry in enumerate(composable_prompt.schedules):
+ if current_step <= entry.end_at_step:
+ target_index = current
+ break
+
+ conds_for_batch.append((len(tensors), composable_prompt.weight))
+ tensors.append(composable_prompt.schedules[target_index].cond)
+
+ conds_list.append(conds_for_batch)
+
+ if isinstance(tensors[0], dict):
+ keys = list(tensors[0].keys())
+ stacked = {k: stack_conds([x[k] for x in tensors]) for k in keys}
+ stacked = DictWithShape(stacked, stacked['crossattn'].shape)
+ else:
+ stacked = stack_conds(tensors).to(device=param.device, dtype=param.dtype)
+
+ return conds_list, stacked
+
+
+re_attention = re.compile(r"""
+\\\(|
+\\\)|
+\\\[|
+\\]|
+\\\\|
+\\|
+\(|
+\[|
+:\s*([+-]?[.\d]+)\s*\)|
+\)|
+]|
+[^\\()\[\]:]+|
+:
+""", re.X)
+
+re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
+
+def parse_prompt_attention(text):
+ """
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
+ Accepted tokens are:
+ (abc) - increases attention to abc by a multiplier of 1.1
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
+ [abc] - decreases attention to abc by a multiplier of 1.1
+ \( - literal character '('
+ \[ - literal character '['
+ \) - literal character ')'
+ \] - literal character ']'
+ \\ - literal character '\'
+ anything else - just text
+
+ >>> parse_prompt_attention('normal text')
+ [['normal text', 1.0]]
+ >>> parse_prompt_attention('an (important) word')
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
+ >>> parse_prompt_attention('(unbalanced')
+ [['unbalanced', 1.1]]
+    >>> parse_prompt_attention('\\(literal\\]')
+ [['(literal]', 1.0]]
+ >>> parse_prompt_attention('(unnecessary)(parens)')
+ [['unnecessaryparens', 1.1]]
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
+ [['a ', 1.0],
+ ['house', 1.5730000000000004],
+ [' ', 1.1],
+ ['on', 1.0],
+ [' a ', 1.1],
+ ['hill', 0.55],
+ [', sun, ', 1.1],
+ ['sky', 1.4641000000000006],
+ ['.', 1.1]]
+ """
+
+ res = []
+ round_brackets = []
+ square_brackets = []
+
+ round_bracket_multiplier = 1.1
+ square_bracket_multiplier = 1 / 1.1
+
+ def multiply_range(start_position, multiplier):
+ for p in range(start_position, len(res)):
+ res[p][1] *= multiplier
+
+ for m in re_attention.finditer(text):
+ text = m.group(0)
+ weight = m.group(1)
+
+ if text.startswith('\\'):
+ res.append([text[1:], 1.0])
+ elif text == '(':
+ round_brackets.append(len(res))
+ elif text == '[':
+ square_brackets.append(len(res))
+ elif weight is not None and round_brackets:
+ multiply_range(round_brackets.pop(), float(weight))
+ elif text == ')' and round_brackets:
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
+ elif text == ']' and square_brackets:
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
+ else:
+ parts = re.split(re_break, text)
+ for i, part in enumerate(parts):
+ if i > 0:
+ res.append(["BREAK", -1])
+ res.append([part, 1.0])
+
+ for pos in round_brackets:
+ multiply_range(pos, round_bracket_multiplier)
+
+ for pos in square_brackets:
+ multiply_range(pos, square_bracket_multiplier)
+
+ if len(res) == 0:
+ res = [["", 1.0]]
+
+ # merge runs of identical weights
+ i = 0
+ while i + 1 < len(res):
+ if res[i][1] == res[i + 1][1]:
+ res[i][0] += res[i + 1][0]
+ res.pop(i + 1)
+ else:
+ i += 1
+
+ return res
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
+else:
+ import torch # doctest faster
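
The weights in the `parse_prompt_attention` doctest follow directly from the multipliers above: each `(` contributes a factor of 1.1, each `[` a factor of 1/1.1, an explicit `:weight` replaces the innermost group's multiplier, and unbalanced brackets are applied to everything after them when the string ends. Checking the `'a (((house:1.3)) ...'` example by hand:

```python
mult = 1.1                  # round_bracket_multiplier; '[' uses 1/mult
# "(((house:1.3))" leaves one '(' unclosed, so everything after it picks up an
# extra factor of mult when the leftover bracket is applied at end of string.
print(1.3 * mult * mult)    # 'house': explicit 1.3, one enclosing ')', one leftover '(' -> ~1.573
print(0.5 * mult)           # 'hill': explicit 0.5 inside the leftover '(' -> ~0.55
print(mult ** 4)            # 'sky': three nested '(' plus the leftover one -> ~1.4641
print(mult * (1 / mult))    # 'on': the leftover '(' and one '[' cancel -> 1.0
```
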
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf69d3d2044e07c5fc662742dacc5de8a32fc2ed
--- /dev/null
+++ b/modules/realesrgan_model.py
@@ -0,0 +1,133 @@
+import os
+
+import numpy as np
+from PIL import Image
+from realesrgan import RealESRGANer
+
+from modules.upscaler import Upscaler, UpscalerData
+from modules.shared import cmd_opts, opts
+from modules import modelloader, errors
+
+
+class UpscalerRealESRGAN(Upscaler):
+ def __init__(self, path):
+ self.name = "RealESRGAN"
+ self.user_path = path
+ super().__init__()
+ try:
+ from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401
+ from realesrgan import RealESRGANer # noqa: F401
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401
+ self.enable = True
+ self.scalers = []
+ scalers = self.load_models(path)
+
+ local_model_paths = self.find_models(ext_filter=[".pth"])
+ for scaler in scalers:
+ if scaler.local_data_path.startswith("http"):
+ filename = modelloader.friendly_name(scaler.local_data_path)
+ local_model_candidates = [local_model for local_model in local_model_paths if local_model.endswith(f"{filename}.pth")]
+ if local_model_candidates:
+ scaler.local_data_path = local_model_candidates[0]
+
+ if scaler.name in opts.realesrgan_enabled_models:
+ self.scalers.append(scaler)
+
+ except Exception:
+ errors.report("Error importing Real-ESRGAN", exc_info=True)
+ self.enable = False
+ self.scalers = []
+
+ def do_upscale(self, img, path):
+ if not self.enable:
+ return img
+
+ try:
+ info = self.load_model(path)
+ except Exception:
+ errors.report(f"Unable to load RealESRGAN model {path}", exc_info=True)
+ return img
+
+ upsampler = RealESRGANer(
+ scale=info.scale,
+ model_path=info.local_data_path,
+ model=info.model(),
+ half=not cmd_opts.no_half and not cmd_opts.upcast_sampling,
+ tile=opts.ESRGAN_tile,
+ tile_pad=opts.ESRGAN_tile_overlap,
+ device=self.device,
+ )
+
+ upsampled = upsampler.enhance(np.array(img), outscale=info.scale)[0]
+
+ image = Image.fromarray(upsampled)
+ return image
+
+ def load_model(self, path):
+ for scaler in self.scalers:
+ if scaler.data_path == path:
+ if scaler.local_data_path.startswith("http"):
+ scaler.local_data_path = modelloader.load_file_from_url(
+ scaler.data_path,
+ model_dir=self.model_download_path,
+ )
+ if not os.path.exists(scaler.local_data_path):
+ raise FileNotFoundError(f"RealESRGAN data missing: {scaler.local_data_path}")
+ return scaler
+ raise ValueError(f"Unable to find model info: {path}")
+
+ def load_models(self, _):
+ return get_realesrgan_models(self)
+
+
+def get_realesrgan_models(scaler):
+ try:
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+ models = [
+ UpscalerData(
+ name="R-ESRGAN General 4xV3",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+ scale=4,
+ upscaler=scaler,
+ model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
+ ),
+ UpscalerData(
+ name="R-ESRGAN General WDN 4xV3",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
+ scale=4,
+ upscaler=scaler,
+ model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
+ ),
+ UpscalerData(
+ name="R-ESRGAN AnimeVideo",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+ scale=4,
+ upscaler=scaler,
+ model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
+ ),
+ UpscalerData(
+ name="R-ESRGAN 4x+",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+ scale=4,
+ upscaler=scaler,
+ model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
+ ),
+ UpscalerData(
+ name="R-ESRGAN 4x+ Anime6B",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+ scale=4,
+ upscaler=scaler,
+ model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
+ ),
+ UpscalerData(
+ name="R-ESRGAN 2x+",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+ scale=2,
+ upscaler=scaler,
+ model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
+ ),
+ ]
+ return models
+ except Exception:
+ errors.report("Error making Real-ESRGAN models list", exc_info=True)
diff --git a/modules/restart.py b/modules/restart.py
new file mode 100644
index 0000000000000000000000000000000000000000..18eacaf377ee4f13c5dc3e12d7a2d818143cc69d
--- /dev/null
+++ b/modules/restart.py
@@ -0,0 +1,23 @@
+import os
+from pathlib import Path
+
+from modules.paths_internal import script_path
+
+
+def is_restartable() -> bool:
+ """
+ Return True if the webui is restartable (i.e. there is something watching to restart it with)
+ """
+ return bool(os.environ.get('SD_WEBUI_RESTART'))
+
+
+def restart_program() -> None:
+ """creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again"""
+
+ (Path(script_path) / "tmp" / "restart").touch()
+
+ stop_program()
+
+
+def stop_program() -> None:
+ os._exit(0)
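
`restart_program` relies on an outer launcher that exports `SD_WEBUI_RESTART` and re-runs the process when the marker file appears; webui.bat/webui.sh implement that loop outside this diff. A hypothetical minimal wrapper illustrating the contract (the `launch.py` entry point name is an assumption):

```python
import os
import subprocess
from pathlib import Path

env = dict(os.environ, SD_WEBUI_RESTART="1")           # makes is_restartable() return True
while True:
    subprocess.run(["python", "launch.py"], env=env)   # entry point name assumed
    marker = Path("tmp") / "restart"
    if not marker.exists():
        break                                          # normal exit: stop looping
    marker.unlink()                                    # consume the marker, relaunch
```
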
diff --git a/modules/rng.py b/modules/rng.py
new file mode 100644
index 0000000000000000000000000000000000000000..21113c9449c4a0610b688b9dab615e5a25a70f10
--- /dev/null
+++ b/modules/rng.py
@@ -0,0 +1,170 @@
+import torch
+
+from modules import devices, rng_philox, shared
+
+
+def randn(seed, shape, generator=None):
+ """Generate a tensor with random numbers from a normal distribution using seed.
+
+ Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""
+
+ manual_seed(seed)
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+ if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+ return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+ return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def randn_local(seed, shape):
+ """Generate a tensor with random numbers from a normal distribution using seed.
+
+ Does not change the global random number generator. You can only generate the seed's first tensor using this function."""
+
+ if shared.opts.randn_source == "NV":
+ rng = rng_philox.Generator(seed)
+ return torch.asarray(rng.randn(shape), device=devices.device)
+
+ local_device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+ local_generator = torch.Generator(local_device).manual_seed(int(seed))
+ return torch.randn(shape, device=local_device, generator=local_generator).to(devices.device)
+
+
+def randn_like(x):
+ """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+
+ Use either randn() or manual_seed() to initialize the generator."""
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)
+
+ if shared.opts.randn_source == "CPU" or x.device.type == 'mps':
+ return torch.randn_like(x, device=devices.cpu).to(x.device)
+
+ return torch.randn_like(x)
+
+
+def randn_without_seed(shape, generator=None):
+ """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+
+ Use either randn() or manual_seed() to initialize the generator."""
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+ if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+ return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+ return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def manual_seed(seed):
+ """Set up a global random number generator using the specified seed."""
+
+ if shared.opts.randn_source == "NV":
+ global nv_rng
+ nv_rng = rng_philox.Generator(seed)
+ return
+
+ torch.manual_seed(seed)
+
+
+def create_generator(seed):
+ if shared.opts.randn_source == "NV":
+ return rng_philox.Generator(seed)
+
+ device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+ generator = torch.Generator(device).manual_seed(int(seed))
+ return generator
+
+
+# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
+def slerp(val, low, high):
+ low_norm = low/torch.norm(low, dim=1, keepdim=True)
+ high_norm = high/torch.norm(high, dim=1, keepdim=True)
+ dot = (low_norm*high_norm).sum(1)
+
+ if dot.mean() > 0.9995:
+ return low * val + high * (1 - val)
+
+ omega = torch.acos(dot)
+ so = torch.sin(omega)
+ res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+ return res
+
+
+class ImageRNG:
+ def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
+ self.shape = tuple(map(int, shape))
+ self.seeds = seeds
+ self.subseeds = subseeds
+ self.subseed_strength = subseed_strength
+ self.seed_resize_from_h = seed_resize_from_h
+ self.seed_resize_from_w = seed_resize_from_w
+
+ self.generators = [create_generator(seed) for seed in seeds]
+
+ self.is_first = True
+
+ def first(self):
+ noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8)
+
+ xs = []
+
+ for i, (seed, generator) in enumerate(zip(self.seeds, self.generators)):
+ subnoise = None
+ if self.subseeds is not None and self.subseed_strength != 0:
+ subseed = 0 if i >= len(self.subseeds) else self.subseeds[i]
+ subnoise = randn(subseed, noise_shape)
+
+ if noise_shape != self.shape:
+ noise = randn(seed, noise_shape)
+ else:
+ noise = randn(seed, self.shape, generator=generator)
+
+ if subnoise is not None:
+ noise = slerp(self.subseed_strength, noise, subnoise)
+
+ if noise_shape != self.shape:
+ x = randn(seed, self.shape, generator=generator)
+ dx = (self.shape[2] - noise_shape[2]) // 2
+ dy = (self.shape[1] - noise_shape[1]) // 2
+ w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
+ h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
+ tx = 0 if dx < 0 else dx
+ ty = 0 if dy < 0 else dy
+ dx = max(-dx, 0)
+ dy = max(-dy, 0)
+
+ x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w]
+ noise = x
+
+ xs.append(noise)
+
+ eta_noise_seed_delta = shared.opts.eta_noise_seed_delta or 0
+ if eta_noise_seed_delta:
+ self.generators = [create_generator(seed + eta_noise_seed_delta) for seed in self.seeds]
+
+ return torch.stack(xs).to(shared.device)
+
+ def next(self):
+ if self.is_first:
+ self.is_first = False
+ return self.first()
+
+ xs = []
+ for generator in self.generators:
+ x = randn_without_seed(self.shape, generator=generator)
+ xs.append(x)
+
+ return torch.stack(xs).to(shared.device)
+
+
+devices.randn = randn
+devices.randn_local = randn_local
+devices.randn_like = randn_like
+devices.randn_without_seed = randn_without_seed
+devices.manual_seed = manual_seed
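
`ImageRNG` is the per-batch wrapper over these primitives: `next()` returns the seeded (and optionally subseed-slerped) initial noise on its first call and generator-continued noise afterwards. A usage sketch, assuming the webui environment (`shared.opts`, `devices`) is initialized:

```python
from modules.rng import ImageRNG

rng = ImageRNG(
    shape=(4, 64, 64),        # (latent channels, H//8, W//8) for a 512x512 image
    seeds=[1234, 1235],       # one seed per batch item
    subseeds=[42, 43],
    subseed_strength=0.5,     # slerp halfway toward the variation noise
)
x0 = rng.next()               # first call: seeded initial noise, shape [2, 4, 64, 64]
x1 = rng.next()               # later calls: continue each per-seed generator
print(x0.shape, x1.shape)
```
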
diff --git a/modules/rng_philox.py b/modules/rng_philox.py
new file mode 100644
index 0000000000000000000000000000000000000000..8897dc3ac54beec0eb43e315e2201ccd6613576e
--- /dev/null
+++ b/modules/rng_philox.py
@@ -0,0 +1,102 @@
+"""RNG imitiating torch cuda randn on CPU. You are welcome.
+
+Usage:
+
+```
+g = Generator(seed=0)
+print(g.randn(shape=(3, 4)))
+```
+
+Expected output:
+```
+[[-0.92466259 -0.42534415 -2.6438457 0.14518388]
+ [-0.12086647 -0.57972564 -0.62285122 -0.32838709]
+ [-1.07454231 -0.36314407 -1.67105067 2.26550497]]
+```
+"""
+
+import numpy as np
+
+philox_m = [0xD2511F53, 0xCD9E8D57]
+philox_w = [0x9E3779B9, 0xBB67AE85]
+
+two_pow32_inv = np.array([2.3283064e-10], dtype=np.float32)
+two_pow32_inv_2pi = np.array([2.3283064e-10 * 6.2831855], dtype=np.float32)
+
+
+def uint32(x):
+ """Converts (N,) np.uint64 array into (2, N) np.unit32 array."""
+ return x.view(np.uint32).reshape(-1, 2).transpose(1, 0)
+
+
+def philox4_round(counter, key):
+ """A single round of the Philox 4x32 random number generator."""
+
+ v1 = uint32(counter[0].astype(np.uint64) * philox_m[0])
+ v2 = uint32(counter[2].astype(np.uint64) * philox_m[1])
+
+ counter[0] = v2[1] ^ counter[1] ^ key[0]
+ counter[1] = v2[0]
+ counter[2] = v1[1] ^ counter[3] ^ key[1]
+ counter[3] = v1[0]
+
+
+def philox4_32(counter, key, rounds=10):
+ """Generates 32-bit random numbers using the Philox 4x32 random number generator.
+
+ Parameters:
+ counter (numpy.ndarray): A 4xN array of 32-bit integers representing the counter values (offset into generation).
+ key (numpy.ndarray): A 2xN array of 32-bit integers representing the key values (seed).
+ rounds (int): The number of rounds to perform.
+
+ Returns:
+ numpy.ndarray: A 4xN array of 32-bit integers containing the generated random numbers.
+ """
+
+ for _ in range(rounds - 1):
+ philox4_round(counter, key)
+
+ key[0] = key[0] + philox_w[0]
+ key[1] = key[1] + philox_w[1]
+
+ philox4_round(counter, key)
+ return counter
+
+
+def box_muller(x, y):
+ """Returns just the first out of two numbers generated by Box–Muller transform algorithm."""
+ u = x * two_pow32_inv + two_pow32_inv / 2
+ v = y * two_pow32_inv_2pi + two_pow32_inv_2pi / 2
+
+ s = np.sqrt(-2.0 * np.log(u))
+
+ r1 = s * np.sin(v)
+ return r1.astype(np.float32)
+
+
+class Generator:
+ """RNG that produces same outputs as torch.randn(..., device='cuda') on CPU"""
+
+ def __init__(self, seed):
+ self.seed = seed
+ self.offset = 0
+
+ def randn(self, shape):
+ """Generate a sequence of n standard normal random variables using the Philox 4x32 random number generator and the Box-Muller transform."""
+
+ n = 1
+ for x in shape:
+ n *= x
+
+ counter = np.zeros((4, n), dtype=np.uint32)
+ counter[0] = self.offset
+ counter[2] = np.arange(n, dtype=np.uint32) # up to 2^32 numbers can be generated - if you want more you'd need to spill into counter[3]
+ self.offset += 1
+
+ key = np.empty(n, dtype=np.uint64)
+ key.fill(self.seed)
+ key = uint32(key)
+
+ g = philox4_32(counter, key)
+
+ return box_muller(g[0], g[1]).reshape(shape) # discard g[2] and g[3]
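
Two properties worth checking: the stream depends only on (seed, offset), so equal seeds reproduce equal tensors, and each `randn` call bumps `offset` so consecutive calls differ. A quick sketch, assuming the module imports as `modules.rng_philox`:

```python
import numpy as np
from modules.rng_philox import Generator

g1, g2 = Generator(seed=0), Generator(seed=0)
a, b = g1.randn((2, 3)), g2.randn((2, 3))
assert np.array_equal(a, b)        # same seed, same offset -> identical output

c = g1.randn((2, 3))               # g1.offset advanced to 1
assert not np.array_equal(a, c)    # new counter block -> different numbers
print(a.dtype)                     # float32, matching torch's CUDA randn
```
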
diff --git a/modules/safe.py b/modules/safe.py
new file mode 100644
index 0000000000000000000000000000000000000000..00df31348376ebd7daef9b39f858eace22caa105
--- /dev/null
+++ b/modules/safe.py
@@ -0,0 +1,196 @@
+# this code is adapted from the script contributed by anon from /h/
+
+import pickle
+import collections
+
+import torch
+import numpy
+import _codecs
+import zipfile
+import re
+
+
+# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
+from modules import errors
+
+TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
+
+def encode(*args):
+ out = _codecs.encode(*args)
+ return out
+
+
+class RestrictedUnpickler(pickle.Unpickler):
+ extra_handler = None
+
+ def persistent_load(self, saved_id):
+ assert saved_id[0] == 'storage'
+
+ try:
+ return TypedStorage(_internal=True)
+ except TypeError:
+ return TypedStorage() # PyTorch before 2.0 does not have the _internal argument
+
+ def find_class(self, module, name):
+ if self.extra_handler is not None:
+ res = self.extra_handler(module, name)
+ if res is not None:
+ return res
+
+ if module == 'collections' and name == 'OrderedDict':
+ return getattr(collections, name)
+ if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']:
+ return getattr(torch._utils, name)
+ if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32', 'BFloat16Storage']:
+ return getattr(torch, name)
+ if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
+ return getattr(torch.nn.modules.container, name)
+ if module == 'numpy.core.multiarray' and name in ['scalar', '_reconstruct']:
+ return getattr(numpy.core.multiarray, name)
+ if module == 'numpy' and name in ['dtype', 'ndarray']:
+ return getattr(numpy, name)
+ if module == '_codecs' and name == 'encode':
+ return encode
+ if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
+ import pytorch_lightning.callbacks
+ return pytorch_lightning.callbacks.model_checkpoint
+ if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
+ import pytorch_lightning.callbacks.model_checkpoint
+ return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
+ if module == "__builtin__" and name == 'set':
+ return set
+
+ # Forbid everything else.
+ raise Exception(f"global '{module}/{name}' is forbidden")
+
+
+# Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/<number>'
+allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$")
+data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$")
+
+def check_zip_filenames(filename, names):
+ for name in names:
+ if allowed_zip_names_re.match(name):
+ continue
+
+ raise Exception(f"bad file inside {filename}: {name}")
+
+
+def check_pt(filename, extra_handler):
+ try:
+
+ # new pytorch format is a zip file
+ with zipfile.ZipFile(filename) as z:
+ check_zip_filenames(filename, z.namelist())
+
+        # find filename of data.pkl in zip file: '<directory name>/data.pkl'
+ data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)]
+ if len(data_pkl_filenames) == 0:
+ raise Exception(f"data.pkl not found in {filename}")
+ if len(data_pkl_filenames) > 1:
+ raise Exception(f"Multiple data.pkl found in {filename}")
+ with z.open(data_pkl_filenames[0]) as file:
+ unpickler = RestrictedUnpickler(file)
+ unpickler.extra_handler = extra_handler
+ unpickler.load()
+
+ except zipfile.BadZipfile:
+
+ # if it's not a zip file, it's an old pytorch format, with five objects written to pickle
+ with open(filename, "rb") as file:
+ unpickler = RestrictedUnpickler(file)
+ unpickler.extra_handler = extra_handler
+ for _ in range(5):
+ unpickler.load()
+
+
+def load(filename, *args, **kwargs):
+ return load_with_extra(filename, *args, extra_handler=global_extra_handler, **kwargs)
+
+
+def load_with_extra(filename, extra_handler=None, *args, **kwargs):
+ """
+ this function is intended to be used by extensions that want to load models with
+ some extra classes in them that the usual unpickler would find suspicious.
+
+ Use the extra_handler argument to specify a function that takes module and field name as text,
+ and returns that field's value:
+
+ ```python
+ def extra(module, name):
+ if module == 'collections' and name == 'OrderedDict':
+ return collections.OrderedDict
+
+ return None
+
+ safe.load_with_extra('model.pt', extra_handler=extra)
+ ```
+
+ The alternative to this is just to use safe.unsafe_torch_load('model.pt'), which as the name implies is
+ definitely unsafe.
+ """
+
+ from modules import shared
+
+ try:
+ if not shared.cmd_opts.disable_safe_unpickle:
+ check_pt(filename, extra_handler)
+
+ except pickle.UnpicklingError:
+ errors.report(
+ f"Error verifying pickled file from {filename}\n"
+ "-----> !!!! The file is most likely corrupted !!!! <-----\n"
+ "You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n",
+ exc_info=True,
+ )
+ return None
+ except Exception:
+ errors.report(
+ f"Error verifying pickled file from {filename}\n"
+ f"The file may be malicious, so the program is not going to read it.\n"
+ f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n",
+ exc_info=True,
+ )
+ return None
+
+ return unsafe_torch_load(filename, *args, **kwargs)
+
+
+class Extra:
+ """
+ A class for temporarily setting the global handler for when you can't explicitly call load_with_extra
+ (because it's not your code making the torch.load call). The intended use is like this:
+
+```
+import torch
+from modules import safe
+
+def handler(module, name):
+ if module == 'torch' and name in ['float64', 'float16']:
+ return getattr(torch, name)
+
+ return None
+
+with safe.Extra(handler):
+ x = torch.load('model.pt')
+```
+ """
+
+ def __init__(self, handler):
+ self.handler = handler
+
+ def __enter__(self):
+ global global_extra_handler
+
+ assert global_extra_handler is None, 'already inside an Extra() block'
+ global_extra_handler = self.handler
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ global global_extra_handler
+
+ global_extra_handler = None
+
+
+unsafe_torch_load = torch.load
+torch.load = load
+global_extra_handler = None
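
The allow-list can be exercised directly: a pickle of permitted globals loads, while anything else trips `find_class`. A demonstration sketch (note that importing this module patches `torch.load`, as the last lines above show):

```python
import io
import pickle
import collections

from modules.safe import RestrictedUnpickler

good = pickle.dumps(collections.OrderedDict(a=1))
print(RestrictedUnpickler(io.BytesIO(good)).load())    # an OrderedDict, allowed

class Evil:
    def __reduce__(self):
        import os
        return os.system, ("echo pwned",)              # classic pickle payload

try:
    RestrictedUnpickler(io.BytesIO(pickle.dumps(Evil()))).load()
except Exception as e:
    print(e)   # e.g. "global 'posix/system' is forbidden" ('nt' on Windows)
```
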
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dab09ecb9e102c8e2e1cc6d87eab62247629caa
--- /dev/null
+++ b/modules/script_callbacks.py
@@ -0,0 +1,482 @@
+import inspect
+import os
+from collections import namedtuple
+from typing import Optional, Dict, Any
+
+from fastapi import FastAPI
+from gradio import Blocks
+
+from modules import errors, timer
+
+
+def report_exception(c, job):
+ errors.report(f"Error executing callback {job} for {c.script}", exc_info=True)
+
+
+class ImageSaveParams:
+ def __init__(self, image, p, filename, pnginfo):
+ self.image = image
+ """the PIL image itself"""
+
+ self.p = p
+ """p object with processing parameters; either StableDiffusionProcessing or an object with same fields"""
+
+ self.filename = filename
+ """name of file that the image would be saved to"""
+
+ self.pnginfo = pnginfo
+ """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
+
+
+class ExtraNoiseParams:
+ def __init__(self, noise, x, xi):
+ self.noise = noise
+ """Random noise generated by the seed"""
+
+ self.x = x
+ """Latent representation of the image"""
+
+ self.xi = xi
+ """Noisy latent representation of the image"""
+
+
+class CFGDenoiserParams:
+ def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond):
+ self.x = x
+ """Latent image representation in the process of being denoised"""
+
+ self.image_cond = image_cond
+ """Conditioning image"""
+
+ self.sigma = sigma
+ """Current sigma noise step value"""
+
+ self.sampling_step = sampling_step
+ """Current Sampling step number"""
+
+ self.total_sampling_steps = total_sampling_steps
+ """Total number of sampling steps planned"""
+
+ self.text_cond = text_cond
+ """Encoder hidden states of text conditioning from the prompt"""
+
+ self.text_uncond = text_uncond
+ """Encoder hidden states of text conditioning from the negative prompt"""
+
+
+class CFGDenoisedParams:
+ def __init__(self, x, sampling_step, total_sampling_steps, inner_model):
+ self.x = x
+ """Latent image representation in the process of being denoised"""
+
+ self.sampling_step = sampling_step
+ """Current Sampling step number"""
+
+ self.total_sampling_steps = total_sampling_steps
+ """Total number of sampling steps planned"""
+
+ self.inner_model = inner_model
+ """Inner model reference used for denoising"""
+
+
+class AfterCFGCallbackParams:
+ def __init__(self, x, sampling_step, total_sampling_steps):
+ self.x = x
+ """Latent image representation in the process of being denoised"""
+
+ self.sampling_step = sampling_step
+ """Current Sampling step number"""
+
+ self.total_sampling_steps = total_sampling_steps
+ """Total number of sampling steps planned"""
+
+
+class UiTrainTabParams:
+ def __init__(self, txt2img_preview_params):
+ self.txt2img_preview_params = txt2img_preview_params
+
+
+class ImageGridLoopParams:
+ def __init__(self, imgs, cols, rows):
+ self.imgs = imgs
+ self.cols = cols
+ self.rows = rows
+
+
+ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
+callback_map = dict(
+ callbacks_app_started=[],
+ callbacks_model_loaded=[],
+ callbacks_ui_tabs=[],
+ callbacks_ui_train_tabs=[],
+ callbacks_ui_settings=[],
+ callbacks_before_image_saved=[],
+ callbacks_image_saved=[],
+ callbacks_extra_noise=[],
+ callbacks_cfg_denoiser=[],
+ callbacks_cfg_denoised=[],
+ callbacks_cfg_after_cfg=[],
+ callbacks_before_component=[],
+ callbacks_after_component=[],
+ callbacks_image_grid=[],
+ callbacks_infotext_pasted=[],
+ callbacks_script_unloaded=[],
+ callbacks_before_ui=[],
+ callbacks_on_reload=[],
+ callbacks_list_optimizers=[],
+ callbacks_list_unets=[],
+)
+
+
+def clear_callbacks():
+ for callback_list in callback_map.values():
+ callback_list.clear()
+
+
+def app_started_callback(demo: Optional[Blocks], app: FastAPI):
+ for c in callback_map['callbacks_app_started']:
+ try:
+ c.callback(demo, app)
+ timer.startup_timer.record(os.path.basename(c.script))
+ except Exception:
+ report_exception(c, 'app_started_callback')
+
+
+def app_reload_callback():
+ for c in callback_map['callbacks_on_reload']:
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'callbacks_on_reload')
+
+
+def model_loaded_callback(sd_model):
+ for c in callback_map['callbacks_model_loaded']:
+ try:
+ c.callback(sd_model)
+ except Exception:
+ report_exception(c, 'model_loaded_callback')
+
+
+def ui_tabs_callback():
+ res = []
+
+ for c in callback_map['callbacks_ui_tabs']:
+ try:
+ res += c.callback() or []
+ except Exception:
+ report_exception(c, 'ui_tabs_callback')
+
+ return res
+
+
+def ui_train_tabs_callback(params: UiTrainTabParams):
+ for c in callback_map['callbacks_ui_train_tabs']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'callbacks_ui_train_tabs')
+
+
+def ui_settings_callback():
+ for c in callback_map['callbacks_ui_settings']:
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'ui_settings_callback')
+
+
+def before_image_saved_callback(params: ImageSaveParams):
+ for c in callback_map['callbacks_before_image_saved']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'before_image_saved_callback')
+
+
+def image_saved_callback(params: ImageSaveParams):
+ for c in callback_map['callbacks_image_saved']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'image_saved_callback')
+
+
+def extra_noise_callback(params: ExtraNoiseParams):
+ for c in callback_map['callbacks_extra_noise']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'callbacks_extra_noise')
+
+
+def cfg_denoiser_callback(params: CFGDenoiserParams):
+ for c in callback_map['callbacks_cfg_denoiser']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'cfg_denoiser_callback')
+
+
+def cfg_denoised_callback(params: CFGDenoisedParams):
+ for c in callback_map['callbacks_cfg_denoised']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'cfg_denoised_callback')
+
+
+def cfg_after_cfg_callback(params: AfterCFGCallbackParams):
+ for c in callback_map['callbacks_cfg_after_cfg']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'cfg_after_cfg_callback')
+
+
+def before_component_callback(component, **kwargs):
+ for c in callback_map['callbacks_before_component']:
+ try:
+ c.callback(component, **kwargs)
+ except Exception:
+ report_exception(c, 'before_component_callback')
+
+
+def after_component_callback(component, **kwargs):
+ for c in callback_map['callbacks_after_component']:
+ try:
+ c.callback(component, **kwargs)
+ except Exception:
+ report_exception(c, 'after_component_callback')
+
+
+def image_grid_callback(params: ImageGridLoopParams):
+ for c in callback_map['callbacks_image_grid']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'image_grid')
+
+
+def infotext_pasted_callback(infotext: str, params: Dict[str, Any]):
+ for c in callback_map['callbacks_infotext_pasted']:
+ try:
+ c.callback(infotext, params)
+ except Exception:
+ report_exception(c, 'infotext_pasted')
+
+
+def script_unloaded_callback():
+ for c in reversed(callback_map['callbacks_script_unloaded']):
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'script_unloaded')
+
+
+def before_ui_callback():
+ for c in reversed(callback_map['callbacks_before_ui']):
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'before_ui')
+
+
+def list_optimizers_callback():
+ res = []
+
+ for c in callback_map['callbacks_list_optimizers']:
+ try:
+ c.callback(res)
+ except Exception:
+ report_exception(c, 'list_optimizers')
+
+ return res
+
+
+def list_unets_callback():
+ res = []
+
+ for c in callback_map['callbacks_list_unets']:
+ try:
+ c.callback(res)
+ except Exception:
+ report_exception(c, 'list_unets')
+
+ return res
+
+
+def add_callback(callbacks, fun):
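+ # attribute the callback to the first stack frame outside this file, i.e. the registering script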
+ stack = [x for x in inspect.stack() if x.filename != __file__]
+ filename = stack[0].filename if stack else 'unknown file'
+
+ callbacks.append(ScriptCallback(filename, fun))
+
+
+def remove_current_script_callbacks():
+ stack = [x for x in inspect.stack() if x.filename != __file__]
+ filename = stack[0].filename if stack else 'unknown file'
+ if filename == 'unknown file':
+ return
+ for callback_list in callback_map.values():
+ for callback_to_remove in [cb for cb in callback_list if cb.script == filename]:
+ callback_list.remove(callback_to_remove)
+
+
+def remove_callbacks_for_function(callback_func):
+ for callback_list in callback_map.values():
+ for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]:
+ callback_list.remove(callback_to_remove)
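+
+
+# Illustrative extension-side usage of the registration helpers below
+# (the callback body is hypothetical):
+#
+#     from modules import script_callbacks
+#
+#     def on_saved(params):
+#         print(f"saved {params.filename}")
+#
+#     script_callbacks.on_image_saved(on_saved)
+#     # ... later, e.g. when the script is unloaded:
+#     script_callbacks.remove_callbacks_for_function(on_saved)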
+
+
+def on_app_started(callback):
+ """register a function to be called when the webui started, the gradio `Block` component and
+ fastapi `FastAPI` object are passed as the arguments"""
+ add_callback(callback_map['callbacks_app_started'], callback)
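+
+# A minimal sketch of using this hook to add an API route from an extension
+# (the route name and response are hypothetical; demo may be None when the UI is disabled):
+#
+#     def my_app_started(demo, app):
+#         @app.get("/myext/ping")
+#         def ping():
+#             return {"ok": True}
+#
+#     on_app_started(my_app_started)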
+
+
+def on_before_reload(callback):
+ """register a function to be called just before the server reloads."""
+ add_callback(callback_map['callbacks_on_reload'], callback)
+
+
+def on_model_loaded(callback):
+ """register a function to be called when the stable diffusion model is created; the model is
+ passed as an argument; this function is also called when the script is reloaded. """
+ add_callback(callback_map['callbacks_model_loaded'], callback)
+
+
+def on_ui_tabs(callback):
+ """register a function to be called when the UI is creating new tabs.
+ The function must either return None, which means no new tabs are to be added, or a list, where
+ each element is a tuple:
+ (gradio_component, title, elem_id)
+
+ gradio_component is a gradio component to be used for contents of the tab (usually gr.Blocks)
+ title is tab text displayed to user in the UI
+ elem_id is HTML id for the tab
+ """
+ add_callback(callback_map['callbacks_ui_tabs'], callback)
+
+
+def on_ui_train_tabs(callback):
+ """register a function to be called when the UI is creating new tabs for the train tab.
+ Create your new tabs with gr.Tab.
+ """
+ add_callback(callback_map['callbacks_ui_train_tabs'], callback)
+
+
+def on_ui_settings(callback):
+ """register a function to be called before UI settings are populated; add your settings
+ by using shared.opts.add_option(shared.OptionInfo(...)) """
+ add_callback(callback_map['callbacks_ui_settings'], callback)
+
+
+def on_before_image_saved(callback):
+ """register a function to be called before an image is saved to a file.
+ The callback is called with one argument:
+ - params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
+ """
+ add_callback(callback_map['callbacks_before_image_saved'], callback)
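+
+# Sketch: stamping extra text into the PNG info before the file is written
+# (the key name is hypothetical):
+#
+#     def before_save(params):
+#         params.pnginfo["myext"] = "processed"
+#
+#     on_before_image_saved(before_save)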
+
+
+def on_image_saved(callback):
+ """register a function to be called after an image is saved to a file.
+ The callback is called with one argument:
+ - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
+ """
+ add_callback(callback_map['callbacks_image_saved'], callback)
+
+
+def on_extra_noise(callback):
+ """register a function to be called before adding extra noise in img2img or hires fix;
+ The callback is called with one argument:
+ - params: ExtraNoiseParams - contains noise determined by seed and latent representation of image
+ """
+ add_callback(callback_map['callbacks_extra_noise'], callback)
+
+
+def on_cfg_denoiser(callback):
+ """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
+ The callback is called with one argument:
+ - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
+ """
+ add_callback(callback_map['callbacks_cfg_denoiser'], callback)
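+
+# Sketch: reading sampling progress from the denoiser hook:
+#
+#     def log_step(params):
+#         if params.sampling_step == 0:
+#             print(f"starting {params.total_sampling_steps} sampling steps")
+#
+#     on_cfg_denoiser(log_step)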
+
+
+def on_cfg_denoised(callback):
+ """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
+ The callback is called with one argument:
+ - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details.
+ """
+ add_callback(callback_map['callbacks_cfg_denoised'], callback)
+
+
+def on_cfg_after_cfg(callback):
+ """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations are completed.
+ The callback is called with one argument:
+ - params: AfterCFGCallbackParams - parameters to be passed to the script for post-processing after cfg calculation.
+ """
+ add_callback(callback_map['callbacks_cfg_after_cfg'], callback)
+
+
+def on_before_component(callback):
+ """register a function to be called before a component is created.
+ The callback is called with arguments:
+ - component - gradio component that is about to be created.
+ - **kwargs - args to gradio.components.IOComponent.__init__ function
+
+ Use elem_id/label fields of kwargs to figure out which component it is.
+ This can be useful to inject your own components somewhere in the middle of vanilla UI.
+ """
+ add_callback(callback_map['callbacks_before_component'], callback)
+
+
+def on_after_component(callback):
+ """register a function to be called after a component is created. See on_before_component for more."""
+ add_callback(callback_map['callbacks_after_component'], callback)
+
+
+def on_image_grid(callback):
+ """register a function to be called before making an image grid.
+ The callback is called with one argument:
+ - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified.
+ """
+ add_callback(callback_map['callbacks_image_grid'], callback)
+
+
+def on_infotext_pasted(callback):
+ """register a function to be called before applying an infotext.
+ The callback is called with two arguments:
+ - infotext: str - raw infotext.
+ - result: Dict[str, Any] - parsed infotext parameters.
+ """
+ add_callback(callback_map['callbacks_infotext_pasted'], callback)
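+
+# Sketch: migrating a legacy infotext key when parameters are pasted
+# (both key names are hypothetical):
+#
+#     def fix_keys(infotext, params):
+#         if "Old key" in params:
+#             params["New key"] = params.pop("Old key")
+#
+#     on_infotext_pasted(fix_keys)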
+
+
+def on_script_unloaded(callback):
+ """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that
+ the script did should be reverted here"""
+
+ add_callback(callback_map['callbacks_script_unloaded'], callback)
+
+
+def on_before_ui(callback):
+ """register a function to be called before the UI is created."""
+
+ add_callback(callback_map['callbacks_before_ui'], callback)
+
+
+def on_list_optimizers(callback):
+ """register a function to be called when UI is making a list of cross attention optimization options.
+ The function will be called with one argument, a list, and shall add objects of type modules.sd_hijack_optimizations.SdOptimization
+ to it."""
+
+ add_callback(callback_map['callbacks_list_optimizers'], callback)
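+
+# Sketch of registering a cross attention optimization option; assumes the
+# SdOptimization base class from modules.sd_hijack_optimizations (the name and
+# apply body are illustrative):
+#
+#     from modules import sd_hijack_optimizations
+#
+#     class MyOpt(sd_hijack_optimizations.SdOptimization):
+#         name = "my-attention"
+#
+#         def apply(self):
+#             ...  # patch the attention forward functions here
+#
+#     on_list_optimizers(lambda res: res.append(MyOpt()))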
+
+
+def on_list_unets(callback):
+ """register a function to be called when UI is making a list of alternative options for unet.
+ The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it."""
+
+ add_callback(callback_map['callbacks_list_unets'], callback)
diff --git a/modules/script_loading.py b/modules/script_loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..02ae522a806c760a393e548a60083e6901c891c8
--- /dev/null
+++ b/modules/script_loading.py
@@ -0,0 +1,31 @@
+import os
+import importlib.util
+
+from modules import errors
+
+
+def load_module(path):
+ module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
+ module = importlib.util.module_from_spec(module_spec)
+ module_spec.loader.exec_module(module)
+
+ return module
+
+
+def preload_extensions(extensions_dir, parser, extension_list=None):
+ if not os.path.isdir(extensions_dir):
+ return
+
+ extensions = extension_list if extension_list is not None else os.listdir(extensions_dir)
+ for dirname in sorted(extensions):
+ preload_script = os.path.join(extensions_dir, dirname, "preload.py")
+ if not os.path.isfile(preload_script):
+ continue
+
+ try:
+ module = load_module(preload_script)
+ if hasattr(module, 'preload'):
+ module.preload(parser)
+
+ except Exception:
+ errors.report(f"Error running preload() for {preload_script}", exc_info=True)
diff --git a/modules/scripts.py b/modules/scripts.py
new file mode 100644
index 0000000000000000000000000000000000000000..08745bf65769c48164096b1952e0ea270e4c53ff
--- /dev/null
+++ b/modules/scripts.py
@@ -0,0 +1,758 @@
+import os
+import re
+import sys
+import inspect
+from collections import namedtuple
+from dataclasses import dataclass
+
+import gradio as gr
+
+from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors, timer
+
+AlwaysVisible = object()
+
+
+class PostprocessImageArgs:
+ def __init__(self, image):
+ self.image = image
+
+
+class PostprocessBatchListArgs:
+ def __init__(self, images):
+ self.images = images
+
+
+@dataclass
+class OnComponent:
+ component: gr.blocks.Block
+
+
+class Script:
+ name = None
+ """script's internal name derived from title"""
+
+ section = None
+ """name of UI section that the script's controls will be placed into"""
+
+ filename = None
+ args_from = None
+ args_to = None
+ alwayson = False
+
+ is_txt2img = False
+ is_img2img = False
+ tabname = None
+
+ group = None
+ """A gr.Group component that has all script's UI inside it."""
+
+ create_group = True
+ """If False, for alwayson scripts, a group component will not be created."""
+
+ infotext_fields = None
+ """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
+ parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
+ """
+
+ paste_field_names = None
+ """if set in ui(), this is a list of names of infotext fields; the fields will be sent through the
+ various "Send to " buttons when clicked
+ """
+
+ api_info = None
+ """Generated value of type modules.api.models.ScriptInfo with information about the script for API"""
+
+ on_before_component_elem_id = None
+ """list of callbacks to be called before a component with an elem_id is created"""
+
+ on_after_component_elem_id = None
+ """list of callbacks to be called after a component with an elem_id is created"""
+
+ setup_for_ui_only = False
+ """If true, the script setup will only be run in Gradio UI, not in API"""
+
+ def title(self):
+ """this function should return the title of the script. This is what will be displayed in the dropdown menu."""
+
+ raise NotImplementedError()
+
+ def ui(self, is_img2img):
+ """this function should create gradio UI elements. See https://gradio.app/docs/#components
+ The return value should be an array of all components that are used in processing.
+ Values of those returned components will be passed to run() and process() functions.
+ """
+
+ pass
+
+ def show(self, is_img2img):
+ """
+ is_img2img is True if this function is called for the img2img interface, and False otherwise
+
+ This function should return:
+ - False if the script should not be shown in UI at all
+ - True if the script should be shown in UI if it's selected in the scripts dropdown
+ - script.AlwaysVisible if the script should be shown in UI at all times
+ """
+
+ return True
+
+ def run(self, p, *args):
+ """
+ This function is called if the script has been selected in the script dropdown.
+ It must do all processing and return the Processed object with results, same as
+ one returned by processing.process_images.
+
+ Usually the processing is done by calling the processing.process_images function.
+
+ args contains all values returned by components from ui()
+ """
+
+ pass
+
+ def setup(self, p, *args):
+ """For AlwaysVisible scripts, this function is called when the processing object is set up, before any processing starts.
+ args contains all values returned by components from ui().
+ """
+ pass
+
+ def before_process(self, p, *args):
+ """
+ This function is called very early, before processing begins, for AlwaysVisible scripts.
+ You can modify the processing object (p) here, inject hooks, etc.
+ args contains all values returned by components from ui()
+ """
+
+ pass
+
+ def process(self, p, *args):
+ """
+ This function is called before processing begins for AlwaysVisible scripts.
+ You can modify the processing object (p) here, inject hooks, etc.
+ args contains all values returned by components from ui()
+ """
+
+ pass
+
+ def before_process_batch(self, p, *args, **kwargs):
+ """
+ Called before extra networks are parsed from the prompt, so you can add
+ new extra network keywords to the prompt with this callback.
+
+ **kwargs will have those items:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ - prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things
+ - seeds - list of seeds for current batch
+ - subseeds - list of subseeds for current batch
+ """
+
+ pass
+
+ def after_extra_networks_activate(self, p, *args, **kwargs):
+ """
+ Called after extra network activation, before conds calculation.
+ Allows modification of the network after extra network activation has been applied.
+ Won't be called if p.disable_extra_networks is set.
+
+ **kwargs will have those items:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ - prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things
+ - seeds - list of seeds for current batch
+ - subseeds - list of subseeds for current batch
+ - extra_network_data - list of ExtraNetworkParams for current stage
+ """
+ pass
+
+ def process_batch(self, p, *args, **kwargs):
+ """
+ Same as process(), but called for every batch.
+
+ **kwargs will have those items:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ - prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things
+ - seeds - list of seeds for current batch
+ - subseeds - list of subseeds for current batch
+ """
+
+ pass
+
+ def postprocess_batch(self, p, *args, **kwargs):
+ """
+ Same as process_batch(), but called for every batch after it has been generated.
+
+ **kwargs will have same items as process_batch, and also:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ - images - torch tensor with all generated images, with values ranging from 0 to 1;
+ """
+
+ pass
+
+ def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, *args, **kwargs):
+ """
+ Same as postprocess_batch(), but receives batch images as a list of 3D tensors instead of a 4D tensor.
+ This is useful when you want to update the entire batch instead of individual images.
+
+ You can modify the postprocessing object (pp) to update the images in the batch, remove images, add images, etc.
+ If the number of images is different from the batch size when returning,
+ then the script has the responsibility to also update the following attributes in the processing object (p):
+ - p.prompts
+ - p.negative_prompts
+ - p.seeds
+ - p.subseeds
+
+ **kwargs will have same items as process_batch, and also:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ """
+
+ pass
+
+ def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
+ """
+ Called for every image after it has been generated.
+ """
+
+ pass
+
+ def postprocess(self, p, processed, *args):
+ """
+ This function is called after processing ends for AlwaysVisible scripts.
+ args contains all values returned by components from ui()
+ """
+
+ pass
+
+ def before_component(self, component, **kwargs):
+ """
+ Called before a component is created.
+ Use elem_id/label fields of kwargs to figure out which component it is.
+ This can be useful to inject your own components somewhere in the middle of vanilla UI.
+ You can return created components in the ui() function to add them to the list of arguments for your processing functions
+ """
+
+ pass
+
+ def after_component(self, component, **kwargs):
+ """
+ Called after a component is created. Same as above.
+ """
+
+ pass
+
+ def on_before_component(self, callback, *, elem_id):
+ """
+ Calls callback before a component is created. The callback function is called with a single argument of type OnComponent.
+
+ May be called in show() or ui() - but in the latter it may be too late, as some components may already be created.
+
+ This function is an alternative to before_component in that it also allows running code before a component is created, but
+ it doesn't have to be called for every created component - just for the ones you need.
+ """
+ if self.on_before_component_elem_id is None:
+ self.on_before_component_elem_id = []
+
+ self.on_before_component_elem_id.append((elem_id, callback))
+
+ def on_after_component(self, callback, *, elem_id):
+ """
+ Calls callback after a component is created. The callback function is called with a single argument of type OnComponent.
+ """
+ if self.on_after_component_elem_id is None:
+ self.on_after_component_elem_id = []
+
+ self.on_after_component_elem_id.append((elem_id, callback))
+
+ def describe(self):
+ """unused"""
+ return ""
+
+ def elem_id(self, item_id):
+ """helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""
+
+ need_tabname = self.show(True) == self.show(False)
+ tabkind = 'img2img' if self.is_img2img else 'txt2img'
+ tabname = f"{tabkind}_" if need_tabname else ""
+ title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))
+
+ return f'script_{tabname}{title}_{item_id}'
+
+ def before_hr(self, p, *args):
+ """
+ This function is called before hires fix starts.
+ """
+ pass
+
+
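+# A minimal sketch of an always-on script built on the class above (the title,
+# checkbox, and infotext key are illustrative, not part of this module):
+#
+#     class ExampleScript(Script):
+#         def title(self):
+#             return "Example"
+#
+#         def show(self, is_img2img):
+#             return AlwaysVisible
+#
+#         def ui(self, is_img2img):
+#             enabled = gr.Checkbox(label="Enable example", value=False)
+#             return [enabled]
+#
+#         def process(self, p, enabled):
+#             if enabled:
+#                 p.extra_generation_params["Example"] = "on"
+
+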
+class ScriptBuiltinUI(Script):
+ setup_for_ui_only = True
+
+ def elem_id(self, item_id):
+ """helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id"""
+
+ need_tabname = self.show(True) == self.show(False)
+ tabname = ('img2img' if self.is_img2img else 'txt2img') + "_" if need_tabname else ""
+
+ return f'{tabname}{item_id}'
+
+
+current_basedir = paths.script_path
+
+
+def basedir():
+ """returns the base directory for the current script. For scripts in the main scripts directory,
+ this is the main directory (where webui.py resides), and for scripts in extensions directory
+ (i.e. extensions/aesthetic/scripts/aesthetic.py), this is the extension's directory (extensions/aesthetic)
+ """
+ return current_basedir
+
+
+ScriptFile = namedtuple("ScriptFile", ["basedir", "filename", "path"])
+
+scripts_data = []
+postprocessing_scripts_data = []
+ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])
+
+
+def list_scripts(scriptdirname, extension, *, include_extensions=True):
+ scripts_list = []
+
+ basedir = os.path.join(paths.script_path, scriptdirname)
+ if os.path.exists(basedir):
+ for filename in sorted(os.listdir(basedir)):
+ scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
+
+ if include_extensions:
+ for ext in extensions.active():
+ scripts_list += ext.list_files(scriptdirname, extension)
+
+ scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
+
+ return scripts_list
+
+
+def list_files_with_name(filename):
+ res = []
+
+ dirs = [paths.script_path] + [ext.path for ext in extensions.active()]
+
+ for dirpath in dirs:
+ if not os.path.isdir(dirpath):
+ continue
+
+ path = os.path.join(dirpath, filename)
+ if os.path.isfile(path):
+ res.append(path)
+
+ return res
+
+
+def load_scripts():
+ global current_basedir
+ scripts_data.clear()
+ postprocessing_scripts_data.clear()
+ script_callbacks.clear_callbacks()
+
+ scripts_list = list_scripts("scripts", ".py") + list_scripts("modules/processing_scripts", ".py", include_extensions=False)
+
+ syspath = sys.path
+
+ def register_scripts_from_module(module):
+ for script_class in module.__dict__.values():
+ if not inspect.isclass(script_class):
+ continue
+
+ if issubclass(script_class, Script):
+ scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
+ elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
+ postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
+
+ def orderby(basedir):
+ # 1st webui, 2nd extensions-builtin, 3rd extensions
+ priority = {os.path.join(paths.script_path, "extensions-builtin"): 1, paths.script_path: 0}
+ for key in priority:
+ if basedir.startswith(key):
+ return priority[key]
+ return 9999
+
+ for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
+ try:
+ if scriptfile.basedir != paths.script_path:
+ sys.path = [scriptfile.basedir] + sys.path
+ current_basedir = scriptfile.basedir
+
+ script_module = script_loading.load_module(scriptfile.path)
+ register_scripts_from_module(script_module)
+
+ except Exception:
+ errors.report(f"Error loading script: {scriptfile.filename}", exc_info=True)
+
+ finally:
+ sys.path = syspath
+ current_basedir = paths.script_path
+ timer.startup_timer.record(scriptfile.filename)
+
+ global scripts_txt2img, scripts_img2img, scripts_postproc
+
+ scripts_txt2img = ScriptRunner()
+ scripts_img2img = ScriptRunner()
+ scripts_postproc = scripts_postprocessing.ScriptPostprocessingRunner()
+
+
+def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ errors.report(f"Error calling: {filename}/{funcname}", exc_info=True)
+
+ return default
+
+
+class ScriptRunner:
+ def __init__(self):
+ self.scripts = []
+ self.selectable_scripts = []
+ self.alwayson_scripts = []
+ self.titles = []
+ self.title_map = {}
+ self.infotext_fields = []
+ self.paste_field_names = []
+ self.inputs = [None]
+
+ self.on_before_component_elem_id = {}
+ """dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks"""
+
+ self.on_after_component_elem_id = {}
+ """dict of callbacks to be called after an element is created; key=elem_id, value=list of callbacks"""
+
+ def initialize_scripts(self, is_img2img):
+ from modules import scripts_auto_postprocessing
+
+ self.scripts.clear()
+ self.alwayson_scripts.clear()
+ self.selectable_scripts.clear()
+
+ auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
+
+ for script_data in auto_processing_scripts + scripts_data:
+ script = script_data.script_class()
+ script.filename = script_data.path
+ script.is_txt2img = not is_img2img
+ script.is_img2img = is_img2img
+ script.tabname = "img2img" if is_img2img else "txt2img"
+
+ visibility = script.show(script.is_img2img)
+
+ if visibility == AlwaysVisible:
+ self.scripts.append(script)
+ self.alwayson_scripts.append(script)
+ script.alwayson = True
+
+ elif visibility:
+ self.scripts.append(script)
+ self.selectable_scripts.append(script)
+
+ self.apply_on_before_component_callbacks()
+
+ def apply_on_before_component_callbacks(self):
+ for script in self.scripts:
+ on_before = script.on_before_component_elem_id or []
+ on_after = script.on_after_component_elem_id or []
+
+ for elem_id, callback in on_before:
+ if elem_id not in self.on_before_component_elem_id:
+ self.on_before_component_elem_id[elem_id] = []
+
+ self.on_before_component_elem_id[elem_id].append((callback, script))
+
+ for elem_id, callback in on_after:
+ if elem_id not in self.on_after_component_elem_id:
+ self.on_after_component_elem_id[elem_id] = []
+
+ self.on_after_component_elem_id[elem_id].append((callback, script))
+
+ on_before.clear()
+ on_after.clear()
+
+ def create_script_ui(self, script):
+ import modules.api.models as api_models
+
+ script.args_from = len(self.inputs)
+ script.args_to = len(self.inputs)
+
+ controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img)
+
+ if controls is None:
+ return
+
+ script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower()
+ api_args = []
+
+ for control in controls:
+ control.custom_script_source = os.path.basename(script.filename)
+
+ arg_info = api_models.ScriptArg(label=control.label or "")
+
+ for field in ("value", "minimum", "maximum", "step", "choices"):
+ v = getattr(control, field, None)
+ if v is not None:
+ setattr(arg_info, field, v)
+
+ api_args.append(arg_info)
+
+ script.api_info = api_models.ScriptInfo(
+ name=script.name,
+ is_img2img=script.is_img2img,
+ is_alwayson=script.alwayson,
+ args=api_args,
+ )
+
+ if script.infotext_fields is not None:
+ self.infotext_fields += script.infotext_fields
+
+ if script.paste_field_names is not None:
+ self.paste_field_names += script.paste_field_names
+
+ self.inputs += controls
+ script.args_to = len(self.inputs)
+
+ def setup_ui_for_section(self, section, scriptlist=None):
+ if scriptlist is None:
+ scriptlist = self.alwayson_scripts
+
+ for script in scriptlist:
+ if script.alwayson and script.section != section:
+ continue
+
+ if script.create_group:
+ with gr.Group(visible=script.alwayson) as group:
+ self.create_script_ui(script)
+
+ script.group = group
+ else:
+ self.create_script_ui(script)
+
+ def prepare_ui(self):
+ self.inputs = [None]
+
+ def setup_ui(self):
+ all_titles = [wrap_call(script.title, script.filename, "title") or script.filename for script in self.scripts]
+ self.title_map = {title.lower(): script for title, script in zip(all_titles, self.scripts)}
+ self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts]
+
+ self.setup_ui_for_section(None)
+
+ dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
+ self.inputs[0] = dropdown
+
+ self.setup_ui_for_section(None, self.selectable_scripts)
+
+ def select_script(script_index):
+ selected_script = self.selectable_scripts[script_index - 1] if script_index > 0 else None
+
+ return [gr.update(visible=selected_script == s) for s in self.selectable_scripts]
+
+ def init_field(title):
+ """called when an initial value is set from ui-config.json to show script's UI components"""
+
+ if title == 'None':
+ return
+
+ script_index = self.titles.index(title)
+ self.selectable_scripts[script_index].group.visible = True
+
+ dropdown.init_field = init_field
+
+ dropdown.change(
+ fn=select_script,
+ inputs=[dropdown],
+ outputs=[script.group for script in self.selectable_scripts]
+ )
+
+ self.script_load_ctr = 0
+
+ def onload_script_visibility(params):
+ title = params.get('Script', None)
+ if title:
+ title_index = self.titles.index(title)
+ visibility = title_index == self.script_load_ctr
+ self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles)
+ return gr.update(visible=visibility)
+ else:
+ return gr.update(visible=False)
+
+ self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None'))))
+ self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts])
+
+ self.apply_on_before_component_callbacks()
+
+ return self.inputs
+
+ def run(self, p, *args):
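+ # args[0] is the script dropdown's selection index; each script's own control values occupy args[script.args_from:script.args_to]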
+ script_index = args[0]
+
+ if script_index == 0:
+ return None
+
+ script = self.selectable_scripts[script_index-1]
+
+ if script is None:
+ return None
+
+ script_args = args[script.args_from:script.args_to]
+ processed = script.run(p, *script_args)
+
+ shared.total_tqdm.clear()
+
+ return processed
+
+ def before_process(self, p):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.before_process(p, *script_args)
+ except Exception:
+ errors.report(f"Error running before_process: {script.filename}", exc_info=True)
+
+ def process(self, p):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.process(p, *script_args)
+ except Exception:
+ errors.report(f"Error running process: {script.filename}", exc_info=True)
+
+ def before_process_batch(self, p, **kwargs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.before_process_batch(p, *script_args, **kwargs)
+ except Exception:
+ errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True)
+
+ def after_extra_networks_activate(self, p, **kwargs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.after_extra_networks_activate(p, *script_args, **kwargs)
+ except Exception:
+ errors.report(f"Error running after_extra_networks_activate: {script.filename}", exc_info=True)
+
+ def process_batch(self, p, **kwargs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.process_batch(p, *script_args, **kwargs)
+ except Exception:
+ errors.report(f"Error running process_batch: {script.filename}", exc_info=True)
+
+ def postprocess(self, p, processed):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess(p, processed, *script_args)
+ except Exception:
+ errors.report(f"Error running postprocess: {script.filename}", exc_info=True)
+
+ def postprocess_batch(self, p, images, **kwargs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_batch(p, *script_args, images=images, **kwargs)
+ except Exception:
+ errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True)
+
+ def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kwargs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_batch_list(p, pp, *script_args, **kwargs)
+ except Exception:
+ errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True)
+
+ def postprocess_image(self, p, pp: PostprocessImageArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_image(p, pp, *script_args)
+ except Exception:
+ errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
+
+ def before_component(self, component, **kwargs):
+ for callback, script in self.on_before_component_elem_id.get(kwargs.get("elem_id"), []):
+ try:
+ callback(OnComponent(component=component))
+ except Exception:
+ errors.report(f"Error running on_before_component: {script.filename}", exc_info=True)
+
+ for script in self.scripts:
+ try:
+ script.before_component(component, **kwargs)
+ except Exception:
+ errors.report(f"Error running before_component: {script.filename}", exc_info=True)
+
+ def after_component(self, component, **kwargs):
+ for callback, script in self.on_after_component_elem_id.get(component.elem_id, []):
+ try:
+ callback(OnComponent(component=component))
+ except Exception:
+ errors.report(f"Error running on_after_component: {script.filename}", exc_info=True)
+
+ for script in self.scripts:
+ try:
+ script.after_component(component, **kwargs)
+ except Exception:
+ errors.report(f"Error running after_component: {script.filename}", exc_info=True)
+
+ def script(self, title):
+ return self.title_map.get(title.lower())
+
+ def reload_sources(self, cache):
+ for si, script in list(enumerate(self.scripts)):
+ args_from = script.args_from
+ args_to = script.args_to
+ filename = script.filename
+
+ module = cache.get(filename, None)
+ if module is None:
+ module = script_loading.load_module(script.filename)
+ cache[filename] = module
+
+ for script_class in module.__dict__.values():
+ if type(script_class) == type and issubclass(script_class, Script):
+ self.scripts[si] = script_class()
+ self.scripts[si].filename = filename
+ self.scripts[si].args_from = args_from
+ self.scripts[si].args_to = args_to
+
+ def before_hr(self, p):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.before_hr(p, *script_args)
+ except Exception:
+ errors.report(f"Error running before_hr: {script.filename}", exc_info=True)
+
+ def setup_scrips(self, p, *, is_ui=True):
+ for script in self.alwayson_scripts:
+ if not is_ui and script.setup_for_ui_only:
+ continue
+
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.setup(p, *script_args)
+ except Exception:
+ errors.report(f"Error running setup: {script.filename}", exc_info=True)
+
+
+scripts_txt2img: ScriptRunner = None
+scripts_img2img: ScriptRunner = None
+scripts_postproc: scripts_postprocessing.ScriptPostprocessingRunner = None
+scripts_current: ScriptRunner = None
+
+
+def reload_script_body_only():
+ cache = {}
+ scripts_txt2img.reload_sources(cache)
+ scripts_img2img.reload_sources(cache)
+
+
+reload_scripts = load_scripts # compatibility alias
diff --git a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..610139b767c37d032daea709d4c841fd257cddb8
--- /dev/null
+++ b/modules/scripts_auto_postprocessing.py
@@ -0,0 +1,42 @@
+from modules import scripts, scripts_postprocessing, shared
+
+
+class ScriptPostprocessingForMainUI(scripts.Script):
+ def __init__(self, script_postproc):
+ self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc
+ self.postprocessing_controls = None
+
+ def title(self):
+ return self.script.name
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ self.postprocessing_controls = self.script.ui()
+ return self.postprocessing_controls.values()
+
+ def postprocess_image(self, p, script_pp, *args):
+ args_dict = dict(zip(self.postprocessing_controls, args))
+
+ pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
+ pp.info = {}
+ self.script.process(pp, **args_dict)
+ p.extra_generation_params.update(pp.info)
+ script_pp.image = pp.image
+
+
+def create_auto_preprocessing_script_data():
+ from modules import scripts
+
+ res = []
+
+ for name in shared.opts.postprocessing_enable_in_main_ui:
+ script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
+ if script is None:
+ continue
+
+ constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())
+ res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module))
+
+ return res
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9edf3705c3843fa02cde88c025239fa23fae379
--- /dev/null
+++ b/modules/scripts_postprocessing.py
@@ -0,0 +1,152 @@
+import os
+import gradio as gr
+
+from modules import errors, shared
+
+
+class PostprocessedImage:
+ def __init__(self, image):
+ self.image = image
+ self.info = {}
+
+
+class ScriptPostprocessing:
+ filename = None
+ controls = None
+ args_from = None
+ args_to = None
+
+ order = 1000
+ """scripts will be ordred by this value in postprocessing UI"""
+
+ name = None
+ """the title of the script, shown in the postprocessing UI"""
+
+ group = None
+ """A gr.Group component that has all script's UI inside it"""
+
+ def ui(self):
+ """
+ This function should create gradio UI elements. See https://gradio.app/docs/#components
+ The return value should be a dictionary that maps parameter names to components used in processing.
+ Values of those components will be passed to process() function.
+ """
+
+ pass
+
+ def process(self, pp: PostprocessedImage, **args):
+ """
+ This function is called to postprocess the image.
+ args contains a dictionary with all values returned by components from ui()
+ """
+
+ pass
+
+ def image_changed(self):
+ pass
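+
+# A minimal sketch of a postprocessing script built on the class above
+# (the name and the invert operation are illustrative):
+#
+#     import gradio as gr
+#     from PIL import ImageOps
+#
+#     class ScriptPostprocessingInvert(ScriptPostprocessing):
+#         name = "Invert"
+#         order = 2000
+#
+#         def ui(self):
+#             enable = gr.Checkbox(label="Invert colors", value=False)
+#             return {"enable": enable}
+#
+#         def process(self, pp, enable):
+#             if enable:
+#                 pp.image = ImageOps.invert(pp.image.convert("RGB"))
+#                 pp.info["Invert"] = True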
+
+
+def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
+ try:
+ res = func(*args, **kwargs)
+ return res
+ except Exception as e:
+ errors.display(e, f"calling {filename}/{funcname}")
+
+ return default
+
+
+class ScriptPostprocessingRunner:
+ def __init__(self):
+ self.scripts = None
+ self.ui_created = False
+
+ def initialize_scripts(self, scripts_data):
+ self.scripts = []
+
+ for script_data in scripts_data:
+ script: ScriptPostprocessing = script_data.script_class()
+ script.filename = script_data.path
+
+ if script.name == "Simple Upscale":
+ continue
+
+ self.scripts.append(script)
+
+ def create_script_ui(self, script, inputs):
+ script.args_from = len(inputs)
+ script.args_to = len(inputs)
+
+ script.controls = wrap_call(script.ui, script.filename, "ui")
+
+ for control in script.controls.values():
+ control.custom_script_source = os.path.basename(script.filename)
+
+ inputs += list(script.controls.values())
+ script.args_to = len(inputs)
+
+ def scripts_in_preferred_order(self):
+ if self.scripts is None:
+ import modules.scripts
+ self.initialize_scripts(modules.scripts.postprocessing_scripts_data)
+
+ scripts_order = shared.opts.postprocessing_operation_order
+
+ def script_score(name):
+ for i, possible_match in enumerate(scripts_order):
+ if possible_match == name:
+ return i
+
+ return len(self.scripts)
+
+ script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(self.scripts)}
+
+ return sorted(self.scripts, key=lambda x: script_scores[x.name])
+
+ def setup_ui(self):
+ inputs = []
+
+ for script in self.scripts_in_preferred_order():
+ with gr.Row() as group:
+ self.create_script_ui(script, inputs)
+
+ script.group = group
+
+ self.ui_created = True
+ return inputs
+
+ def run(self, pp: PostprocessedImage, args):
+ for script in self.scripts_in_preferred_order():
+ shared.state.job = script.name
+
+ script_args = args[script.args_from:script.args_to]
+
+ process_args = {}
+ for (name, _component), value in zip(script.controls.items(), script_args):
+ process_args[name] = value
+
+ script.process(pp, **process_args)
+
+ def create_args_for_run(self, scripts_args):
+ if not self.ui_created:
+ with gr.Blocks(analytics_enabled=False):
+ self.setup_ui()
+
+ scripts = self.scripts_in_preferred_order()
+ args = [None] * max([x.args_to for x in scripts])
+
+ for script in scripts:
+ script_args_dict = scripts_args.get(script.name, None)
+ if script_args_dict is not None:
+
+ for i, name in enumerate(script.controls):
+ args[script.args_from + i] = script_args_dict.get(name, None)
+
+ return args
+
+ def image_changed(self):
+ for script in self.scripts_in_preferred_order():
+ script.image_changed()
+
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7f2ae48b04c83900bf26adb5db133231250f35e
--- /dev/null
+++ b/modules/sd_hijack.py
@@ -0,0 +1,364 @@
+import torch
+from torch.nn.functional import silu
+from types import MethodType
+
+from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet
+from modules.hypernetworks import hypernetwork
+from modules.shared import cmd_opts
+from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
+
+import ldm.modules.attention
+import ldm.modules.diffusionmodules.model
+import ldm.modules.diffusionmodules.openaimodel
+import ldm.models.diffusion.ddim
+import ldm.models.diffusion.plms
+import ldm.modules.encoders.modules
+
+import sgm.modules.attention
+import sgm.modules.diffusionmodules.model
+import sgm.modules.diffusionmodules.openaimodel
+import sgm.modules.encoders.modules
+
+attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
+diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
+diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
+
+# new memory efficient cross attention blocks do not support hypernets and we already
+# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
+ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
+ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
+
+# silence new console spam from SD2
+ldm.modules.attention.print = shared.ldm_print
+ldm.modules.diffusionmodules.model.print = shared.ldm_print
+ldm.util.print = shared.ldm_print
+ldm.models.diffusion.ddpm.print = shared.ldm_print
+
+optimizers = []
+current_optimizer: sd_hijack_optimizations.SdOptimization = None
+
+
+def list_optimizers():
+ new_optimizers = script_callbacks.list_optimizers_callback()
+
+ new_optimizers = [x for x in new_optimizers if x.is_available()]
+
+ new_optimizers = sorted(new_optimizers, key=lambda x: x.priority, reverse=True)
+
+ optimizers.clear()
+ optimizers.extend(new_optimizers)
+
+
+def apply_optimizations(option=None):
+ global current_optimizer
+
+ undo_optimizations()
+
+ if len(optimizers) == 0:
+ # a script can access the model very early, and optimizations would not be filled by then
+ current_optimizer = None
+ return ''
+
+ ldm.modules.diffusionmodules.model.nonlinearity = silu
+ ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
+
+ sgm.modules.diffusionmodules.model.nonlinearity = silu
+ sgm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
+
+ if current_optimizer is not None:
+ current_optimizer.undo()
+ current_optimizer = None
+
+ selection = option or shared.opts.cross_attention_optimization
+ if selection == "Automatic" and len(optimizers) > 0:
+ matching_optimizer = next(iter([x for x in optimizers if x.cmd_opt and getattr(shared.cmd_opts, x.cmd_opt, False)]), optimizers[0])
+ else:
+ matching_optimizer = next(iter([x for x in optimizers if x.title() == selection]), None)
+
+ if selection == "None":
+ matching_optimizer = None
+ elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+ matching_optimizer = None
+ elif matching_optimizer is None:
+ matching_optimizer = optimizers[0]
+
+ if matching_optimizer is not None:
+ print(f"Applying attention optimization: {matching_optimizer.name}... ", end='')
+ matching_optimizer.apply()
+ print("done.")
+ current_optimizer = matching_optimizer
+ return current_optimizer.name
+ else:
+ print("Disabling attention optimization")
+ return ''
+
+
+def undo_optimizations():
+ ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
+ ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
+
+ sgm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
+ sgm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
+ sgm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
+
+
+def fix_checkpoint():
+ """checkpoints are now added and removed in embedding/hypernet code, since torch doesn't want
+ checkpoints to be added when not training (there's a warning)"""
+
+ pass
+
+
+def weighted_loss(sd_model, pred, target, mean=True):
+ # calculate the loss normally, but without taking the mean
+ loss = sd_model._old_get_loss(pred, target, mean=False)
+
+ # check if we have weights available
+ weight = getattr(sd_model, '_custom_loss_weight', None)
+ if weight is not None:
+ loss *= weight
+
+ # return the loss, as mean if specified
+ return loss.mean() if mean else loss
+
+
+def weighted_forward(sd_model, x, c, w, *args, **kwargs):
+ try:
+ # temporarily append weights to a place accessible during loss calculation
+ sd_model._custom_loss_weight = w
+
+ # replace 'get_loss' with a weight-aware one; otherwise we would need to reimplement 'forward' completely.
+ # keep 'get_loss', but don't overwrite the previous _old_get_loss if it's already set
+ if not hasattr(sd_model, '_old_get_loss'):
+ sd_model._old_get_loss = sd_model.get_loss
+ sd_model.get_loss = MethodType(weighted_loss, sd_model)
+
+ # run the standard forward function, but with the patched 'get_loss'
+ return sd_model.forward(x, c, *args, **kwargs)
+ finally:
+ try:
+ # delete temporary weights if appended
+ del sd_model._custom_loss_weight
+ except AttributeError:
+ pass
+
+ # if we have an old loss function, reset the loss function to the original one
+ if hasattr(sd_model, '_old_get_loss'):
+ sd_model.get_loss = sd_model._old_get_loss
+ del sd_model._old_get_loss
+
+
+def apply_weighted_forward(sd_model):
+ # add a new method 'weighted_forward' that can be called to calculate the weighted loss
+ sd_model.weighted_forward = MethodType(weighted_forward, sd_model)
+
+
+def undo_weighted_forward(sd_model):
+ try:
+ del sd_model.weighted_forward
+ except AttributeError:
+ pass
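+
+# Training-side usage sketch (w is a loss-weight tensor broadcastable to the
+# per-element loss; the call site is hypothetical):
+#
+#     apply_weighted_forward(sd_model)
+#     loss = sd_model.weighted_forward(x, c, w)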
+
+
+class StableDiffusionModelHijack:
+ fixes = None
+ layers = None
+ circular_enabled = False
+ clip = None
+ optimization_method = None
+
+ def __init__(self):
+ import modules.textual_inversion.textual_inversion
+
+ self.extra_generation_params = {}
+ self.comments = []
+
+ self.embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
+ self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
+
+ def apply_optimizations(self, option=None):
+ try:
+ self.optimization_method = apply_optimizations(option)
+ except Exception as e:
+ errors.display(e, "applying cross attention optimization")
+ undo_optimizations()
+
+ def hijack(self, m):
+ conditioner = getattr(m, 'conditioner', None)
+ if conditioner:
+ text_cond_models = []
+
+ for i in range(len(conditioner.embedders)):
+ embedder = conditioner.embedders[i]
+ typename = type(embedder).__name__
+ if typename == 'FrozenOpenCLIPEmbedder':
+ embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self)
+ conditioner.embedders[i] = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(embedder, self)
+ text_cond_models.append(conditioner.embedders[i])
+ if typename == 'FrozenCLIPEmbedder':
+ model_embeddings = embedder.transformer.text_model.embeddings
+ model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
+ conditioner.embedders[i] = sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords(embedder, self)
+ text_cond_models.append(conditioner.embedders[i])
+ if typename == 'FrozenOpenCLIPEmbedder2':
+ embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self, textual_inversion_key='clip_g')
+ conditioner.embedders[i] = sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords(embedder, self)
+ text_cond_models.append(conditioner.embedders[i])
+
+ if len(text_cond_models) == 1:
+ m.cond_stage_model = text_cond_models[0]
+ else:
+ m.cond_stage_model = conditioner
+
+ if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+ model_embeddings = m.cond_stage_model.roberta.embeddings
+ model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self)
+ m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self)
+
+ elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder:
+ model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
+ model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
+ m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
+
+ elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder:
+ m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
+ m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
+
+ apply_weighted_forward(m)
+ if m.cond_stage_key == "edit":
+ sd_hijack_unet.hijack_ddpm_edit()
+
+ self.apply_optimizations()
+
+ self.clip = m.cond_stage_model
+
+ def flatten(el):
+ flattened = [flatten(children) for children in el.children()]
+ res = [el]
+ for c in flattened:
+ res += c
+ return res
+
+ self.layers = flatten(m)
+
+ if not hasattr(ldm.modules.diffusionmodules.openaimodel, 'copy_of_UNetModel_forward_for_webui'):
+ ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui = ldm.modules.diffusionmodules.openaimodel.UNetModel.forward
+
+ ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
+
+ def undo_hijack(self, m):
+ conditioner = getattr(m, 'conditioner', None)
+ if conditioner:
+ for i in range(len(conditioner.embedders)):
+ embedder = conditioner.embedders[i]
+ if isinstance(embedder, (sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords, sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords)):
+ embedder.wrapped.model.token_embedding = embedder.wrapped.model.token_embedding.wrapped
+ conditioner.embedders[i] = embedder.wrapped
+ if isinstance(embedder, sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords):
+ embedder.wrapped.transformer.text_model.embeddings.token_embedding = embedder.wrapped.transformer.text_model.embeddings.token_embedding.wrapped
+ conditioner.embedders[i] = embedder.wrapped
+
+ if hasattr(m, 'cond_stage_model'):
+ delattr(m, 'cond_stage_model')
+
+ elif type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
+ m.cond_stage_model = m.cond_stage_model.wrapped
+
+ elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
+ m.cond_stage_model = m.cond_stage_model.wrapped
+
+ model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
+ if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
+ model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
+ elif type(m.cond_stage_model) == sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords:
+ m.cond_stage_model.wrapped.model.token_embedding = m.cond_stage_model.wrapped.model.token_embedding.wrapped
+ m.cond_stage_model = m.cond_stage_model.wrapped
+
+ undo_optimizations()
+ undo_weighted_forward(m)
+
+ self.apply_circular(False)
+ self.layers = None
+ self.clip = None
+
+ ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui
+
+ def apply_circular(self, enable):
+ if self.circular_enabled == enable:
+ return
+
+ self.circular_enabled = enable
+
+ for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
+ layer.padding_mode = 'circular' if enable else 'zeros'
+
+ def clear_comments(self):
+ self.comments = []
+ self.extra_generation_params = {}
+
+ def get_prompt_lengths(self, text):
+ if self.clip is None:
+ return "-", "-"
+
+ _, token_count = self.clip.process_texts([text])
+
+ return token_count, self.clip.get_target_prompt_token_count(token_count)
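+        # For example, a prompt that tokenizes to 80 tokens is reported as 80/150:
+        # it spills past one 75-token chunk, so the target is rounded up to the
+        # next multiple of 75.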
+
+ def redo_hijack(self, m):
+ self.undo_hijack(m)
+ self.hijack(m)
+
+
+class EmbeddingsWithFixes(torch.nn.Module):
+ def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
+ super().__init__()
+ self.wrapped = wrapped
+ self.embeddings = embeddings
+ self.textual_inversion_key = textual_inversion_key
+
+ def forward(self, input_ids):
+ batch_fixes = self.embeddings.fixes
+ self.embeddings.fixes = None
+
+ inputs_embeds = self.wrapped(input_ids)
+
+ if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
+ return inputs_embeds
+
+ vecs = []
+ for fixes, tensor in zip(batch_fixes, inputs_embeds):
+ for offset, embedding in fixes:
+ vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
+ emb = devices.cond_cast_unet(vec)
+ emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
+ tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
+
+ vecs.append(tensor)
+
+ return torch.stack(vecs)
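+        # Illustration with hypothetical values: for a fix at offset 5 with an
+        # embedding of 4 vectors, rows 6..9 of the 77-token tensor (which hold the
+        # placeholder token id 0 from tokenization) are replaced by the embedding's
+        # vectors; the rest keeps the output of the wrapped token_embedding.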
+
+
+def add_circular_option_to_conv_2d():
+ conv2d_constructor = torch.nn.Conv2d.__init__
+
+ def conv2d_constructor_circular(self, *args, **kwargs):
+ return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)
+
+ torch.nn.Conv2d.__init__ = conv2d_constructor_circular
+
+
+model_hijack = StableDiffusionModelHijack()
+
+
+def register_buffer(self, name, attr):
+ """
+ Fix register buffer bug for Mac OS.
+ """
+
+ if type(attr) == torch.Tensor:
+ if attr.device != devices.device:
+ attr = attr.to(device=devices.device, dtype=(torch.float32 if devices.device.type == 'mps' else None))
+
+ setattr(self, name, attr)
+
+
+ldm.models.diffusion.ddim.DDIMSampler.register_buffer = register_buffer
+ldm.models.diffusion.plms.PLMSSampler.register_buffer = register_buffer
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8b369975c2c12d8438118c21874a970b99b9fb3
--- /dev/null
+++ b/modules/sd_hijack_clip.py
@@ -0,0 +1,356 @@
+import math
+from collections import namedtuple
+
+import torch
+
+from modules import prompt_parser, devices, sd_hijack
+from modules.shared import opts
+
+
+class PromptChunk:
+ """
+    This object contains token ids, weights (multipliers such as the 1.4 in "(word:1.4)") and textual inversion
+    embedding info for a chunk of prompt. If a prompt is short, it is represented by one PromptChunk; otherwise,
+    multiple are necessary. Each PromptChunk contains exactly 77 tokens: one start token, one end token, and
+    75 tokens from the prompt.
+ """
+
+ def __init__(self):
+ self.tokens = []
+ self.multipliers = []
+ self.fixes = []
+
+
+PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
+"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt
+chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
+are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
+
+
+class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
+ """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to
+ have unlimited prompt length and assign weights to tokens in prompt.
+ """
+
+ def __init__(self, wrapped, hijack):
+ super().__init__()
+
+ self.wrapped = wrapped
+ """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
+ depending on model."""
+
+ self.hijack: sd_hijack.StableDiffusionModelHijack = hijack
+ self.chunk_length = 75
+
+ self.is_trainable = getattr(wrapped, 'is_trainable', False)
+ self.input_key = getattr(wrapped, 'input_key', 'txt')
+ self.legacy_ucg_val = None
+
+ def empty_chunk(self):
+ """creates an empty PromptChunk and returns it"""
+
+ chunk = PromptChunk()
+ chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+ chunk.multipliers = [1.0] * (self.chunk_length + 2)
+ return chunk
+
+ def get_target_prompt_token_count(self, token_count):
+ """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
+
+ return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
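+        # Worked examples: 0 tokens -> 75, 75 -> 75, 76 -> 150, 150 -> 150.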
+
+ def tokenize(self, texts):
+ """Converts a batch of texts into a batch of token ids"""
+
+ raise NotImplementedError
+
+ def encode_with_transformers(self, tokens):
+ """
+        Converts a batch of token ids (in python lists) into a single tensor with the numeric representation of those tokens.
+        All python lists with tokens are assumed to have the same length, usually 77.
+        If the input is a list with B elements and each element has T tokens, the expected output shape is (B, T, C), where C
+        depends on the model - it can be 768 or 1024.
+ Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None).
+ """
+
+ raise NotImplementedError
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through
+ transformers. nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned."""
+
+ raise NotImplementedError
+
+ def tokenize_line(self, line):
+ """
+        This transforms a single prompt into a list of PromptChunk objects - as many as needed to
+ represent the prompt.
+ Returns the list and the total number of tokens in the prompt.
+ """
+
+ if opts.enable_emphasis:
+ parsed = prompt_parser.parse_prompt_attention(line)
+ else:
+ parsed = [[line, 1.0]]
+
+ tokenized = self.tokenize([text for text, _ in parsed])
+
+ chunks = []
+ chunk = PromptChunk()
+ token_count = 0
+ last_comma = -1
+
+ def next_chunk(is_last=False):
+ """puts current chunk into the list of results and produces the next one - empty;
+ if is_last is true, tokens tokens at the end won't add to token_count"""
+ nonlocal token_count
+ nonlocal last_comma
+ nonlocal chunk
+
+ if is_last:
+ token_count += len(chunk.tokens)
+ else:
+ token_count += self.chunk_length
+
+ to_add = self.chunk_length - len(chunk.tokens)
+ if to_add > 0:
+ chunk.tokens += [self.id_end] * to_add
+ chunk.multipliers += [1.0] * to_add
+
+ chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+ chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+ last_comma = -1
+ chunks.append(chunk)
+ chunk = PromptChunk()
+
+ for tokens, (text, weight) in zip(tokenized, parsed):
+ if text == 'BREAK' and weight == -1:
+ next_chunk()
+ continue
+
+ position = 0
+ while position < len(tokens):
+ token = tokens[position]
+
+ if token == self.comma_token:
+ last_comma = len(chunk.tokens)
+
+                # this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
+                # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next one.
+ elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
+ break_location = last_comma + 1
+
+ reloc_tokens = chunk.tokens[break_location:]
+ reloc_mults = chunk.multipliers[break_location:]
+
+ chunk.tokens = chunk.tokens[:break_location]
+ chunk.multipliers = chunk.multipliers[:break_location]
+
+ next_chunk()
+ chunk.tokens = reloc_tokens
+ chunk.multipliers = reloc_mults
+
+ if len(chunk.tokens) == self.chunk_length:
+ next_chunk()
+
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position)
+ if embedding is None:
+ chunk.tokens.append(token)
+ chunk.multipliers.append(weight)
+ position += 1
+ continue
+
+ emb_len = int(embedding.vectors)
+ if len(chunk.tokens) + emb_len > self.chunk_length:
+ next_chunk()
+
+ chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding))
+
+ chunk.tokens += [0] * emb_len
+ chunk.multipliers += [weight] * emb_len
+ position += embedding_length_in_tokens
+
+ if chunk.tokens or not chunks:
+ next_chunk(is_last=True)
+
+ return chunks, token_count
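+        # For example, a 100-token prompt yields two PromptChunks: the first carries
+        # 75 prompt tokens (fewer if comma backtracking moved its tail onward), the
+        # second carries the remainder padded with id_end; both are then wrapped in
+        # id_start/id_end for 77 tokens each.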
+
+ def process_texts(self, texts):
+ """
+        Accepts a list of texts and calls tokenize_line() on each, with caching. Returns the list of results and the maximum
+        length, in tokens, of all texts.
+ """
+
+ token_count = 0
+
+ cache = {}
+ batch_chunks = []
+ for line in texts:
+ if line in cache:
+ chunks = cache[line]
+ else:
+ chunks, current_token_count = self.tokenize_line(line)
+ token_count = max(current_token_count, token_count)
+
+ cache[line] = chunks
+
+ batch_chunks.append(chunks)
+
+ return batch_chunks, token_count
+
+ def forward(self, texts):
+ """
+        Accepts an array of texts; passes them through the transformers network to create a tensor with the numerical representation of those texts.
+        Returns a tensor with shape (B, T, C), where B is the length of the array; T is the length, in tokens, of the texts (including padding) - T will
+        be a multiple of 77; and C is the dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280.
+        An example shape returned by this function can be: (2, 77, 768).
+        For SDXL, instead of returning the one tensor above, it returns a tuple of two: the second with shape (B, 1280) holding pooled values.
+        Webui usually sends just one text at a time through this function - the only time texts is an array with more than one element
+        is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
+ """
+
+ if opts.use_old_emphasis_implementation:
+ import modules.sd_hijack_clip_old
+ return modules.sd_hijack_clip_old.forward_old(self, texts)
+
+ batch_chunks, token_count = self.process_texts(texts)
+
+ used_embeddings = {}
+ chunk_count = max([len(x) for x in batch_chunks])
+
+ zs = []
+ for i in range(chunk_count):
+ batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks]
+
+ tokens = [x.tokens for x in batch_chunk]
+ multipliers = [x.multipliers for x in batch_chunk]
+ self.hijack.fixes = [x.fixes for x in batch_chunk]
+
+ for fixes in self.hijack.fixes:
+ for _position, embedding in fixes:
+ used_embeddings[embedding.name] = embedding
+
+ z = self.process_tokens(tokens, multipliers)
+ zs.append(z)
+
+ if opts.textual_inversion_add_hashes_to_infotext and used_embeddings:
+ hashes = []
+ for name, embedding in used_embeddings.items():
+ shorthash = embedding.shorthash
+ if not shorthash:
+ continue
+
+ name = name.replace(":", "").replace(",", "")
+ hashes.append(f"{name}: {shorthash}")
+
+ if hashes:
+ if self.hijack.extra_generation_params.get("TI hashes"):
+ hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
+ self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
+
+ if getattr(self.wrapped, 'return_pooled', False):
+ return torch.hstack(zs), zs[0].pooled
+ else:
+ return torch.hstack(zs)
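+        # Shape example for SD1: two chunks for one prompt give zs entries of shape
+        # (1, 77, 768) each, which torch.hstack joins along dim 1 into (1, 154, 768).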
+
+ def process_tokens(self, remade_batch_tokens, batch_multipliers):
+ """
+        Sends one prompt chunk to be encoded by the transformers neural network.
+        remade_batch_tokens is a batch of tokens - a list where every element is a list of tokens; usually
+        there are exactly 77 tokens in each list. batch_multipliers is the same but for multipliers instead of tokens.
+        Multipliers are used to give more or less weight to the outputs of the transformers network. Each multiplier
+        corresponds to one token.
+ """
+ tokens = torch.asarray(remade_batch_tokens).to(devices.device)
+
+ # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
+ if self.id_end != self.id_pad:
+ for batch_pos in range(len(remade_batch_tokens)):
+ index = remade_batch_tokens[batch_pos].index(self.id_end)
+ tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad
+
+ z = self.encode_with_transformers(tokens)
+
+ pooled = getattr(z, 'pooled', None)
+
+ # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
+ batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
+ original_mean = z.mean()
+ z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+ new_mean = z.mean()
+ z = z * (original_mean / new_mean)
+
+ if pooled is not None:
+ z.pooled = pooled
+
+ return z
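+        # Toy numbers for the mean restoration above: if z had mean 0.10 and the
+        # multipliers scaled it to mean 0.15, z is multiplied by 0.10/0.15, so the
+        # overall mean returns to 0.10 while relative emphasis between tokens stays.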
+
+
+class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+ self.tokenizer = wrapped.tokenizer
+
+ vocab = self.tokenizer.get_vocab()
+
+ self.comma_token = vocab.get(',', None)
+
+ self.token_mults = {}
+ tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k]
+ for text, ident in tokens_with_parens:
+ mult = 1.0
+ for c in text:
+ if c == '[':
+ mult /= 1.1
+ if c == ']':
+ mult *= 1.1
+ if c == '(':
+ mult *= 1.1
+ if c == ')':
+ mult /= 1.1
+
+ if mult != 1.0:
+ self.token_mults[ident] = mult
+
+ self.id_start = self.wrapped.tokenizer.bos_token_id
+ self.id_end = self.wrapped.tokenizer.eos_token_id
+ self.id_pad = self.id_end
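+        # e.g. a vocab entry "((" gets multiplier 1.1 * 1.1 = 1.21 and "]" gets 1/1.1;
+        # these per-token multipliers are only consumed by the old emphasis
+        # implementation in sd_hijack_clip_old.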
+
+ def tokenize(self, texts):
+ tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
+
+ return tokenized
+
+ def encode_with_transformers(self, tokens):
+ outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
+
+ if opts.CLIP_stop_at_last_layers > 1:
+ z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
+ z = self.wrapped.transformer.text_model.final_layer_norm(z)
+ else:
+ z = outputs.last_hidden_state
+
+ return z
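+        # e.g. with opts.CLIP_stop_at_last_layers == 2 ("clip skip 2"), the output of
+        # the second-to-last transformer layer is taken and re-normalized with the
+        # final layer norm instead of using last_hidden_state.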
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ embedding_layer = self.wrapped.transformer.text_model.embeddings
+ ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
+ embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0)
+
+ return embedded
+
+
+class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+
+ def encode_with_transformers(self, tokens):
+ outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden")
+
+ if self.wrapped.layer == "last":
+ z = outputs.last_hidden_state
+ else:
+ z = outputs.hidden_states[self.wrapped.layer_idx]
+
+ return z
diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bd207671c5517de3d7f809d71c16904a2b0ca34
--- /dev/null
+++ b/modules/sd_hijack_clip_old.py
@@ -0,0 +1,82 @@
+from modules import sd_hijack_clip
+from modules import shared
+
+
+def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
+ id_start = self.id_start
+ id_end = self.id_end
+ maxlen = self.wrapped.max_length # you get to stay at 77
+ used_custom_terms = []
+ remade_batch_tokens = []
+ hijack_comments = []
+ hijack_fixes = []
+ token_count = 0
+
+ cache = {}
+ batch_tokens = self.tokenize(texts)
+ batch_multipliers = []
+ for tokens in batch_tokens:
+ tuple_tokens = tuple(tokens)
+
+ if tuple_tokens in cache:
+ remade_tokens, fixes, multipliers = cache[tuple_tokens]
+ else:
+ fixes = []
+ remade_tokens = []
+ multipliers = []
+ mult = 1.0
+
+ i = 0
+ while i < len(tokens):
+ token = tokens[i]
+
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+
+ mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None
+ if mult_change is not None:
+ mult *= mult_change
+ i += 1
+ elif embedding is None:
+ remade_tokens.append(token)
+ multipliers.append(mult)
+ i += 1
+ else:
+ emb_len = int(embedding.vec.shape[0])
+ fixes.append((len(remade_tokens), embedding))
+ remade_tokens += [0] * emb_len
+ multipliers += [mult] * emb_len
+ used_custom_terms.append((embedding.name, embedding.checksum()))
+ i += embedding_length_in_tokens
+
+ if len(remade_tokens) > maxlen - 2:
+ vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
+ ovf = remade_tokens[maxlen - 2:]
+ overflowing_words = [vocab.get(int(x), "") for x in ovf]
+ overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
+ hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
+
+ token_count = len(remade_tokens)
+ remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
+ remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
+ cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
+
+ multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
+ multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
+
+ remade_batch_tokens.append(remade_tokens)
+ hijack_fixes.append(fixes)
+ batch_multipliers.append(multipliers)
+ return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
+
+
+def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
+ batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts)
+
+ self.hijack.comments += hijack_comments
+
+ if used_custom_terms:
+ embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms)
+ self.hijack.comments.append(f"Used embeddings: {embedding_names}")
+
+ self.hijack.fixes = hijack_fixes
+ return self.process_tokens(remade_batch_tokens, batch_multipliers)
diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fe6b6ffec64d78a615e001398f3272decf348a5
--- /dev/null
+++ b/modules/sd_hijack_ip2p.py
@@ -0,0 +1,10 @@
+import os.path
+
+
+def should_hijack_ip2p(checkpoint_info):
+ from modules import sd_models_config
+
+ ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
+ cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()
+
+ return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename
diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..703490ee4c21e56a2a208e6b67d0f89d0a897856
--- /dev/null
+++ b/modules/sd_hijack_open_clip.py
@@ -0,0 +1,71 @@
+import open_clip.tokenizer
+import torch
+
+from modules import sd_hijack_clip, devices
+from modules.shared import opts
+
+tokenizer = open_clip.tokenizer._tokenizer
+
+
+class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+
+        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
+        self.id_start = tokenizer.encoder["<start_of_text>"]
+        self.id_end = tokenizer.encoder["<end_of_text>"]
+ self.id_pad = 0
+
+ def tokenize(self, texts):
+ assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'
+
+ tokenized = [tokenizer.encode(text) for text in texts]
+
+ return tokenized
+
+ def encode_with_transformers(self, tokens):
+ # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers
+ z = self.wrapped.encode_with_transformer(tokens)
+
+ return z
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ ids = tokenizer.encode(init_text)
+ ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
+ embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
+
+ return embedded
+
+
+class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+
+        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
+        self.id_start = tokenizer.encoder["<start_of_text>"]
+        self.id_end = tokenizer.encoder["<end_of_text>"]
+ self.id_pad = 0
+
+ def tokenize(self, texts):
+ assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'
+
+ tokenized = [tokenizer.encode(text) for text in texts]
+
+ return tokenized
+
+ def encode_with_transformers(self, tokens):
+ d = self.wrapped.encode_with_transformer(tokens)
+ z = d[self.wrapped.layer]
+
+ pooled = d.get("pooled")
+ if pooled is not None:
+ z.pooled = pooled
+
+ return z
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ ids = tokenizer.encode(init_text)
+ ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
+ embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)
+
+ return embedded
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
new file mode 100644
index 0000000000000000000000000000000000000000..119a89dbdfa04de0ff6d2011e6280d0798f309b8
--- /dev/null
+++ b/modules/sd_hijack_unet.py
@@ -0,0 +1,85 @@
+import torch
+from packaging import version
+
+from modules import devices
+from modules.sd_hijack_utils import CondFunc
+
+
+class TorchHijackForUnet:
+ """
+    This is torch, but with a cat that resizes tensors to appropriate dimensions if they do not match;
+    this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64.
+ """
+
+ def __getattr__(self, item):
+ if item == 'cat':
+ return self.cat
+
+ if hasattr(torch, item):
+ return getattr(torch, item)
+
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+ def cat(self, tensors, *args, **kwargs):
+ if len(tensors) == 2:
+ a, b = tensors
+ if a.shape[-2:] != b.shape[-2:]:
+ a = torch.nn.functional.interpolate(a, b.shape[-2:], mode="nearest")
+
+ tensors = (a, b)
+
+ return torch.cat(tensors, *args, **kwargs)
+
+
+th = TorchHijackForUnet()
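+# th is meant to stand in for the torch module inside UNet code, so that a call like
+# th.cat((a, b), dim=1) keeps working when a and b disagree in their last two
+# (spatial) dimensions: a is nearest-interpolated to b's spatial size first.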
+
+
+# Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
+def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
+
+ if isinstance(cond, dict):
+ for y in cond.keys():
+ if isinstance(cond[y], list):
+ cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ else:
+ cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
+
+ with devices.autocast():
+ return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
+
+
+class GELUHijack(torch.nn.GELU, torch.nn.Module):
+ def __init__(self, *args, **kwargs):
+ torch.nn.GELU.__init__(self, *args, **kwargs)
+ def forward(self, x):
+ if devices.unet_needs_upcast:
+ return torch.nn.GELU.forward(self.float(), x.float()).to(devices.dtype_unet)
+ else:
+ return torch.nn.GELU.forward(self, x)
+
+
+ddpm_edit_hijack = None
+def hijack_ddpm_edit():
+ global ddpm_edit_hijack
+ if not ddpm_edit_hijack:
+ CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
+ CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
+ ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
+
+
+unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
+CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
+if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
+ CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
+ CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
+ CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)
+
+first_stage_cond = lambda _, self, *args, **kwargs: devices.unet_needs_upcast and self.model.diffusion_model.dtype == torch.float16
+first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devices.dtype_vae), **kwargs)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
+
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
+CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
diff --git a/modules/sd_hijack_utils.py b/modules/sd_hijack_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..179ebc78e6a3d16e7a4318b8644fee690b447d12
--- /dev/null
+++ b/modules/sd_hijack_utils.py
@@ -0,0 +1,28 @@
+import importlib
+
+class CondFunc:
+ def __new__(cls, orig_func, sub_func, cond_func):
+ self = super(CondFunc, cls).__new__(cls)
+ if isinstance(orig_func, str):
+ func_path = orig_func.split('.')
+ for i in range(len(func_path)-1, -1, -1):
+ try:
+ resolved_obj = importlib.import_module('.'.join(func_path[:i]))
+ break
+ except ImportError:
+ pass
+ for attr_name in func_path[i:-1]:
+ resolved_obj = getattr(resolved_obj, attr_name)
+ orig_func = getattr(resolved_obj, func_path[-1])
+ setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
+ self.__init__(orig_func, sub_func, cond_func)
+ return lambda *args, **kwargs: self(*args, **kwargs)
+ def __init__(self, orig_func, sub_func, cond_func):
+ self.__orig_func = orig_func
+ self.__sub_func = sub_func
+ self.__cond_func = cond_func
+ def __call__(self, *args, **kwargs):
+ if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
+ return self.__sub_func(self.__orig_func, *args, **kwargs)
+ else:
+ return self.__orig_func(*args, **kwargs)
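+
+
+# Usage sketch: the call below would route every LatentDiffusion.apply_model call
+# through `sub` whenever `cond` returns True, falling back to the original otherwise;
+# both callbacks receive the original function as their first argument:
+#
+#     CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model',
+#              lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs),  # sub
+#              lambda orig_func, *args, **kwargs: True)                        # cond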
diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aa646aeae3f0af39122b02b7b081ff7a3daf8ee
--- /dev/null
+++ b/modules/sd_hijack_xlmr.py
@@ -0,0 +1,32 @@
+import torch
+
+from modules import sd_hijack_clip, devices
+
+
+class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+
+ self.id_start = wrapped.config.bos_token_id
+ self.id_end = wrapped.config.eos_token_id
+ self.id_pad = wrapped.config.pad_token_id
+
+ self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma
+
+ def encode_with_transformers(self, tokens):
+        # there's no CLIP Skip here because all hidden layers have a size of 1024 and the last one uses a
+        # trained layer to transform those 1024 into 768 for the unet; so you can't choose which transformer
+        # layer to work with - you have to use the last one
+
+ attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64)
+ features = self.wrapped(input_ids=tokens, attention_mask=attention_mask)
+ z = features['projection_state']
+
+ return z
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ embedding_layer = self.wrapped.roberta.embeddings
+ ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
+ embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
+
+ return embedded
diff --git a/modules/sd_models.py b/modules/sd_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..efc9f447517386dee4c7e7d0f4b10c7c6148e5fa
--- /dev/null
+++ b/modules/sd_models.py
@@ -0,0 +1,818 @@
+import collections
+import os.path
+import sys
+import gc
+import threading
+
+import torch
+import re
+import safetensors.torch
+from omegaconf import OmegaConf
+from os import mkdir
+from urllib import request
+import ldm.modules.midas as midas
+
+from ldm.util import instantiate_from_config
+
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack
+from modules.timer import Timer
+import tomesd
+
+model_dir = "Stable-diffusion"
+model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))
+
+checkpoints_list = {}
+checkpoint_aliases = {}
+checkpoint_alisases = checkpoint_aliases # for compatibility with old name
+checkpoints_loaded = collections.OrderedDict()
+
+
+def replace_key(d, key, new_key, value):
+ keys = list(d.keys())
+
+ d[new_key] = value
+
+ if key not in keys:
+ return d
+
+ index = keys.index(key)
+ keys[index] = new_key
+
+ new_d = {k: d[k] for k in keys}
+
+ d.clear()
+ d.update(new_d)
+ return d
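+# e.g. replace_key({'a': 1, 'b': 2}, 'a', 'c', 3) mutates the dict into
+# {'c': 3, 'b': 2}: the key is renamed (and its value replaced) while keeping
+# its position in insertion order.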
+
+
+class CheckpointInfo:
+ def __init__(self, filename):
+ self.filename = filename
+ abspath = os.path.abspath(filename)
+
+ self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
+
+ if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
+ name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
+ elif abspath.startswith(model_path):
+ name = abspath.replace(model_path, '')
+ else:
+ name = os.path.basename(filename)
+
+ if name.startswith("\\") or name.startswith("/"):
+ name = name[1:]
+
+ def read_metadata():
+ metadata = read_metadata_from_safetensors(filename)
+ self.modelspec_thumbnail = metadata.pop('modelspec.thumbnail', None)
+
+ return metadata
+
+ self.metadata = {}
+ if self.is_safetensors:
+ try:
+ self.metadata = cache.cached_data_for_file('safetensors-metadata', "checkpoint/" + name, filename, read_metadata)
+ except Exception as e:
+ errors.display(e, f"reading metadata for {filename}")
+
+ self.name = name
+ self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
+ self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
+ self.hash = model_hash(filename)
+
+ self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name}")
+ self.shorthash = self.sha256[0:10] if self.sha256 else None
+
+ self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
+ self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]'
+
+ self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]']
+ if self.shorthash:
+ self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
+
+ def register(self):
+ checkpoints_list[self.title] = self
+ for id in self.ids:
+ checkpoint_aliases[id] = self
+
+ def calculate_shorthash(self):
+ self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}")
+ if self.sha256 is None:
+ return
+
+ shorthash = self.sha256[0:10]
+ if self.shorthash == self.sha256[0:10]:
+ return self.shorthash
+
+ self.shorthash = shorthash
+
+ if self.shorthash not in self.ids:
+ self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
+
+ old_title = self.title
+ self.title = f'{self.name} [{self.shorthash}]'
+ self.short_title = f'{self.name_for_extra} [{self.shorthash}]'
+
+ replace_key(checkpoints_list, old_title, self.title, self)
+ self.register()
+
+ return self.shorthash
+
+
+try:
+ # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
+ from transformers import logging, CLIPModel # noqa: F401
+
+ logging.set_verbosity_error()
+except Exception:
+ pass
+
+
+def setup_model():
+ os.makedirs(model_path, exist_ok=True)
+
+ enable_midas_autodownload()
+
+
+def checkpoint_tiles(use_short=False):
+ return [x.short_title if use_short else x.title for x in checkpoints_list.values()]
+
+
+def list_models():
+ checkpoints_list.clear()
+ checkpoint_aliases.clear()
+
+ cmd_ckpt = shared.cmd_opts.ckpt
+ if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
+ model_url = None
+ else:
+ model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
+
+ model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
+
+ if os.path.exists(cmd_ckpt):
+ checkpoint_info = CheckpointInfo(cmd_ckpt)
+ checkpoint_info.register()
+
+ shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
+ elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
+ print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
+
+ for filename in model_list:
+ checkpoint_info = CheckpointInfo(filename)
+ checkpoint_info.register()
+
+
+re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
+
+
+def get_closet_checkpoint_match(search_string):
+ if not search_string:
+ return None
+
+ checkpoint_info = checkpoint_aliases.get(search_string, None)
+ if checkpoint_info is not None:
+ return checkpoint_info
+
+ found = sorted([info for info in checkpoints_list.values() if search_string in info.title], key=lambda x: len(x.title))
+ if found:
+ return found[0]
+
+ search_string_without_checksum = re.sub(re_strip_checksum, '', search_string)
+ found = sorted([info for info in checkpoints_list.values() if search_string_without_checksum in info.title], key=lambda x: len(x.title))
+ if found:
+ return found[0]
+
+ return None
+
+
+def model_hash(filename):
+ """old hash that only looks at a small part of the file and is prone to collisions"""
+
+ try:
+ with open(filename, "rb") as file:
+ import hashlib
+ m = hashlib.sha256()
+
+ file.seek(0x100000)
+ m.update(file.read(0x10000))
+ return m.hexdigest()[0:8]
+ except FileNotFoundError:
+ return 'NOFILE'
+
+
+def select_checkpoint():
+ """Raises `FileNotFoundError` if no checkpoints are found."""
+ model_checkpoint = shared.opts.sd_model_checkpoint
+
+ checkpoint_info = checkpoint_aliases.get(model_checkpoint, None)
+ if checkpoint_info is not None:
+ return checkpoint_info
+
+ if len(checkpoints_list) == 0:
+ error_message = "No checkpoints found. When searching for checkpoints, looked at:"
+ if shared.cmd_opts.ckpt is not None:
+ error_message += f"\n - file {os.path.abspath(shared.cmd_opts.ckpt)}"
+ error_message += f"\n - directory {model_path}"
+ if shared.cmd_opts.ckpt_dir is not None:
+ error_message += f"\n - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}"
+ error_message += "Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations."
+ raise FileNotFoundError(error_message)
+
+ checkpoint_info = next(iter(checkpoints_list.values()))
+ if model_checkpoint is not None:
+ print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
+
+ return checkpoint_info
+
+
+checkpoint_dict_replacements = {
+ 'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
+ 'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
+ 'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
+}
+
+
+def transform_checkpoint_dict_key(k):
+ for text, replacement in checkpoint_dict_replacements.items():
+ if k.startswith(text):
+ k = replacement + k[len(text):]
+
+ return k
+
+
+def get_state_dict_from_checkpoint(pl_sd):
+ pl_sd = pl_sd.pop("state_dict", pl_sd)
+ pl_sd.pop("state_dict", None)
+
+ sd = {}
+ for k, v in pl_sd.items():
+ new_key = transform_checkpoint_dict_key(k)
+
+ if new_key is not None:
+ sd[new_key] = v
+
+ pl_sd.clear()
+ pl_sd.update(sd)
+
+ return pl_sd
+
+
+def read_metadata_from_safetensors(filename):
+ import json
+
+ with open(filename, mode="rb") as file:
+ metadata_len = file.read(8)
+ metadata_len = int.from_bytes(metadata_len, "little")
+ json_start = file.read(2)
+
+ assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
+ json_data = json_start + file.read(metadata_len-2)
+ json_obj = json.loads(json_data)
+
+ res = {}
+ for k, v in json_obj.get("__metadata__", {}).items():
+ res[k] = v
+ if isinstance(v, str) and v[0:1] == '{':
+ try:
+ res[k] = json.loads(v)
+ except Exception:
+ pass
+
+ return res
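+# Format read above: a safetensors file begins with an 8-byte little-endian header
+# length N followed by N bytes of JSON; user metadata sits under the "__metadata__"
+# key, whose string values may themselves contain serialized JSON.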
+
+
+def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
+ _, extension = os.path.splitext(checkpoint_file)
+ if extension.lower() == ".safetensors":
+ device = map_location or shared.weight_load_location or devices.get_optimal_device_name()
+
+ if not shared.opts.disable_mmap_load_safetensors:
+ pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
+ else:
+ pl_sd = safetensors.torch.load(open(checkpoint_file, 'rb').read())
+ pl_sd = {k: v.to(device) for k, v in pl_sd.items()}
+ else:
+ pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
+
+ if print_global_state and "global_step" in pl_sd:
+ print(f"Global Step: {pl_sd['global_step']}")
+
+ sd = get_state_dict_from_checkpoint(pl_sd)
+ return sd
+
+
+def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
+ sd_model_hash = checkpoint_info.calculate_shorthash()
+ timer.record("calculate hash")
+
+ if checkpoint_info in checkpoints_loaded:
+ # use checkpoint cache
+ print(f"Loading weights [{sd_model_hash}] from cache")
+ return checkpoints_loaded[checkpoint_info]
+
+ print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
+ res = read_state_dict(checkpoint_info.filename)
+ timer.record("load weights from disk")
+
+ return res
+
+
+class SkipWritingToConfig:
+ """This context manager prevents load_model_weights from writing checkpoint name to the config when it loads weight."""
+
+ skip = False
+ previous = None
+
+ def __enter__(self):
+ self.previous = SkipWritingToConfig.skip
+ SkipWritingToConfig.skip = True
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ SkipWritingToConfig.skip = self.previous
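+# Usage sketch:
+#
+#     with SkipWritingToConfig():
+#         load_model_weights(model, info, state_dict, timer)  # opts left untouched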
+
+
+def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
+ sd_model_hash = checkpoint_info.calculate_shorthash()
+ timer.record("calculate hash")
+
+ if not SkipWritingToConfig.skip:
+ shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
+
+ if state_dict is None:
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
+
+ model.is_sdxl = hasattr(model, 'conditioner')
+ model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
+ model.is_sd1 = not model.is_sdxl and not model.is_sd2
+
+ if model.is_sdxl:
+ sd_models_xl.extend_sdxl(model)
+
+ model.load_state_dict(state_dict, strict=False)
+ timer.record("apply weights to model")
+
+ if shared.opts.sd_checkpoint_cache > 0:
+ # cache newly loaded model
+ checkpoints_loaded[checkpoint_info] = state_dict
+
+ del state_dict
+
+ if shared.cmd_opts.opt_channelslast:
+ model.to(memory_format=torch.channels_last)
+ timer.record("apply channels_last")
+
+ if shared.cmd_opts.no_half:
+ model.float()
+ devices.dtype_unet = torch.float32
+ timer.record("apply float()")
+ else:
+ vae = model.first_stage_model
+ depth_model = getattr(model, 'depth_model', None)
+
+ # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
+ if shared.cmd_opts.no_half_vae:
+ model.first_stage_model = None
+ # with --upcast-sampling, don't convert the depth model weights to float16
+ if shared.cmd_opts.upcast_sampling and depth_model:
+ model.depth_model = None
+
+ model.half()
+ model.first_stage_model = vae
+ if depth_model:
+ model.depth_model = depth_model
+
+ devices.dtype_unet = torch.float16
+ timer.record("apply half()")
+
+ devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
+
+ model.first_stage_model.to(devices.dtype_vae)
+ timer.record("apply dtype to VAE")
+
+ # clean up cache if limit is reached
+ while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+ checkpoints_loaded.popitem(last=False)
+
+ model.sd_model_hash = sd_model_hash
+ model.sd_model_checkpoint = checkpoint_info.filename
+ model.sd_checkpoint_info = checkpoint_info
+ shared.opts.data["sd_checkpoint_hash"] = checkpoint_info.sha256
+
+ if hasattr(model, 'logvar'):
+ model.logvar = model.logvar.to(devices.device) # fix for training
+
+ sd_vae.delete_base_vae()
+ sd_vae.clear_loaded_vae()
+ vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename).tuple()
+ sd_vae.load_vae(model, vae_file, vae_source)
+ timer.record("load VAE")
+
+
+def enable_midas_autodownload():
+ """
+    Adds automatic downloading to the ldm.modules.midas.api.load_model function.
+
+    When the 512-depth-ema model, or another future model like it, is loaded,
+    it calls midas.api.load_model to load the associated midas depth model.
+    This function applies a wrapper that downloads the model to the correct
+    location automatically.
+ """
+
+ midas_path = os.path.join(paths.models_path, 'midas')
+
+ # stable-diffusion-stability-ai hard-codes the midas model path to
+ # a location that differs from where other scripts using this model look.
+ # HACK: Overriding the path here.
+ for k, v in midas.api.ISL_PATHS.items():
+ file_name = os.path.basename(v)
+ midas.api.ISL_PATHS[k] = os.path.join(midas_path, file_name)
+
+ midas_urls = {
+ "dpt_large": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
+ "dpt_hybrid": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt",
+ "midas_v21": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21-f6b98070.pt",
+ "midas_v21_small": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21_small-70d6b9c8.pt",
+ }
+
+ midas.api.load_model_inner = midas.api.load_model
+
+ def load_model_wrapper(model_type):
+ path = midas.api.ISL_PATHS[model_type]
+ if not os.path.exists(path):
+ if not os.path.exists(midas_path):
+ mkdir(midas_path)
+
+ print(f"Downloading midas model weights for {model_type} to {path}")
+ request.urlretrieve(midas_urls[model_type], path)
+ print(f"{model_type} downloaded")
+
+ return midas.api.load_model_inner(model_type)
+
+ midas.api.load_model = load_model_wrapper
+
+
+def repair_config(sd_config):
+
+ if not hasattr(sd_config.model.params, "use_ema"):
+ sd_config.model.params.use_ema = False
+
+ if hasattr(sd_config.model.params, 'unet_config'):
+ if shared.cmd_opts.no_half:
+ sd_config.model.params.unet_config.params.use_fp16 = False
+ elif shared.cmd_opts.upcast_sampling:
+ sd_config.model.params.unet_config.params.use_fp16 = True
+
+ if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
+ sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"
+
+ # For UnCLIP-L, override the hardcoded karlo directory
+ if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
+ karlo_path = os.path.join(paths.models_path, 'karlo')
+ sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)
+
+
+sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
+sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'
+sdxl_clip_weight = 'conditioner.embedders.1.model.ln_final.weight'
+sdxl_refiner_clip_weight = 'conditioner.embedders.0.model.ln_final.weight'
+
+
+class SdModelData:
+ def __init__(self):
+ self.sd_model = None
+ self.loaded_sd_models = []
+ self.was_loaded_at_least_once = False
+ self.lock = threading.Lock()
+
+ def get_sd_model(self):
+ if self.was_loaded_at_least_once:
+ return self.sd_model
+
+ if self.sd_model is None:
+ with self.lock:
+ if self.sd_model is not None or self.was_loaded_at_least_once:
+ return self.sd_model
+
+ try:
+ load_model()
+
+ except Exception as e:
+ errors.display(e, "loading stable diffusion model", full_traceback=True)
+ print("", file=sys.stderr)
+ print("Stable diffusion model failed to load", file=sys.stderr)
+ self.sd_model = None
+
+ return self.sd_model
+
+ def set_sd_model(self, v, already_loaded=False):
+ self.sd_model = v
+ if already_loaded:
+ sd_vae.base_vae = getattr(v, "base_vae", None)
+ sd_vae.loaded_vae_file = getattr(v, "loaded_vae_file", None)
+ sd_vae.checkpoint_info = v.sd_checkpoint_info
+
+ try:
+ self.loaded_sd_models.remove(v)
+ except ValueError:
+ pass
+
+ if v is not None:
+ self.loaded_sd_models.insert(0, v)
+
+
+model_data = SdModelData()
+
+
+def get_empty_cond(sd_model):
+
+ p = processing.StableDiffusionProcessingTxt2Img()
+ extra_networks.activate(p, {})
+
+ if hasattr(sd_model, 'conditioner'):
+ d = sd_model.get_learned_conditioning([""])
+ return d['crossattn']
+ else:
+ return sd_model.cond_stage_model([""])
+
+
+def send_model_to_cpu(m):
+ if m.lowvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ m.to(devices.cpu)
+
+ devices.torch_gc()
+
+
+def model_target_device(m):
+ if lowvram.is_needed(m):
+ return devices.cpu
+ else:
+ return devices.device
+
+
+def send_model_to_device(m):
+ lowvram.apply(m)
+
+ if not m.lowvram:
+ m.to(shared.device)
+
+
+def send_model_to_trash(m):
+ m.to(device="meta")
+ devices.torch_gc()
+
+
+def load_model(checkpoint_info=None, already_loaded_state_dict=None):
+ from modules import sd_hijack
+ checkpoint_info = checkpoint_info or select_checkpoint()
+
+ timer = Timer()
+
+ if model_data.sd_model:
+ send_model_to_trash(model_data.sd_model)
+ model_data.sd_model = None
+ devices.torch_gc()
+
+ timer.record("unload existing model")
+
+ if already_loaded_state_dict is not None:
+ state_dict = already_loaded_state_dict
+ else:
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
+
+ checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
+ clip_is_included_into_sd = any(x for x in [sd1_clip_weight, sd2_clip_weight, sdxl_clip_weight, sdxl_refiner_clip_weight] if x in state_dict)
+
+ timer.record("find config")
+
+ sd_config = OmegaConf.load(checkpoint_config)
+ repair_config(sd_config)
+
+ timer.record("load config")
+
+ print(f"Creating model from config: {checkpoint_config}")
+
+ sd_model = None
+ try:
+ with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
+
+ except Exception as e:
+ errors.display(e, "creating model quickly", full_traceback=True)
+
+ if sd_model is None:
+ print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
+
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
+
+ sd_model.used_config = checkpoint_config
+
+ timer.record("create model")
+
+ if shared.cmd_opts.no_half:
+ weight_dtype_conversion = None
+ else:
+ weight_dtype_conversion = {
+ 'first_stage_model': None,
+ '': torch.float16,
+ }
+
+ with sd_disable_initialization.LoadStateDictOnMeta(state_dict, device=model_target_device(sd_model), weight_dtype_conversion=weight_dtype_conversion):
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+ timer.record("load weights from state dict")
+
+ send_model_to_device(sd_model)
+ timer.record("move model to device")
+
+ sd_hijack.model_hijack.hijack(sd_model)
+
+ timer.record("hijack")
+
+ sd_model.eval()
+ model_data.set_sd_model(sd_model)
+ model_data.was_loaded_at_least_once = True
+
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
+
+ timer.record("load textual inversion embeddings")
+
+ script_callbacks.model_loaded_callback(sd_model)
+
+ timer.record("scripts callbacks")
+
+ with devices.autocast(), torch.no_grad():
+ sd_model.cond_stage_model_empty_prompt = get_empty_cond(sd_model)
+
+ timer.record("calculate empty prompt")
+
+ print(f"Model loaded in {timer.summary()}.")
+
+ return sd_model
+
+
+def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
+ """
+    Checks if the desired checkpoint from checkpoint_info is already loaded in model_data.loaded_sd_models.
+    If it is loaded, returns it (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary).
+    If not, returns a model that can be used to load weights from checkpoint_info's file.
+    If no such model exists, returns None.
+    Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
+ """
+
+ already_loaded = None
+ for i in reversed(range(len(model_data.loaded_sd_models))):
+ loaded_model = model_data.loaded_sd_models[i]
+ if loaded_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+ already_loaded = loaded_model
+ continue
+
+ if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0:
+ print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}")
+ model_data.loaded_sd_models.pop()
+ send_model_to_trash(loaded_model)
+ timer.record("send model to trash")
+
+ if shared.opts.sd_checkpoints_keep_in_cpu:
+ send_model_to_cpu(sd_model)
+ timer.record("send model to cpu")
+
+ if already_loaded is not None:
+ send_model_to_device(already_loaded)
+ timer.record("send model to device")
+
+ model_data.set_sd_model(already_loaded, already_loaded=True)
+
+ if not SkipWritingToConfig.skip:
+ shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title
+ shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256
+
+ print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
+ sd_vae.reload_vae_weights(already_loaded)
+ return model_data.sd_model
+ elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:
+ print(f"Loading model {checkpoint_info.title} ({len(model_data.loaded_sd_models) + 1} out of {shared.opts.sd_checkpoints_limit})")
+
+ model_data.sd_model = None
+ load_model(checkpoint_info)
+ return model_data.sd_model
+ elif len(model_data.loaded_sd_models) > 0:
+ sd_model = model_data.loaded_sd_models.pop()
+ model_data.sd_model = sd_model
+
+ sd_vae.base_vae = getattr(sd_model, "base_vae", None)
+ sd_vae.loaded_vae_file = getattr(sd_model, "loaded_vae_file", None)
+ sd_vae.checkpoint_info = sd_model.sd_checkpoint_info
+
+ print(f"Reusing loaded model {sd_model.sd_checkpoint_info.title} to load {checkpoint_info.title}")
+ return sd_model
+ else:
+ return None
+
+
+def reload_model_weights(sd_model=None, info=None):
+ checkpoint_info = info or select_checkpoint()
+
+ timer = Timer()
+
+ if not sd_model:
+ sd_model = model_data.sd_model
+
+ if sd_model is None: # previous model load failed
+ current_checkpoint_info = None
+ else:
+ current_checkpoint_info = sd_model.sd_checkpoint_info
+ if sd_model.sd_model_checkpoint == checkpoint_info.filename:
+ return sd_model
+
+ sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
+ if sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+ return sd_model
+
+ if sd_model is not None:
+ sd_unet.apply_unet("None")
+ send_model_to_cpu(sd_model)
+ sd_hijack.model_hijack.undo_hijack(sd_model)
+
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
+
+ checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
+
+ timer.record("find config")
+
+ if sd_model is None or checkpoint_config != sd_model.used_config:
+ if sd_model is not None:
+ send_model_to_trash(sd_model)
+
+ load_model(checkpoint_info, already_loaded_state_dict=state_dict)
+ return model_data.sd_model
+
+ try:
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+ except Exception:
+ print("Failed to load checkpoint, restoring previous")
+ load_model_weights(sd_model, current_checkpoint_info, None, timer)
+ raise
+ finally:
+ sd_hijack.model_hijack.hijack(sd_model)
+ timer.record("hijack")
+
+ script_callbacks.model_loaded_callback(sd_model)
+ timer.record("script callbacks")
+
+ if not sd_model.lowvram:
+ sd_model.to(devices.device)
+ timer.record("move model to device")
+
+ print(f"Weights loaded in {timer.summary()}.")
+
+ model_data.set_sd_model(sd_model)
+ sd_unet.apply_unet()
+
+ return sd_model
+
+
+def unload_model_weights(sd_model=None, info=None):
+ timer = Timer()
+
+ if model_data.sd_model:
+ model_data.sd_model.to(devices.cpu)
+ sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
+ model_data.sd_model = None
+ sd_model = None
+ gc.collect()
+ devices.torch_gc()
+
+ print(f"Unloaded weights {timer.summary()}.")
+
+ return sd_model
+
+
+def apply_token_merging(sd_model, token_merging_ratio):
+ """
+ Applies speed and memory optimizations from tomesd.
+ """
+
+ current_token_merging_ratio = getattr(sd_model, 'applied_token_merged_ratio', 0)
+
+ if current_token_merging_ratio == token_merging_ratio:
+ return
+
+ if current_token_merging_ratio > 0:
+ tomesd.remove_patch(sd_model)
+
+ if token_merging_ratio > 0:
+ tomesd.apply_patch(
+ sd_model,
+ ratio=token_merging_ratio,
+ use_rand=False, # can cause issues with some samplers
+ merge_attn=True,
+ merge_crossattn=False,
+ merge_mlp=False
+ )
+
+ sd_model.applied_token_merged_ratio = token_merging_ratio
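+
+
+# Usage sketch: apply_token_merging(shared.sd_model, 0.5) patches the loaded model
+# with tomesd at ratio 0.5; calling it again with 0 removes the patch. The applied
+# ratio is remembered on the model, so repeated calls with the same value are no-ops.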
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..941acab055b498d9953a9f6afe961e22b5be9906
--- /dev/null
+++ b/modules/sd_models_config.py
@@ -0,0 +1,124 @@
+import os
+
+import torch
+
+from modules import shared, paths, sd_disable_initialization, devices
+
+sd_configs_path = shared.sd_configs_path
+sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
+sd_xl_repo_configs_path = os.path.join(paths.paths['Stable Diffusion XL'], "configs", "inference")
+
+
+config_default = shared.sd_default_config
+config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
+config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
+config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
+config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
+config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
+config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
+config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
+config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
+config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
+config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
+
+
+def is_using_v_parameterization_for_sd2(state_dict):
+ """
+ Detects whether the unet in state_dict uses v-parameterization. Returns True if it does.
+ """
+
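+ # How the heuristic works: only the unet is instantiated and loaded from the
+ # state dict, then run once on a constant input at timestep 999. An
+ # eps-prediction model's output leaves the mean difference from the input
+ # near zero, while a v-prediction model at maximal noise predicts roughly the
+ # negated input, pushing the mean well below zero -- hence the `out < -1`
+ # check at the end.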
+ import ldm.modules.diffusionmodules.openaimodel
+
+ device = devices.cpu
+
+ with sd_disable_initialization.DisableInitialization():
+ unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
+ use_checkpoint=True,
+ use_fp16=False,
+ image_size=32,
+ in_channels=4,
+ out_channels=4,
+ model_channels=320,
+ attention_resolutions=[4, 2, 1],
+ num_res_blocks=2,
+ channel_mult=[1, 2, 4, 4],
+ num_head_channels=64,
+ use_spatial_transformer=True,
+ use_linear_in_transformer=True,
+ transformer_depth=1,
+ context_dim=1024,
+ legacy=False
+ )
+ unet.eval()
+
+ with torch.no_grad():
+ unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
+ unet.load_state_dict(unet_sd, strict=True)
+ unet.to(device=device, dtype=torch.float)
+
+ test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
+ x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5
+
+ out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().item()
+
+ return out < -1
+
+
+def guess_model_config_from_state_dict(sd, filename):
+ sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
+ diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+ sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
+
+ if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
+ return config_sdxl
+ if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
+ return config_sdxl_refiner
+ elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
+ return config_depth_model
+ elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
+ return config_unclip
+ elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
+ return config_unopenclip
+
+ if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
+ if diffusion_model_input.shape[1] == 9:
+ return config_sd2_inpainting
+ elif is_using_v_parameterization_for_sd2(sd):
+ return config_sd2v
+ else:
+ return config_sd2
+
+ if diffusion_model_input is not None:
+ if diffusion_model_input.shape[1] == 9:
+ return config_inpainting
+ if diffusion_model_input.shape[1] == 8:
+ return config_instruct_pix2pix
+
+ if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
+ return config_alt_diffusion
+
+ return config_default
+
+
+def find_checkpoint_config(state_dict, info):
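+ """Resolution order: a user-supplied .yaml config stored next to the checkpoint
+ file wins; otherwise the architecture is guessed from the state dict's keys."""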
+ if info is None:
+ return guess_model_config_from_state_dict(state_dict, "")
+
+ config = find_checkpoint_config_near_filename(info)
+ if config is not None:
+ return config
+
+ return guess_model_config_from_state_dict(state_dict, info.filename)
+
+
+def find_checkpoint_config_near_filename(info):
+ if info is None:
+ return None
+
+ config = f"{os.path.splitext(info.filename)[0]}.yaml"
+ if os.path.exists(config):
+ return config
+
+ return None
+
diff --git a/modules/sd_models_types.py b/modules/sd_models_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ff81d4c679d55340fe1f4349e23d38651db5fce
--- /dev/null
+++ b/modules/sd_models_types.py
@@ -0,0 +1,31 @@
+from ldm.models.diffusion.ddpm import LatentDiffusion
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+ from modules.sd_models import CheckpointInfo
+
+
+class WebuiSdModel(LatentDiffusion):
+ """This class is not actually instantinated, but its fields are created and fieeld by webui"""
+
+ lowvram: bool
+ """True if lowvram/medvram optimizations are enabled -- see modules.lowvram for more info"""
+
+ sd_model_hash: str
+ """short hash, 10 first characters of SHA1 hash of the model file; may be None if --no-hashing flag is used"""
+
+ sd_model_checkpoint: str
+ """path to the file on disk that model weights were obtained from"""
+
+ sd_checkpoint_info: 'CheckpointInfo'
+ """structure with additional information about the file with model's weights"""
+
+ is_sdxl: bool
+ """True if the model's architecture is SDXL"""
+
+ is_sd2: bool
+ """True if the model's architecture is SD 2.x"""
+
+ is_sd1: bool
+ """True if the model's architecture is SD 1.x"""
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
new file mode 100644
index 0000000000000000000000000000000000000000..576ff43d9cb5898ea546f6fb74b8759b0b72978d
--- /dev/null
+++ b/modules/sd_samplers.py
@@ -0,0 +1,59 @@
+from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared
+
+# imports for functions that previously were here and are used by other modules
+from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401
+
+all_samplers = [
+ *sd_samplers_kdiffusion.samplers_data_k_diffusion,
+ *sd_samplers_timesteps.samplers_data_timesteps,
+]
+all_samplers_map = {x.name: x for x in all_samplers}
+
+samplers = []
+samplers_for_img2img = []
+samplers_map = {}
+samplers_hidden = {}
+
+
+def find_sampler_config(name):
+ if name is not None:
+ config = all_samplers_map.get(name, None)
+ else:
+ config = all_samplers[0]
+
+ return config
+
+
+def create_sampler(name, model):
+ config = find_sampler_config(name)
+
+ assert config is not None, f'bad sampler name: {name}'
+
+ if model.is_sdxl and config.options.get("no_sdxl", False):
+ raise Exception(f"Sampler {config.name} is not supported for SDXL")
+
+ sampler = config.constructor(model)
+ sampler.config = config
+
+ return sampler
+
+
+def set_samplers():
+ global samplers, samplers_for_img2img, samplers_hidden
+
+ samplers_hidden = set(shared.opts.hide_samplers)
+ samplers = all_samplers
+ samplers_for_img2img = all_samplers
+
+ samplers_map.clear()
+ for sampler in all_samplers:
+ samplers_map[sampler.name.lower()] = sampler.name
+ for alias in sampler.aliases:
+ samplers_map[alias.lower()] = sampler.name
+
+
+def visible_sampler_names():
+ return [x.name for x in samplers if x.name not in samplers_hidden]
+
+
+set_samplers()
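+
+# Typical use (a sketch; assumes a loaded model): look a sampler up by name or
+# alias and construct it for the current model:
+#
+#     sampler = create_sampler('DDIM', shared.sd_model)
+#     # processing code then calls sampler.sample(...) / sampler.sample_img2img(...)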
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..94c807e37ad6f999bf1ac7a4295e22f9d4566cdf
--- /dev/null
+++ b/modules/sd_samplers_common.py
@@ -0,0 +1,337 @@
+import inspect
+from collections import namedtuple
+import numpy as np
+import torch
+from PIL import Image
+from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared, sd_models
+from modules.shared import opts, state
+import k_diffusion.sampling
+
+
+SamplerDataTuple = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
+
+
+class SamplerData(SamplerDataTuple):
+ def total_steps(self, steps):
+ if self.options.get("second_order", False):
+ steps = steps * 2
+
+ return steps
+
+
+def setup_img2img_steps(p, steps=None):
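+ # Worked example: with p.steps=20 and p.denoising_strength=0.75, the
+ # fixed-steps branch gives steps = int(20 / 0.75) = 26 and t_enc = 19; the
+ # default branch gives steps = 20 and t_enc = int(0.75 * 20) = 15.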
+ if opts.img2img_fix_steps or steps is not None:
+ requested_steps = (steps or p.steps)
+ steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
+ t_enc = requested_steps - 1
+ else:
+ steps = p.steps
+ t_enc = int(min(p.denoising_strength, 0.999) * steps)
+
+ return steps, t_enc
+
+
+approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
+
+
+def samples_to_images_tensor(sample, approximation=None, model=None):
+ """Transforms 4-channel latent space images into 3-channel RGB image tensors, with values in range [-1, 1]."""
+
+ if approximation is None or (shared.state.interrupted and opts.live_preview_fast_interrupt):
+ approximation = approximation_indexes.get(opts.show_progress_type, 0)
+
+ from modules import lowvram
+ if approximation == 0 and lowvram.is_enabled(shared.sd_model) and not shared.opts.live_preview_allow_lowvram_full:
+ approximation = 1
+
+ if approximation == 2:
+ x_sample = sd_vae_approx.cheap_approximation(sample)
+ elif approximation == 1:
+ x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
+ elif approximation == 3:
+ x_sample = sd_vae_taesd.decoder_model()(sample.to(devices.device, devices.dtype)).detach()
+ x_sample = x_sample * 2 - 1
+ else:
+ if model is None:
+ model = shared.sd_model
+ with devices.without_autocast(): # fixes an issue with unstable VAEs that are flaky even in fp32
+ x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
+
+ return x_sample
+
+
+def single_sample_to_image(sample, approximation=None):
+ x_sample = samples_to_images_tensor(sample.unsqueeze(0), approximation)[0] * 0.5 + 0.5
+
+ x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
+ x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+ x_sample = x_sample.astype(np.uint8)
+
+ return Image.fromarray(x_sample)
+
+
+def decode_first_stage(model, x):
+ x = x.to(devices.dtype_vae)
+ approx_index = approximation_indexes.get(opts.sd_vae_decode_method, 0)
+ return samples_to_images_tensor(x, approx_index, model)
+
+
+def sample_to_image(samples, index=0, approximation=None):
+ return single_sample_to_image(samples[index], approximation)
+
+
+def samples_to_image_grid(samples, approximation=None):
+ return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
+
+
+def images_tensor_to_samples(image, approximation=None, model=None):
+ '''Converts an image tensor with values in [0, 1] into a latent.'''
+ if approximation is None:
+ approximation = approximation_indexes.get(opts.sd_vae_encode_method, 0)
+
+ if approximation == 3:
+ image = image.to(devices.device, devices.dtype)
+ x_latent = sd_vae_taesd.encoder_model()(image)
+ else:
+ if model is None:
+ model = shared.sd_model
+ model.first_stage_model.to(devices.dtype_vae)
+
+ image = image.to(shared.device, dtype=devices.dtype_vae)
+ image = image * 2 - 1
+ if len(image) > 1:
+ x_latent = torch.stack([
+ model.get_first_stage_encoding(
+ model.encode_first_stage(torch.unsqueeze(img, 0))
+ )[0]
+ for img in image
+ ])
+ else:
+ x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))
+
+ return x_latent
+
+
+def store_latent(decoded):
+ state.current_latent = decoded
+
+ if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
+ if not shared.parallel_processing_allowed:
+ shared.state.assign_current_image(sample_to_image(decoded))
+
+
+def is_sampler_using_eta_noise_seed_delta(p):
+ """returns whether sampler from config will use eta noise seed delta for image creation"""
+
+ sampler_config = sd_samplers.find_sampler_config(p.sampler_name)
+
+ eta = p.eta
+
+ if eta is None and p.sampler is not None:
+ eta = p.sampler.eta
+
+ if eta is None and sampler_config is not None:
+ eta = 0 if sampler_config.options.get("default_eta_is_0", False) else 1.0
+
+ if eta == 0:
+ return False
+
+ return sampler_config.options.get("uses_ensd", False)
+
+
+class InterruptedException(BaseException):
+ pass
+
+
+def replace_torchsde_brownian():
+ import torchsde._brownian.brownian_interval
+
+ def torchsde_randn(size, dtype, device, seed):
+ return devices.randn_local(seed, size).to(device=device, dtype=dtype)
+
+ torchsde._brownian.brownian_interval._randn = torchsde_randn
+
+
+replace_torchsde_brownian()
+
+
+def apply_refiner(cfg_denoiser):
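+ """Switches shared.sd_model to the refiner checkpoint once sampling progresses
+ past p.refiner_switch_at; returns True if the switch was performed."""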
+ completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps
+ refiner_switch_at = cfg_denoiser.p.refiner_switch_at
+ refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info
+
+ if refiner_switch_at is not None and completed_ratio < refiner_switch_at:
+ return False
+
+ if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info:
+ return False
+
+ if getattr(cfg_denoiser.p, "enable_hr", False):
+ is_second_pass = cfg_denoiser.p.is_hr_pass
+
+ if opts.hires_fix_refiner_pass == "first pass" and is_second_pass:
+ return False
+
+ if opts.hires_fix_refiner_pass == "second pass" and not is_second_pass:
+ return False
+
+ if opts.hires_fix_refiner_pass != "second pass":
+ cfg_denoiser.p.extra_generation_params['Hires refiner'] = opts.hires_fix_refiner_pass
+
+ cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title
+ cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at
+
+ with sd_models.SkipWritingToConfig():
+ sd_models.reload_model_weights(info=refiner_checkpoint_info)
+
+ devices.torch_gc()
+ cfg_denoiser.p.setup_conds()
+ cfg_denoiser.update_inner_model()
+
+ return True
+
+
+class TorchHijack:
+ """This is here to replace torch.randn_like of k-diffusion.
+
+ k-diffusion has random_sampler argument for most samplers, but not for all, so
+ this is needed to properly replace every use of torch.randn_like.
+
+ We need to replace to make images generated in batches to be same as images generated individually."""
+
+ def __init__(self, p):
+ self.rng = p.rng
+
+ def __getattr__(self, item):
+ if item == 'randn_like':
+ return self.randn_like
+
+ if hasattr(torch, item):
+ return getattr(torch, item)
+
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+ def randn_like(self, x):
+ return self.rng.next()
+
+
+class Sampler:
+ def __init__(self, funcname):
+ self.funcname = funcname
+ self.func = funcname
+ self.extra_params = []
+ self.sampler_noises = None
+ self.stop_at = None
+ self.eta = None
+ self.config: SamplerData = None # set by the function calling the constructor
+ self.last_latent = None
+ self.s_min_uncond = None
+ self.s_churn = 0.0
+ self.s_tmin = 0.0
+ self.s_tmax = float('inf')
+ self.s_noise = 1.0
+
+ self.eta_option_field = 'eta_ancestral'
+ self.eta_infotext_field = 'Eta'
+ self.eta_default = 1.0
+
+ self.conditioning_key = shared.sd_model.model.conditioning_key
+
+ self.p = None
+ self.model_wrap_cfg = None
+ self.sampler_extra_args = None
+ self.options = {}
+
+ def callback_state(self, d):
+ step = d['i']
+
+ if self.stop_at is not None and step > self.stop_at:
+ raise InterruptedException
+
+ state.sampling_step = step
+ shared.total_tqdm.update()
+
+ def launch_sampling(self, steps, func):
+ self.model_wrap_cfg.steps = steps
+ self.model_wrap_cfg.total_steps = self.config.total_steps(steps)
+ state.sampling_steps = steps
+ state.sampling_step = 0
+
+ try:
+ return func()
+ except RecursionError:
+ print(
+ 'Encountered RecursionError during sampling, returning last latent. '
+ 'rho > 5 with a polyexponential scheduler may cause this error. '
+ 'Try using a smaller rho value instead.'
+ )
+ return self.last_latent
+ except InterruptedException:
+ return self.last_latent
+
+ def number_of_needed_noises(self, p):
+ return p.steps
+
+ def initialize(self, p) -> dict:
+ self.p = p
+ self.model_wrap_cfg.p = p
+ self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
+ self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
+ self.model_wrap_cfg.step = 0
+ self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+ self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0)
+ self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
+
+ k_diffusion.sampling.torch = TorchHijack(p)
+
+ extra_params_kwargs = {}
+ for param_name in self.extra_params:
+ if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+ extra_params_kwargs[param_name] = getattr(p, param_name)
+
+ if 'eta' in inspect.signature(self.func).parameters:
+ if self.eta != self.eta_default:
+ p.extra_generation_params[self.eta_infotext_field] = self.eta
+
+ extra_params_kwargs['eta'] = self.eta
+
+ if len(self.extra_params) > 0:
+ s_churn = getattr(opts, 's_churn', p.s_churn)
+ s_tmin = getattr(opts, 's_tmin', p.s_tmin)
+ s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf
+ s_noise = getattr(opts, 's_noise', p.s_noise)
+
+ if 's_churn' in extra_params_kwargs and s_churn != self.s_churn:
+ extra_params_kwargs['s_churn'] = s_churn
+ p.s_churn = s_churn
+ p.extra_generation_params['Sigma churn'] = s_churn
+ if 's_tmin' in extra_params_kwargs and s_tmin != self.s_tmin:
+ extra_params_kwargs['s_tmin'] = s_tmin
+ p.s_tmin = s_tmin
+ p.extra_generation_params['Sigma tmin'] = s_tmin
+ if 's_tmax' in extra_params_kwargs and s_tmax != self.s_tmax:
+ extra_params_kwargs['s_tmax'] = s_tmax
+ p.s_tmax = s_tmax
+ p.extra_generation_params['Sigma tmax'] = s_tmax
+ if 's_noise' in extra_params_kwargs and s_noise != self.s_noise:
+ extra_params_kwargs['s_noise'] = s_noise
+ p.s_noise = s_noise
+ p.extra_generation_params['Sigma noise'] = s_noise
+
+ return extra_params_kwargs
+
+ def create_noise_sampler(self, x, sigmas, p):
+ """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
+ if shared.opts.no_dpmpp_sde_batch_determinism:
+ return None
+
+ from k_diffusion.sampling import BrownianTreeNoiseSampler
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+ current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
+ return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
+
+ def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ raise NotImplementedError()
+
+ def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ raise NotImplementedError()
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py
new file mode 100644
index 0000000000000000000000000000000000000000..24491ca1616d3d563e47e8d5dc0b7aa16152ef5f
--- /dev/null
+++ b/modules/sd_samplers_extra.py
@@ -0,0 +1,74 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+
+
+@torch.no_grad()
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None):
+ """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)
+ Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}
+ If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list
+ """
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ step_id = 0
+ from k_diffusion.sampling import to_d, get_sigmas_karras
+
+ def heun_step(x, old_sigma, new_sigma, second_order=True):
+ nonlocal step_id
+ denoised = model(x, old_sigma * s_in, **extra_args)
+ d = to_d(x, old_sigma, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+ dt = new_sigma - old_sigma
+ if new_sigma == 0 or not second_order:
+ # Euler method
+ x = x + d * dt
+ else:
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+ d_2 = to_d(x_2, new_sigma, denoised_2)
+ d_prime = (d + d_2) / 2
+ x = x + d_prime * dt
+ step_id += 1
+ return x
+
+ steps = sigmas.shape[0] - 1
+ if restart_list is None:
+ if steps >= 20:
+ restart_steps = 9
+ restart_times = 1
+ if steps >= 36:
+ restart_steps = steps // 4
+ restart_times = 2
+ sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device)
+ restart_list = {0.1: [restart_steps + 1, restart_times, 2]}
+ else:
+ restart_list = {}
+
+ restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()}
+
+ step_list = []
+ for i in range(len(sigmas) - 1):
+ step_list.append((sigmas[i], sigmas[i + 1]))
+ if i + 1 in restart_list:
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
+ min_idx = i + 1
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+ if max_idx < min_idx:
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
+ while restart_times > 0:
+ restart_times -= 1
+ step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
+
+ last_sigma = None
+ for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
+ if last_sigma is None:
+ last_sigma = old_sigma
+ elif last_sigma < old_sigma:
+ x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
+ x = heun_step(x, old_sigma, new_sigma)
+ last_sigma = new_sigma
+
+ return x
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
new file mode 100644
index 0000000000000000000000000000000000000000..26e18d19da077cd724438b6969651dc3bde48e55
--- /dev/null
+++ b/modules/sd_samplers_timesteps.py
@@ -0,0 +1,167 @@
+import torch
+import inspect
+import sys
+from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl
+from modules.sd_samplers_cfg_denoiser import CFGDenoiser
+from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback
+
+from modules.shared import opts
+import modules.shared as shared
+
+samplers_timesteps = [
+ ('DDIM', sd_samplers_timesteps_impl.ddim, ['ddim'], {}),
+ ('PLMS', sd_samplers_timesteps_impl.plms, ['plms'], {}),
+ ('UniPC', sd_samplers_timesteps_impl.unipc, ['unipc'], {}),
+]
+
+
+samplers_data_timesteps = [
+ sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: CompVisSampler(funcname, model), aliases, options)
+ for label, funcname, aliases, options in samplers_timesteps
+]
+
+
+class CompVisTimestepsDenoiser(torch.nn.Module):
+ def __init__(self, model, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.inner_model = model
+
+ def forward(self, input, timesteps, **kwargs):
+ return self.inner_model.apply_model(input, timesteps, **kwargs)
+
+
+class CompVisTimestepsVDenoiser(torch.nn.Module):
+ def __init__(self, model, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.inner_model = model
+
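+ # Recover an eps-prediction from a v-prediction:
+ #     eps = sqrt(alphabar_t) * v + sqrt(1 - alphabar_t) * x_t
+ # (the v-parameterization of Salimans & Ho, "Progressive Distillation", 2022)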
+ def predict_eps_from_z_and_v(self, x_t, t, v):
+ return self.inner_model.sqrt_alphas_cumprod[t.to(torch.int), None, None, None] * v + self.inner_model.sqrt_one_minus_alphas_cumprod[t.to(torch.int), None, None, None] * x_t
+
+ def forward(self, input, timesteps, **kwargs):
+ model_output = self.inner_model.apply_model(input, timesteps, **kwargs)
+ e_t = self.predict_eps_from_z_and_v(input, timesteps, model_output)
+ return e_t
+
+
+class CFGDenoiserTimesteps(CFGDenoiser):
+
+ def __init__(self, sampler):
+ super().__init__(sampler)
+
+ self.alphas = shared.sd_model.alphas_cumprod
+ self.mask_before_denoising = True
+
+ def get_pred_x0(self, x_in, x_out, sigma):
+ ts = sigma.to(dtype=int)
+
+ a_t = self.alphas[ts][:, None, None, None]
+ sqrt_one_minus_at = (1 - a_t).sqrt()
+
+ pred_x0 = (x_in - sqrt_one_minus_at * x_out) / a_t.sqrt()
+
+ return pred_x0
+
+ @property
+ def inner_model(self):
+ if self.model_wrap is None:
+ denoiser = CompVisTimestepsVDenoiser if shared.sd_model.parameterization == "v" else CompVisTimestepsDenoiser
+ self.model_wrap = denoiser(shared.sd_model)
+
+ return self.model_wrap
+
+
+class CompVisSampler(sd_samplers_common.Sampler):
+ def __init__(self, funcname, sd_model):
+ super().__init__(funcname)
+
+ self.eta_option_field = 'eta_ddim'
+ self.eta_infotext_field = 'Eta DDIM'
+ self.eta_default = 0.0
+
+ self.model_wrap_cfg = CFGDenoiserTimesteps(self)
+
+ def get_timesteps(self, p, steps):
+ discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+ if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+ discard_next_to_last_sigma = True
+ p.extra_generation_params["Discard penultimate sigma"] = True
+
+ steps += 1 if discard_next_to_last_sigma else 0
+
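+ # e.g. steps=20 -> range(0, 1000, 50) = [0, 50, ..., 950]; adding 1 and
+ # clipping to [0, 999] yields timesteps [1, 51, ..., 951]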
+ timesteps = torch.clip(torch.asarray(list(range(0, 1000, 1000 // steps)), device=devices.device) + 1, 0, 999)
+
+ return timesteps
+
+ def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+
+ timesteps = self.get_timesteps(p, steps)
+ timesteps_sched = timesteps[:t_enc]
+
+ alphas_cumprod = shared.sd_model.alphas_cumprod
+ sqrt_alpha_cumprod = torch.sqrt(alphas_cumprod[timesteps[t_enc]])
+ sqrt_one_minus_alpha_cumprod = torch.sqrt(1 - alphas_cumprod[timesteps[t_enc]])
+
+ xi = x * sqrt_alpha_cumprod + noise * sqrt_one_minus_alpha_cumprod
+
+ if opts.img2img_extra_noise > 0:
+ p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+ extra_noise_params = ExtraNoiseParams(noise, x, xi)
+ extra_noise_callback(extra_noise_params)
+ noise = extra_noise_params.noise
+ xi += noise * opts.img2img_extra_noise * sqrt_alpha_cumprod
+
+ extra_params_kwargs = self.initialize(p)
+ parameters = inspect.signature(self.func).parameters
+
+ if 'timesteps' in parameters:
+ extra_params_kwargs['timesteps'] = timesteps_sched
+ if 'is_img2img' in parameters:
+ extra_params_kwargs['is_img2img'] = True
+
+ self.model_wrap_cfg.init_latent = x
+ self.last_latent = x
+ self.sampler_extra_args = {
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
+ 'cond_scale': p.cfg_scale,
+ 's_min_uncond': self.s_min_uncond
+ }
+
+ samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+ if self.model_wrap_cfg.padded_cond_uncond:
+ p.extra_generation_params["Pad conds"] = True
+
+ return samples
+
+ def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ steps = steps or p.steps
+ timesteps = self.get_timesteps(p, steps)
+
+ extra_params_kwargs = self.initialize(p)
+ parameters = inspect.signature(self.func).parameters
+
+ if 'timesteps' in parameters:
+ extra_params_kwargs['timesteps'] = timesteps
+
+ self.last_latent = x
+ self.sampler_extra_args = {
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
+ 'cond_scale': p.cfg_scale,
+ 's_min_uncond': self.s_min_uncond
+ }
+ samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+ if self.model_wrap_cfg.padded_cond_uncond:
+ p.extra_generation_params["Pad conds"] = True
+
+ return samples
+
+
+sys.modules['modules.sd_samplers_compvis'] = sys.modules[__name__]
+VanillaStableDiffusionSampler = CompVisSampler # temporary compatibility with older extensions
diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b33232b4eb8e1be007629ed26a861039e5d1b86
--- /dev/null
+++ b/modules/sd_samplers_timesteps_impl.py
@@ -0,0 +1,137 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+import numpy as np
+
+from modules import shared
+from modules.models.diffusion.uni_pc import uni_pc
+
+
+@torch.no_grad()
+def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+ alphas = alphas_cumprod[timesteps]
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32)
+ sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
+ sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))
+
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones((x.shape[0]))
+ s_x = x.new_ones((x.shape[0], 1, 1, 1))
+ for i in tqdm.trange(len(timesteps) - 1, disable=disable):
+ index = len(timesteps) - 1 - i
+
+ e_t = model(x, timesteps[index].item() * s_in, **extra_args)
+
+ a_t = alphas[index].item() * s_x
+ a_prev = alphas_prev[index].item() * s_x
+ sigma_t = sigmas[index].item() * s_x
+ sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_x
+
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+ dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
+ noise = sigma_t * k_diffusion.sampling.torch.randn_like(x)
+ x = a_prev.sqrt() * pred_x0 + dir_xt + noise
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0})
+
+ return x
+
+
+@torch.no_grad()
+def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+ alphas = alphas_cumprod[timesteps]
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32)
+ sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
+
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ s_x = x.new_ones((x.shape[0], 1, 1, 1))
+ old_eps = []
+
+ def get_x_prev_and_pred_x0(e_t, index):
+ # select parameters corresponding to the currently considered timestep
+ a_t = alphas[index].item() * s_x
+ a_prev = alphas_prev[index].item() * s_x
+ sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_x
+
+ # current prediction for x_0
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+
+ # direction pointing to x_t
+ dir_xt = (1. - a_prev).sqrt() * e_t
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt
+ return x_prev, pred_x0
+
+ for i in tqdm.trange(len(timesteps) - 1, disable=disable):
+ index = len(timesteps) - 1 - i
+ ts = timesteps[index].item() * s_in
+ t_next = timesteps[max(index - 1, 0)].item() * s_in
+
+ e_t = model(x, ts, **extra_args)
+
+ if len(old_eps) == 0:
+ # Pseudo Improved Euler (2nd order)
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
+ e_t_next = model(x_prev, t_next, **extra_args)
+ e_t_prime = (e_t + e_t_next) / 2
+ elif len(old_eps) == 1:
+ # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (3 * e_t - old_eps[-1]) / 2
+ elif len(old_eps) == 2:
+ # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+ else:
+ # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
+
+ old_eps.append(e_t)
+ if len(old_eps) >= 4:
+ old_eps.pop(0)
+
+ x = x_prev
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0})
+
+ return x
+
+
+class UniPCCFG(uni_pc.UniPC):
+ def __init__(self, cfg_model, extra_args, callback, *args, **kwargs):
+ super().__init__(None, *args, **kwargs)
+
+ def after_update(x, model_x):
+ callback({'x': x, 'i': self.index, 'sigma': 0, 'sigma_hat': 0, 'denoised': model_x})
+ self.index += 1
+
+ self.cfg_model = cfg_model
+ self.extra_args = extra_args
+ self.callback = callback
+ self.index = 0
+ self.after_update = after_update
+
+ def get_model_input_time(self, t_continuous):
+ return (t_continuous - 1. / self.noise_schedule.total_N) * 1000.
+
+ def model(self, x, t):
+ t_input = self.get_model_input_time(t)
+
+ res = self.cfg_model(x, t_input, **self.extra_args)
+
+ return res
+
+
+def unipc(model, x, timesteps, extra_args=None, callback=None, disable=None, is_img2img=False):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+
+ ns = uni_pc.NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+ t_start = timesteps[-1] / 1000 + 1 / 1000 if is_img2img else None # this is likely off by a bit - if someone wants to fix it, please do
+ unipc_sampler = UniPCCFG(model, extra_args, callback, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant)
+ x = unipc_sampler.sample(x, steps=len(timesteps), t_start=t_start, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
+
+ return x
diff --git a/modules/sd_unet.py b/modules/sd_unet.py
new file mode 100644
index 0000000000000000000000000000000000000000..430b9ae468bb84fb08d49a3c6bc9635ba23500cd
--- /dev/null
+++ b/modules/sd_unet.py
@@ -0,0 +1,92 @@
+import torch.nn
+import ldm.modules.diffusionmodules.openaimodel
+
+from modules import script_callbacks, shared, devices
+
+unet_options = []
+current_unet_option = None
+current_unet = None
+
+
+def list_unets():
+ new_unets = script_callbacks.list_unets_callback()
+
+ unet_options.clear()
+ unet_options.extend(new_unets)
+
+
+def get_unet_option(option=None):
+ option = option or shared.opts.sd_unet
+
+ if option == "None":
+ return None
+
+ if option == "Automatic":
+ name = shared.sd_model.sd_checkpoint_info.model_name
+
+ options = [x for x in unet_options if x.model_name == name]
+
+ option = options[0].label if options else "None"
+
+ return next(iter([x for x in unet_options if x.label == option]), None)
+
+
+def apply_unet(option=None):
+ global current_unet_option
+ global current_unet
+
+ new_option = get_unet_option(option)
+ if new_option == current_unet_option:
+ return
+
+ if current_unet is not None:
+ print(f"Dectivating unet: {current_unet.option.label}")
+ current_unet.deactivate()
+
+ current_unet_option = new_option
+ if current_unet_option is None:
+ current_unet = None
+
+ if not shared.sd_model.lowvram:
+ shared.sd_model.model.diffusion_model.to(devices.device)
+
+ return
+
+ shared.sd_model.model.diffusion_model.to(devices.cpu)
+ devices.torch_gc()
+
+ current_unet = current_unet_option.create_unet()
+ current_unet.option = current_unet_option
+ print(f"Activating unet: {current_unet.option.label}")
+ current_unet.activate()
+
+
+class SdUnetOption:
+ model_name = None
+ """name of related checkpoint - this option will be selected automatically for unet if the name of checkpoint matches this"""
+
+ label = None
+ """name of the unet in UI"""
+
+ def create_unet(self):
+ """returns SdUnet object to be used as a Unet instead of built-in unet when making pictures"""
+ raise NotImplementedError()
+
+
+class SdUnet(torch.nn.Module):
+ def forward(self, x, timesteps, context, *args, **kwargs):
+ raise NotImplementedError()
+
+ def activate(self):
+ pass
+
+ def deactivate(self):
+ pass
+
+
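+# Installed in place of ldm's UNetModel.forward (the original is kept as
+# copy_of_UNetModel_forward_for_webui): routes through the active replacement
+# unet when one is set, and falls back to the built-in forward otherwise.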
+def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
+ if current_unet is not None:
+ return current_unet.forward(x, timesteps, context, *args, **kwargs)
+
+ return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
+
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
new file mode 100644
index 0000000000000000000000000000000000000000..31306d8ba4bf10223df199b163a0dc386e322b03
--- /dev/null
+++ b/modules/sd_vae.py
@@ -0,0 +1,282 @@
+import os
+import collections
+from dataclasses import dataclass
+
+from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes
+
+import glob
+from copy import deepcopy
+
+
+vae_path = os.path.abspath(os.path.join(paths.models_path, "VAE"))
+vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
+vae_dict = {}
+
+
+base_vae = None
+loaded_vae_file = None
+checkpoint_info = None
+
+checkpoints_loaded = collections.OrderedDict()
+
+
+def get_loaded_vae_name():
+ if loaded_vae_file is None:
+ return None
+
+ return os.path.basename(loaded_vae_file)
+
+
+def get_loaded_vae_hash():
+ if loaded_vae_file is None:
+ return None
+
+ sha256 = hashes.sha256(loaded_vae_file, 'vae')
+
+ return sha256[0:10] if sha256 else None
+
+
+def get_base_vae(model):
+ if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
+ return base_vae
+ return None
+
+
+def store_base_vae(model):
+ global base_vae, checkpoint_info
+ if checkpoint_info != model.sd_checkpoint_info:
+ assert not loaded_vae_file, "Trying to store non-base VAE!"
+ base_vae = deepcopy(model.first_stage_model.state_dict())
+ checkpoint_info = model.sd_checkpoint_info
+
+
+def delete_base_vae():
+ global base_vae, checkpoint_info
+ base_vae = None
+ checkpoint_info = None
+
+
+def restore_base_vae(model):
+ global loaded_vae_file
+ if base_vae is not None and checkpoint_info == model.sd_checkpoint_info:
+ print("Restoring base VAE")
+ _load_vae_dict(model, base_vae)
+ loaded_vae_file = None
+ delete_base_vae()
+
+
+def get_filename(filepath):
+ return os.path.basename(filepath)
+
+
+def refresh_vae_list():
+ vae_dict.clear()
+
+ paths = [
+ os.path.join(sd_models.model_path, '**/*.vae.ckpt'),
+ os.path.join(sd_models.model_path, '**/*.vae.pt'),
+ os.path.join(sd_models.model_path, '**/*.vae.safetensors'),
+ os.path.join(vae_path, '**/*.ckpt'),
+ os.path.join(vae_path, '**/*.pt'),
+ os.path.join(vae_path, '**/*.safetensors'),
+ ]
+
+ if shared.cmd_opts.ckpt_dir is not None and os.path.isdir(shared.cmd_opts.ckpt_dir):
+ paths += [
+ os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.ckpt'),
+ os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.pt'),
+ os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.safetensors'),
+ ]
+
+ if shared.cmd_opts.vae_dir is not None and os.path.isdir(shared.cmd_opts.vae_dir):
+ paths += [
+ os.path.join(shared.cmd_opts.vae_dir, '**/*.ckpt'),
+ os.path.join(shared.cmd_opts.vae_dir, '**/*.pt'),
+ os.path.join(shared.cmd_opts.vae_dir, '**/*.safetensors'),
+ ]
+
+ candidates = []
+ for path in paths:
+ candidates += glob.iglob(path, recursive=True)
+
+ for filepath in candidates:
+ name = get_filename(filepath)
+ vae_dict[name] = filepath
+
+ vae_dict.update(dict(sorted(vae_dict.items(), key=lambda item: shared.natural_sort_key(item[0]))))
+
+
+def find_vae_near_checkpoint(checkpoint_file):
+ checkpoint_path = os.path.basename(checkpoint_file).rsplit('.', 1)[0]
+ for vae_file in vae_dict.values():
+ if os.path.basename(vae_file).startswith(checkpoint_path):
+ return vae_file
+
+ return None
+
+
+@dataclass
+class VaeResolution:
+ vae: str = None
+ source: str = None
+ resolved: bool = True
+
+ def tuple(self):
+ return self.vae, self.source
+
+
+def is_automatic():
+ return shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config
+
+
+def resolve_vae_from_setting() -> VaeResolution:
+ if shared.opts.sd_vae == "None":
+ return VaeResolution()
+
+ vae_from_options = vae_dict.get(shared.opts.sd_vae, None)
+ if vae_from_options is not None:
+ return VaeResolution(vae_from_options, 'specified in settings')
+
+ if not is_automatic():
+ print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead")
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution:
+ metadata = extra_networks.get_user_metadata(checkpoint_file)
+ vae_metadata = metadata.get("vae", None)
+ if vae_metadata is not None and vae_metadata != "Automatic":
+ if vae_metadata == "None":
+ return VaeResolution()
+
+ vae_from_metadata = vae_dict.get(vae_metadata, None)
+ if vae_from_metadata is not None:
+ return VaeResolution(vae_from_metadata, "from user metadata")
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution:
+ vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
+ if vae_near_checkpoint is not None and (not shared.opts.sd_vae_overrides_per_model_preferences or is_automatic()):
+ return VaeResolution(vae_near_checkpoint, 'found near the checkpoint')
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae(checkpoint_file) -> VaeResolution:
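+ """Precedence, as implemented below: the cmd_opts.vae_path commandline argument
+ wins; then the settings option when it overrides per-model preferences; then
+ per-checkpoint user metadata; then a VAE file found next to the checkpoint;
+ finally the settings option."""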
+ if shared.cmd_opts.vae_path is not None:
+ return VaeResolution(shared.cmd_opts.vae_path, 'from commandline argument')
+
+ if shared.opts.sd_vae_overrides_per_model_preferences and not is_automatic():
+ return resolve_vae_from_setting()
+
+ res = resolve_vae_from_user_metadata(checkpoint_file)
+ if res.resolved:
+ return res
+
+ res = resolve_vae_near_checkpoint(checkpoint_file)
+ if res.resolved:
+ return res
+
+ res = resolve_vae_from_setting()
+
+ return res
+
+
+def load_vae_dict(filename, map_location):
+ vae_ckpt = sd_models.read_state_dict(filename, map_location=map_location)
+ vae_dict_1 = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss" and k not in vae_ignore_keys}
+ return vae_dict_1
+
+
+def load_vae(model, vae_file=None, vae_source="from unknown source"):
+ global vae_dict, base_vae, loaded_vae_file
+ # save_settings = False
+
+ cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0
+
+ if vae_file:
+ if cache_enabled and vae_file in checkpoints_loaded:
+ # use vae checkpoint cache
+ print(f"Loading VAE weights {vae_source}: cached {get_filename(vae_file)}")
+ store_base_vae(model)
+ _load_vae_dict(model, checkpoints_loaded[vae_file])
+ else:
+ assert os.path.isfile(vae_file), f"VAE {vae_source} doesn't exist: {vae_file}"
+ print(f"Loading VAE weights {vae_source}: {vae_file}")
+ store_base_vae(model)
+
+ vae_dict_1 = load_vae_dict(vae_file, map_location=shared.weight_load_location)
+ _load_vae_dict(model, vae_dict_1)
+
+ if cache_enabled:
+ # cache newly loaded vae
+ checkpoints_loaded[vae_file] = vae_dict_1.copy()
+
+ # clean up cache if limit is reached
+ if cache_enabled:
+ while len(checkpoints_loaded) > shared.opts.sd_vae_checkpoint_cache + 1: # we need to count the current model
+ checkpoints_loaded.popitem(last=False) # LRU
+
+ # If vae used is not in dict, update it
+ # It will be removed on refresh though
+ vae_opt = get_filename(vae_file)
+ if vae_opt not in vae_dict:
+ vae_dict[vae_opt] = vae_file
+
+ elif loaded_vae_file:
+ restore_base_vae(model)
+
+ loaded_vae_file = vae_file
+ model.base_vae = base_vae
+ model.loaded_vae_file = loaded_vae_file
+
+
+# don't call this from outside
+def _load_vae_dict(model, vae_dict_1):
+ model.first_stage_model.load_state_dict(vae_dict_1)
+ model.first_stage_model.to(devices.dtype_vae)
+
+
+def clear_loaded_vae():
+ global loaded_vae_file
+ loaded_vae_file = None
+
+
+unspecified = object()
+
+
+def reload_vae_weights(sd_model=None, vae_file=unspecified):
+ if not sd_model:
+ sd_model = shared.sd_model
+
+ checkpoint_info = sd_model.sd_checkpoint_info
+ checkpoint_file = checkpoint_info.filename
+
+ if vae_file == unspecified:
+ vae_file, vae_source = resolve_vae(checkpoint_file).tuple()
+ else:
+ vae_source = "from function argument"
+
+ if loaded_vae_file == vae_file:
+ return
+
+ if sd_model.lowvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ sd_model.to(devices.cpu)
+
+ sd_hijack.model_hijack.undo_hijack(sd_model)
+
+ load_vae(sd_model, vae_file, vae_source)
+
+ sd_hijack.model_hijack.hijack(sd_model)
+ script_callbacks.model_loaded_callback(sd_model)
+
+ if not sd_model.lowvram:
+ sd_model.to(devices.device)
+
+ print("VAE weights loaded.")
+ return sd_model
diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py
new file mode 100644
index 0000000000000000000000000000000000000000..1412bc8e02b9791938a0fb54d776ab853019e0e3
--- /dev/null
+++ b/modules/sd_vae_approx.py
@@ -0,0 +1,86 @@
+import os
+
+import torch
+from torch import nn
+from modules import devices, paths, shared
+
+sd_vae_approx_models = {}
+
+
+class VAEApprox(nn.Module):
+ def __init__(self):
+ super(VAEApprox, self).__init__()
+ self.conv1 = nn.Conv2d(4, 8, (7, 7))
+ self.conv2 = nn.Conv2d(8, 16, (5, 5))
+ self.conv3 = nn.Conv2d(16, 32, (3, 3))
+ self.conv4 = nn.Conv2d(32, 64, (3, 3))
+ self.conv5 = nn.Conv2d(64, 32, (3, 3))
+ self.conv6 = nn.Conv2d(32, 16, (3, 3))
+ self.conv7 = nn.Conv2d(16, 8, (3, 3))
+ self.conv8 = nn.Conv2d(8, 3, (3, 3))
+
+ def forward(self, x):
+ extra = 11
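+ # the 11-pixel pad per side exactly offsets the shrinkage of the unpadded
+ # convolutions below: 3 (7x7) + 2 (5x5) + 6 * 1 (3x3) = 11 pixels per side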
+ x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2))
+ x = nn.functional.pad(x, (extra, extra, extra, extra))
+
+ for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]:
+ x = layer(x)
+ x = nn.functional.leaky_relu(x, 0.1)
+
+ return x
+
+
+def download_model(model_path, model_url):
+ if not os.path.exists(model_path):
+ os.makedirs(os.path.dirname(model_path), exist_ok=True)
+
+ print(f'Downloading VAEApprox model to: {model_path}')
+ torch.hub.download_url_to_file(model_url, model_path)
+
+
+def model():
+ model_name = "vaeapprox-sdxl.pt" if getattr(shared.sd_model, 'is_sdxl', False) else "model.pt"
+ loaded_model = sd_vae_approx_models.get(model_name)
+
+ if loaded_model is None:
+ model_path = os.path.join(paths.models_path, "VAE-approx", model_name)
+ if not os.path.exists(model_path):
+ model_path = os.path.join(paths.script_path, "models", "VAE-approx", model_name)
+
+ if not os.path.exists(model_path):
+ model_path = os.path.join(paths.models_path, "VAE-approx", model_name)
+ download_model(model_path, 'https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/download/v1.0.0-pre/' + model_name)
+
+ loaded_model = VAEApprox()
+ loaded_model.load_state_dict(torch.load(model_path, map_location='cpu' if devices.device.type != 'cuda' else None))
+ loaded_model.eval()
+ loaded_model.to(devices.device, devices.dtype)
+ sd_vae_approx_models[model_name] = loaded_model
+
+ return loaded_model
+
+
+def cheap_approximation(sample):
+ # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
+
+ if shared.sd_model.is_sdxl:
+ coeffs = [
+ [ 0.3448, 0.4168, 0.4395],
+ [-0.1953, -0.0290, 0.0250],
+ [ 0.1074, 0.0886, -0.0163],
+ [-0.3730, -0.2499, -0.2088],
+ ]
+ else:
+ coeffs = [
+ [ 0.298, 0.207, 0.208],
+ [ 0.187, 0.286, 0.173],
+ [-0.158, 0.189, 0.264],
+ [-0.184, -0.271, -0.473],
+ ]
+
+ coefs = torch.tensor(coeffs).to(sample.device)
+
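+ # project the 4 latent channels (l) onto 3 RGB channels (r) with a per-pixel
+ # linear map -- a crude but fast approximation of the full VAE decode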
+ x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs)
+
+ return x_sample
diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py
new file mode 100644
index 0000000000000000000000000000000000000000..808eb3624fd40daa56bcbdb5f8ad771ae5557346
--- /dev/null
+++ b/modules/sd_vae_taesd.py
@@ -0,0 +1,124 @@
+"""
+Tiny AutoEncoder for Stable Diffusion
+(DNN for encoding / decoding SD's latent space)
+
+https://github.com/madebyollin/taesd
+"""
+import os
+import torch
+import torch.nn as nn
+
+from modules import devices, paths_internal, shared
+
+sd_vae_taesd_models = {}
+
+
+def conv(n_in, n_out, **kwargs):
+ return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
+
+
+class Clamp(nn.Module):
+ @staticmethod
+ def forward(x):
+ return torch.tanh(x / 3) * 3
+
+
+class Block(nn.Module):
+ def __init__(self, n_in, n_out):
+ super().__init__()
+ self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
+ self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
+ self.fuse = nn.ReLU()
+
+ def forward(self, x):
+ return self.fuse(self.conv(x) + self.skip(x))
+
+
+def decoder():
+ return nn.Sequential(
+ Clamp(), conv(4, 64), nn.ReLU(),
+ Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
+ Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
+ Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
+ Block(64, 64), conv(64, 3),
+ )
+
+
+def encoder():
+ return nn.Sequential(
+ conv(3, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 4),
+ )
+
+
+class TAESDDecoder(nn.Module):
+ latent_magnitude = 3
+ latent_shift = 0.5
+
+ def __init__(self, decoder_path="taesd_decoder.pth"):
+ """Initialize pretrained TAESD on the given device from the given checkpoints."""
+ super().__init__()
+ self.decoder = decoder()
+ self.decoder.load_state_dict(
+ torch.load(decoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
+
+
+class TAESDEncoder(nn.Module):
+ latent_magnitude = 3
+ latent_shift = 0.5
+
+ def __init__(self, encoder_path="taesd_encoder.pth"):
+ """Initialize pretrained TAESD on the given device from the given checkpoints."""
+ super().__init__()
+ self.encoder = encoder()
+ self.encoder.load_state_dict(
+ torch.load(encoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
+
+
+def download_model(model_path, model_url):
+ if not os.path.exists(model_path):
+ os.makedirs(os.path.dirname(model_path), exist_ok=True)
+
+ print(f'Downloading TAESD model to: {model_path}')
+ torch.hub.download_url_to_file(model_url, model_path)
+
+
+def decoder_model():
+ model_name = "taesdxl_decoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_decoder.pth"
+ loaded_model = sd_vae_taesd_models.get(model_name)
+
+ if loaded_model is None:
+ model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name)
+ download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
+
+ if os.path.exists(model_path):
+ loaded_model = TAESDDecoder(model_path)
+ loaded_model.eval()
+ loaded_model.to(devices.device, devices.dtype)
+ sd_vae_taesd_models[model_name] = loaded_model
+ else:
+ raise FileNotFoundError('TAESD model not found')
+
+ return loaded_model.decoder
+
+
+def encoder_model():
+ model_name = "taesdxl_encoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_encoder.pth"
+ loaded_model = sd_vae_taesd_models.get(model_name)
+
+ if loaded_model is None:
+ model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name)
+ download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
+
+ if os.path.exists(model_path):
+ loaded_model = TAESDEncoder(model_path)
+ loaded_model.eval()
+ loaded_model.to(devices.device, devices.dtype)
+ sd_vae_taesd_models[model_name] = loaded_model
+ else:
+ raise FileNotFoundError('TAESD model not found')
+
+ return loaded_model.encoder
diff --git a/modules/shared.py b/modules/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6d08cad7206c8f2526716dcbe844941de965698
--- /dev/null
+++ b/modules/shared.py
@@ -0,0 +1,87 @@
+import sys
+
+import gradio as gr
+
+from modules import shared_cmd_options, shared_gradio_themes, options, shared_items, sd_models_types
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
+from modules import util
+
+cmd_opts = shared_cmd_options.cmd_opts
+parser = shared_cmd_options.parser
+
+batch_cond_uncond = True # old field, unused now in favor of shared.opts.batch_cond_uncond
+parallel_processing_allowed = True
+styles_filename = cmd_opts.styles_file
+config_filename = cmd_opts.ui_settings_file
+hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
+
+demo = None
+
+device = None
+
+weight_load_location = None
+
+xformers_available = False
+
+hypernetworks = {}
+
+loaded_hypernetworks = []
+
+state = None
+
+prompt_styles = None
+
+interrogator = None
+
+face_restorers = []
+
+options_templates = None
+opts = None
+restricted_opts = None
+
+sd_model: sd_models_types.WebuiSdModel = None
+
+settings_components = None
+"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings"""
+
+tab_names = []
+
+latent_upscale_default_mode = "Latent"
+latent_upscale_modes = {
+ "Latent": {"mode": "bilinear", "antialias": False},
+ "Latent (antialiased)": {"mode": "bilinear", "antialias": True},
+ "Latent (bicubic)": {"mode": "bicubic", "antialias": False},
+ "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True},
+ "Latent (nearest)": {"mode": "nearest", "antialias": False},
+ "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False},
+}
+
+sd_upscalers = []
+
+clip_model = None
+
+progress_print_out = sys.stdout
+
+gradio_theme = gr.themes.Base()
+
+total_tqdm = None
+
+mem_mon = None
+
+options_section = options.options_section
+OptionInfo = options.OptionInfo
+OptionHTML = options.OptionHTML
+
+natural_sort_key = util.natural_sort_key
+listfiles = util.listfiles
+html_path = util.html_path
+html = util.html
+walk_files = util.walk_files
+ldm_print = util.ldm_print
+
+reload_gradio_theme = shared_gradio_themes.reload_gradio_theme
+
+list_checkpoint_tiles = shared_items.list_checkpoint_tiles
+refresh_checkpoints = shared_items.refresh_checkpoints
+list_samplers = shared_items.list_samplers
+reload_hypernetworks = shared_items.reload_hypernetworks
diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py
new file mode 100644
index 0000000000000000000000000000000000000000..4520cfe319f8cce867313bf3d5c4b0f4fef83aac
--- /dev/null
+++ b/modules/shared_gradio_themes.py
@@ -0,0 +1,67 @@
+import os
+
+import gradio as gr
+
+from modules import errors, shared
+from modules.paths_internal import script_path
+
+
+# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
+gradio_hf_hub_themes = [
+ "gradio/base",
+ "gradio/glass",
+ "gradio/monochrome",
+ "gradio/seafoam",
+ "gradio/soft",
+ "gradio/dracula_test",
+ "abidlabs/dracula_test",
+ "abidlabs/Lime",
+ "abidlabs/pakistan",
+ "Ama434/neutral-barlow",
+ "dawood/microsoft_windows",
+ "finlaymacklon/smooth_slate",
+ "Franklisi/darkmode",
+ "freddyaboulton/dracula_revamped",
+ "freddyaboulton/test-blue",
+ "gstaff/xkcd",
+ "Insuz/Mocha",
+ "Insuz/SimpleIndigo",
+ "JohnSmith9982/small_and_pretty",
+ "nota-ai/theme",
+ "nuttea/Softblue",
+ "ParityError/Anime",
+ "reilnuud/polite",
+ "remilia/Ghostly",
+ "rottenlittlecreature/Moon_Goblin",
+ "step-3-profit/Midnight-Deep",
+ "Taithrah/Minimal",
+ "ysharma/huggingface",
+ "ysharma/steampunk",
+ "NoCrypt/miku"
+]
+
+
+def reload_gradio_theme(theme_name=None):
+ if not theme_name:
+ theme_name = shared.opts.gradio_theme
+
+ default_theme_args = dict(
+ font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
+ font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
+ )
+
+ if theme_name == "Default":
+ shared.gradio_theme = gr.themes.Default(**default_theme_args)
+ else:
+ try:
+ theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
+ theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
+ if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path):
+ shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
+ else:
+ os.makedirs(theme_cache_dir, exist_ok=True)
+ shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
+ shared.gradio_theme.dump(theme_cache_path)
+ except Exception as e:
+ errors.display(e, "changing gradio theme")
+ shared.gradio_theme = gr.themes.Default(**default_theme_args)
diff --git a/modules/shared_init.py b/modules/shared_init.py
new file mode 100644
index 0000000000000000000000000000000000000000..acd698c291936148753be1d2faf70d123c0fa8aa
--- /dev/null
+++ b/modules/shared_init.py
@@ -0,0 +1,49 @@
+import os
+
+import torch
+
+from modules import shared
+from modules.shared import cmd_opts
+
+
+def initialize():
+ """Initializes fields inside the shared module in a controlled manner.
+
+ Should be called early because some other modules you can import might need these fields to be already set.
+ """
+
+ os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
+
+ from modules import options, shared_options
+ shared.options_templates = shared_options.options_templates
+ shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts)
+ shared.restricted_opts = shared_options.restricted_opts
+ if os.path.exists(shared.config_filename):
+ shared.opts.load(shared.config_filename)
+
+ from modules import devices
+ devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
+ (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
+
+ devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+ devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
+ shared.device = devices.device
+ shared.weight_load_location = None if cmd_opts.lowram else "cpu"
+
+ from modules import shared_state
+ shared.state = shared_state.State()
+
+ from modules import styles
+ shared.prompt_styles = styles.StyleDatabase(shared.styles_filename)
+
+ from modules import interrogate
+ shared.interrogator = interrogate.InterrogateModels("interrogate")
+
+ from modules import shared_total_tqdm
+ shared.total_tqdm = shared_total_tqdm.TotalTQDM()
+
+ from modules import memmon, devices
+ shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts)
+ shared.mem_mon.start()
+
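+# A hedged sketch of the intended call order (not part of the module):
+# initialize() is expected to run once, early in startup, before anything that
+# touches shared.opts, shared.state or devices.device, e.g.:
+#
+#     from modules import shared_init
+#     shared_init.initialize()   # populates shared.opts, shared.state, devices.*
+#     from modules import shared
+#     print(shared.opts.samples_format)  # options are now loaded from shared.config_filename
+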
diff --git a/modules/shared_items.py b/modules/shared_items.py
new file mode 100644
index 0000000000000000000000000000000000000000..11d89c6f2e2dfd08846e478626eb81eaa0d50564
--- /dev/null
+++ b/modules/shared_items.py
@@ -0,0 +1,119 @@
+import sys
+
+from modules.shared_cmd_options import cmd_opts
+
+
+def realesrgan_models_names():
+ import modules.realesrgan_model
+ return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
+
+
+def postprocessing_scripts():
+ import modules.scripts
+
+ return modules.scripts.scripts_postproc.scripts
+
+
+def sd_vae_items():
+ import modules.sd_vae
+
+ return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
+
+
+def refresh_vae_list():
+ import modules.sd_vae
+
+ modules.sd_vae.refresh_vae_list()
+
+
+def cross_attention_optimizations():
+ import modules.sd_hijack
+
+ return ["Automatic"] + [x.title() for x in modules.sd_hijack.optimizers] + ["None"]
+
+
+def sd_unet_items():
+ import modules.sd_unet
+
+ return ["Automatic"] + [x.label for x in modules.sd_unet.unet_options] + ["None"]
+
+
+def refresh_unet_list():
+ import modules.sd_unet
+
+ modules.sd_unet.list_unets()
+
+
+def list_checkpoint_tiles():
+ import modules.sd_models
+ return modules.sd_models.checkpoint_tiles()
+
+
+def refresh_checkpoints():
+ import modules.sd_models
+ return modules.sd_models.list_models()
+
+
+def list_samplers():
+ import modules.sd_samplers
+ return modules.sd_samplers.all_samplers
+
+
+def reload_hypernetworks():
+ from modules.hypernetworks import hypernetwork
+ from modules import shared
+
+ shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
+
+
+ui_reorder_categories_builtin_items = [
+ "inpaint",
+ "sampler",
+ "accordions",
+ "checkboxes",
+ "dimensions",
+ "cfg",
+ "denoising",
+ "seed",
+ "batch",
+ "override_settings",
+]
+
+
+def ui_reorder_categories():
+ from modules import scripts
+
+ yield from ui_reorder_categories_builtin_items
+
+ sections = {}
+ for script in scripts.scripts_txt2img.scripts + scripts.scripts_img2img.scripts:
+ if isinstance(script.section, str) and script.section not in ui_reorder_categories_builtin_items:
+ sections[script.section] = 1
+
+ yield from sections
+
+ yield "scripts"
+
+
+class Shared(sys.modules[__name__].__class__):
+ """
+ This class is here to provide the sd_model field as a property, so that the model can be created and loaded on demand rather than
+ at program startup.
+ """
+
+ sd_model_val = None
+
+ @property
+ def sd_model(self):
+ import modules.sd_models
+
+ return modules.sd_models.model_data.get_sd_model()
+
+ @sd_model.setter
+ def sd_model(self, value):
+ import modules.sd_models
+
+ modules.sd_models.model_data.set_sd_model(value)
+
+
+sys.modules['modules.shared'].__class__ = Shared
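+
+
+# The assignment above swaps the class of the already-imported `modules.shared`
+# module object, which is what lets a plain module expose `sd_model` as a
+# property. A self-contained sketch of the same trick (illustrative names only):
+#
+#     import sys, types
+#
+#     class _LazyModule(types.ModuleType):
+#         @property
+#         def answer(self):
+#             return 42  # computed on attribute access instead of at import time
+#
+#     sys.modules[__name__].__class__ = _LazyModule
+#     # afterwards, `import thismodule; thismodule.answer` invokes the property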
diff --git a/modules/shared_options.py b/modules/shared_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..7739b80291fa55ae6f0b0a5b49ee058ce20ec501
--- /dev/null
+++ b/modules/shared_options.py
@@ -0,0 +1,332 @@
+import gradio as gr
+
+from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
+from modules.shared_cmd_options import cmd_opts
+from modules.options import options_section, OptionInfo, OptionHTML
+
+options_templates = {}
+hide_dirs = shared.hide_dirs
+
+restricted_opts = {
+ "samples_filename_pattern",
+ "directories_filename_pattern",
+ "outdir_samples",
+ "outdir_txt2img_samples",
+ "outdir_img2img_samples",
+ "outdir_extras_samples",
+ "outdir_grids",
+ "outdir_txt2img_grids",
+ "outdir_save",
+ "outdir_init_images"
+}
+
+options_templates.update(options_section(('saving-images', "Saving images/grids"), {
+ "samples_save": OptionInfo(True, "Always save all generated images"),
+ "samples_format": OptionInfo('png', 'File format for images'),
+ "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
+
+ "grid_save": OptionInfo(True, "Always save all generated image grids"),
+ "grid_format": OptionInfo('png', 'File format for grids'),
+ "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
+ "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
+ "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
+ "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
+ "font": OptionInfo("", "Font for image grids that have text"),
+ "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
+ "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
+ "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
+
+ "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
+ "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
+ "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+ "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
+ "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
+ "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
+ "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
+ "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
+ "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
+ "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
+ "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
+ "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
+ "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),
+
+ "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
+ "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
+ "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
+ "save_init_img": OptionInfo(False, "Save init images when using img2img"),
+
+ "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
+ "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
+
+ "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."),
+}))
+
+options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+ "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
+ "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
+ "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
+ "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
+ "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
+ "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
+ "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
+ "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
+ "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
+}))
+
+options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
+ "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
+ "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
+ "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
+ "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
+}))
+
+options_templates.update(options_section(('upscaling', "Upscaling"), {
+ "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
+ "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
+ "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
+ "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
+}))
+
+options_templates.update(options_section(('face-restoration', "Face restoration"), {
+ "face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"),
+ "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
+ "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
+ "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
+}))
+
+options_templates.update(options_section(('system', "System"), {
+ "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
+ "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
+ "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(),
+ "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
+ "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
+ "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
+ "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
+ "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
+ "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
+ "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
+}))
+
+options_templates.update(options_section(('API', "API"), {
+ "api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True),
+ "api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True),
+ "api_useragent": OptionInfo("", "User agent for requests", restrict_api=True),
+}))
+
+options_templates.update(options_section(('training', "Training"), {
+ "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
+ "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
+ "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
+ "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
+ "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
+ "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
+ "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
+ "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
+ "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
+ "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
+ "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
+}))
+
+options_templates.update(options_section(('sd', "Stable Diffusion"), {
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'),
+ "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
+ "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
+ "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
+ "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
+ "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
+ "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+ "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
+ "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
+ "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
+ "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
+ "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}, infotext="RNG").info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
+ "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"),
+ "hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"),
+}))
+
+options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
+ "sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
+ "sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
+ "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
+ "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
+}))
+
+options_templates.update(options_section(('vae', "VAE"), {
+ "sd_vae_explanation": OptionHTML("""
+A VAE is a neural network that transforms a standard RGB
+image into a latent space representation and back. The latent space representation is what stable diffusion works on during sampling
+(i.e. when the progress bar is between empty and full). For txt2img, the VAE is used to create the resulting image after sampling is finished.
+For img2img, the VAE is used to process the user's input image before sampling, and to create an image after sampling.
+"""),
+ "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list, infotext='VAE').info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
+ "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"),
+ "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
+ "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Encoder').info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"),
+ "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"),
+}))
+
+options_templates.update(options_section(('img2img', "img2img"), {
+ "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
+ "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'),
+ "img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"),
+ "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
+ "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
+ "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}),
+ "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(),
+ "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(),
+ "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(),
+ "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
+ "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
+ "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
+}))
+
+options_templates.update(options_section(('optimizations', "Optimizations"), {
+ "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
+ "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+ "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
+ "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
+ "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"),
+ "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
+ "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
+ "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
+}))
+
+options_templates.update(options_section(('compatibility', "Compatibility"), {
+ "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
+ "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
+ "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
+ "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
+ "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
+ "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
+ "use_old_scheduling": OptionInfo(False, "Use old prompt editing timelines.", infotext="Old prompt editing timelines").info("For [red:green:N]; old: If N < 1, it's a fraction of steps (and hires fix uses range from 0 to 1), if N >= 1, it's an absolute number of steps; new: If N has a decimal point in it, it's a fraction of steps (and hires fix uses range from 1 to 2), othewrwise it's an absolute number of steps"),
+}))
+
+options_templates.update(options_section(('interrogate', "Interrogate"), {
+ "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
+ "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
+ "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
+ "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
+ "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+ "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"),
+ "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": interrogate.category_types()}, refresh=interrogate.category_types),
+ "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
+ "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"),
+ "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"),
+ "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"),
+ "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
+}))
+
+options_templates.update(options_section(('extra_networks', "Extra Networks"), {
+ "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
+ "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
+ "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
+ "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
+ "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
+ "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
+ "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
+ "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
+ "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
+ "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
+ "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
+ "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
+}))
+
+options_templates.update(options_section(('ui', "User interface"), {
+ "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
+ "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery .").needs_reload_ui(),
+ "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
+ "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("an be any valid CSS value").needs_reload_ui(),
+ "return_grid": OptionInfo(True, "Show grid in results for web"),
+ "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
+ "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
+ "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
+ "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
+ "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
+ "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
+ "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
+ "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
+ "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
+ "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
+ "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
+ "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
+ "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+ "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
+ "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
+ "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
+}))
+
+
+options_templates.update(options_section(('infotext', "Infotext"), {
+ "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
+ "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
+ "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
+ "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
+ "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+ "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""
+Ignore: keep prompt and styles dropdown as it is.
+Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).
+Discard: remove style text from prompt, keep styles dropdown as it is.
+Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.
+ """),
+
+}))
+
+options_templates.update(options_section(('ui', "Live previews"), {
+ "show_progressbar": OptionInfo(True, "Show progressbar"),
+ "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
+ "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
+ "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
+ "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"),
+ "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
+ "live_preview_allow_lowvram_full": OptionInfo(False, "Allow Full live preview method with lowvram/medvram").info("If not, Approx NN will be used instead; Full live preview method is very detrimental to speed if lowvram/medvram optimizations are enabled"),
+ "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
+ "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
+ "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
+}))
+
+options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
+ "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
+ "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"),
+ "eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. Euler a) and SDE samplers"),
+ "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
+ 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'),
+ 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'),
+ 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"),
+ 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling'),
+ 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
+ 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule min sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
+ 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule max sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"),
+ 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
+ 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
+ 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
+ 'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"),
+ 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'),
+ 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'),
+ 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"),
+ 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
+}))
+
+options_templates.update(options_section(('postprocessing', "Postprocessing"), {
+ 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+}))
+
+options_templates.update(options_section((None, "Hidden options"), {
+ "disabled_extensions": OptionInfo([], "Disable these extensions"),
+ "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
+ "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
+ "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
+}))
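+
+
+# A hedged sketch of how an extension can extend this registry (the section id
+# and option name are illustrative, not part of this file):
+#
+#     options_templates.update(options_section(('my_ext', "My Extension"), {
+#         "my_ext_strength": OptionInfo(0.5, "Effect strength", gr.Slider,
+#                                       {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+#     }))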
+
diff --git a/modules/shared_state.py b/modules/shared_state.py
new file mode 100644
index 0000000000000000000000000000000000000000..91740d38fb134921a79b15edb1d0a707511b14a8
--- /dev/null
+++ b/modules/shared_state.py
@@ -0,0 +1,159 @@
+import datetime
+import logging
+import threading
+import time
+
+from modules import errors, shared, devices
+from typing import Optional
+
+log = logging.getLogger(__name__)
+
+
+class State:
+ skipped = False
+ interrupted = False
+ job = ""
+ job_no = 0
+ job_count = 0
+ processing_has_refined_job_count = False
+ job_timestamp = '0'
+ sampling_step = 0
+ sampling_steps = 0
+ current_latent = None
+ current_image = None
+ current_image_sampling_step = 0
+ id_live_preview = 0
+ textinfo = None
+ time_start = None
+ server_start = None
+ _server_command_signal = threading.Event()
+ _server_command: Optional[str] = None
+
+ def __init__(self):
+ self.server_start = time.time()
+
+ @property
+ def need_restart(self) -> bool:
+ # Compatibility getter for need_restart.
+ return self.server_command == "restart"
+
+ @need_restart.setter
+ def need_restart(self, value: bool) -> None:
+ # Compatibility setter for need_restart.
+ if value:
+ self.server_command = "restart"
+
+ @property
+ def server_command(self):
+ return self._server_command
+
+ @server_command.setter
+ def server_command(self, value: Optional[str]) -> None:
+ """
+ Set the server command to `value` and signal that it's been set.
+ """
+ self._server_command = value
+ self._server_command_signal.set()
+
+ def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]:
+ """
+ Wait for server command to get set; return and clear the value and signal.
+ """
+ if self._server_command_signal.wait(timeout):
+ self._server_command_signal.clear()
+ req = self._server_command
+ self._server_command = None
+ return req
+ return None
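+ # A sketch of how the signal pair above is meant to be used (names from this
+ # class; the waiting side is typically the server main loop):
+ #
+ #     # thread A (e.g. a UI callback):
+ #     shared.state.server_command = "restart"  # stores the value, fires the Event
+ #     # thread B (main loop):
+ #     cmd = shared.state.wait_for_server_command(timeout=5)
+ #     if cmd == "restart":
+ #         ...  # tear down and relaunch
+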
+
+ def request_restart(self) -> None:
+ self.interrupt()
+ self.server_command = "restart"
+ log.info("Received restart request")
+
+ def skip(self):
+ self.skipped = True
+ log.info("Received skip request")
+
+ def interrupt(self):
+ self.interrupted = True
+ log.info("Received interrupt request")
+
+ def nextjob(self):
+ if shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps == -1:
+ self.do_set_current_image()
+
+ self.job_no += 1
+ self.sampling_step = 0
+ self.current_image_sampling_step = 0
+
+ def dict(self):
+ obj = {
+ "skipped": self.skipped,
+ "interrupted": self.interrupted,
+ "job": self.job,
+ "job_count": self.job_count,
+ "job_timestamp": self.job_timestamp,
+ "job_no": self.job_no,
+ "sampling_step": self.sampling_step,
+ "sampling_steps": self.sampling_steps,
+ }
+
+ return obj
+
+ def begin(self, job: str = "(unknown)"):
+ self.sampling_step = 0
+ self.job_count = -1
+ self.processing_has_refined_job_count = False
+ self.job_no = 0
+ self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+ self.current_latent = None
+ self.current_image = None
+ self.current_image_sampling_step = 0
+ self.id_live_preview = 0
+ self.skipped = False
+ self.interrupted = False
+ self.textinfo = None
+ self.time_start = time.time()
+ self.job = job
+ devices.torch_gc()
+ log.info("Starting job %s", job)
+
+ def end(self):
+ duration = time.time() - self.time_start
+ log.info("Ending job %s (%.2f seconds)", self.job, duration)
+ self.job = ""
+ self.job_count = 0
+
+ devices.torch_gc()
+
+ def set_current_image(self):
+ """if enough sampling steps have been made after the last call to this, sets self.current_image from self.current_latent, and modifies self.id_live_preview accordingly"""
+ if not shared.parallel_processing_allowed:
+ return
+
+ if self.sampling_step - self.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps != -1:
+ self.do_set_current_image()
+
+ def do_set_current_image(self):
+ if self.current_latent is None:
+ return
+
+ import modules.sd_samplers
+
+ try:
+ if shared.opts.show_progress_grid:
+ self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
+ else:
+ self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
+
+ self.current_image_sampling_step = self.sampling_step
+
+ except Exception:
+ # when switching models during generation, the VAE would be on CPU, so creating an image would fail.
+ # we record the exception and silently continue
+ errors.record_exception()
+
+ def assign_current_image(self, image):
+ self.current_image = image
+ self.id_live_preview += 1
diff --git a/modules/shared_total_tqdm.py b/modules/shared_total_tqdm.py
new file mode 100644
index 0000000000000000000000000000000000000000..a023b1588f0fce8cc47ae9e72217733aedb16581
--- /dev/null
+++ b/modules/shared_total_tqdm.py
@@ -0,0 +1,37 @@
+import tqdm
+
+from modules import shared
+
+
+class TotalTQDM:
+ def __init__(self):
+ self._tqdm = None
+
+ def reset(self):
+ self._tqdm = tqdm.tqdm(
+ desc="Total progress",
+ total=shared.state.job_count * shared.state.sampling_steps,
+ position=1,
+ file=shared.progress_print_out
+ )
+
+ def update(self):
+ if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
+ return
+ if self._tqdm is None:
+ self.reset()
+ self._tqdm.update()
+
+ def updateTotal(self, new_total):
+ if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
+ return
+ if self._tqdm is None:
+ self.reset()
+ self._tqdm.total = new_total
+
+ def clear(self):
+ if self._tqdm is not None:
+ self._tqdm.refresh()
+ self._tqdm.close()
+ self._tqdm = None
+
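+# A hedged usage sketch (not part of the module): the job runner is expected to
+# size the bar once per job and tick it once per sampling step, e.g.:
+#
+#     shared.total_tqdm.updateTotal(steps * batch_count)  # resize before a job
+#     for _ in range(steps):
+#         shared.total_tqdm.update()                      # one tick per step
+#     shared.total_tqdm.clear()                           # close when done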
diff --git a/modules/styles.py b/modules/styles.py
new file mode 100644
index 0000000000000000000000000000000000000000..b633772d5fc07a86ce3156262d038c677dfae577
--- /dev/null
+++ b/modules/styles.py
@@ -0,0 +1,136 @@
+import csv
+import os
+import os.path
+import re
+import typing
+import shutil
+
+
+class PromptStyle(typing.NamedTuple):
+ name: str
+ prompt: str
+ negative_prompt: str
+
+
+def merge_prompts(style_prompt: str, prompt: str) -> str:
+ if "{prompt}" in style_prompt:
+ res = style_prompt.replace("{prompt}", prompt)
+ else:
+ parts = filter(None, (prompt.strip(), style_prompt.strip()))
+ res = ", ".join(parts)
+
+ return res
+
+
+def apply_styles_to_prompt(prompt, styles):
+ for style in styles:
+ prompt = merge_prompts(style, prompt)
+
+ return prompt
+
+
+re_spaces = re.compile(" +")
+
+
+def extract_style_text_from_prompt(style_text, prompt):
+ stripped_prompt = re.sub(re_spaces, " ", prompt.strip())
+ stripped_style_text = re.sub(re_spaces, " ", style_text.strip())
+ if "{prompt}" in stripped_style_text:
+ left, right = stripped_style_text.split("{prompt}", 1)
+ if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
+ prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)]
+ return True, prompt
+ else:
+ if stripped_prompt.endswith(stripped_style_text):
+ prompt = stripped_prompt[:len(stripped_prompt)-len(stripped_style_text)]
+
+ if prompt.endswith(', '):
+ prompt = prompt[:-2]
+
+ return True, prompt
+
+ return False, prompt
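+
+
+# Worked examples for the matcher above (assumed inputs, doctest-style):
+#
+#     extract_style_text_from_prompt("by greg", "a cat, by greg")
+#     # -> (True, "a cat") : suffix match, trailing ", " stripped
+#
+#     extract_style_text_from_prompt("masterpiece, {prompt}, 4k", "masterpiece, a cat, 4k")
+#     # -> (True, "a cat") : "{prompt}" placeholder matched on both ends
+#
+#     extract_style_text_from_prompt("by greg", "a cat")
+#     # -> (False, "a cat") : no match, prompt returned unchanged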
+
+
+def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
+ if not style.prompt and not style.negative_prompt:
+ return False, prompt, negative_prompt
+
+ match_positive, extracted_positive = extract_style_text_from_prompt(style.prompt, prompt)
+ if not match_positive:
+ return False, prompt, negative_prompt
+
+ match_negative, extracted_negative = extract_style_text_from_prompt(style.negative_prompt, negative_prompt)
+ if not match_negative:
+ return False, prompt, negative_prompt
+
+ return True, extracted_positive, extracted_negative
+
+
+class StyleDatabase:
+ def __init__(self, path: str):
+ self.no_style = PromptStyle("None", "", "")
+ self.styles = {}
+ self.path = path
+
+ self.reload()
+
+ def reload(self):
+ self.styles.clear()
+
+ if not os.path.exists(self.path):
+ return
+
+ with open(self.path, "r", encoding="utf-8-sig", newline='') as file:
+ reader = csv.DictReader(file, skipinitialspace=True)
+ for row in reader:
+ # Support loading old CSV format with "name, text"-columns
+ prompt = row["prompt"] if "prompt" in row else row["text"]
+ negative_prompt = row.get("negative_prompt", "")
+ self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt)
+
+ def get_style_prompts(self, styles):
+ return [self.styles.get(x, self.no_style).prompt for x in styles]
+
+ def get_negative_style_prompts(self, styles):
+ return [self.styles.get(x, self.no_style).negative_prompt for x in styles]
+
+ def apply_styles_to_prompt(self, prompt, styles):
+ return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles])
+
+ def apply_negative_styles_to_prompt(self, prompt, styles):
+ return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles])
+
+ def save_styles(self, path: str) -> None:
+ # Always keep a backup file around
+ if os.path.exists(path):
+ shutil.copy(path, f"{path}.bak")
+
+ with open(path, "w", encoding="utf-8-sig", newline='') as file:
+ writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
+ writer.writeheader()
+ writer.writerows(style._asdict() for style in self.styles.values())
+
+ def extract_styles_from_prompt(self, prompt, negative_prompt):
+ extracted = []
+
+ applicable_styles = list(self.styles.values())
+
+ while True:
+ found_style = None
+
+ for style in applicable_styles:
+ is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt)
+ if is_match:
+ found_style = style
+ prompt = new_prompt
+ negative_prompt = new_neg_prompt
+ break
+
+ if not found_style:
+ break
+
+ applicable_styles.remove(found_style)
+ extracted.append(found_style.name)
+
+ return list(reversed(extracted)), prompt, negative_prompt
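+
+
+# A minimal round-trip sketch for this database (the file name is hypothetical):
+#
+#     db = StyleDatabase("styles.csv")
+#     db.styles["cinematic"] = PromptStyle("cinematic", "{prompt}, film grain", "blurry")
+#     db.save_styles("styles.csv")                       # writes CSV plus a .bak backup
+#     db.apply_styles_to_prompt("a cat", ["cinematic"])  # -> "a cat, film grain"
+#     db.extract_styles_from_prompt("a cat, film grain", "blurry")
+#     # -> (["cinematic"], "a cat", "")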
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae4ee4bbec061b72cf20bfc369f3e14ca4188c7a
--- /dev/null
+++ b/modules/sub_quadratic_attention.py
@@ -0,0 +1,215 @@
+# original source:
+# https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py
+# license:
+# MIT License (see Memory Efficient Attention under the Licenses section in the web UI interface for the full license)
+# credit:
+# Amin Rezaei (original author)
+# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks)
+# brkirch (modified to use torch.narrow instead of dynamic_slice implementation)
+# implementation of:
+# "Self-attention Does Not Need O(n^2) Memory":
+# https://arxiv.org/abs/2112.05682v2
+
+from functools import partial
+import torch
+from torch import Tensor
+from torch.utils.checkpoint import checkpoint
+import math
+from typing import Optional, NamedTuple, List
+
+
+def narrow_trunc(
+ input: Tensor,
+ dim: int,
+ start: int,
+ length: int
+) -> Tensor:
+ return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start)
+
+
+class AttnChunk(NamedTuple):
+ exp_values: Tensor
+ exp_weights_sum: Tensor
+ max_score: Tensor
+
+
+class SummarizeChunk:
+ @staticmethod
+ def __call__(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ ) -> AttnChunk: ...
+
+
+class ComputeQueryChunkAttn:
+ @staticmethod
+ def __call__(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ ) -> Tensor: ...
+
+
+def _summarize_chunk(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ scale: float,
+) -> AttnChunk:
+ attn_weights = torch.baddbmm(
+ torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
+ query,
+ key.transpose(1,2),
+ alpha=scale,
+ beta=0,
+ )
+ max_score, _ = torch.max(attn_weights, -1, keepdim=True)
+ max_score = max_score.detach()
+ exp_weights = torch.exp(attn_weights - max_score)
+ exp_values = torch.bmm(exp_weights, value) if query.device.type == 'mps' else torch.bmm(exp_weights, value.to(exp_weights.dtype)).to(value.dtype)
+ max_score = max_score.squeeze(-1)
+ return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score)
+
+
+def _query_chunk_attention(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ summarize_chunk: SummarizeChunk,
+ kv_chunk_size: int,
+) -> Tensor:
+ batch_x_heads, k_tokens, k_channels_per_head = key.shape
+ _, _, v_channels_per_head = value.shape
+
+ def chunk_scanner(chunk_idx: int) -> AttnChunk:
+ key_chunk = narrow_trunc(
+ key,
+ 1,
+ chunk_idx,
+ kv_chunk_size
+ )
+ value_chunk = narrow_trunc(
+ value,
+ 1,
+ chunk_idx,
+ kv_chunk_size
+ )
+ return summarize_chunk(query, key_chunk, value_chunk)
+
+ chunks: List[AttnChunk] = [
+ chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
+ ]
+ acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks)))
+ chunk_values, chunk_weights, chunk_max = acc_chunk
+
+ global_max, _ = torch.max(chunk_max, 0, keepdim=True)
+ max_diffs = torch.exp(chunk_max - global_max)
+ chunk_values *= torch.unsqueeze(max_diffs, -1)
+ chunk_weights *= max_diffs
+
+ all_values = chunk_values.sum(dim=0)
+ all_weights = torch.unsqueeze(chunk_weights, -1).sum(dim=0)
+ return all_values / all_weights
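+
+
+# The recombination above is the streaming-softmax (log-sum-exp) identity: each
+# chunk i reports exp(s - m_i) aggregates under its own local max m_i, and with
+# the global max m = max_i(m_i) they are made comparable by scaling with
+# exp(m_i - m), since exp(s - m_i) * exp(m_i - m) == exp(s - m). Summing the
+# rescaled values and weights then yields the exact softmax-weighted average.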
+
+
+# TODO: refactor CrossAttention#get_attention_scores to share code with this
+def _get_attention_scores_no_kv_chunking(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ scale: float,
+) -> Tensor:
+ attn_scores = torch.baddbmm(
+ torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
+ query,
+ key.transpose(1,2),
+ alpha=scale,
+ beta=0,
+ )
+ attn_probs = attn_scores.softmax(dim=-1)
+ del attn_scores
+ hidden_states_slice = torch.bmm(attn_probs, value) if query.device.type == 'mps' else torch.bmm(attn_probs, value.to(attn_probs.dtype)).to(value.dtype)
+ return hidden_states_slice
+
+
+class ScannedChunk(NamedTuple):
+ chunk_idx: int
+ attn_chunk: AttnChunk
+
+
+def efficient_dot_product_attention(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ query_chunk_size=1024,
+ kv_chunk_size: Optional[int] = None,
+ kv_chunk_size_min: Optional[int] = None,
+ use_checkpoint=True,
+):
+ """Computes efficient dot-product attention given query, key, and value.
+ This is an efficient version of the attention mechanism presented in
+ https://arxiv.org/abs/2112.05682v2, which comes with O(sqrt(n)) memory requirements.
+ Args:
+ query: queries for calculating attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ key: keys for calculating attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ value: values to be used in attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ query_chunk_size: int: query chunk size
+ kv_chunk_size: Optional[int]: key/value chunk size. if None: defaults to sqrt(key_tokens)
+ kv_chunk_size_min: Optional[int]: key/value minimum chunk size. only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done).
+ use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference)
+ Returns:
+ Output of shape `[batch * num_heads, query_tokens, channels_per_head]`.
+ """
+ batch_x_heads, q_tokens, q_channels_per_head = query.shape
+ _, k_tokens, _ = key.shape
+ scale = q_channels_per_head ** -0.5
+
+ kv_chunk_size = min(kv_chunk_size or int(math.sqrt(k_tokens)), k_tokens)
+ if kv_chunk_size_min is not None:
+ kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
+
+ def get_query_chunk(chunk_idx: int) -> Tensor:
+ return narrow_trunc(
+ query,
+ 1,
+ chunk_idx,
+ min(query_chunk_size, q_tokens)
+ )
+
+ summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale)
+ summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
+ # when k_tokens <= kv_chunk_size there is just one key-value chunk per query chunk,
+ # so take the fast path that skips kv chunking (this is just sliced attention)
+ compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
+ _get_attention_scores_no_kv_chunking,
+ scale=scale
+ ) if k_tokens <= kv_chunk_size else (
+ partial(
+ _query_chunk_attention,
+ kv_chunk_size=kv_chunk_size,
+ summarize_chunk=summarize_chunk,
+ )
+ )
+
+ if q_tokens <= query_chunk_size:
+ # fast-path for when there's just 1 query chunk
+ return compute_query_chunk_attn(
+ query=query,
+ key=key,
+ value=value,
+ )
+
+ res = torch.zeros_like(query)
+ for i in range(math.ceil(q_tokens / query_chunk_size)):
+ attn_scores = compute_query_chunk_attn(
+ query=get_query_chunk(i * query_chunk_size),
+ key=key,
+ value=value,
+ )
+
+ res[:, i * query_chunk_size:i * query_chunk_size + attn_scores.shape[1], :] = attn_scores
+
+ return res
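+
+
+# A hedged correctness check (not part of the module): the chunked result should
+# match plain softmax attention up to floating-point error.
+#
+#     q = torch.randn(2, 4096, 64)  # [batch*heads, tokens, channels_per_head]
+#     k = torch.randn(2, 4096, 64)
+#     v = torch.randn(2, 4096, 64)
+#     ref = torch.softmax(q @ k.transpose(1, 2) * 64 ** -0.5, dim=-1) @ v
+#     out = efficient_dot_product_attention(q, k, v, query_chunk_size=1024, use_checkpoint=False)
+#     assert torch.allclose(out, ref, atol=1e-4)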
diff --git a/modules/sysinfo.py b/modules/sysinfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdad85b8a79e97b97b48eb2819554648986ebc7c
--- /dev/null
+++ b/modules/sysinfo.py
@@ -0,0 +1,176 @@
+import json
+import os
+import sys
+import traceback
+
+import platform
+import hashlib
+import pkg_resources
+import psutil
+import re
+
+import launch
+from modules import paths_internal, timer, shared, extensions, errors
+
+checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY"
+environment_whitelist = {
+ "GIT",
+ "INDEX_URL",
+ "WEBUI_LAUNCH_LIVE_OUTPUT",
+ "GRADIO_ANALYTICS_ENABLED",
+ "PYTHONPATH",
+ "TORCH_INDEX_URL",
+ "TORCH_COMMAND",
+ "REQS_FILE",
+ "XFORMERS_PACKAGE",
+ "CLIP_PACKAGE",
+ "OPENCLIP_PACKAGE",
+ "STABLE_DIFFUSION_REPO",
+ "K_DIFFUSION_REPO",
+ "CODEFORMER_REPO",
+ "BLIP_REPO",
+ "STABLE_DIFFUSION_COMMIT_HASH",
+ "K_DIFFUSION_COMMIT_HASH",
+ "CODEFORMER_COMMIT_HASH",
+ "BLIP_COMMIT_HASH",
+ "COMMANDLINE_ARGS",
+ "IGNORE_CMD_ARGS_ERRORS",
+}
+
+
+def pretty_bytes(num, suffix="B"):
+ for unit in ["", "K", "M", "G", "T", "P", "E", "Z", "Y"]:
+ if abs(num) < 1024 or unit == 'Y':
+ return f"{num:.0f}{unit}{suffix}"
+ num /= 1024
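+
+# For example: pretty_bytes(500) == "500B", pretty_bytes(1024) == "1KB",
+# and pretty_bytes(1024**2) == "1MB" (values are rounded to whole units).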
+
+
+def get():
+ res = get_dict()
+
+ text = json.dumps(res, ensure_ascii=False, indent=4)
+
+ h = hashlib.sha256(text.encode("utf8"))
+ text = text.replace(checksum_token, h.hexdigest())
+
+ return text
+
+
+re_checksum = re.compile(r'"Checksum": "([0-9a-fA-F]{64})"')
+
+
+def check(x):
+ m = re.search(re_checksum, x)
+ if not m:
+ return False
+
+ replaced = re.sub(re_checksum, f'"Checksum": "{checksum_token}"', x)
+
+ h = hashlib.sha256(replaced.encode("utf8"))
+ return h.hexdigest() == m.group(1)
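+
+# Round-trip sketch: get() serializes the report with the literal token in the
+# "Checksum" field, hashes that text, and substitutes the hash in; check()
+# swaps the embedded hash back to the token, re-hashes, and compares. So for
+# an unmodified report, sysinfo.check(sysinfo.get()) returns True.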
+
+
+def get_dict():
+ ram = psutil.virtual_memory()
+
+ res = {
+ "Platform": platform.platform(),
+ "Python": platform.python_version(),
+ "Version": launch.git_tag(),
+ "Commit": launch.commit_hash(),
+ "Script path": paths_internal.script_path,
+ "Data path": paths_internal.data_path,
+ "Extensions dir": paths_internal.extensions_dir,
+ "Checksum": checksum_token,
+ "Commandline": get_argv(),
+ "Torch env info": get_torch_sysinfo(),
+ "Exceptions": get_exceptions(),
+ "CPU": {
+ "model": platform.processor(),
+ "count logical": psutil.cpu_count(logical=True),
+ "count physical": psutil.cpu_count(logical=False),
+ },
+ "RAM": {
+ x: pretty_bytes(getattr(ram, x, 0)) for x in ["total", "used", "free", "active", "inactive", "buffers", "cached", "shared"] if getattr(ram, x, 0) != 0
+ },
+ "Extensions": get_extensions(enabled=True),
+ "Inactive extensions": get_extensions(enabled=False),
+ "Environment": get_environment(),
+ "Config": get_config(),
+ "Startup": timer.startup_record,
+ "Packages": sorted([f"{pkg.key}=={pkg.version}" for pkg in pkg_resources.working_set]),
+ }
+
+ return res
+
+
+def format_traceback(tb):
+ return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
+
+
+def format_exception(e, tb):
+ return {"exception": str(e), "traceback": format_traceback(tb)}
+
+
+def get_exceptions():
+ try:
+ return list(reversed(errors.exception_records))
+ except Exception as e:
+ return str(e)
+
+
+def get_environment():
+ return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist}
+
+
+def get_argv():
+ res = []
+
+ for v in sys.argv:
+ if shared.cmd_opts.gradio_auth and shared.cmd_opts.gradio_auth == v:
+ res.append("")
+ continue
+
+ if shared.cmd_opts.api_auth and shared.cmd_opts.api_auth == v:
+ res.append("")
+ continue
+
+ res.append(v)
+
+ return res
+
+re_newline = re.compile(r"\r*\n")
+
+
+def get_torch_sysinfo():
+ try:
+ import torch.utils.collect_env
+ info = torch.utils.collect_env.get_env_info()._asdict()
+
+ return {k: re.split(re_newline, str(v)) if "\n" in str(v) else v for k, v in info.items()}
+ except Exception as e:
+ return str(e)
+
+
+def get_extensions(*, enabled):
+
+ try:
+ def to_json(x: extensions.Extension):
+ return {
+ "name": x.name,
+ "path": x.path,
+ "version": x.version,
+ "branch": x.branch,
+ "remote": x.remote,
+ }
+
+ return [to_json(x) for x in extensions.extensions if not x.is_builtin and x.enabled == enabled]
+ except Exception as e:
+ return str(e)
+
+
+def get_config():
+ try:
+ return shared.opts.data
+ except Exception as e:
+ return str(e)
diff --git a/modules/textual_inversion/__pycache__/autocrop.cpython-39.pyc b/modules/textual_inversion/__pycache__/autocrop.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5ef155b72cd59a7d95f118a35efec2e58b0634e
Binary files /dev/null and b/modules/textual_inversion/__pycache__/autocrop.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/dataset.cpython-39.pyc b/modules/textual_inversion/__pycache__/dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29cd9b39eb870819967bd245a208d16e159f9527
Binary files /dev/null and b/modules/textual_inversion/__pycache__/dataset.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/image_embedding.cpython-39.pyc b/modules/textual_inversion/__pycache__/image_embedding.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7861104dd2c5806734fefe6ad5aa712c35d9b61d
Binary files /dev/null and b/modules/textual_inversion/__pycache__/image_embedding.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/learn_schedule.cpython-39.pyc b/modules/textual_inversion/__pycache__/learn_schedule.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f54217e0c4272125814e0be9de17e85bf6053bf2
Binary files /dev/null and b/modules/textual_inversion/__pycache__/learn_schedule.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/logging.cpython-39.pyc b/modules/textual_inversion/__pycache__/logging.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47c7cf0a66e499b8a3641c5ac1579ef270ee6387
Binary files /dev/null and b/modules/textual_inversion/__pycache__/logging.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/preprocess.cpython-39.pyc b/modules/textual_inversion/__pycache__/preprocess.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3563b8bbd6b6f16c11f2f72b5ae15ecae110982
Binary files /dev/null and b/modules/textual_inversion/__pycache__/preprocess.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/textual_inversion.cpython-39.pyc b/modules/textual_inversion/__pycache__/textual_inversion.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23fe6a6ee1c1136d7603e1642048e2f8bf3296a5
Binary files /dev/null and b/modules/textual_inversion/__pycache__/textual_inversion.cpython-39.pyc differ
diff --git a/modules/textual_inversion/__pycache__/ui.cpython-39.pyc b/modules/textual_inversion/__pycache__/ui.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb6fd33a521f0d55d7047fecdf10ab690f345411
Binary files /dev/null and b/modules/textual_inversion/__pycache__/ui.cpython-39.pyc differ
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a42d3503fd100a59759f18149c18d9618af440b
--- /dev/null
+++ b/modules/textual_inversion/autocrop.py
@@ -0,0 +1,340 @@
+import cv2
+import requests
+import os
+import numpy as np
+from PIL import ImageDraw
+
+GREEN = "#0F0"
+BLUE = "#00F"
+RED = "#F00"
+
+
+def crop_image(im, settings):
+ """ Intelligently crop an image to the subject matter """
+
+ scale_by = 1
+ if is_landscape(im.width, im.height):
+ scale_by = settings.crop_height / im.height
+ elif is_portrait(im.width, im.height):
+ scale_by = settings.crop_width / im.width
+ elif is_square(im.width, im.height):
+ if is_square(settings.crop_width, settings.crop_height):
+ scale_by = settings.crop_width / im.width
+ elif is_landscape(settings.crop_width, settings.crop_height):
+ scale_by = settings.crop_width / im.width
+ elif is_portrait(settings.crop_width, settings.crop_height):
+ scale_by = settings.crop_height / im.height
+
+ im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
+ im_debug = im.copy()
+
+ focus = focal_point(im_debug, settings)
+
+ # take the focal point and turn it into crop coordinates that try to center over the focal
+ # point but then get adjusted back into the frame
+ y_half = int(settings.crop_height / 2)
+ x_half = int(settings.crop_width / 2)
+
+ x1 = focus.x - x_half
+ if x1 < 0:
+ x1 = 0
+ elif x1 + settings.crop_width > im.width:
+ x1 = im.width - settings.crop_width
+
+ y1 = focus.y - y_half
+ if y1 < 0:
+ y1 = 0
+ elif y1 + settings.crop_height > im.height:
+ y1 = im.height - settings.crop_height
+
+ x2 = x1 + settings.crop_width
+ y2 = y1 + settings.crop_height
+
+ crop = [x1, y1, x2, y2]
+
+ results = [im.crop(tuple(crop))]
+
+ if settings.annotate_image:
+ d = ImageDraw.Draw(im_debug)
+ rect = list(crop)
+ rect[2] -= 1
+ rect[3] -= 1
+ d.rectangle(rect, outline=GREEN)
+ results.append(im_debug)
+ if settings.desktop_view_image:
+ im_debug.show()
+
+ return results
+
+def focal_point(im, settings):
+ corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
+ entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
+ face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
+
+ pois = []
+
+ weight_pref_total = 0
+ if corner_points:
+ weight_pref_total += settings.corner_points_weight
+ if entropy_points:
+ weight_pref_total += settings.entropy_points_weight
+ if face_points:
+ weight_pref_total += settings.face_points_weight
+
+ corner_centroid = None
+ if corner_points:
+ corner_centroid = centroid(corner_points)
+ corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+ pois.append(corner_centroid)
+
+ entropy_centroid = None
+ if entropy_points:
+ entropy_centroid = centroid(entropy_points)
+ entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+ pois.append(entropy_centroid)
+
+ face_centroid = None
+ if face_points:
+ face_centroid = centroid(face_points)
+ face_centroid.weight = settings.face_points_weight / weight_pref_total
+ pois.append(face_centroid)
+
+ average_point = poi_average(pois, settings)
+
+ if settings.annotate_image:
+ d = ImageDraw.Draw(im)
+ max_size = min(im.width, im.height) * 0.07
+ if corner_centroid is not None:
+ color = BLUE
+ box = corner_centroid.bounding(max_size * corner_centroid.weight)
+ d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(corner_points) > 1:
+ for f in corner_points:
+ d.rectangle(f.bounding(4), outline=color)
+ if entropy_centroid is not None:
+ color = "#ff0"
+ box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
+ d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(entropy_points) > 1:
+ for f in entropy_points:
+ d.rectangle(f.bounding(4), outline=color)
+ if face_centroid is not None:
+ color = RED
+ box = face_centroid.bounding(max_size * face_centroid.weight)
+ d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(face_points) > 1:
+ for f in face_points:
+ d.rectangle(f.bounding(4), outline=color)
+
+ d.ellipse(average_point.bounding(max_size), outline=GREEN)
+
+ return average_point
+
+
+def image_face_points(im, settings):
+ if settings.dnn_model_path is not None:
+ detector = cv2.FaceDetectorYN.create(
+ settings.dnn_model_path,
+ "",
+ (im.width, im.height),
+ 0.9, # score threshold
+ 0.3, # nms threshold
+ 5000 # keep top k before nms
+ )
+ faces = detector.detect(np.array(im))
+ results = []
+ if faces[1] is not None:
+ for face in faces[1]:
+ x = face[0]
+ y = face[1]
+ w = face[2]
+ h = face[3]
+ results.append(
+ PointOfInterest(
+ int(x + (w * 0.5)), # face focus left/right is center
+ int(y + (h * 0.33)), # face focus up/down is close to the top of the head
+ size = w,
+ weight = 1/len(faces[1])
+ )
+ )
+ return results
+ else:
+ np_im = np.array(im)
+ gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+ tries = [
+ [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
+ [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
+ [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
+ [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+ ]
+ for t in tries:
+ classifier = cv2.CascadeClassifier(t[0])
+ minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
+ try:
+ faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+ minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+ except Exception:
+ continue
+
+ if len(faces) > 0:  # detectMultiScale returns a numpy array; plain truthiness raises for multiple rows
+     rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+     return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]), weight=1/len(rects)) for r in rects]
+ return []
+
+
+def image_corner_points(im, settings):
+ grayscale = im.convert("L")
+
+ # naive attempt at preventing focal points from collecting at watermarks near the bottom
+ gd = ImageDraw.Draw(grayscale)
+ gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+
+ np_im = np.array(grayscale)
+
+ points = cv2.goodFeaturesToTrack(
+ np_im,
+ maxCorners=100,
+ qualityLevel=0.04,
+ minDistance=min(grayscale.width, grayscale.height)*0.06,
+ useHarrisDetector=False,
+ )
+
+ if points is None:
+ return []
+
+ focal_points = []
+ for point in points:
+ x, y = point.ravel()
+ focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+
+ return focal_points
+
+
+def image_entropy_points(im, settings):
+ landscape = im.height < im.width
+ portrait = im.height > im.width
+ if landscape:
+ move_idx = [0, 2]
+ move_max = im.size[0]
+ elif portrait:
+ move_idx = [1, 3]
+ move_max = im.size[1]
+ else:
+ return []
+
+ e_max = 0
+ crop_current = [0, 0, settings.crop_width, settings.crop_height]
+ crop_best = crop_current
+ while crop_current[move_idx[1]] < move_max:
+ crop = im.crop(tuple(crop_current))
+ e = image_entropy(crop)
+
+ if e > e_max:
+ e_max = e
+ crop_best = list(crop_current)
+
+ crop_current[move_idx[0]] += 4
+ crop_current[move_idx[1]] += 4
+
+ x_mid = int(crop_best[0] + settings.crop_width/2)
+ y_mid = int(crop_best[1] + settings.crop_height/2)
+
+ return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
+
+
+def image_entropy(im):
+ # entropy-like score of the binarized image: sums -log2(p) over occupied
+ # histogram bins (not weighted by p, so not strict Shannon entropy)
+ # band = np.asarray(im.convert("L"))
+ band = np.asarray(im.convert("1"), dtype=np.uint8)
+ hist, _ = np.histogram(band, bins=range(0, 256))
+ hist = hist[hist > 0]
+ return -np.log2(hist / hist.sum()).sum()
+
+
+def centroid(pois):
+ x = [poi.x for poi in pois]
+ y = [poi.y for poi in pois]
+ return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois))
+
+
+def poi_average(pois, settings):
+ weight = 0.0
+ x = 0.0
+ y = 0.0
+ for poi in pois:
+ weight += poi.weight
+ x += poi.x * poi.weight
+ y += poi.y * poi.weight
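+ # `weight and x / weight` guards against division by zero: it evaluates to
+ # 0.0 when the total weight is 0.0, and to the weighted mean otherwise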
+ avg_x = round(weight and x / weight)
+ avg_y = round(weight and y / weight)
+
+ return PointOfInterest(avg_x, avg_y)
+
+
+def is_landscape(w, h):
+ return w > h
+
+
+def is_portrait(w, h):
+ return h > w
+
+
+def is_square(w, h):
+ return w == h
+
+
+def download_and_cache_models(dirname):
+ download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
+ model_file_name = 'face_detection_yunet.onnx'
+
+ os.makedirs(dirname, exist_ok=True)
+
+ cache_file = os.path.join(dirname, model_file_name)
+ if not os.path.exists(cache_file):
+ print(f"downloading face detection model from '{download_url}' to '{cache_file}'")
+ response = requests.get(download_url, timeout=60)
+ response.raise_for_status()  # don't cache an HTML error page as the model
+ with open(cache_file, "wb") as f:
+     f.write(response.content)
+
+ if os.path.exists(cache_file):
+ return cache_file
+ return None
+
+
+class PointOfInterest:
+ def __init__(self, x, y, weight=1.0, size=10):
+ self.x = x
+ self.y = y
+ self.weight = weight
+ self.size = size
+
+ def bounding(self, size):
+ return [
+ self.x - size // 2,
+ self.y - size // 2,
+ self.x + size // 2,
+ self.y + size // 2
+ ]
+
+
+class Settings:
+ def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None):
+ self.crop_width = crop_width
+ self.crop_height = crop_height
+ self.corner_points_weight = corner_points_weight
+ self.entropy_points_weight = entropy_points_weight
+ self.face_points_weight = face_points_weight
+ self.annotate_image = annotate_image
+ self.desktop_view_image = False
+ self.dnn_model_path = dnn_model_path
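+
+
+# Illustrative usage (not part of the original module); the image path is made up:
+#
+#     from PIL import Image
+#     im = Image.open("photo.jpg").convert("RGB")
+#     settings = Settings(crop_width=512, crop_height=512, face_points_weight=0.9)
+#     cropped, *debug = crop_image(im, settings)
+#     cropped.save("photo_cropped.png")
+#
+# crop_image returns the crop first, plus an annotated debug image when
+# settings.annotate_image is True.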
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a3437571dd2762beab2de811ef93be0e425fb3
--- /dev/null
+++ b/modules/textual_inversion/dataset.py
@@ -0,0 +1,246 @@
+import os
+import numpy as np
+import PIL
+import torch
+from PIL import Image
+from torch.utils.data import Dataset, DataLoader, Sampler
+from torchvision import transforms
+from collections import defaultdict
+from random import shuffle, choices
+
+import random
+import tqdm
+from modules import devices, shared
+import re
+
+from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
+
+re_numbers_at_start = re.compile(r"^[-\d]+\s*")
+
+
+class DatasetEntry:
+ def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None, weight=None):
+ self.filename = filename
+ self.filename_text = filename_text
+ self.weight = weight
+ self.latent_dist = latent_dist
+ self.latent_sample = latent_sample
+ self.cond = cond
+ self.cond_text = cond_text
+ self.pixel_values = pixel_values
+
+
+class PersonalizedBase(Dataset):
+ def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False):
+ re_word = re.compile(shared.opts.dataset_filename_word_regex) if shared.opts.dataset_filename_word_regex else None
+
+ self.placeholder_token = placeholder_token
+
+ self.flip = transforms.RandomHorizontalFlip(p=flip_p)
+
+ self.dataset = []
+
+ with open(template_file, "r", encoding="utf8") as file:
+ lines = [x.strip() for x in file.readlines()]
+
+ self.lines = lines
+
+ assert data_root, 'dataset directory not specified'
+ assert os.path.isdir(data_root), "Dataset directory doesn't exist"
+ assert os.listdir(data_root), "Dataset directory is empty"
+
+ self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
+
+ self.shuffle_tags = shuffle_tags
+ self.tag_drop_out = tag_drop_out
+ groups = defaultdict(list)
+
+ print("Preparing dataset...")
+ for path in tqdm.tqdm(self.image_paths):
+ alpha_channel = None
+ if shared.state.interrupted:
+ raise Exception("interrupted")
+ try:
+ image = Image.open(path)
+ # Currently does not work for single-color transparency;
+ # we would need to read image.info['transparency'] for that
+ if use_weight and 'A' in image.getbands():
+ alpha_channel = image.getchannel('A')
+ image = image.convert('RGB')
+ if not varsize:
+ image = image.resize((width, height), PIL.Image.Resampling.BICUBIC)
+ except Exception:
+ continue
+
+ text_filename = f"{os.path.splitext(path)[0]}.txt"
+ filename = os.path.basename(path)
+
+ if os.path.exists(text_filename):
+ with open(text_filename, "r", encoding="utf8") as file:
+ filename_text = file.read()
+ else:
+ filename_text = os.path.splitext(filename)[0]
+ filename_text = re.sub(re_numbers_at_start, '', filename_text)
+ if re_word:
+ tokens = re_word.findall(filename_text)
+ filename_text = (shared.opts.dataset_filename_join_string or "").join(tokens)
+
+ npimage = np.array(image).astype(np.uint8)
+ npimage = (npimage / 127.5 - 1.0).astype(np.float32)
+
+ torchdata = torch.from_numpy(npimage).permute(2, 0, 1).to(device=device, dtype=torch.float32)
+ latent_sample = None
+
+ with devices.autocast():
+ latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0))
+
+ # Perform latent sampling even when using random sampling;
+ # we need the sample dimensions for the weights
+ if latent_sampling_method == "deterministic":
+ if isinstance(latent_dist, DiagonalGaussianDistribution):
+ # Works only for DiagonalGaussianDistribution
+ latent_dist.std = 0
+ else:
+ latent_sampling_method = "once"
+ latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
+
+ if use_weight and alpha_channel is not None:
+ channels, *latent_size = latent_sample.shape  # latent_size is [height, width]
+ # PIL's resize expects (width, height), so swap the latent dimensions
+ weight_img = alpha_channel.resize((latent_size[1], latent_size[0]))
+ npweight = np.array(weight_img).astype(np.float32)
+ # Repeat for every channel in the latent sample
+ weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size)
+ # Normalize the weight to a minimum of 0 and a mean of 1 so the loss is comparable to default
+ weight -= weight.min()
+ weight /= weight.mean()
+ elif use_weight:
+ # If an image does not have an alpha channel, add an all-ones weight map anyway so we can stack it later
+ weight = torch.ones(latent_sample.shape)
+ else:
+ weight = None
+
+ if latent_sampling_method == "random":
+ entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight)
+ else:
+ entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample, weight=weight)
+
+ if not (self.tag_drop_out != 0 or self.shuffle_tags):
+ entry.cond_text = self.create_text(filename_text)
+
+ if include_cond and not (self.tag_drop_out != 0 or self.shuffle_tags):
+ with devices.autocast():
+ entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0)
+ groups[image.size].append(len(self.dataset))
+ self.dataset.append(entry)
+ del torchdata
+ del latent_dist
+ del latent_sample
+ del weight
+
+ self.length = len(self.dataset)
+ self.groups = list(groups.values())
+ assert self.length > 0, "No images have been found in the dataset."
+ self.batch_size = min(batch_size, self.length)
+ self.gradient_step = min(gradient_step, self.length // self.batch_size)
+ self.latent_sampling_method = latent_sampling_method
+
+ if len(groups) > 1:
+ print("Buckets:")
+ for (w, h), ids in sorted(groups.items(), key=lambda x: x[0]):
+ print(f" {w}x{h}: {len(ids)}")
+ print()
+
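+ # Example (template line made up): given a template line
+ # "a photo of [name], [filewords]" and filename_text "tag1,tag2", this
+ # returns "a photo of *, tag1,tag2" with * being the placeholder token
+ # (tags may be dropped or shuffled first).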
+ def create_text(self, filename_text):
+ text = random.choice(self.lines)
+ tags = filename_text.split(',')
+ if self.tag_drop_out != 0:
+ tags = [t for t in tags if random.random() > self.tag_drop_out]
+ if self.shuffle_tags:
+ random.shuffle(tags)
+ text = text.replace("[filewords]", ','.join(tags))
+ text = text.replace("[name]", self.placeholder_token)
+ return text
+
+ def __len__(self):
+ return self.length
+
+ def __getitem__(self, i):
+ entry = self.dataset[i]
+ if self.tag_drop_out != 0 or self.shuffle_tags:
+ entry.cond_text = self.create_text(entry.filename_text)
+ if self.latent_sampling_method == "random":
+ entry.latent_sample = shared.sd_model.get_first_stage_encoding(entry.latent_dist).to(devices.cpu)
+ return entry
+
+
+class GroupedBatchSampler(Sampler):
+ def __init__(self, data_source: PersonalizedBase, batch_size: int):
+ super().__init__(data_source)
+
+ n = len(data_source)
+ self.groups = data_source.groups
+ self.len = n_batch = n // batch_size
+ expected = [len(g) / n * n_batch * batch_size for g in data_source.groups]
+ self.base = [int(e) // batch_size for e in expected]
+ self.n_rand_batches = nrb = n_batch - sum(self.base)
+ self.probs = [e % batch_size / nrb / batch_size if nrb > 0 else 0 for e in expected]
+ self.batch_size = batch_size
+
+ def __len__(self):
+ return self.len
+
+ def __iter__(self):
+ b = self.batch_size
+
+ for g in self.groups:
+ shuffle(g)
+
+ batches = []
+ for g in self.groups:
+ batches.extend(g[i*b:(i+1)*b] for i in range(len(g) // b))
+ for _ in range(self.n_rand_batches):
+ rand_group = choices(self.groups, self.probs)[0]
+ batches.append(choices(rand_group, k=b))
+
+ shuffle(batches)
+
+ yield from batches
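+
+ # Every yielded batch comes from a single resolution group, so stacked
+ # latents always share a shape; the random batches fill the remaining
+ # batch budget, sampling groups in proportion to their leftover fractions.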
+
+
+class PersonalizedDataLoader(DataLoader):
+ def __init__(self, dataset, latent_sampling_method="once", batch_size=1, pin_memory=False):
+ super().__init__(dataset, batch_sampler=GroupedBatchSampler(dataset, batch_size), pin_memory=pin_memory)
+ if latent_sampling_method == "random":
+ self.collate_fn = collate_wrapper_random
+ else:
+ self.collate_fn = collate_wrapper
+
+
+class BatchLoader:
+ def __init__(self, data):
+ self.cond_text = [entry.cond_text for entry in data]
+ self.cond = [entry.cond for entry in data]
+ self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
+ if all(entry.weight is not None for entry in data):
+ self.weight = torch.stack([entry.weight for entry in data]).squeeze(1)
+ else:
+ self.weight = None
+ #self.emb_index = [entry.emb_index for entry in data]
+ #print(self.latent_sample.device)
+
+ def pin_memory(self):
+ self.latent_sample = self.latent_sample.pin_memory()
+ return self
+
+def collate_wrapper(batch):
+ return BatchLoader(batch)
+
+class BatchLoaderRandom(BatchLoader):
+ def __init__(self, data):
+ super().__init__(data)
+
+ def pin_memory(self):
+ return self
+
+def collate_wrapper_random(batch):
+ return BatchLoaderRandom(batch)
diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py
new file mode 100644
index 0000000000000000000000000000000000000000..09df97e9572ff10fb90cc00e8cbbcff2c8089538
--- /dev/null
+++ b/modules/textual_inversion/image_embedding.py
@@ -0,0 +1,220 @@
+import base64
+import json
+import warnings
+
+import numpy as np
+import zlib
+from PIL import Image, ImageDraw
+import torch
+
+
+class EmbeddingEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, torch.Tensor):
+ return {'TORCHTENSOR': obj.cpu().detach().numpy().tolist()}
+ return json.JSONEncoder.default(self, obj)
+
+
+class EmbeddingDecoder(json.JSONDecoder):
+ def __init__(self, *args, **kwargs):
+ json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)
+
+ def object_hook(self, d):
+ if 'TORCHTENSOR' in d:
+ return torch.from_numpy(np.array(d['TORCHTENSOR']))
+ return d
+
+
+def embedding_to_b64(data):
+ d = json.dumps(data, cls=EmbeddingEncoder)
+ return base64.b64encode(d.encode())
+
+
+def embedding_from_b64(data):
+ d = base64.b64decode(data)
+ return json.loads(d, cls=EmbeddingDecoder)
+
+
+def lcg(m=2**32, a=1664525, c=1013904223, seed=0):
+ while True:
+ seed = (a * seed + c) % m
+ yield seed % 255
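+
+# With the default parameters and seed 0, the first value is
+# (1664525 * 0 + 1013904223) % 2**32 == 1013904223, yielded as
+# 1013904223 % 255 == 253 (note the modulus is 255, not 256).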
+
+
+def xor_block(block):
+ g = lcg()
+ randblock = np.array([next(g) for _ in range(np.prod(block.shape))]).astype(np.uint8).reshape(block.shape)
+ return np.bitwise_xor(block.astype(np.uint8), randblock & 0x0F)
+
+
+def style_block(block, sequence):
+ im = Image.new('RGB', (block.shape[1], block.shape[0]))
+ draw = ImageDraw.Draw(im)
+ i = 0
+ for x in range(-6, im.size[0], 8):
+ for yi, y in enumerate(range(-6, im.size[1], 8)):
+ offset = 0
+ if yi % 2 == 0:
+ offset = 4
+ shade = sequence[i % len(sequence)]
+ i += 1
+ draw.ellipse((x+offset, y, x+6+offset, y+6), fill=(shade, shade, shade))
+
+ fg = np.array(im).astype(np.uint8) & 0xF0
+
+ return block ^ fg
+
+
+def insert_image_data_embed(image, data):
+ d = 3
+ data_compressed = zlib.compress(json.dumps(data, cls=EmbeddingEncoder).encode(), level=9)
+ data_np_ = np.frombuffer(data_compressed, np.uint8).copy()
+ data_np_high = data_np_ >> 4
+ data_np_low = data_np_ & 0x0F
+
+ h = image.size[1]
+ next_size = data_np_low.shape[0] + (h-(data_np_low.shape[0] % h))
+ next_size = next_size + ((h*d)-(next_size % (h*d)))
+
+ data_np_low = np.resize(data_np_low, next_size)
+ data_np_low = data_np_low.reshape((h, -1, d))
+
+ data_np_high = np.resize(data_np_high, next_size)
+ data_np_high = data_np_high.reshape((h, -1, d))
+
+ edge_style = list(data['string_to_param'].values())[0].cpu().detach().numpy().tolist()[0][:1024]
+ edge_style = (np.abs(edge_style)/np.max(np.abs(edge_style))*255).astype(np.uint8)
+
+ data_np_low = style_block(data_np_low, sequence=edge_style)
+ data_np_low = xor_block(data_np_low)
+ data_np_high = style_block(data_np_high, sequence=edge_style[::-1])
+ data_np_high = xor_block(data_np_high)
+
+ im_low = Image.fromarray(data_np_low, mode='RGB')
+ im_high = Image.fromarray(data_np_high, mode='RGB')
+
+ background = Image.new('RGB', (image.size[0]+im_low.size[0]+im_high.size[0]+2, image.size[1]), (0, 0, 0))
+ background.paste(im_low, (0, 0))
+ background.paste(image, (im_low.size[0]+1, 0))
+ background.paste(im_high, (im_low.size[0]+1+image.size[0]+1, 0))
+
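+ # Resulting layout, left to right: low-nibble block, 1px black column,
+ # original image, 1px black column, high-nibble block; the black columns
+ # are what extract_image_data_embed uses to find the data blocks again.
+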
+ return background
+
+
+def crop_black(img, tol=0):
+ mask = (img > tol).all(2)
+ mask0, mask1 = mask.any(0), mask.any(1)
+ col_start, col_end = mask0.argmax(), mask.shape[1]-mask0[::-1].argmax()
+ row_start, row_end = mask1.argmax(), mask.shape[0]-mask1[::-1].argmax()
+ return img[row_start:row_end, col_start:col_end]
+
+
+def extract_image_data_embed(image):
+ d = 3
+ outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F
+ black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0)
+ if black_cols[0].shape[0] < 2:
+ print('No Image data blocks found.')
+ return None
+
+ data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8)
+ data_block_upper = outarr[:, black_cols[0].max()+1:, :].astype(np.uint8)
+
+ data_block_lower = xor_block(data_block_lower)
+ data_block_upper = xor_block(data_block_upper)
+
+ data_block = (data_block_upper << 4) | (data_block_lower)
+ data_block = data_block.flatten().tobytes()
+
+ data = zlib.decompress(data_block)
+ return json.loads(data, cls=EmbeddingDecoder)
+
+
+def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, textfont=None):
+ from modules.images import get_font
+ if textfont:
+ warnings.warn(
+ 'passing in a textfont to caption_image_overlay is deprecated and does nothing',
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ from math import cos
+
+ image = srcimage.copy()
+ fontsize = 32
+ factor = 1.5
+ gradient = Image.new('RGBA', (1, image.size[1]), color=(0, 0, 0, 0))
+ for y in range(image.size[1]):
+ mag = 1-cos(y/image.size[1]*factor)
+ mag = max(mag, 1-cos((image.size[1]-y)/image.size[1]*factor*1.1))
+ gradient.putpixel((0, y), (0, 0, 0, int(mag*255)))
+ image = Image.alpha_composite(image.convert('RGBA'), gradient.resize(image.size))
+
+ draw = ImageDraw.Draw(image)
+
+ font = get_font(fontsize)
+ padding = 10
+
+ _, _, w, h = draw.textbbox((0, 0), title, font=font)
+ fontsize = min(int(fontsize * (((image.size[0]*0.75)-(padding*4))/w)), 72)
+ font = get_font(fontsize)
+ _, _, w, h = draw.textbbox((0, 0), title, font=font)
+ draw.text((padding, padding), title, anchor='lt', font=font, fill=(255, 255, 255, 230))
+
+ _, _, w, h = draw.textbbox((0, 0), footerLeft, font=font)
+ fontsize_left = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
+ _, _, w, h = draw.textbbox((0, 0), footerMid, font=font)
+ fontsize_mid = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
+ _, _, w, h = draw.textbbox((0, 0), footerRight, font=font)
+ fontsize_right = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
+
+ font = get_font(min(fontsize_left, fontsize_mid, fontsize_right))
+
+ draw.text((padding, image.size[1]-padding), footerLeft, anchor='ls', font=font, fill=(255, 255, 255, 230))
+ draw.text((image.size[0]/2, image.size[1]-padding), footerMid, anchor='ms', font=font, fill=(255, 255, 255, 230))
+ draw.text((image.size[0]-padding, image.size[1]-padding), footerRight, anchor='rs', font=font, fill=(255, 255, 255, 230))
+
+ return image
+
+
+if __name__ == '__main__':
+
+ testEmbed = Image.open('test_embedding.png')
+ data = extract_image_data_embed(testEmbed)
+ assert data is not None
+
+ data = embedding_from_b64(testEmbed.text['sd-ti-embedding'])
+ assert data is not None
+
+ image = Image.new('RGBA', (512, 512), (255, 255, 200, 255))
+ cap_image = caption_image_overlay(image, 'title', 'footerLeft', 'footerMid', 'footerRight')
+
+ test_embed = {'string_to_param': {'*': torch.from_numpy(np.random.random((2, 4096)))}}
+
+ embedded_image = insert_image_data_embed(cap_image, test_embed)
+
+ retrieved_embed = extract_image_data_embed(embedded_image)
+
+ assert str(retrieved_embed) == str(test_embed)
+
+ embedded_image2 = insert_image_data_embed(cap_image, retrieved_embed)
+
+ assert embedded_image == embedded_image2
+
+ g = lcg()
+ shared_random = np.array([next(g) for _ in range(100)]).astype(np.uint8).tolist()
+
+ reference_random = [253, 242, 127, 44, 157, 27, 239, 133, 38, 79, 167, 4, 177,
+ 95, 130, 79, 78, 14, 52, 215, 220, 194, 126, 28, 240, 179,
+ 160, 153, 149, 50, 105, 14, 21, 218, 199, 18, 54, 198, 193,
+ 38, 128, 19, 53, 195, 124, 75, 205, 12, 6, 145, 0, 28,
+ 30, 148, 8, 45, 218, 171, 55, 249, 97, 166, 12, 35, 0,
+ 41, 221, 122, 215, 170, 31, 113, 186, 97, 119, 31, 23, 185,
+ 66, 140, 30, 41, 37, 63, 137, 109, 216, 55, 159, 145, 82,
+ 204, 86, 73, 222, 44, 198, 118, 240, 97]
+
+ assert shared_random == reference_random
+
+ hundred_k_random_sum = sum(np.array([next(g) for _ in range(100000)]).astype(np.uint8).tolist())
+
+ assert 12731374 == hundred_k_random_sum
diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py
new file mode 100644
index 0000000000000000000000000000000000000000..252368a66bcc98db86f6d49f99d14d2d75f5543a
--- /dev/null
+++ b/modules/textual_inversion/learn_schedule.py
@@ -0,0 +1,81 @@
+import tqdm
+
+
+class LearnScheduleIterator:
+ def __init__(self, learn_rate, max_steps, cur_step=0):
+ """
+ specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to use an lr of 0.001 until step 100, 0.00001 until step 1000, and 1e-5 until step 10000
+ """
+
+ pairs = learn_rate.split(',')
+ self.rates = []
+ self.it = 0
+ self.maxit = 0
+ try:
+ for pair in pairs:
+ if not pair.strip():
+ continue
+ tmp = pair.split(':')
+ if len(tmp) == 2:
+ step = int(tmp[1])
+ if step > cur_step:
+ self.rates.append((float(tmp[0]), min(step, max_steps)))
+ self.maxit += 1
+ if step > max_steps:
+ return
+ elif step == -1:
+ self.rates.append((float(tmp[0]), max_steps))
+ self.maxit += 1
+ return
+ else:
+ self.rates.append((float(tmp[0]), max_steps))
+ self.maxit += 1
+ return
+ assert self.rates
+ except (ValueError, AssertionError) as e:
+ raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') from e
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.it < self.maxit:
+ self.it += 1
+ return self.rates[self.it - 1]
+ else:
+ raise StopIteration
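+
+ # For example, LearnScheduleIterator("0.001:100, 1e-5:1000", max_steps=500)
+ # yields (0.001, 100) and then (1e-05, 500): the second end step is clamped
+ # to max_steps, and iteration stops there.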
+
+
+class LearnRateScheduler:
+ def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True):
+ self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step)
+ (self.learn_rate, self.end_step) = next(self.schedules)
+ self.verbose = verbose
+
+ if self.verbose:
+ print(f'Training at rate of {self.learn_rate} until step {self.end_step}')
+
+ self.finished = False
+
+ def step(self, step_number):
+ if step_number < self.end_step:
+ return False
+
+ try:
+ (self.learn_rate, self.end_step) = next(self.schedules)
+ except StopIteration:
+ self.finished = True
+ return False
+ return True
+
+ def apply(self, optimizer, step_number):
+ if not self.step(step_number):
+ return
+
+ if self.verbose:
+ tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}')
+
+ for pg in optimizer.param_groups:
+ pg['lr'] = self.learn_rate
+
diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..953051409dd87a119ed54f60238336bd50396184
--- /dev/null
+++ b/modules/textual_inversion/logging.py
@@ -0,0 +1,64 @@
+import datetime
+import json
+import os
+
+saved_params_shared = {
+ "batch_size",
+ "clip_grad_mode",
+ "clip_grad_value",
+ "create_image_every",
+ "data_root",
+ "gradient_step",
+ "initial_step",
+ "latent_sampling_method",
+ "learn_rate",
+ "log_directory",
+ "model_hash",
+ "model_name",
+ "num_of_dataset_images",
+ "steps",
+ "template_file",
+ "training_height",
+ "training_width",
+}
+saved_params_ti = {
+ "embedding_name",
+ "num_vectors_per_token",
+ "save_embedding_every",
+ "save_image_with_stored_embedding",
+}
+saved_params_hypernet = {
+ "activation_func",
+ "add_layer_norm",
+ "hypernetwork_name",
+ "layer_structure",
+ "save_hypernetwork_every",
+ "use_dropout",
+ "weight_init",
+}
+saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
+saved_params_previews = {
+ "preview_cfg_scale",
+ "preview_height",
+ "preview_negative_prompt",
+ "preview_prompt",
+ "preview_sampler_index",
+ "preview_seed",
+ "preview_steps",
+ "preview_width",
+}
+
+
+def save_settings_to_file(log_directory, all_params):
+ now = datetime.datetime.now()
+ params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")}
+
+ keys = saved_params_all
+ if all_params.get('preview_from_txt2img'):
+ keys = keys | saved_params_previews
+
+ params.update({k: v for k, v in all_params.items() if k in keys})
+
+ filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json'
+ with open(os.path.join(log_directory, filename), "w") as file:
+ json.dump(params, file, indent=4)
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..914adebb8c869d65c89541e6d8196bbcb750ccee
--- /dev/null
+++ b/modules/textual_inversion/preprocess.py
@@ -0,0 +1,232 @@
+import os
+from PIL import Image, ImageOps
+import math
+import tqdm
+
+from modules import paths, shared, images, deepbooru
+from modules.textual_inversion import autocrop
+
+
+def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.15, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
+ try:
+ if process_caption:
+ shared.interrogator.load()
+
+ if process_caption_deepbooru:
+ deepbooru.model.start()
+
+ preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
+
+ finally:
+
+ if process_caption:
+ shared.interrogator.send_blip_to_ram()
+
+ if process_caption_deepbooru:
+ deepbooru.model.stop()
+
+
+def listfiles(dirname):
+ return os.listdir(dirname)
+
+
+class PreprocessParams:
+ src = None
+ dstdir = None
+ subindex = 0
+ flip = False
+ process_caption = False
+ process_caption_deepbooru = False
+ preprocess_txt_action = None
+
+
+def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None):
+ caption = ""
+
+ if params.process_caption:
+ caption += shared.interrogator.generate_caption(image)
+
+ if params.process_caption_deepbooru:
+ if caption:
+ caption += ", "
+ caption += deepbooru.model.tag_multi(image)
+
+ filename_part = params.src
+ filename_part = os.path.splitext(filename_part)[0]
+ filename_part = os.path.basename(filename_part)
+
+ basename = f"{index:05}-{params.subindex}-{filename_part}"
+ image.save(os.path.join(params.dstdir, f"{basename}.png"))
+
+ if params.preprocess_txt_action == 'prepend' and existing_caption:
+ caption = f"{existing_caption} {caption}"
+ elif params.preprocess_txt_action == 'append' and existing_caption:
+ caption = f"{caption} {existing_caption}"
+ elif params.preprocess_txt_action == 'copy' and existing_caption:
+ caption = existing_caption
+
+ caption = caption.strip()
+
+ if caption:
+ with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file:
+ file.write(caption)
+
+ params.subindex += 1
+
+
+def save_pic(image, index, params, existing_caption=None):
+ save_pic_with_caption(image, index, params, existing_caption=existing_caption)
+
+ if params.flip:
+ save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption)
+
+
+def split_pic(image, inverse_xy, width, height, overlap_ratio):
+ if inverse_xy:
+ from_w, from_h = image.height, image.width
+ to_w, to_h = height, width
+ else:
+ from_w, from_h = image.width, image.height
+ to_w, to_h = width, height
+ h = from_h * to_w // from_w
+ if inverse_xy:
+ image = image.resize((h, to_w))
+ else:
+ image = image.resize((to_w, h))
+
+ split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
+ y_step = (h - to_h) / (split_count - 1)
+ for i in range(split_count):
+ y = int(y_step * i)
+ if inverse_xy:
+ splitted = image.crop((y, 0, y + to_h, to_w))
+ else:
+ splitted = image.crop((0, y, to_w, y + to_h))
+ yield splitted
+
+# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
+def center_crop(image: Image.Image, w: int, h: int):
+ iw, ih = image.size
+ if ih / h < iw / w:
+ sw = w * ih / h
+ box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
+ else:
+ sh = h * iw / w
+ box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
+ return image.resize((w, h), Image.Resampling.LANCZOS, box)
+
+
+def multicrop_pic(image: Image.Image, mindim, maxdim, minarea, maxarea, objective, threshold):
+ iw, ih = image.size
+
+ def err(w, h):
+     # aspect-ratio mismatch in [0, 1): 0 means the candidate w:h matches the source exactly
+     x = iw / ih / (w / h)
+     return 1 - (x if x < 1 else 1 / x)
+
+ wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64)
+           if minarea <= w * h <= maxarea and err(w, h) <= threshold),
+          key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1],
+          default=None)
+ return wh and center_crop(image, *wh)
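+
+# Worked example of the mismatch measure above: for a 4:3 source and a square
+# 512x512 candidate, x = (4/3) / 1 ≈ 1.333, so err = 1 - 1/x == 0.25.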
+
+
+def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
+ width = process_width
+ height = process_height
+ src = os.path.abspath(process_src)
+ dst = os.path.abspath(process_dst)
+ split_threshold = max(0.0, min(1.0, split_threshold))
+ overlap_ratio = max(0.0, min(0.9, overlap_ratio))
+
+ assert src != dst, 'same directory specified as source and destination'
+
+ os.makedirs(dst, exist_ok=True)
+
+ files = listfiles(src)
+
+ shared.state.job = "preprocess"
+ shared.state.textinfo = "Preprocessing..."
+ shared.state.job_count = len(files)
+
+ params = PreprocessParams()
+ params.dstdir = dst
+ params.flip = process_flip
+ params.process_caption = process_caption
+ params.process_caption_deepbooru = process_caption_deepbooru
+ params.preprocess_txt_action = preprocess_txt_action
+
+ pbar = tqdm.tqdm(files)
+ for index, imagefile in enumerate(pbar):
+ params.subindex = 0
+ filename = os.path.join(src, imagefile)
+ try:
+ img = Image.open(filename)
+ img = ImageOps.exif_transpose(img)
+ img = img.convert("RGB")
+ except Exception:
+ continue
+
+ description = f"Preprocessing [Image {index}/{len(files)}]"
+ pbar.set_description(description)
+ shared.state.textinfo = description
+
+ params.src = filename
+
+ existing_caption = None
+ existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt"
+ if os.path.exists(existing_caption_filename):
+ with open(existing_caption_filename, 'r', encoding="utf8") as file:
+ existing_caption = file.read()
+
+ if shared.state.interrupted:
+ break
+
+ if img.height > img.width:
+ ratio = (img.width * height) / (img.height * width)
+ inverse_xy = False
+ else:
+ ratio = (img.height * width) / (img.width * height)
+ inverse_xy = True
+
+ process_default_resize = True
+
+ if process_split and ratio < 1.0 and ratio <= split_threshold:
+ for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio):
+ save_pic(splitted, index, params, existing_caption=existing_caption)
+ process_default_resize = False
+
+ if process_focal_crop and img.height != img.width:
+
+ dnn_model_path = None
+ try:
+ dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
+ except Exception as e:
+ print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
+
+ autocrop_settings = autocrop.Settings(
+ crop_width = width,
+ crop_height = height,
+ face_points_weight = process_focal_crop_face_weight,
+ entropy_points_weight = process_focal_crop_entropy_weight,
+ corner_points_weight = process_focal_crop_edges_weight,
+ annotate_image = process_focal_crop_debug,
+ dnn_model_path = dnn_model_path,
+ )
+ for focal in autocrop.crop_image(img, autocrop_settings):
+ save_pic(focal, index, params, existing_caption=existing_caption)
+ process_default_resize = False
+
+ if process_multicrop:
+ cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
+ if cropped is not None:
+ save_pic(cropped, index, params, existing_caption=existing_caption)
+ else:
+ print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
+ process_default_resize = False
+
+ if process_keep_original_size:
+ save_pic(img, index, params, existing_caption=existing_caption)
+ process_default_resize = False
+
+ if process_default_resize:
+ img = images.resize_image(1, img, width, height)
+ save_pic(img, index, params, existing_caption=existing_caption)
+
+ shared.state.nextjob()
diff --git a/modules/textual_inversion/test_embedding.png b/modules/textual_inversion/test_embedding.png
new file mode 100644
index 0000000000000000000000000000000000000000..07e2d9afaeaff3751b68a7c0f49d8b3466474282
Binary files /dev/null and b/modules/textual_inversion/test_embedding.png differ
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..25b71b83d9c52af6608a981ff0677e440700547b
--- /dev/null
+++ b/modules/textual_inversion/textual_inversion.py
@@ -0,0 +1,694 @@
+import os
+from collections import namedtuple
+from contextlib import closing
+
+import torch
+import tqdm
+import html
+import datetime
+import csv
+import safetensors.torch
+
+import numpy as np
+from PIL import Image, PngImagePlugin
+from torch.utils.tensorboard import SummaryWriter
+
+from modules import shared, devices, sd_hijack, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
+import modules.textual_inversion.dataset
+from modules.textual_inversion.learn_schedule import LearnRateScheduler
+
+from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
+from modules.textual_inversion.logging import save_settings_to_file
+
+
+TextualInversionTemplate = namedtuple("TextualInversionTemplate", ["name", "path"])
+textual_inversion_templates = {}
+
+
+def list_textual_inversion_templates():
+ textual_inversion_templates.clear()
+
+ for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
+ for fn in fns:
+ path = os.path.join(root, fn)
+
+ textual_inversion_templates[fn] = TextualInversionTemplate(fn, path)
+
+ return textual_inversion_templates
+
+
+class Embedding:
+ def __init__(self, vec, name, step=None):
+ self.vec = vec
+ self.name = name
+ self.step = step
+ self.shape = None
+ self.vectors = 0
+ self.cached_checksum = None
+ self.sd_checkpoint = None
+ self.sd_checkpoint_name = None
+ self.optimizer_state_dict = None
+ self.filename = None
+ self.hash = None
+ self.shorthash = None
+
+ def save(self, filename):
+ embedding_data = {
+ "string_to_token": {"*": 265},
+ "string_to_param": {"*": self.vec},
+ "name": self.name,
+ "step": self.step,
+ "sd_checkpoint": self.sd_checkpoint,
+ "sd_checkpoint_name": self.sd_checkpoint_name,
+ }
+
+ torch.save(embedding_data, filename)
+
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict is not None:
+ optimizer_saved_dict = {
+ 'hash': self.checksum(),
+ 'optimizer_state_dict': self.optimizer_state_dict,
+ }
+ torch.save(optimizer_saved_dict, f"{filename}.optim")
+
+ def checksum(self):
+ if self.cached_checksum is not None:
+ return self.cached_checksum
+
+ def const_hash(a):
+ r = 0
+ for v in a:
+ r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
+ return r
+
+ self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
+ return self.cached_checksum
+
+ def set_hash(self, v):
+ self.hash = v
+ self.shorthash = self.hash[0:12]
+
+
+class DirWithTextualInversionEmbeddings:
+ def __init__(self, path):
+ self.path = path
+ self.mtime = None
+
+ def has_changed(self):
+ if not os.path.isdir(self.path):
+ return False
+
+ mt = os.path.getmtime(self.path)
+ if self.mtime is None or mt > self.mtime:
+     return True
+
+ return False
+
+ def update(self):
+ if not os.path.isdir(self.path):
+ return
+
+ self.mtime = os.path.getmtime(self.path)
+
+
+class EmbeddingDatabase:
+ def __init__(self):
+ self.ids_lookup = {}
+ self.word_embeddings = {}
+ self.skipped_embeddings = {}
+ self.expected_shape = -1
+ self.embedding_dirs = {}
+ self.previously_displayed_embeddings = ()
+
+ def add_embedding_dir(self, path):
+ self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
+
+ def clear_embedding_dirs(self):
+ self.embedding_dirs.clear()
+
+ def register_embedding(self, embedding, model):
+ return self.register_embedding_by_name(embedding, model, embedding.name)
+
+ def register_embedding_by_name(self, embedding, model, name):
+ ids = model.cond_stage_model.tokenize([name])[0]
+ first_id = ids[0]
+ if first_id not in self.ids_lookup:
+ self.ids_lookup[first_id] = []
+ if name in self.word_embeddings:
+ # remove old one from the lookup list
+ lookup = [x for x in self.ids_lookup[first_id] if x[1].name != name]
+ else:
+ lookup = self.ids_lookup[first_id]
+ if embedding is not None:
+ lookup += [(ids, embedding)]
+ self.ids_lookup[first_id] = sorted(lookup, key=lambda x: len(x[0]), reverse=True)
+ if embedding is None:
+ # unregister embedding with specified name
+ if name in self.word_embeddings:
+ del self.word_embeddings[name]
+ if len(self.ids_lookup[first_id]) == 0:
+ del self.ids_lookup[first_id]
+ return None
+ self.word_embeddings[name] = embedding
+ return embedding
+
+ def get_expected_shape(self):
+ vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
+ return vec.shape[1]
+
+ def load_from_file(self, path, filename):
+ name, ext = os.path.splitext(filename)
+ ext = ext.upper()
+
+ if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
+ _, second_ext = os.path.splitext(name)
+ if second_ext.upper() == '.PREVIEW':
+ return
+
+ embed_image = Image.open(path)
+ if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
+ data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
+ name = data.get('name', name)
+ else:
+ data = extract_image_data_embed(embed_image)
+ if data:
+ name = data.get('name', name)
+ else:
+ # if data is None, this is not an embedding, just a preview image
+ return
+ elif ext in ['.BIN', '.PT']:
+ data = torch.load(path, map_location="cpu")
+ elif ext in ['.SAFETENSORS']:
+ data = safetensors.torch.load_file(path, device="cpu")
+ else:
+ return
+
+ # textual inversion embeddings
+ if 'string_to_param' in data:
+ param_dict = data['string_to_param']
+ param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
+ assert len(param_dict) == 1, 'embedding file has multiple terms in it'
+ emb = next(iter(param_dict.items()))[1]
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ shape = vec.shape[-1]
+ vectors = vec.shape[0]
+ elif isinstance(data, dict) and 'clip_g' in data and 'clip_l' in data: # SDXL embedding
+ vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()}
+ shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
+ vectors = data['clip_g'].shape[0]
+ elif isinstance(data, dict) and isinstance(next(iter(data.values())), torch.Tensor): # diffuser concepts
+ assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
+
+ emb = next(iter(data.values()))
+ if len(emb.shape) == 1:
+ emb = emb.unsqueeze(0)
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ shape = vec.shape[-1]
+ vectors = vec.shape[0]
+ else:
+ raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
+
+ embedding = Embedding(vec, name)
+ embedding.step = data.get('step', None)
+ embedding.sd_checkpoint = data.get('sd_checkpoint', None)
+ embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
+ embedding.vectors = vectors
+ embedding.shape = shape
+ embedding.filename = path
+ embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
+
+ if self.expected_shape == -1 or self.expected_shape == embedding.shape:
+ self.register_embedding(embedding, shared.sd_model)
+ else:
+ self.skipped_embeddings[name] = embedding
+
+ def load_from_dir(self, embdir):
+ if not os.path.isdir(embdir.path):
+ return
+
+ for root, _, fns in os.walk(embdir.path, followlinks=True):
+ for fn in fns:
+ try:
+ fullfn = os.path.join(root, fn)
+
+ if os.stat(fullfn).st_size == 0:
+ continue
+
+ self.load_from_file(fullfn, fn)
+ except Exception:
+ errors.report(f"Error loading embedding {fn}", exc_info=True)
+ continue
+
+ def load_textual_inversion_embeddings(self, force_reload=False):
+ if not force_reload:
+ need_reload = False
+ for embdir in self.embedding_dirs.values():
+ if embdir.has_changed():
+ need_reload = True
+ break
+
+ if not need_reload:
+ return
+
+ self.ids_lookup.clear()
+ self.word_embeddings.clear()
+ self.skipped_embeddings.clear()
+ self.expected_shape = self.get_expected_shape()
+
+ for embdir in self.embedding_dirs.values():
+ self.load_from_dir(embdir)
+ embdir.update()
+
+ # re-sort word_embeddings because load_from_dir may not load in alphabetic order.
+ # using a temporary copy so we don't reinitialize self.word_embeddings in case other objects have a reference to it.
+ sorted_word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())}
+ self.word_embeddings.clear()
+ self.word_embeddings.update(sorted_word_embeddings)
+
+ displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys()))
+ if shared.opts.textual_inversion_print_at_load and self.previously_displayed_embeddings != displayed_embeddings:
+ self.previously_displayed_embeddings = displayed_embeddings
+ print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
+ if self.skipped_embeddings:
+ print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
+
+ def find_embedding_at_position(self, tokens, offset):
+ token = tokens[offset]
+ possible_matches = self.ids_lookup.get(token, None)
+
+ if possible_matches is None:
+ return None, None
+
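+ # candidates sharing a first token are kept ordered longest-first, so this greedy scan
+ # returns the longest embedding name that matches the upcoming tokens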
+ for ids, embedding in possible_matches:
+ if tokens[offset:offset + len(ids)] == ids:
+ return embedding, len(ids)
+
+ return None, None
+
+
+def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
+ cond_model = shared.sd_model.cond_stage_model
+
+ with devices.autocast():
+ cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
+
+ # cond_model expects at least some text, so we provide '*' as a fallback.
+ embedded = cond_model.encode_embedding_init_text(init_text or '*', num_vectors_per_token)
+ vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
+
+ # Only copy if we provided an init_text, otherwise keep vectors as zeros
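+ # If init_text tokenizes to more rows than requested, its rows are sampled at evenly
+ # spaced indices so the initialization still covers the whole encoded text.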
+ if init_text:
+ for i in range(num_vectors_per_token):
+ vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
+
+ # Remove illegal characters from name.
+ name = "".join( x for x in name if (x.isalnum() or x in "._- "))
+ fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
+ if not overwrite_old:
+ assert not os.path.exists(fn), f"file {fn} already exists"
+
+ embedding = Embedding(vec, name)
+ embedding.step = 0
+ embedding.save(fn)
+
+ return fn
+
+
+def write_loss(log_directory, filename, step, epoch_len, values):
+ if shared.opts.training_write_csv_every == 0:
+ return
+
+ if step % shared.opts.training_write_csv_every != 0:
+ return
+ write_csv_header = not os.path.exists(os.path.join(log_directory, filename))
+
+ with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
+ csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())])
+
+ if write_csv_header:
+ csv_writer.writeheader()
+
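+ # step is 1-based by the time this is called, so shift by one before deriving
+ # the epoch and the step-within-epoch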
+ epoch = (step - 1) // epoch_len
+ epoch_step = (step - 1) % epoch_len
+
+ csv_writer.writerow({
+ "step": step,
+ "epoch": epoch,
+ "epoch_step": epoch_step,
+ **values,
+ })
+
+def tensorboard_setup(log_directory):
+ os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
+ return SummaryWriter(
+ log_dir=os.path.join(log_directory, "tensorboard"),
+ flush_secs=shared.opts.training_tensorboard_flush_every)
+
+def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epoch_num):
+ tensorboard_add_scaler(tensorboard_writer, "Loss/train", loss, global_step)
+ tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", loss, step)
+ tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", learn_rate, global_step)
+ tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step)
+
+def tensorboard_add_scaler(tensorboard_writer, tag, value, step):
+ tensorboard_writer.add_scalar(tag=tag,
+ scalar_value=value, global_step=step)
+
+def tensorboard_add_image(tensorboard_writer, tag, pil_image, step):
+ # Convert a PIL image to a CHW torch tensor
+ img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
+ img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0],
+ len(pil_image.getbands()))
+ img_tensor = img_tensor.permute((2, 0, 1))
+
+ tensorboard_writer.add_image(tag, img_tensor, global_step=step)
+
+def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"):
+ assert model_name, f"{name} not selected"
+ assert learn_rate, "Learning rate is empty or 0"
+ assert isinstance(batch_size, int), "Batch size must be integer"
+ assert batch_size > 0, "Batch size must be positive"
+ assert isinstance(gradient_step, int), "Gradient accumulation step must be integer"
+ assert gradient_step > 0, "Gradient accumulation step must be positive"
+ assert data_root, "Dataset directory is empty"
+ assert os.path.isdir(data_root), "Dataset directory doesn't exist"
+ assert os.listdir(data_root), "Dataset directory is empty"
+ assert template_filename, "Prompt template file not selected"
+ assert template_file, f"Prompt template file {template_filename} not found"
+ assert os.path.isfile(template_file.path), f"Prompt template file {template_filename} doesn't exist"
+ assert steps, "Max steps is empty or 0"
+ assert isinstance(steps, int), "Max steps must be integer"
+ assert steps > 0, "Max steps must be positive"
+ assert isinstance(save_model_every, int), f"Save {name} must be integer"
+ assert save_model_every >= 0, f"Save {name} must be positive or 0"
+ assert isinstance(create_image_every, int), "Create image must be integer"
+ assert create_image_every >= 0, "Create image must be positive or 0"
+ if save_model_every or create_image_every:
+ assert log_directory, "Log directory is empty"
+
+
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ from modules import processing
+
+ save_embedding_every = save_embedding_every or 0
+ create_image_every = create_image_every or 0
+ template_file = textual_inversion_templates.get(template_filename, None)
+ validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
+ template_file = template_file.path
+
+ shared.state.job = "train-embedding"
+ shared.state.textinfo = "Initializing textual inversion training..."
+ shared.state.job_count = steps
+
+ filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
+
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
+ unload = shared.opts.unload_models_when_training
+
+ if save_embedding_every > 0:
+ embedding_dir = os.path.join(log_directory, "embeddings")
+ os.makedirs(embedding_dir, exist_ok=True)
+ else:
+ embedding_dir = None
+
+ if create_image_every > 0:
+ images_dir = os.path.join(log_directory, "images")
+ os.makedirs(images_dir, exist_ok=True)
+ else:
+ images_dir = None
+
+ if create_image_every > 0 and save_image_with_stored_embedding:
+ images_embeds_dir = os.path.join(log_directory, "image_embeddings")
+ os.makedirs(images_embeds_dir, exist_ok=True)
+ else:
+ images_embeds_dir = None
+
+ hijack = sd_hijack.model_hijack
+
+ embedding = hijack.embedding_db.word_embeddings[embedding_name]
+ checkpoint = sd_models.select_checkpoint()
+
+ initial_step = embedding.step or 0
+ if initial_step >= steps:
+ shared.state.textinfo = "Model has already been trained beyond specified max steps"
+ return embedding, filename
+
+ scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
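+ # map the UI clipping mode to the matching torch utility; None disables gradient clipping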
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
+ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
+ None
+ if clip_grad:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
+ # dataset loading may take a while, so input validations and early returns should be done before this
+ shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
+ old_parallel_processing_allowed = shared.parallel_processing_allowed
+
+ if shared.opts.training_enable_tensorboard:
+ tensorboard_writer = tensorboard_setup(log_directory)
+
+ pin_memory = shared.opts.pin_memory
+
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
+
+ if shared.opts.save_training_settings_to_txt:
+ save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
+
+ latent_sampling_method = ds.latent_sampling_method
+
+ dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
+
+ if unload:
+ shared.parallel_processing_allowed = False
+ shared.sd_model.first_stage_model.to(devices.cpu)
+
+ embedding.vec.requires_grad = True
+ optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
+ if shared.opts.save_optimizer_state:
+ optimizer_state_dict = None
+ if os.path.exists(f"{filename}.optim"):
+ optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu')
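+ # only restore the optimizer state if it was saved for this exact embedding content,
+ # which is what the stored hash guarantees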
+ if embedding.checksum() == optimizer_saved_dict.get('hash', None):
+ optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+
+ if optimizer_state_dict is not None:
+ optimizer.load_state_dict(optimizer_state_dict)
+ print("Loaded existing optimizer from checkpoint")
+ else:
+ print("No saved optimizer exists in checkpoint")
+
+ scaler = torch.cuda.amp.GradScaler()
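+ # mixed precision: GradScaler scales the loss so fp16 gradients don't underflow,
+ # pairing with the devices.autocast() context used in the training loop below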
+
+ batch_size = ds.batch_size
+ gradient_step = ds.gradient_step
+ # images processed = number of optimizer steps * batch_size * gradient_step
+ steps_per_epoch = len(ds) // batch_size // gradient_step
+ max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
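+ # truncate each epoch to a whole number of gradient-accumulation groups; the loop below
+ # uses this as a drop_last-style cutoff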
+ loss_step = 0
+ _loss_step = 0 #internal
+
+ last_saved_file = ""
+ last_saved_image = ""
+ forced_filename = ""
+ embedding_yet_to_be_embedded = False
+
+ is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}
+ img_c = None
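+ # inpainting models ('hybrid'/'concat' conditioning) also need an image conditioning tensor;
+ # it only depends on the training resolution, so it is built lazily once inside the loop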
+
+ pbar = tqdm.tqdm(total=steps - initial_step)
+ try:
+ sd_hijack_checkpoint.add()
+
+ for _ in range((steps-initial_step) * gradient_step):
+ if scheduler.finished:
+ break
+ if shared.state.interrupted:
+ break
+ for j, batch in enumerate(dl):
+ # works as a drop_last=True for gradient accumulation
+ if j == max_steps_per_epoch:
+ break
+ scheduler.apply(optimizer, embedding.step)
+ if scheduler.finished:
+ break
+ if shared.state.interrupted:
+ break
+
+ if clip_grad:
+ clip_grad_sched.step(embedding.step)
+
+ with devices.autocast():
+ x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+ if use_weight:
+ w = batch.weight.to(devices.device, non_blocking=pin_memory)
+ c = shared.sd_model.cond_stage_model(batch.cond_text)
+
+ if is_training_inpainting_model:
+ if img_c is None:
+ img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, training_height)
+
+ cond = {"c_concat": [img_c], "c_crossattn": [c]}
+ else:
+ cond = c
+
+ if use_weight:
+ loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
+ del w
+ else:
+ loss = shared.sd_model.forward(x, cond)[0] / gradient_step
+ del x
+
+ _loss_step += loss.item()
+ scaler.scale(loss).backward()
+
+ # keep accumulating micro-batch gradients until a full group of gradient_step batches is done
+ if (j + 1) % gradient_step != 0:
+ continue
+
+ if clip_grad:
+ clip_grad(embedding.vec, clip_grad_sched.learn_rate)
+
+ scaler.step(optimizer)
+ scaler.update()
+ embedding.step += 1
+ pbar.update()
+ optimizer.zero_grad(set_to_none=True)
+ loss_step = _loss_step
+ _loss_step = 0
+
+ steps_done = embedding.step + 1
+
+ epoch_num = embedding.step // steps_per_epoch
+ epoch_step = embedding.step % steps_per_epoch
+
+ description = f"Training textual inversion [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}] loss: {loss_step:.7f}"
+ pbar.set_description(description)
+ if embedding_dir is not None and steps_done % save_embedding_every == 0:
+ # Before saving, change the name to include the current step count.
+ embedding_name_every = f'{embedding_name}-{steps_done}'
+ last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
+ save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
+ embedding_yet_to_be_embedded = True
+
+ write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, {
+ "loss": f"{loss_step:.7f}",
+ "learn_rate": scheduler.learn_rate
+ })
+
+ if images_dir is not None and steps_done % create_image_every == 0:
+ forced_filename = f'{embedding_name}-{steps_done}'
+ last_saved_image = os.path.join(images_dir, forced_filename)
+
+ shared.sd_model.first_stage_model.to(devices.device)
+
+ p = processing.StableDiffusionProcessingTxt2Img(
+ sd_model=shared.sd_model,
+ do_not_save_grid=True,
+ do_not_save_samples=True,
+ do_not_reload_embeddings=True,
+ )
+
+ if preview_from_txt2img:
+ p.prompt = preview_prompt
+ p.negative_prompt = preview_negative_prompt
+ p.steps = preview_steps
+ p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
+ p.cfg_scale = preview_cfg_scale
+ p.seed = preview_seed
+ p.width = preview_width
+ p.height = preview_height
+ else:
+ p.prompt = batch.cond_text[0]
+ p.steps = 20
+ p.width = training_width
+ p.height = training_height
+
+ preview_text = p.prompt
+
+ with closing(p):
+ processed = processing.process_images(p)
+ image = processed.images[0] if len(processed.images) > 0 else None
+
+ if unload:
+ shared.sd_model.first_stage_model.to(devices.cpu)
+
+ if image is not None:
+ shared.state.assign_current_image(image)
+
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+ last_saved_image += f", prompt: {preview_text}"
+
+ if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+ tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step)
+
+ if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
+
+ last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
+
+ info = PngImagePlugin.PngInfo()
+ data = torch.load(last_saved_file)
+ info.add_text("sd-ti-embedding", embedding_to_b64(data))
+
+ title = f"<{data.get('name', '???')}>"
+
+ try:
+ vectorSize = list(data['string_to_param'].values())[0].shape[0]
+ except Exception:
+ vectorSize = '?'
+
+ checkpoint = sd_models.select_checkpoint()
+ footer_left = checkpoint.model_name
+ footer_mid = f'[{checkpoint.shorthash}]'
+ footer_right = f'{vectorSize}v {steps_done}s'
+
+ captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
+ captioned_image = insert_image_data_embed(captioned_image, data)
+
+ captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
+ embedding_yet_to_be_embedded = False
+
+ shared.state.job_no = embedding.step
+
+ shared.state.textinfo = f"""
+
+Loss: {loss_step:.7f}
+Step: {steps_done}
+Last prompt: {html.escape(batch.cond_text[0])}
+Last saved embedding: {html.escape(last_saved_file)}
+Last saved image: {html.escape(last_saved_image)}
+
+"""
+ filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
+ save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
+ except Exception:
+ errors.report("Error training embedding", exc_info=True)
+ finally:
+ pbar.leave = False
+ pbar.close()
+ shared.sd_model.first_stage_model.to(devices.device)
+ shared.parallel_processing_allowed = old_parallel_processing_allowed
+ sd_hijack_checkpoint.remove()
+
+ return embedding, filename
+
+
+def save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True):
+ old_embedding_name = embedding.name
+ old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
+ old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
+ old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
+ try:
+ embedding.sd_checkpoint = checkpoint.shorthash
+ embedding.sd_checkpoint_name = checkpoint.model_name
+ if remove_cached_checksum:
+ embedding.cached_checksum = None
+ embedding.name = embedding_name
+ embedding.optimizer_state_dict = optimizer.state_dict()
+ embedding.save(filename)
+ except:
+ embedding.sd_checkpoint = old_sd_checkpoint
+ embedding.sd_checkpoint_name = old_sd_checkpoint_name
+ embedding.name = old_embedding_name
+ embedding.cached_checksum = old_cached_checksum
+ raise
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b75f799e745fa693cda06763af80069324a964f
--- /dev/null
+++ b/modules/textual_inversion/ui.py
@@ -0,0 +1,45 @@
+import html
+
+import gradio as gr
+
+import modules.textual_inversion.textual_inversion
+import modules.textual_inversion.preprocess
+from modules import sd_hijack, shared
+
+
+def create_embedding(name, initialization_text, nvpt, overwrite_old):
+ filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, overwrite_old, init_text=initialization_text)
+
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
+
+ return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
+
+
+def preprocess(*args):
+ modules.textual_inversion.preprocess.preprocess(*args)
+
+ return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", ""
+
+
+def train_embedding(*args):
+
+ assert not shared.cmd_opts.lowvram, 'Training models with --lowvram is not possible'
+
+ apply_optimizations = shared.opts.training_xattention_optimizations
+ try:
+ if not apply_optimizations:
+ sd_hijack.undo_optimizations()
+
+ embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
+
+ res = f"""
+Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps.
+Embedding saved to {html.escape(filename)}
+"""
+ return res, ""
+ finally:
+ if not apply_optimizations:
+ sd_hijack.apply_optimizations()
+
diff --git a/modules/timer.py b/modules/timer.py
new file mode 100644
index 0000000000000000000000000000000000000000..22d1272dddfb5710525c04126a8b17e449c7a8d9
--- /dev/null
+++ b/modules/timer.py
@@ -0,0 +1,91 @@
+import time
+import argparse
+
+
+class TimerSubcategory:
+ def __init__(self, timer, category):
+ self.timer = timer
+ self.category = category
+ self.start = None
+ self.original_base_category = timer.base_category
+
+ def __enter__(self):
+ self.start = time.time()
+ self.timer.base_category = self.original_base_category + self.category + "/"
+ self.timer.subcategory_level += 1
+
+ if self.timer.print_log:
+ print(f"{' ' * self.timer.subcategory_level}{self.category}:")
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ elapsed_for_subcategory = time.time() - self.start
+ self.timer.base_category = self.original_base_category
+ self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategory)
+ self.timer.subcategory_level -= 1
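+ # a final record() advances the shared clock and adds leftover time to the total;
+ # disable_log avoids printing the subcategory a second time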
+ self.timer.record(self.category, disable_log=True)
+
+
+class Timer:
+ def __init__(self, print_log=False):
+ self.start = time.time()
+ self.records = {}
+ self.total = 0
+ self.base_category = ''
+ self.print_log = print_log
+ self.subcategory_level = 0
+
+ def elapsed(self):
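+ # stopwatch lap: return seconds since the previous call and reset the reference point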
+ end = time.time()
+ res = end - self.start
+ self.start = end
+ return res
+
+ def add_time_to_record(self, category, amount):
+ if category not in self.records:
+ self.records[category] = 0
+
+ self.records[category] += amount
+
+ def record(self, category, extra_time=0, disable_log=False):
+ e = self.elapsed()
+
+ self.add_time_to_record(self.base_category + category, e + extra_time)
+
+ self.total += e + extra_time
+
+ if self.print_log and not disable_log:
+ print(f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s")
+
+ def subcategory(self, name):
+ self.elapsed()
+
+ subcat = TimerSubcategory(self, name)
+ return subcat
+
+ def summary(self):
+ res = f"{self.total:.1f}s"
+
+ additions = [(category, time_taken) for category, time_taken in self.records.items() if time_taken >= 0.1 and '/' not in category]
+ if not additions:
+ return res
+
+ res += " ("
+ res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions])
+ res += ")"
+
+ return res
+
+ def dump(self):
+ return {'total': self.total, 'records': self.records}
+
+ def reset(self):
+ self.__init__(print_log=self.print_log)
+
+
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--log-startup", action='store_true', help="print a detailed log of what's happening at startup")
+args = parser.parse_known_args()[0]
+
+startup_timer = Timer(print_log=args.log_startup)
+
+startup_record = None
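+
+# Usage sketch (illustrative only; do_setup/load_weights are hypothetical helpers):
+#   timer = Timer(print_log=True)
+#   do_setup()
+#   timer.record("setup")              # prints "setup: done in 0.123s"
+#   with timer.subcategory("models"):
+#       load_weights()
+#       timer.record("load weights")   # stored under "models/load weights"
+#   print(timer.summary())             # e.g. "3.2s (setup: 0.1s, models: 3.1s)"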
diff --git a/modules/txt2img.py b/modules/txt2img.py
new file mode 100644
index 0000000000000000000000000000000000000000..94391ce6d84942b908a1a6e5bf92b26e4621991b
--- /dev/null
+++ b/modules/txt2img.py
@@ -0,0 +1,66 @@
+from contextlib import closing
+
+import modules.scripts
+from modules import processing
+from modules.generation_parameters_copypaste import create_override_settings_dict
+from modules.shared import opts, cmd_opts
+import modules.shared as shared
+from modules.ui import plaintext_to_html
+import gradio as gr
+
+
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
+ override_settings = create_override_settings_dict(override_settings_texts)
+
+ p = processing.StableDiffusionProcessingTxt2Img(
+ sd_model=shared.sd_model,
+ outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
+ outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
+ prompt=prompt,
+ styles=prompt_styles,
+ negative_prompt=negative_prompt,
+ sampler_name=sampler_name,
+ batch_size=batch_size,
+ n_iter=n_iter,
+ steps=steps,
+ cfg_scale=cfg_scale,
+ width=width,
+ height=height,
+ enable_hr=enable_hr,
+ denoising_strength=denoising_strength if enable_hr else None,
+ hr_scale=hr_scale,
+ hr_upscaler=hr_upscaler,
+ hr_second_pass_steps=hr_second_pass_steps,
+ hr_resize_x=hr_resize_x,
+ hr_resize_y=hr_resize_y,
+ hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
+ hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
+ hr_prompt=hr_prompt,
+ hr_negative_prompt=hr_negative_prompt,
+ override_settings=override_settings,
+ )
+
+ p.scripts = modules.scripts.scripts_txt2img
+ p.script_args = args
+
+ p.user = request.username
+
+ if cmd_opts.enable_console_prompts:
+ print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
+
+ with closing(p):
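+ # a selected script may handle generation entirely by itself; if none does
+ # (run() returns None), fall back to the standard processing pipeline below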
+ processed = modules.scripts.scripts_txt2img.run(p, *args)
+
+ if processed is None:
+ processed = processing.process_images(p)
+
+ shared.total_tqdm.clear()
+
+ generation_info_js = processed.js()
+ if opts.samples_log_stdout:
+ print(generation_info_js)
+
+ if opts.do_not_show_images:
+ processed.images = []
+
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
diff --git a/modules/ui.py b/modules/ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ed47eb2d1adcadd63fa7feaf5aac95b331bc5f7
--- /dev/null
+++ b/modules/ui.py
@@ -0,0 +1,1366 @@
+import datetime
+import mimetypes
+import os
+import sys
+from functools import reduce
+import warnings
+
+import gradio as gr
+import gradio.utils
+import numpy as np
+from PIL import Image, PngImagePlugin # noqa: F401
+from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
+
+from modules import gradio_extensons # noqa: F401
+from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow
+from modules.paths import script_path
+from modules.ui_common import create_refresh_button
+from modules.ui_gradio_extensions import reload_javascript
+
+from modules.shared import opts, cmd_opts
+
+import modules.generation_parameters_copypaste as parameters_copypaste
+import modules.hypernetworks.ui as hypernetworks_ui
+import modules.textual_inversion.ui as textual_inversion_ui
+import modules.textual_inversion.textual_inversion as textual_inversion
+import modules.shared as shared
+import modules.images
+from modules import prompt_parser
+from modules.sd_hijack import model_hijack
+from modules.generation_parameters_copypaste import image_from_url_text
+
+create_setting_component = ui_settings.create_setting_component
+
+warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
+warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else "ignore", category=gr.deprecation.GradioDeprecationWarning)
+
+# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
+mimetypes.init()
+mimetypes.add_type('application/javascript', '.js')
+
+# Likewise, add explicit content-type header for certain missing image types
+mimetypes.add_type('image/webp', '.webp')
+
+if not cmd_opts.share and not cmd_opts.listen:
+ # fix gradio phoning home
+ gradio.utils.version_check = lambda: None
+ gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
+
+if cmd_opts.ngrok is not None:
+ import modules.ngrok as ngrok
+ print('ngrok authtoken detected, trying to connect...')
+ ngrok.connect(
+ cmd_opts.ngrok,
+ cmd_opts.port if cmd_opts.port is not None else 7860,
+ cmd_opts.ngrok_options
+ )
+
+
+def gr_show(visible=True):
+ return {"visible": visible, "__type__": "update"}
+
+
+sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
+sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
+
+# Using constants for these since the variation selector isn't visible.
+# Important that they exactly match script.js for tooltip to work.
+random_symbol = '\U0001f3b2\ufe0f' # 🎲️
+reuse_symbol = '\u267b\ufe0f' # ♻️
+paste_symbol = '\u2199\ufe0f' # ↙
+refresh_symbol = '\U0001f504' # 🔄
+save_style_symbol = '\U0001f4be' # 💾
+apply_style_symbol = '\U0001f4cb' # 📋
+clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️
+extra_networks_symbol = '\U0001F3B4' # 🎴
+switch_values_symbol = '\U000021C5' # ⇅
+restore_progress_symbol = '\U0001F300' # 🌀
+detect_image_size_symbol = '\U0001F4D0' # 📐
+
+
+plaintext_to_html = ui_common.plaintext_to_html
+
+
+def send_gradio_gallery_to_image(x):
+ if len(x) == 0:
+ return None
+ return image_from_url_text(x[0])
+
+
+def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
+ if not enable:
+ return ""
+
+ p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)
+ p.calculate_target_resolution()
+
+ return f"from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y} "
+
+
+def resize_from_to_html(width, height, scale_by):
+ target_width = int(width * scale_by)
+ target_height = int(height * scale_by)
+
+ if not target_width or not target_height:
+ return "no image selected"
+
+ return f"resize: from {width}x{height} to {target_width}x{target_height} "
+
+
+def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles):
+ if mode in {0, 1, 3, 4}:
+ return [interrogation_function(ii_singles[mode]), None]
+ elif mode == 2:
+ return [interrogation_function(ii_singles[mode]["image"]), None]
+ elif mode == 5:
+ assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+ images = shared.listfiles(ii_input_dir)
+ print(f"Will process {len(images)} images.")
+ if ii_output_dir != "":
+ os.makedirs(ii_output_dir, exist_ok=True)
+ else:
+ ii_output_dir = ii_input_dir
+
+ for image in images:
+ img = Image.open(image)
+ filename = os.path.basename(image)
+ left, _ = os.path.splitext(filename)
+ print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8'))
+
+ return [gr.update(), None]
+
+
+def interrogate(image):
+ prompt = shared.interrogator.interrogate(image.convert("RGB"))
+ return gr.update() if prompt is None else prompt
+
+
+def interrogate_deepbooru(image):
+ prompt = deepbooru.model.tag(image)
+ return gr.update() if prompt is None else prompt
+
+
+def connect_clear_prompt(button):
+ """Given clear button, prompt, and token_counter objects, setup clear prompt button click event"""
+ button.click(
+ _js="clear_prompt",
+ fn=None,
+ inputs=[],
+ outputs=[],
+ )
+
+
+def update_token_counter(text, steps):
+ try:
+ text, _ = extra_networks.parse_prompt(text)
+
+ _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
+ prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
+
+ except Exception:
+ # a parsing error can happen here during typing, and we don't want to bother the user with
+ # messages related to it in console
+ prompt_schedules = [[[steps, text]]]
+
+ flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
+ prompts = [prompt_text for step, prompt_text in flat_prompts]
+ token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
+ return f"{token_count}/{max_length} "
+
+
+class Toprow:
+ """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation"""
+
+ def __init__(self, is_img2img):
+ id_part = "img2img" if is_img2img else "txt2img"
+ self.id_part = id_part
+
+ with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
+ with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
+ with gr.Row():
+ with gr.Column(scale=80):
+ with gr.Row():
+ self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
+ self.prompt_img = gr.File(label="", elem_id=f"{id_part}_prompt_image", file_count="single", type="binary", visible=False)
+
+ with gr.Row():
+ with gr.Column(scale=80):
+ with gr.Row():
+ self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
+
+ self.button_interrogate = None
+ self.button_deepbooru = None
+ if is_img2img:
+ with gr.Column(scale=1, elem_classes="interrogate-col"):
+ self.button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
+ self.button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
+
+ with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
+ with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"):
+ self.interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt")
+ self.skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip")
+ self.submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
+
+ self.skip.click(
+ fn=lambda: shared.state.skip(),
+ inputs=[],
+ outputs=[],
+ )
+
+ self.interrupt.click(
+ fn=lambda: shared.state.interrupt(),
+ inputs=[],
+ outputs=[],
+ )
+
+ with gr.Row(elem_id=f"{id_part}_tools"):
+ self.paste = ToolButton(value=paste_symbol, elem_id="paste")
+ self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
+ self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False)
+
+ self.token_counter = gr.HTML(value="0/75 ", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"])
+ self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
+ self.negative_token_counter = gr.HTML(value="0/75 ", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"])
+ self.negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")
+
+ self.clear_prompt_button.click(
+ fn=lambda *x: x,
+ _js="confirm_clear_prompt",
+ inputs=[self.prompt, self.negative_prompt],
+ outputs=[self.prompt, self.negative_prompt],
+ )
+
+ self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt)
+
+ self.prompt_img.change(
+ fn=modules.images.image_data,
+ inputs=[self.prompt_img],
+ outputs=[self.prompt, self.prompt_img],
+ show_progress=False,
+ )
+
+
+def setup_progressbar(*args, **kwargs):
+ pass
+
+
+def apply_setting(key, value):
+ if value is None:
+ return gr.update()
+
+ if shared.cmd_opts.freeze_settings:
+ return gr.update()
+
+ # don't allow the model to be swapped when a model hash exists in the prompt
+ if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap:
+ return gr.update()
+
+ if key == "sd_model_checkpoint":
+ ckpt_info = sd_models.get_closet_checkpoint_match(value)
+
+ if ckpt_info is not None:
+ value = ckpt_info.title
+ else:
+ return gr.update()
+
+ comp_args = opts.data_labels[key].component_args
+ if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
+ return
+
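+ # coerce the incoming value to the type of the setting's default; NoneType defaults are stored as-is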
+ valtype = type(opts.data_labels[key].default)
+ oldval = opts.data.get(key, None)
+ opts.data[key] = valtype(value) if valtype != type(None) else value
+ if oldval != value and opts.data_labels[key].onchange is not None:
+ opts.data_labels[key].onchange()
+
+ opts.save(shared.config_filename)
+ return getattr(opts, key)
+
+
+def create_output_panel(tabname, outdir):
+ return ui_common.create_output_panel(tabname, outdir)
+
+
+def create_sampler_and_steps_selection(choices, tabname):
+ if opts.samplers_in_dropdown:
+ with FormRow(elem_id=f"sampler_selection_{tabname}"):
+ sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0])
+ steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
+ else:
+ with FormGroup(elem_id=f"sampler_selection_{tabname}"):
+ steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
+ sampler_name = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0])
+
+ return steps, sampler_name
+
+
+def ordered_ui_categories():
+ user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)}
+
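+ # reordered categories get odd sort keys (2*i + 1) and untouched ones even keys (2*j),
+ # interleaving the user's choices with the remaining defaults in a stable way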
+ for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
+ yield category
+
+
+def create_override_settings_dropdown(tabname, row):
+ dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True)
+
+ dropdown.change(
+ fn=lambda x: gr.Dropdown.update(visible=bool(x)),
+ inputs=[dropdown],
+ outputs=[dropdown],
+ )
+
+ return dropdown
+
+
+def create_ui():
+ import modules.img2img
+ import modules.txt2img
+
+ reload_javascript()
+
+ parameters_copypaste.reset()
+
+ scripts.scripts_current = scripts.scripts_txt2img
+ scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
+
+ with gr.Blocks(analytics_enabled=False) as txt2img_interface:
+ toprow = Toprow(is_img2img=False)
+
+ dummy_component = gr.Label(visible=False)
+
+ extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs")
+ extra_tabs.__enter__()
+
+ with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False):
+ with gr.Column(variant='compact', elem_id="txt2img_settings"):
+ scripts.scripts_txt2img.prepare_ui()
+
+ for category in ordered_ui_categories():
+ if category == "sampler":
+ steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img")
+
+ elif category == "dimensions":
+ with FormRow():
+ with gr.Column(elem_id="txt2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
+
+ with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims")
+
+ if opts.dimensions_and_batch_together:
+ with gr.Column(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+
+ elif category == "cfg":
+ with gr.Row():
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
+
+ elif category == "checkboxes":
+ with FormRow(elem_classes="checkboxes-row", variant="compact"):
+ pass
+
+ elif category == "accordions":
+ with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"):
+ with InputAccordion(False, label="Hires. fix", elem_id="txt2img_hr") as enable_hr:
+ with enable_hr.extra():
+ hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0)
+
+ with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+
+ with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"):
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
+ hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
+ hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
+
+ with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
+
+ hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint")
+ create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh")
+
+ hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler")
+
+ with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
+ with gr.Column(scale=80):
+ with gr.Row():
+ hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+ with gr.Column(scale=80):
+ with gr.Row():
+ hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+
+ scripts.scripts_txt2img.setup_ui_for_section(category)
+
+ elif category == "batch":
+ if not opts.dimensions_and_batch_together:
+ with FormRow(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+
+ elif category == "override_settings":
+ with FormRow(elem_id="txt2img_override_settings_row") as row:
+ override_settings = create_override_settings_dropdown('txt2img', row)
+
+ elif category == "scripts":
+ with FormGroup(elem_id="txt2img_script_container"):
+ custom_inputs = scripts.scripts_txt2img.setup_ui()
+
+ if category not in {"accordions"}:
+ scripts.scripts_txt2img.setup_ui_for_section(category)
+
+ hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
+
+ for component in hr_resolution_preview_inputs:
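+ # sliders fire .change on every tick while dragging; use .release so the preview
+ # only recomputes once the handle is let go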
+ event = component.release if isinstance(component, gr.Slider) else component.change
+
+ event(
+ fn=calc_resolution_hires,
+ inputs=hr_resolution_preview_inputs,
+ outputs=[hr_final_resolution],
+ show_progress=False,
+ )
+ event(
+ None,
+ _js="onCalcResolutionHires",
+ inputs=hr_resolution_preview_inputs,
+ outputs=[],
+ show_progress=False,
+ )
+
+ txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
+
+ txt2img_args = dict(
+ fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']),
+ _js="submit",
+ inputs=[
+ dummy_component,
+ toprow.prompt,
+ toprow.negative_prompt,
+ toprow.ui_styles.dropdown,
+ steps,
+ sampler_name,
+ batch_count,
+ batch_size,
+ cfg_scale,
+ height,
+ width,
+ enable_hr,
+ denoising_strength,
+ hr_scale,
+ hr_upscaler,
+ hr_second_pass_steps,
+ hr_resize_x,
+ hr_resize_y,
+ hr_checkpoint_name,
+ hr_sampler_name,
+ hr_prompt,
+ hr_negative_prompt,
+ override_settings,
+
+ ] + custom_inputs,
+
+ outputs=[
+ txt2img_gallery,
+ generation_info,
+ html_info,
+ html_log,
+ ],
+ show_progress=False,
+ )
+
+ toprow.prompt.submit(**txt2img_args)
+ toprow.submit.click(**txt2img_args)
+
+ res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False)
+
+ toprow.restore_progress_button.click(
+ fn=progress.restore_progress,
+ _js="restoreProgressTxt2img",
+ inputs=[dummy_component],
+ outputs=[
+ txt2img_gallery,
+ generation_info,
+ html_info,
+ html_log,
+ ],
+ show_progress=False,
+ )
+
+ txt2img_paste_fields = [
+ (toprow.prompt, "Prompt"),
+ (toprow.negative_prompt, "Negative prompt"),
+ (steps, "Steps"),
+ (sampler_name, "Sampler"),
+ (cfg_scale, "CFG scale"),
+ (width, "Size-1"),
+ (height, "Size-2"),
+ (batch_size, "Batch size"),
+ (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
+ (denoising_strength, "Denoising strength"),
+ (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)),
+ (hr_scale, "Hires upscale"),
+ (hr_upscaler, "Hires upscaler"),
+ (hr_second_pass_steps, "Hires steps"),
+ (hr_resize_x, "Hires resize-1"),
+ (hr_resize_y, "Hires resize-2"),
+ (hr_checkpoint_name, "Hires checkpoint"),
+ (hr_sampler_name, "Hires sampler"),
+ (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()),
+ (hr_prompt, "Hires prompt"),
+ (hr_negative_prompt, "Hires negative prompt"),
+ (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
+ *scripts.scripts_txt2img.infotext_fields
+ ]
+ parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
+ parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+ paste_button=toprow.paste, tabname="txt2img", source_text_component=toprow.prompt, source_image_component=None,
+ ))
+
+ txt2img_preview_params = [
+ toprow.prompt,
+ toprow.negative_prompt,
+ steps,
+ sampler_name,
+ cfg_scale,
+ scripts.scripts_txt2img.script('Seed').seed,
+ width,
+ height,
+ ]
+
+ toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
+ toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])
+
+ extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img')
+ ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
+
+ extra_tabs.__exit__()
+
+ scripts.scripts_current = scripts.scripts_img2img
+ scripts.scripts_img2img.initialize_scripts(is_img2img=True)
+
+ with gr.Blocks(analytics_enabled=False) as img2img_interface:
+ toprow = Toprow(is_img2img=True)
+
+ extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
+ extra_tabs.__enter__()
+
+ with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False):
+ with gr.Column(variant='compact', elem_id="img2img_settings"):
+ copy_image_buttons = []
+ copy_image_destinations = {}
+
+ def add_copy_image_controls(tab_name, elem):
+ with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"):
+ gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}")
+
+ for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']):
+ if name == tab_name:
+ gr.Button(title, interactive=False)
+ copy_image_destinations[name] = elem
+ continue
+
+ button = gr.Button(title)
+ copy_image_buttons.append((button, name, elem))
+
+ with gr.Tabs(elem_id="mode_img2img"):
+ img2img_selected_tab = gr.State(0)
+
+ with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
+ init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height)
+ add_copy_image_controls('img2img', init_img)
+
+ with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
+ add_copy_image_controls('sketch', sketch)
+
+ with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
+ init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color)
+ add_copy_image_controls('inpaint', init_img_with_mask)
+
+ with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
+ inpaint_color_sketch_orig = gr.State(None)
+ add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
+
+ def update_orig(image, state):
+ if image is not None:
+ same_size = state is not None and state.size == image.size
+ has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
+ edited = same_size and has_exact_match
+ return image if not edited or state is None else state
+
+ inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
+
+ with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
+ init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
+ init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")
+
+ with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
+ hidden = ' Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
+ gr.HTML(
+ "Process images in a directory on the same machine where the server is running." +
+ " Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ " Add inpaint batch mask directory to enable inpaint batch processing."
+ f"{hidden}"
+ )
+ img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
+ img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+ img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
+ with gr.Accordion("PNG info", open=False):
+ img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info")
+ img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir")
+ img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.")
+
+ img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
+
+ for i, tab in enumerate(img2img_tabs):
+ tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
+
+ def copy_image(img):
+ if isinstance(img, dict) and 'image' in img:
+ return img['image']
+
+ return img
+
+ for button, name, elem in copy_image_buttons:
+ button.click(
+ fn=copy_image,
+ inputs=[elem],
+ outputs=[copy_image_destinations[name]],
+ )
+ button.click(
+ fn=lambda: None,
+ _js=f"switch_to_{name.replace(' ', '_')}",
+ inputs=[],
+ outputs=[],
+ )
+
+ with FormRow():
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+
+ scripts.scripts_img2img.prepare_ui()
+
+ for category in ordered_ui_categories():
+ if category == "sampler":
+ steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img")
+
+ elif category == "dimensions":
+ with FormRow():
+ with gr.Column(elem_id="img2img_column_size", scale=4):
+ selected_scale_tab = gr.State(value=0)
+
+ with gr.Tabs():
+ with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to:
+ with FormRow():
+ with gr.Column(elem_id="img2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+ with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
+ detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn")
+
+ with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by:
+ scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale")
+
+ with FormRow():
+ scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview")
+ gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider")
+ button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to")
+
+ on_change_args = dict(
+ fn=resize_from_to_html,
+ _js="currentImg2imgSourceResolution",
+ inputs=[dummy_component, dummy_component, scale_by],
+ outputs=scale_by_html,
+ show_progress=False,
+ )
+
+ scale_by.release(**on_change_args)
+ button_update_resize_to.click(**on_change_args)
+
+ # the code below updates the resolution label after the image in the image-selection UI changes.
+ # as it stands, the event keeps firing continuously during inpaint edits, flooding the page with requests.
+ # I assume this is a gradio bug, so for now we only hook it up for the non-inpaint inputs.
+ for component in [init_img, sketch]:
+ component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
+
+ tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab])
+ tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab])
+
+ if opts.dimensions_and_batch_together:
+ with gr.Column(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
+
+ elif category == "denoising":
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+
+ elif category == "cfg":
+ with gr.Row():
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+ image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False)
+
+ elif category == "checkboxes":
+ with FormRow(elem_classes="checkboxes-row", variant="compact"):
+ pass
+
+ elif category == "accordions":
+ with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"):
+ scripts.scripts_img2img.setup_ui_for_section(category)
+
+ elif category == "batch":
+ if not opts.dimensions_and_batch_together:
+ with FormRow(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
+
+ elif category == "override_settings":
+ with FormRow(elem_id="img2img_override_settings_row") as row:
+ override_settings = create_override_settings_dropdown('img2img', row)
+
+ elif category == "scripts":
+ with FormGroup(elem_id="img2img_script_container"):
+ custom_inputs = scripts.scripts_img2img.setup_ui()
+
+ elif category == "inpaint":
+ with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls:
+ with FormRow():
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
+ mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha")
+
+ with FormRow():
+ inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")
+
+ with FormRow():
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")
+
+ with FormRow():
+ with gr.Column():
+ inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res")
+
+ with gr.Column(scale=4):
+ inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
+
+ def select_img2img_tab(tab):
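+                        # tabs 2-4 are the inpaint-style tabs, which need the inpaint controls; mask transparency only applies to the color-sketch inpaint tab (index 3)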
+ return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
+
+ for i, elem in enumerate(img2img_tabs):
+ elem.select(
+ fn=lambda tab=i: select_img2img_tab(tab),
+ inputs=[],
+ outputs=[inpaint_controls, mask_alpha],
+ )
+
+ if category not in {"accordions"}:
+ scripts.scripts_img2img.setup_ui_for_section(category)
+
+ img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
+
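+        # the two leading dummy_component inputs are placeholders filled in on the JS side by submit_img2img: the task id and the index of the currently selected img2img tab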
+ img2img_args = dict(
+ fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']),
+ _js="submit_img2img",
+ inputs=[
+ dummy_component,
+ dummy_component,
+ toprow.prompt,
+ toprow.negative_prompt,
+ toprow.ui_styles.dropdown,
+ init_img,
+ sketch,
+ init_img_with_mask,
+ inpaint_color_sketch,
+ inpaint_color_sketch_orig,
+ init_img_inpaint,
+ init_mask_inpaint,
+ steps,
+ sampler_name,
+ mask_blur,
+ mask_alpha,
+ inpainting_fill,
+ batch_count,
+ batch_size,
+ cfg_scale,
+ image_cfg_scale,
+ denoising_strength,
+ selected_scale_tab,
+ height,
+ width,
+ scale_by,
+ resize_mode,
+ inpaint_full_res,
+ inpaint_full_res_padding,
+ inpainting_mask_invert,
+ img2img_batch_input_dir,
+ img2img_batch_output_dir,
+ img2img_batch_inpaint_mask_dir,
+ override_settings,
+ img2img_batch_use_png_info,
+ img2img_batch_png_info_props,
+ img2img_batch_png_info_dir,
+ ] + custom_inputs,
+ outputs=[
+ img2img_gallery,
+ generation_info,
+ html_info,
+ html_log,
+ ],
+ show_progress=False,
+ )
+
+ interrogate_args = dict(
+ _js="get_img2img_tab_index",
+ inputs=[
+ dummy_component,
+ img2img_batch_input_dir,
+ img2img_batch_output_dir,
+ init_img,
+ sketch,
+ init_img_with_mask,
+ inpaint_color_sketch,
+ init_img_inpaint,
+ ],
+ outputs=[toprow.prompt, dummy_component],
+ )
+
+ toprow.prompt.submit(**img2img_args)
+ toprow.submit.click(**img2img_args)
+
+ res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False)
+
+ detect_image_size_btn.click(
+ fn=lambda w, h, _: (w or gr.update(), h or gr.update()),
+ _js="currentImg2imgSourceResolution",
+ inputs=[dummy_component, dummy_component, dummy_component],
+ outputs=[width, height],
+ show_progress=False,
+ )
+
+ toprow.restore_progress_button.click(
+ fn=progress.restore_progress,
+ _js="restoreProgressImg2img",
+ inputs=[dummy_component],
+ outputs=[
+ img2img_gallery,
+ generation_info,
+ html_info,
+ html_log,
+ ],
+ show_progress=False,
+ )
+
+ toprow.button_interrogate.click(
+ fn=lambda *args: process_interrogate(interrogate, *args),
+ **interrogate_args,
+ )
+
+ toprow.button_deepbooru.click(
+ fn=lambda *args: process_interrogate(interrogate_deepbooru, *args),
+ **interrogate_args,
+ )
+
+ toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
+ toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])
+
+ img2img_paste_fields = [
+ (toprow.prompt, "Prompt"),
+ (toprow.negative_prompt, "Negative prompt"),
+ (steps, "Steps"),
+ (sampler_name, "Sampler"),
+ (cfg_scale, "CFG scale"),
+ (image_cfg_scale, "Image CFG scale"),
+ (width, "Size-1"),
+ (height, "Size-2"),
+ (batch_size, "Batch size"),
+ (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
+ (denoising_strength, "Denoising strength"),
+ (mask_blur, "Mask blur"),
+ *scripts.scripts_img2img.infotext_fields
+ ]
+ parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings)
+ parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings)
+ parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+ paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None,
+ ))
+
+ extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img')
+ ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
+
+ extra_tabs.__exit__()
+
+ scripts.scripts_current = None
+
+ with gr.Blocks(analytics_enabled=False) as extras_interface:
+ ui_postprocessing.create_ui()
+
+ with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
+ with gr.Row(equal_height=False):
+ with gr.Column(variant='panel'):
+ image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
+
+ with gr.Column(variant='panel'):
+ html = gr.HTML()
+ generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
+ html2 = gr.HTML()
+ with gr.Row():
+ buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
+
+ for tabname, button in buttons.items():
+ parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+ paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image,
+ ))
+
+ image.change(
+ fn=wrap_gradio_call(modules.extras.run_pnginfo),
+ inputs=[image],
+ outputs=[html, generation_info, html2],
+ )
+
+ modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger()
+
+ with gr.Blocks(analytics_enabled=False) as train_interface:
+ with gr.Row(equal_height=False):
+            gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
+
+ with gr.Row(variant="compact", equal_height=False):
+ with gr.Tabs(elem_id="train_tabs"):
+
+ with gr.Tab(label="Create embedding", id="create_embedding"):
+ new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
+ initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
+ nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
+ overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.HTML(value="")
+
+ with gr.Column():
+ create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
+
+ with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"):
+ new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
+ new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
+ new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
+ new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=hypernetworks_ui.keys, elem_id="train_new_hypernetwork_activation_func")
+ new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
+ new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
+ new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
+ new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'")
+ overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.HTML(value="")
+
+ with gr.Column():
+ create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
+
+ with gr.Tab(label="Preprocess images", id="preprocess_images"):
+ process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
+ process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
+ process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
+ process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
+ preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
+
+ with gr.Row():
+ process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
+ process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
+ process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
+ process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
+ process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
+ process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
+ process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
+
+ with gr.Row(visible=False) as process_split_extra_row:
+ process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
+ process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
+
+ with gr.Row(visible=False) as process_focal_crop_row:
+ process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
+ process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
+ process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
+ process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
+
+ with gr.Column(visible=False) as process_multicrop_col:
+ gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
+ with gr.Row():
+ process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
+ process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
+ with gr.Row():
+ process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
+ process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
+ with gr.Row():
+ process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
+ process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.HTML(value="")
+
+ with gr.Column():
+ with gr.Row():
+ interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
+ run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
+
+ process_split.change(
+ fn=lambda show: gr_show(show),
+ inputs=[process_split],
+ outputs=[process_split_extra_row],
+ )
+
+ process_focal_crop.change(
+ fn=lambda show: gr_show(show),
+ inputs=[process_focal_crop],
+ outputs=[process_focal_crop_row],
+ )
+
+ process_multicrop.change(
+ fn=lambda show: gr_show(show),
+ inputs=[process_multicrop],
+ outputs=[process_multicrop_col],
+ )
+
+ def get_textual_inversion_template_names():
+ return sorted(textual_inversion.textual_inversion_templates)
+
+ with gr.Tab(label="Train", id="train"):
+                    gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
+ with FormRow():
+ train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
+ create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
+
+ train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
+ create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
+
+ with FormRow():
+ embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
+ hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
+
+ with FormRow():
+ clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
+ clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
+
+ with FormRow():
+ batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
+ gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+
+ dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
+ log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
+
+ with FormRow():
+ template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
+                            create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refresh_train_template_file")
+
+ training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
+ training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
+ varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
+ steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
+
+ with FormRow():
+ create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+ save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
+ use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
+
+ save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
+ preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
+
+ shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
+ tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
+
+ latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
+
+ with gr.Row():
+ train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
+ interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
+ train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
+
+ params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
+
+ script_callbacks.ui_train_tabs_callback(params)
+
+ with gr.Column(elem_id='ti_gallery_container'):
+ ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
+ gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4)
+ gr.HTML(elem_id="ti_progress", value="")
+ ti_outcome = gr.HTML(elem_id="ti_error", value="")
+
+ create_embedding.click(
+ fn=textual_inversion_ui.create_embedding,
+ inputs=[
+ new_embedding_name,
+ initialization_text,
+ nvpt,
+ overwrite_old_embedding,
+ ],
+ outputs=[
+ train_embedding_name,
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
+ create_hypernetwork.click(
+ fn=hypernetworks_ui.create_hypernetwork,
+ inputs=[
+ new_hypernetwork_name,
+ new_hypernetwork_sizes,
+ overwrite_old_hypernetwork,
+ new_hypernetwork_layer_structure,
+ new_hypernetwork_activation_func,
+ new_hypernetwork_initialization_option,
+ new_hypernetwork_add_layer_norm,
+ new_hypernetwork_use_dropout,
+ new_hypernetwork_dropout_structure
+ ],
+ outputs=[
+ train_hypernetwork_name,
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
+ run_preprocess.click(
+ fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]),
+ _js="start_training_textual_inversion",
+ inputs=[
+ dummy_component,
+ process_src,
+ process_dst,
+ process_width,
+ process_height,
+ preprocess_txt_action,
+ process_keep_original_size,
+ process_flip,
+ process_split,
+ process_caption,
+ process_caption_deepbooru,
+ process_split_threshold,
+ process_overlap_ratio,
+ process_focal_crop,
+ process_focal_crop_face_weight,
+ process_focal_crop_entropy_weight,
+ process_focal_crop_edges_weight,
+ process_focal_crop_debug,
+ process_multicrop,
+ process_multicrop_mindim,
+ process_multicrop_maxdim,
+ process_multicrop_minarea,
+ process_multicrop_maxarea,
+ process_multicrop_objective,
+ process_multicrop_threshold,
+ ],
+ outputs=[
+ ti_output,
+ ti_outcome,
+ ],
+ )
+
+ train_embedding.click(
+ fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]),
+ _js="start_training_textual_inversion",
+ inputs=[
+ dummy_component,
+ train_embedding_name,
+ embedding_learn_rate,
+ batch_size,
+ gradient_step,
+ dataset_directory,
+ log_directory,
+ training_width,
+ training_height,
+ varsize,
+ steps,
+ clip_grad_mode,
+ clip_grad_value,
+ shuffle_tags,
+ tag_drop_out,
+ latent_sampling_method,
+ use_weight,
+ create_image_every,
+ save_embedding_every,
+ template_file,
+ save_image_with_stored_embedding,
+ preview_from_txt2img,
+ *txt2img_preview_params,
+ ],
+ outputs=[
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
+ train_hypernetwork.click(
+ fn=wrap_gradio_gpu_call(hypernetworks_ui.train_hypernetwork, extra_outputs=[gr.update()]),
+ _js="start_training_textual_inversion",
+ inputs=[
+ dummy_component,
+ train_hypernetwork_name,
+ hypernetwork_learn_rate,
+ batch_size,
+ gradient_step,
+ dataset_directory,
+ log_directory,
+ training_width,
+ training_height,
+ varsize,
+ steps,
+ clip_grad_mode,
+ clip_grad_value,
+ shuffle_tags,
+ tag_drop_out,
+ latent_sampling_method,
+ use_weight,
+ create_image_every,
+ save_embedding_every,
+ template_file,
+ preview_from_txt2img,
+ *txt2img_preview_params,
+ ],
+ outputs=[
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
+ interrupt_training.click(
+ fn=lambda: shared.state.interrupt(),
+ inputs=[],
+ outputs=[],
+ )
+
+ interrupt_preprocessing.click(
+ fn=lambda: shared.state.interrupt(),
+ inputs=[],
+ outputs=[],
+ )
+
+ loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
+
+ settings = ui_settings.UiSettings()
+ settings.create_ui(loadsave, dummy_component)
+
+ interfaces = [
+ (txt2img_interface, "txt2img", "txt2img"),
+ (img2img_interface, "img2img", "img2img"),
+ (extras_interface, "Extras", "extras"),
+ (pnginfo_interface, "PNG Info", "pnginfo"),
+ (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"),
+ (train_interface, "Train", "train"),
+ ]
+
+ interfaces += script_callbacks.ui_tabs_callback()
+ interfaces += [(settings.interface, "Settings", "settings")]
+
+ extensions_interface = ui_extensions.create_ui()
+ interfaces += [(extensions_interface, "Extensions", "extensions")]
+
+ shared.tab_names = []
+ for _interface, label, _ifid in interfaces:
+ shared.tab_names.append(label)
+
+ with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
+ settings.add_quicksettings()
+
+ parameters_copypaste.connect_paste_params_buttons()
+
+ with gr.Tabs(elem_id="tabs") as tabs:
+ tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)}
+ sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999))
+
+ for interface, label, ifid in sorted_interfaces:
+ if label in shared.opts.hidden_tabs:
+ continue
+ with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"):
+ interface.render()
+
+ if ifid not in ["extensions", "settings"]:
+ loadsave.add_block(interface, ifid)
+
+ loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs)
+
+ loadsave.setup_ui()
+
+ if os.path.exists(os.path.join(script_path, "notification.mp3")):
+ gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
+
+ footer = shared.html("footer.html")
+ footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API")
+ gr.HTML(footer, elem_id="footer")
+
+ settings.add_functionality(demo)
+
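+        # the Image CFG scale slider only applies to instruct-pix2pix-style models, which identify themselves with cond_stage_key == "edit"; keep it hidden for everything else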
+ update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
+ settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
+ demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
+
+ modelmerger_ui.setup_ui(dummy_component=dummy_component, sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint'])
+
+ loadsave.dump_defaults()
+ demo.ui_loadsave = loadsave
+
+ return demo
+
+
+def versions_html():
+ import torch
+ import launch
+
+ python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+ commit = launch.commit_hash()
+ tag = launch.git_tag()
+
+ if shared.xformers_available:
+ import xformers
+ xformers_version = xformers.__version__
+ else:
+ xformers_version = "N/A"
+
+    return f"""
+version: <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/{commit}">{tag}</a>
+&#x2000;•&#x2000;
+python: <span title="{sys.version}">{python_version}</span>
+&#x2000;•&#x2000;
+torch: {getattr(torch, '__long_version__', torch.__version__)}
+&#x2000;•&#x2000;
+xformers: {xformers_version}
+&#x2000;•&#x2000;
+gradio: {gr.__version__}
+&#x2000;•&#x2000;
+checkpoint: <a id="sd_checkpoint_hash">N/A</a>
+"""
+
+
+def setup_ui_api(app):
+ from pydantic import BaseModel, Field
+ from typing import List
+
+ class QuicksettingsHint(BaseModel):
+ name: str = Field(title="Name of the quicksettings field")
+ label: str = Field(title="Label of the quicksettings field")
+
+ def quicksettings_hint():
+ return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()]
+
+ app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint])
+
+ app.add_api_route("/internal/ping", lambda: {}, methods=["GET"])
+
+ app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"])
+
+ def download_sysinfo(attachment=False):
+ from fastapi.responses import PlainTextResponse
+
+ text = sysinfo.get()
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+
+ return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'})
+
+ app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"])
+ app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"])
+
diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py
new file mode 100644
index 0000000000000000000000000000000000000000..d74fafa43f1f9b5c66b52f03fefb938488e8935b
--- /dev/null
+++ b/modules/ui_checkpoint_merger.py
@@ -0,0 +1,124 @@
+
+import gradio as gr
+
+from modules import sd_models, sd_vae, errors, extras, call_queue
+from modules.ui_components import FormRow
+from modules.ui_common import create_refresh_button
+
+
+def update_interp_description(value):
+    interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>"
+ interp_descriptions = {
+ "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
+ "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
+ "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
+ }
+ return interp_descriptions[value]
+
+
+def modelmerger(*args):
+ try:
+ results = extras.run_modelmerger(*args)
+ except Exception as e:
+ errors.report("Error loading/saving model file", exc_info=True)
+ sd_models.list_models() # to remove the potentially missing models from the list
+ return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
+ return results
+
+
+class UiCheckpointMerger:
+ def __init__(self):
+ with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
+ with gr.Row(equal_height=False):
+ with gr.Column(variant='compact'):
+ self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
+
+ with FormRow(elem_id="modelmerger_models"):
+ self.primary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
+ create_refresh_button(self.primary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")
+
+ self.secondary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
+ create_refresh_button(self.secondary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")
+
+ self.tertiary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
+ create_refresh_button(self.tertiary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
+
+ self.custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
+ self.interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
+ self.interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
+ self.interp_method.change(fn=update_interp_description, inputs=[self.interp_method], outputs=[self.interp_description])
+
+ with FormRow():
+ self.checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
+ self.save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
+
+ with FormRow():
+ with gr.Column():
+ self.config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")
+
+ with gr.Column():
+ with FormRow():
+ self.bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
+ create_refresh_button(self.bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")
+
+ with FormRow():
+ self.discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")
+
+ with gr.Accordion("Metadata", open=False) as metadata_editor:
+ with FormRow():
+ self.save_metadata = gr.Checkbox(value=True, label="Save metadata", elem_id="modelmerger_save_metadata")
+ self.add_merge_recipe = gr.Checkbox(value=True, label="Add merge recipe metadata", elem_id="modelmerger_add_recipe")
+ self.copy_metadata_fields = gr.Checkbox(value=True, label="Copy metadata from merged models", elem_id="modelmerger_copy_metadata")
+
+ self.metadata_json = gr.TextArea('{}', label="Metadata in JSON format")
+ self.read_metadata = gr.Button("Read metadata from selected checkpoints")
+
+ with FormRow():
+ self.modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')
+
+ with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
+ with gr.Group(elem_id="modelmerger_results_panel"):
+ self.modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
+
+ self.metadata_editor = metadata_editor
+ self.blocks = modelmerger_interface
+
+ def setup_ui(self, dummy_component, sd_model_checkpoint_component):
+ self.checkpoint_format.change(lambda fmt: gr.update(visible=fmt == 'safetensors'), inputs=[self.checkpoint_format], outputs=[self.metadata_editor], show_progress=False)
+
+ self.read_metadata.click(extras.read_metadata, inputs=[self.primary_model_name, self.secondary_model_name, self.tertiary_model_name], outputs=[self.metadata_json])
+
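+        # the first click handler clears the previous result text immediately; the queued GPU call registered below then fills it back in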
+ self.modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[self.modelmerger_result])
+ self.modelmerger_merge.click(
+ fn=call_queue.wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
+ _js='modelmerger',
+ inputs=[
+ dummy_component,
+ self.primary_model_name,
+ self.secondary_model_name,
+ self.tertiary_model_name,
+ self.interp_method,
+ self.interp_amount,
+ self.save_as_half,
+ self.custom_name,
+ self.checkpoint_format,
+ self.config_source,
+ self.bake_in_vae,
+ self.discard_weights,
+ self.save_metadata,
+ self.add_merge_recipe,
+ self.copy_metadata_fields,
+ self.metadata_json,
+ ],
+ outputs=[
+ self.primary_model_name,
+ self.secondary_model_name,
+ self.tertiary_model_name,
+ sd_model_checkpoint_component,
+ self.modelmerger_result,
+ ]
+ )
+
+ # Required as a workaround for change() event not triggering when loading values from ui-config.json
+ self.interp_description.value = update_interp_description(self.interp_method.value)
+
diff --git a/modules/ui_common.py b/modules/ui_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e3b0f0801c2c1334c1e3909ce7da9bb637daec0
--- /dev/null
+++ b/modules/ui_common.py
@@ -0,0 +1,268 @@
+import json
+import html
+import os
+import platform
+import sys
+
+import gradio as gr
+import subprocess as sp
+
+from modules import call_queue, shared
+from modules.generation_parameters_copypaste import image_from_url_text
+import modules.images
+from modules.ui_components import ToolButton
+import modules.generation_parameters_copypaste as parameters_copypaste
+
+folder_symbol = '\U0001f4c2' # 📂
+refresh_symbol = '\U0001f504' # 🔄
+
+
+def update_generation_info(generation_info, html_info, img_index):
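+    """Return the infotext of the image at img_index from the generation_info JSON for display; fall back to the existing html_info if parsing fails."""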
+ try:
+ generation_info = json.loads(generation_info)
+ if img_index < 0 or img_index >= len(generation_info["infotexts"]):
+ return html_info, gr.update()
+ return plaintext_to_html(generation_info["infotexts"][img_index]), gr.update()
+ except Exception:
+ pass
+ # if the json parse or anything else fails, just return the old html_info
+ return html_info, gr.update()
+
+
+def plaintext_to_html(text, classname=None):
+    content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
+
+    return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
+
+
+def save_files(js_data, images, do_make_zip, index):
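+    """Save the selected gallery image (or all of them) to shared.opts.outdir_save, append a row per image to log.csv, and optionally bundle everything into a zip; returns the file list for the download widget and an HTML status line."""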
+ import csv
+ filenames = []
+ fullfns = []
+
+    # quick dictionary-to-class-object conversion; needed because apply_filename_pattern requires an object, not a dict
+ class MyObject:
+ def __init__(self, d=None):
+ if d is not None:
+ for key, value in d.items():
+ setattr(self, key, value)
+
+ data = json.loads(js_data)
+
+ p = MyObject(data)
+ path = shared.opts.outdir_save
+ save_to_dirs = shared.opts.use_save_to_dirs_for_ui
+ extension: str = shared.opts.samples_format
+ start_index = 0
+ only_one = False
+
+ if index > -1 and shared.opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
+ only_one = True
+ images = [images[index]]
+ start_index = index
+
+ os.makedirs(shared.opts.outdir_save, exist_ok=True)
+
+ with open(os.path.join(shared.opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
+ at_start = file.tell() == 0
+ writer = csv.writer(file)
+ if at_start:
+ writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
+
+ for image_index, filedata in enumerate(images, start_index):
+ image = image_from_url_text(filedata)
+
+ is_grid = image_index < p.index_of_first_image
+ i = 0 if is_grid else (image_index - p.index_of_first_image)
+
+ p.batch_index = image_index-1
+ fullfn, txt_fullfn = modules.images.save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
+
+ filename = os.path.relpath(fullfn, path)
+ filenames.append(filename)
+ fullfns.append(fullfn)
+ if txt_fullfn:
+ filenames.append(os.path.basename(txt_fullfn))
+ fullfns.append(txt_fullfn)
+
+ writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler_name"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
+
+ # Make Zip
+ if do_make_zip:
+ zip_fileseed = p.all_seeds[index-1] if only_one else p.all_seeds[0]
+ namegen = modules.images.FilenameGenerator(p, zip_fileseed, p.all_prompts[0], image, True)
+ zip_filename = namegen.apply(shared.opts.grid_zip_filename_pattern or "[datetime]_[[model_name]]_[seed]-[seed_last]")
+ zip_filepath = os.path.join(path, f"{zip_filename}.zip")
+
+ from zipfile import ZipFile
+ with ZipFile(zip_filepath, "w") as zip_file:
+ for i in range(len(fullfns)):
+ with open(fullfns[i], mode="rb") as f:
+ zip_file.writestr(filenames[i], f.read())
+ fullfns.insert(0, zip_filepath)
+
+ return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}")
+
+
+def create_output_panel(tabname, outdir):
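+    """Build the results column shared by the generation tabs: output gallery, save/zip/send-to buttons, infotext and log areas. Returns (gallery, generation_info, html_info, html_log)."""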
+
+ def open_folder(f):
+ if not os.path.exists(f):
+ print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.')
+ return
+ elif not os.path.isdir(f):
+ print(f"""
+WARNING
+An open_folder request was made with an argument that is not a folder.
+This could be an error or a malicious attempt to run code on your computer.
+Requested path was: {f}
+""", file=sys.stderr)
+ return
+
+ if not shared.cmd_opts.hide_ui_dir_config:
+ path = os.path.normpath(f)
+ if platform.system() == "Windows":
+ os.startfile(path)
+ elif platform.system() == "Darwin":
+ sp.Popen(["open", path])
+ elif "microsoft-standard-WSL2" in platform.uname().release:
+ sp.Popen(["wsl-open", path])
+ else:
+ sp.Popen(["xdg-open", path])
+
+ with gr.Column(variant='panel', elem_id=f"{tabname}_results"):
+ with gr.Group(elem_id=f"{tabname}_gallery_container"):
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None)
+
+ generation_info = None
+ with gr.Column():
+ with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"):
+ open_folder_button = ToolButton(folder_symbol, elem_id=f'{tabname}_open_folder', visible=not shared.cmd_opts.hide_ui_dir_config, tooltip="Open images output directory.")
+
+ if tabname != "extras":
+ save = ToolButton('💾', elem_id=f'save_{tabname}', tooltip=f"Save the image to a dedicated directory ({shared.opts.outdir_save}).")
+ save_zip = ToolButton('🗃️', elem_id=f'save_zip_{tabname}', tooltip=f"Save zip archive with images to a dedicated directory ({shared.opts.outdir_save})")
+
+ buttons = {
+ 'img2img': ToolButton('🖼️', elem_id=f'{tabname}_send_to_img2img', tooltip="Send image and generation parameters to img2img tab."),
+ 'inpaint': ToolButton('🎨️', elem_id=f'{tabname}_send_to_inpaint', tooltip="Send image and generation parameters to img2img inpaint tab."),
+ 'extras': ToolButton('📐', elem_id=f'{tabname}_send_to_extras', tooltip="Send image and generation parameters to extras tab.")
+ }
+
+ open_folder_button.click(
+ fn=lambda: open_folder(shared.opts.outdir_samples or outdir),
+ inputs=[],
+ outputs=[],
+ )
+
+ if tabname != "extras":
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}')
+
+ with gr.Group():
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}', elem_classes="html-log")
+
+ generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
+ if tabname == 'txt2img' or tabname == 'img2img':
+ generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button")
+ generation_info_button.click(
+ fn=update_generation_info,
+ _js="function(x, y, z){ return [x, y, selected_gallery_index()] }",
+ inputs=[generation_info, html_info, html_info],
+ outputs=[html_info, html_info],
+ show_progress=False,
+ )
+
+ save.click(
+ fn=call_queue.wrap_gradio_call(save_files),
+ _js="(x, y, z, w) => [x, y, false, selected_gallery_index()]",
+ inputs=[
+ generation_info,
+ result_gallery,
+ html_info,
+ html_info,
+ ],
+ outputs=[
+ download_files,
+ html_log,
+ ],
+ show_progress=False,
+ )
+
+ save_zip.click(
+ fn=call_queue.wrap_gradio_call(save_files),
+ _js="(x, y, z, w) => [x, y, true, selected_gallery_index()]",
+ inputs=[
+ generation_info,
+ result_gallery,
+ html_info,
+ html_info,
+ ],
+ outputs=[
+ download_files,
+ html_log,
+ ]
+ )
+
+ else:
+ html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}')
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}')
+
+ paste_field_names = []
+ if tabname == "txt2img":
+ paste_field_names = modules.scripts.scripts_txt2img.paste_field_names
+ elif tabname == "img2img":
+ paste_field_names = modules.scripts.scripts_img2img.paste_field_names
+
+ for paste_tabname, paste_button in buttons.items():
+ parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+ paste_button=paste_button, tabname=paste_tabname, source_tabname="txt2img" if tabname == "txt2img" else None, source_image_component=result_gallery,
+ paste_field_names=paste_field_names
+ ))
+
+ return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log
+
+
+def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
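+    """Create a ToolButton that invokes refresh_method and then applies refreshed_args (a dict, or a callable returning one) as updates to each refresh_component."""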
+ refresh_components = refresh_component if isinstance(refresh_component, list) else [refresh_component]
+
+ label = None
+ for comp in refresh_components:
+ label = getattr(comp, 'label', None)
+ if label is not None:
+ break
+
+ def refresh():
+ refresh_method()
+ args = refreshed_args() if callable(refreshed_args) else refreshed_args
+
+ for k, v in args.items():
+ for comp in refresh_components:
+ setattr(comp, k, v)
+
+ return [gr.update(**(args or {})) for _ in refresh_components] if len(refresh_components) > 1 else gr.update(**(args or {}))
+
+ refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh")
+ refresh_button.click(
+ fn=refresh,
+ inputs=[],
+ outputs=refresh_components
+ )
+ return refresh_button
+
+
+def setup_dialog(button_show, dialog, *, button_close=None):
+    """Sets up the UI so that the dialog (gr.Box) is initially invisible and is only shown, in a fullscreen modal window, when button_show is clicked."""
+
+ dialog.visible = False
+
+ button_show.click(
+ fn=lambda: gr.update(visible=True),
+ inputs=[],
+ outputs=[dialog],
+ ).then(fn=None, _js="function(){ popupId('" + dialog.elem_id + "'); }")
+
+ if button_close:
+ button_close.click(fn=None, _js="closePopup")
+
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..e919d105b18fe2c4c19d20f9e26a69dd173c9b94
--- /dev/null
+++ b/modules/ui_extra_networks.py
@@ -0,0 +1,455 @@
+import os.path
+import urllib.parse
+from pathlib import Path
+
+from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks
+from modules.images import read_info_from_image, save_image_with_geninfo
+import gradio as gr
+import json
+import html
+from fastapi.exceptions import HTTPException
+
+from modules.generation_parameters_copypaste import image_from_url_text
+from modules.ui_components import ToolButton
+
+extra_pages = []
+allowed_dirs = set()
+
+
+def register_page(page):
+    """Registers an extra networks page for the UI; extensions are recommended to do this in the on_before_ui() callback."""
+
+ extra_pages.append(page)
+ allowed_dirs.clear()
+ allowed_dirs.update(set(sum([x.allowed_directories_for_previews() for x in extra_pages], [])))
+
+
+def fetch_file(filename: str = ""):
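+    """Serve a preview image for the extra networks UI, rejecting paths outside the directories registered by extra pages as well as non-image extensions."""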
+ from starlette.responses import FileResponse
+
+ if not os.path.isfile(filename):
+ raise HTTPException(status_code=404, detail="File not found")
+
+ if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs):
+ raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
+
+ ext = os.path.splitext(filename)[1].lower()
+ if ext not in (".png", ".jpg", ".jpeg", ".webp", ".gif"):
+        raise ValueError(f"File cannot be fetched: {filename}. Only png, jpg, jpeg, webp, and gif files are allowed.")
+
+    # would benefit from returning 304 Not Modified when the file is unchanged
+ return FileResponse(filename, headers={"Accept-Ranges": "bytes"})
+
+
+def get_metadata(page: str = "", item: str = ""):
+ from starlette.responses import JSONResponse
+
+ page = next(iter([x for x in extra_pages if x.name == page]), None)
+ if page is None:
+ return JSONResponse({})
+
+ metadata = page.metadata.get(item)
+ if metadata is None:
+ return JSONResponse({})
+
+ return JSONResponse({"metadata": json.dumps(metadata, indent=4, ensure_ascii=False)})
+
+
+def get_single_card(page: str = "", tabname: str = "", name: str = ""):
+ from starlette.responses import JSONResponse
+
+ page = next(iter([x for x in extra_pages if x.name == page]), None)
+
+ try:
+ item = page.create_item(name, enable_filter=False)
+ page.items[name] = item
+ except Exception as e:
+ errors.display(e, "creating item for extra network")
+ item = page.items.get(name)
+
+ page.read_user_metadata(item)
+ item_html = page.create_html_for_item(item, tabname)
+
+ return JSONResponse({"html": item_html})
+
+
+def add_pages_to_demo(app):
+ app.add_api_route("/sd_extra_networks/thumb", fetch_file, methods=["GET"])
+ app.add_api_route("/sd_extra_networks/metadata", get_metadata, methods=["GET"])
+ app.add_api_route("/sd_extra_networks/get-single-card", get_single_card, methods=["GET"])
+
+
+def quote_js(s):
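+    """Escape backslashes and double quotes in s and wrap it in double quotes so it can be embedded as a JavaScript string literal."""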
+ s = s.replace('\\', '\\\\')
+ s = s.replace('"', '\\"')
+ return f'"{s}"'
+
+
+class ExtraNetworksPage:
+ def __init__(self, title):
+ self.title = title
+ self.name = title.lower()
+ self.id_page = self.name.replace(" ", "_")
+ self.card_page = shared.html("extra-networks-card.html")
+ self.allow_negative_prompt = False
+ self.metadata = {}
+ self.items = {}
+
+ def refresh(self):
+ pass
+
+ def read_user_metadata(self, item):
+ filename = item.get("filename", None)
+ metadata = extra_networks.get_user_metadata(filename)
+
+ desc = metadata.get("description", None)
+ if desc is not None:
+ item["description"] = desc
+
+ item["user_metadata"] = metadata
+
+ def link_preview(self, filename):
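+        """Return the thumbnail URL for a preview file; the file's mtime is included in the query string so browsers refetch the image when it changes on disk."""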
+ quoted_filename = urllib.parse.quote(filename.replace('\\', '/'))
+ mtime = os.path.getmtime(filename)
+ return f"./sd_extra_networks/thumb?filename={quoted_filename}&mtime={mtime}"
+
+ def search_terms_from_path(self, filename, possible_directories=None):
+ abspath = os.path.abspath(filename)
+
+ for parentdir in (possible_directories if possible_directories is not None else self.allowed_directories_for_previews()):
+ parentdir = os.path.abspath(parentdir)
+ if abspath.startswith(parentdir):
+ return abspath[len(parentdir):].replace('\\', '/')
+
+ return ""
+
+ def create_html(self, tabname):
+ items_html = ''
+
+ self.metadata = {}
+
+ subdirs = {}
+ for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
+ for root, dirs, _ in sorted(os.walk(parentdir, followlinks=True), key=lambda x: shared.natural_sort_key(x[0])):
+ for dirname in sorted(dirs, key=shared.natural_sort_key):
+ x = os.path.join(root, dirname)
+
+ if not os.path.isdir(x):
+ continue
+
+ subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/")
+ while subdir.startswith("/"):
+ subdir = subdir[1:]
+
+ is_empty = len(os.listdir(x)) == 0
+ if not is_empty and not subdir.endswith("/"):
+ subdir = subdir + "/"
+
+ if ("/." in subdir or subdir.startswith(".")) and not shared.opts.extra_networks_show_hidden_directories:
+ continue
+
+ subdirs[subdir] = 1
+
+ if subdirs:
+ subdirs = {"": 1, **subdirs}
+
+            subdirs_html = "".join([f"""
+<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
+{html.escape(subdir if subdir!="" else "all")}
+</button>
+""" for subdir in subdirs])
+
+ self.items = {x["name"]: x for x in self.list_items()}
+ for item in self.items.values():
+ metadata = item.get("metadata")
+ if metadata:
+ self.metadata[item["name"]] = metadata
+
+ if "user_metadata" not in item:
+ self.read_user_metadata(item)
+
+ items_html += self.create_html_for_item(item, tabname)
+
+ if items_html == '':
+ dirs = "".join([f"{x} " for x in self.allowed_directories_for_previews()])
+ items_html = shared.html("extra-networks-no-cards.html").format(dirs=dirs)
+
+ self_name_id = self.name.replace(" ", "_")
+
+        res = f"""
+<div id='{tabname}_{self_name_id}_subdirs' class='extra-network-subdirs extra-network-subdirs-cards'>
+{subdirs_html}
+</div>
+<div id='{tabname}_{self_name_id}_cards' class='extra-network-cards'>
+{items_html}
+</div>
+"""
+
+ return res
+
+ def create_item(self, name, index=None):
+ raise NotImplementedError()
+
+ def list_items(self):
+ raise NotImplementedError()
+
+ def allowed_directories_for_previews(self):
+ return []
+
+ def create_html_for_item(self, item, tabname):
+ """
+ Create HTML for card item in tab tabname; can return empty string if the item is not meant to be shown.
+ """
+
+ preview = item.get("preview", None)
+
+ onclick = item.get("onclick", None)
+ if onclick is None:
+ onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
+
+ height = f"height: {shared.opts.extra_networks_card_height}px;" if shared.opts.extra_networks_card_height else ''
+ width = f"width: {shared.opts.extra_networks_card_width}px;" if shared.opts.extra_networks_card_width else ''
+        background_image = f'<img src="{html.escape(preview)}" class="preview" loading="lazy">' if preview else ''
+ metadata_button = ""
+ metadata = item.get("metadata")
+ if metadata:
+            metadata_button = f"<div class='metadata-button card-button' title='Show internal metadata' onclick='extraNetworksRequestMetadata(event, {quote_js(self.name)}, {quote_js(item['name'])})'></div>"
+
+        edit_button = f"<div class='edit-button card-button' title='Edit metadata' onclick='extraNetworksEditUserMetadata(event, {quote_js(tabname)}, {quote_js(self.id_page)}, {quote_js(item['name'])})'></div>"
+
+ local_path = ""
+ filename = item.get("filename", "")
+ for reldir in self.allowed_directories_for_previews():
+ absdir = os.path.abspath(reldir)
+
+ if filename.startswith(absdir):
+ local_path = filename[len(absdir):]
+
+ # if this is true, the item must not be shown in the default view, and must instead only be
+ # shown when searching for it
+ if shared.opts.extra_networks_hidden_models == "Always":
+ search_only = False
+ else:
+ search_only = "/." in local_path or "\\." in local_path
+
+ if search_only and shared.opts.extra_networks_hidden_models == "Never":
+ return ""
+
+ sort_keys = " ".join([html.escape(f'data-sort-{k}={v}') for k, v in item.get("sort_keys", {}).items()]).strip()
+
+ args = {
+ "background_image": background_image,
+ "style": f"'display: none; {height}{width}; font-size: {shared.opts.extra_networks_card_text_scale*100}%'",
+ "prompt": item.get("prompt", None),
+ "tabname": quote_js(tabname),
+ "local_preview": quote_js(item["local_preview"]),
+ "name": html.escape(item["name"]),
+ "description": (item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
+ "card_clicked": onclick,
+ "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {quote_js(tabname)}, {quote_js(item["local_preview"])})""") + '"',
+ "search_term": item.get("search_term", ""),
+ "metadata_button": metadata_button,
+ "edit_button": edit_button,
+ "search_only": " search_only" if search_only else "",
+ "sort_keys": sort_keys,
+ }
+
+ return self.card_page.format(**args)
+
+ def get_sort_keys(self, path):
+ """
+ List of default keys used for sorting in the UI.
+ """
+ pth = Path(path)
+ stat = pth.stat()
+ return {
+ "date_created": int(stat.st_ctime or 0),
+ "date_modified": int(stat.st_mtime or 0),
+ "name": pth.name.lower(),
+ }
+
+ def find_preview(self, path):
+ """
+ Find a preview PNG for a given path (without extension) and call link_preview on it.
+ """
+
+ preview_extensions = ["png", "jpg", "jpeg", "webp"]
+ if shared.opts.samples_format not in preview_extensions:
+ preview_extensions.append(shared.opts.samples_format)
+
+ potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in preview_extensions], [])
+
+ for file in potential_files:
+ if os.path.isfile(file):
+ return self.link_preview(file)
+
+ return None
+
+ def find_description(self, path):
+ """
+ Find and read a description file for a given path (without extension).
+ """
+ for file in [f"{path}.txt", f"{path}.description.txt"]:
+ try:
+ with open(file, "r", encoding="utf-8", errors="replace") as f:
+ return f.read()
+ except OSError:
+ pass
+ return None
+
+ def create_user_metadata_editor(self, ui, tabname):
+ return ui_extra_networks_user_metadata.UserMetadataEditor(ui, tabname, self)
+
+
+def initialize():
+ extra_pages.clear()
+
+
+def register_default_pages():
+ from modules.ui_extra_networks_textual_inversion import ExtraNetworksPageTextualInversion
+ from modules.ui_extra_networks_hypernets import ExtraNetworksPageHypernetworks
+ from modules.ui_extra_networks_checkpoints import ExtraNetworksPageCheckpoints
+ register_page(ExtraNetworksPageTextualInversion())
+ register_page(ExtraNetworksPageHypernetworks())
+ register_page(ExtraNetworksPageCheckpoints())
+
+
+class ExtraNetworksUi:
+ def __init__(self):
+ self.pages = None
+ """gradio HTML components related to extra networks' pages"""
+
+        self.pages_contents = None
+ """HTML content of the above; empty initially, filled when extra pages have to be shown"""
+
+ self.stored_extra_pages = None
+
+ self.button_save_preview = None
+ self.preview_target_filename = None
+
+ self.tabname = None
+
+
+def pages_in_preferred_order(pages):
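+    """Order pages according to the ui_extra_networks_tab_reorder option: pages whose names match an earlier entry in that comma-separated list come first; unmatched pages keep their original order at the end."""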
+ tab_order = [x.lower().strip() for x in shared.opts.ui_extra_networks_tab_reorder.split(",")]
+
+ def tab_name_score(name):
+ name = name.lower()
+ for i, possible_match in enumerate(tab_order):
+ if possible_match in name:
+ return i
+
+ return len(pages)
+
+ tab_scores = {page.name: (tab_name_score(page.name), original_index) for original_index, page in enumerate(pages)}
+
+ return sorted(pages, key=lambda x: tab_scores[x.name])
+
+
+def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
+ from modules.ui import switch_values_symbol
+
+ ui = ExtraNetworksUi()
+ ui.pages = []
+ ui.pages_contents = []
+ ui.user_metadata_editors = []
+ ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
+ ui.tabname = tabname
+
+ related_tabs = []
+
+ for page in ui.stored_extra_pages:
+ with gr.Tab(page.title, id=page.id_page) as tab:
+ elem_id = f"{tabname}_{page.id_page}_cards_html"
+ page_elem = gr.HTML('Loading...', elem_id=elem_id)
+ ui.pages.append(page_elem)
+
+ page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[])
+
+ editor = page.create_user_metadata_editor(ui, tabname)
+ editor.create_ui()
+ ui.user_metadata_editors.append(editor)
+
+ related_tabs.append(tab)
+
+ edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
+ dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
+ button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False)
+ button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
+ checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)
+
+ ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
+ ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False)
+
+ for tab in unrelated_tabs:
+ tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
+
+ for tab in related_tabs:
+ tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
+
+ def pages_html():
+ if not ui.pages_contents:
+ return refresh()
+
+ return ui.pages_contents
+
+ def refresh():
+ for pg in ui.stored_extra_pages:
+ pg.refresh()
+
+ ui.pages_contents = [pg.create_html(ui.tabname) for pg in ui.stored_extra_pages]
+
+ return ui.pages_contents
+
+ interface.load(fn=pages_html, inputs=[], outputs=[*ui.pages])
+ button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages)
+
+ return ui
+
+
+def path_is_parent(parent_path, child_path):
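+    """Return True if child_path is located under parent_path (a simple prefix check on the absolute paths)."""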
+ parent_path = os.path.abspath(parent_path)
+ child_path = os.path.abspath(child_path)
+
+ return child_path.startswith(parent_path)
+
+
+def setup_ui(ui, gallery):
+ def save_preview(index, images, filename):
+ # this function is here for backwards compatibility and likely will be removed soon
+
+ if len(images) == 0:
+ print("There is no image in gallery to save as a preview.")
+ return [page.create_html(ui.tabname) for page in ui.stored_extra_pages]
+
+ index = int(index)
+ index = 0 if index < 0 else index
+ index = len(images) - 1 if index >= len(images) else index
+
+ img_info = images[index if index >= 0 else 0]
+ image = image_from_url_text(img_info)
+ geninfo, items = read_info_from_image(image)
+
+ is_allowed = False
+ for extra_page in ui.stored_extra_pages:
+ if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()):
+ is_allowed = True
+ break
+
+ assert is_allowed, f'writing to {filename} is not allowed'
+
+ save_image_with_geninfo(image, geninfo, filename)
+
+ return [page.create_html(ui.tabname) for page in ui.stored_extra_pages]
+
+ ui.button_save_preview.click(
+ fn=save_preview,
+ _js="function(x, y, z){return [selected_gallery_index(), y, z]}",
+ inputs=[ui.preview_target_filename, gallery, ui.preview_target_filename],
+ outputs=[*ui.pages]
+ )
+
+ for editor in ui.user_metadata_editors:
+ editor.setup_ui(gallery)
+
+
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..418e359f30ad76c053ee66ae655c725a55fb01e5
--- /dev/null
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -0,0 +1,41 @@
+import html
+import os
+
+from modules import shared, ui_extra_networks, sd_models
+from modules.ui_extra_networks import quote_js
+from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor
+
+
+class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
+ def __init__(self):
+ super().__init__('Checkpoints')
+
+ def refresh(self):
+ shared.refresh_checkpoints()
+
+ def create_item(self, name, index=None, enable_filter=True):
+ checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name)
+ path, ext = os.path.splitext(checkpoint.filename)
+ return {
+ "name": checkpoint.name_for_extra,
+ "filename": checkpoint.filename,
+ "shorthash": checkpoint.shorthash,
+ "preview": self.find_preview(path),
+ "description": self.find_description(path),
+ "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
+ "onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"',
+ "local_preview": f"{path}.{shared.opts.samples_format}",
+ "metadata": checkpoint.metadata,
+ "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)},
+ }
+
+ def list_items(self):
+ names = list(sd_models.checkpoints_list)
+ for index, name in enumerate(names):
+ yield self.create_item(name, index)
+
+ def allowed_directories_for_previews(self):
+ return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
+
+ def create_user_metadata_editor(self, ui, tabname):
+ return CheckpointUserMetadataEditor(ui, tabname, self)
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
new file mode 100644
index 0000000000000000000000000000000000000000..a591d42f5eb86ce9ecee9766452c860d280e5533
--- /dev/null
+++ b/modules/ui_extra_networks_hypernets.py
@@ -0,0 +1,39 @@
+import os
+
+from modules import shared, ui_extra_networks
+from modules.ui_extra_networks import quote_js
+from modules.hashes import sha256_from_cache
+
+
+class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
+ def __init__(self):
+ super().__init__('Hypernetworks')
+
+ def refresh(self):
+ shared.reload_hypernetworks()
+
+ def create_item(self, name, index=None, enable_filter=True):
+ full_path = shared.hypernetworks[name]
+ path, ext = os.path.splitext(full_path)
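+ # read the sha256 back from the hash cache; it may be None if the file was never hashed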
+ sha256 = sha256_from_cache(full_path, f'hypernet/{name}')
+ shorthash = sha256[0:10] if sha256 else None
+
+ return {
+ "name": name,
+ "filename": full_path,
+ "shorthash": shorthash,
+ "preview": self.find_preview(path),
+ "description": self.find_description(path),
+ "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""),
+ "prompt": quote_js(f""),
+ "local_preview": f"{path}.preview.{shared.opts.samples_format}",
+ "sort_keys": {'default': index, **self.get_sort_keys(path + ext)},
+ }
+
+ def list_items(self):
+ for index, name in enumerate(shared.hypernetworks):
+ yield self.create_item(name, index)
+
+ def allowed_directories_for_previews(self):
+ return [shared.cmd_opts.hypernetwork_dir]
+
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcdd6981be68d75ac583c0a617f68dffae393c52
--- /dev/null
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -0,0 +1,36 @@
+import os
+
+from modules import ui_extra_networks, sd_hijack, shared
+from modules.ui_extra_networks import quote_js
+
+
+class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
+ def __init__(self):
+ super().__init__('Textual Inversion')
+ self.allow_negative_prompt = True
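+ # embeddings may also be inserted into the negative prompt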
+
+ def refresh(self):
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
+
+ def create_item(self, name, index=None, enable_filter=True):
+ embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name)
+
+ path, ext = os.path.splitext(embedding.filename)
+ return {
+ "name": name,
+ "filename": embedding.filename,
+ "shorthash": embedding.shorthash,
+ "preview": self.find_preview(path),
+ "description": self.find_description(path),
+ "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""),
+ "prompt": quote_js(embedding.name),
+ "local_preview": f"{path}.preview.{shared.opts.samples_format}",
+ "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)},
+ }
+
+ def list_items(self):
+ for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings):
+ yield self.create_item(name, index)
+
+ def allowed_directories_for_previews(self):
+ return list(sd_hijack.model_hijack.embedding_db.embedding_dirs)
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7c3f472a9534074c25028ea3263948f63d6afc5
--- /dev/null
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -0,0 +1,205 @@
+import datetime
+import html
+import json
+import os.path
+
+import gradio as gr
+
+from modules import generation_parameters_copypaste, images, sysinfo, errors, ui_extra_networks
+
+
+class UserMetadataEditor:
+
+ def __init__(self, ui, tabname, page):
+ self.ui = ui
+ self.tabname = tabname
+ self.page = page
+ self.id_part = f"{self.tabname}_{self.page.id_page}_edit_user_metadata"
+
+ self.box = None
+
+ self.edit_name_input = None
+ self.button_edit = None
+
+ self.edit_name = None
+ self.edit_description = None
+ self.edit_notes = None
+ self.html_filedata = None
+ self.html_preview = None
+ self.html_status = None
+
+ self.button_cancel = None
+ self.button_replace_preview = None
+ self.button_save = None
+
+ def get_user_metadata(self, name):
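+ # lazily attach a user_metadata dict to the item, seeded from its built-in description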
+ item = self.page.items.get(name, {})
+
+ user_metadata = item.get('user_metadata', None)
+ if not user_metadata:
+ user_metadata = {'description': item.get('description', '')}
+ item['user_metadata'] = user_metadata
+
+ return user_metadata
+
+ def create_extra_default_items_in_left_column(self):
+ pass
+
+ def create_default_editor_elems(self):
+ with gr.Row():
+ with gr.Column(scale=2):
+ self.edit_name = gr.HTML(elem_classes="extra-network-name")
+ self.edit_description = gr.Textbox(label="Description", lines=4)
+ self.html_filedata = gr.HTML()
+
+ self.create_extra_default_items_in_left_column()
+
+ with gr.Column(scale=1, min_width=0):
+ self.html_preview = gr.HTML()
+
+ def create_default_buttons(self):
+
+ with gr.Row(elem_classes="edit-user-metadata-buttons"):
+ self.button_cancel = gr.Button('Cancel')
+ self.button_replace_preview = gr.Button('Replace preview', variant='primary')
+ self.button_save = gr.Button('Save', variant='primary')
+
+ self.html_status = gr.HTML(elem_classes="edit-user-metadata-status")
+
+ self.button_cancel.click(fn=None, _js="closePopup")
+
+ def get_card_html(self, name):
+ item = self.page.items.get(name, {})
+
+ preview_url = item.get("preview", None)
+
+ if not preview_url:
+ filename, _ = os.path.splitext(item["filename"])
+ preview_url = self.page.find_preview(filename)
+ item["preview"] = preview_url
+
+ if preview_url:
+ preview = f'''
+ <div class='card standalone-card-preview'>
+ <img src="{html.escape(preview_url)}" class="preview">
+ </div>
+ '''
+ else:
+ preview = "<div class='card standalone-card-preview'></div>"
+
+ return preview
+
+ def relative_path(self, path):
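+ # display paths relative to the first allowed preview directory that contains them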
+ for parent_path in self.page.allowed_directories_for_previews():
+ if ui_extra_networks.path_is_parent(parent_path, path):
+ return os.path.relpath(path, parent_path)
+
+ return os.path.basename(path)
+
+ def get_metadata_table(self, name):
+ item = self.page.items.get(name, {})
+ try:
+ filename = item["filename"]
+ shorthash = item.get("shorthash", None)
+
+ stats = os.stat(filename)
+ params = [
+ ('Filename: ', self.relative_path(filename)),
+ ('File size: ', sysinfo.pretty_bytes(stats.st_size)),
+ ('Hash: ', shorthash),
+ ('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')),
+ ]
+
+ return params
+ except Exception as e:
+ errors.display(e, f"reading info for {name}")
+ return []
+
+ def put_values_into_components(self, name):
+ user_metadata = self.get_user_metadata(name)
+
+ try:
+ params = self.get_metadata_table(name)
+ except Exception as e:
+ errors.display(e, f"reading metadata info for {name}")
+ params = []
+
+ table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params if value is not None) + '</table>'
+
+ return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '')
+
+ def write_user_metadata(self, name, metadata):
+ item = self.page.items.get(name, {})
+ filename = item.get("filename", None)
+ basename, ext = os.path.splitext(filename)
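+ # user metadata is persisted in a .json sidecar next to the model file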
+
+ with open(basename + '.json', "w", encoding="utf8") as file:
+ json.dump(metadata, file, indent=4)
+
+ def save_user_metadata(self, name, desc, notes):
+ user_metadata = self.get_user_metadata(name)
+ user_metadata["description"] = desc
+ user_metadata["notes"] = notes
+
+ self.write_user_metadata(name, user_metadata)
+
+ def setup_save_handler(self, button, func, components):
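+ # save, then close the popup and refresh only this card's HTML on the page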
+ button\
+ .click(fn=func, inputs=[self.edit_name_input, *components], outputs=[])\
+ .then(fn=None, _js="function(name){closePopup(); extraNetworksRefreshSingleCard(" + json.dumps(self.page.name) + "," + json.dumps(self.tabname) + ", name);}", inputs=[self.edit_name_input], outputs=[])
+
+ def create_editor(self):
+ self.create_default_editor_elems()
+
+ self.edit_notes = gr.TextArea(label='Notes', lines=4)
+
+ self.create_default_buttons()
+
+ self.button_edit\
+ .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=[self.edit_name, self.edit_description, self.html_filedata, self.html_preview, self.edit_notes])\
+ .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])
+
+ self.setup_save_handler(self.button_save, self.save_user_metadata, [self.edit_description, self.edit_notes])
+
+ def create_ui(self):
+ with gr.Box(visible=False, elem_id=self.id_part, elem_classes="edit-user-metadata") as box:
+ self.box = box
+
+ self.edit_name_input = gr.Textbox("Edit user metadata card id", visible=False, elem_id=f"{self.id_part}_name")
+ self.button_edit = gr.Button("Edit user metadata", visible=False, elem_id=f"{self.id_part}_button")
+
+ self.create_editor()
+
+ def save_preview(self, index, gallery, name):
+ if len(gallery) == 0:
+ return self.get_card_html(name), "There is no image in gallery to save as a preview."
+
+ item = self.page.items.get(name, {})
+
+ index = int(index)
+ index = 0 if index < 0 else index
+ index = len(gallery) - 1 if index >= len(gallery) else index
+
+ img_info = gallery[index if index >= 0 else 0]
+ image = generation_parameters_copypaste.image_from_url_text(img_info)
+ geninfo, items = images.read_info_from_image(image)
+
+ images.save_image_with_geninfo(image, geninfo, item["local_preview"])
+
+ return self.get_card_html(name), ''
+
+ def setup_ui(self, gallery):
+ self.button_replace_preview.click(
+ fn=self.save_preview,
+ _js="function(x, y, z){return [selected_gallery_index(), y, z]}",
+ inputs=[self.edit_name_input, gallery, self.edit_name_input],
+ outputs=[self.html_preview, self.html_status]
+ ).then(
+ fn=None,
+ _js="function(name){extraNetworksRefreshSingleCard(" + json.dumps(self.page.name) + "," + json.dumps(self.tabname) + ", name);}",
+ inputs=[self.edit_name_input],
+ outputs=[]
+ )
+
+
+
diff --git a/modules/ui_gradio_extensions.py b/modules/ui_gradio_extensions.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e761aa54828d44d9479fa626fa835c225c66b1c
--- /dev/null
+++ b/modules/ui_gradio_extensions.py
@@ -0,0 +1,69 @@
+import os
+import gradio as gr
+
+from modules import localization, shared, scripts
+from modules.paths import script_path, data_path
+
+
+def webpath(fn):
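+ # files inside the install dir are served by relative path; the mtime query string
+ # acts as a cache buster when a file changes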
+ if fn.startswith(script_path):
+ web_path = os.path.relpath(fn, script_path).replace('\\', '/')
+ else:
+ web_path = os.path.abspath(fn)
+
+ return f'file={web_path}?{os.path.getmtime(fn)}'
+
+
+def javascript_html():
+ # Ensure localization is in `window` before scripts
+ head = f'<script type="text/javascript">{localization.localization_js(shared.opts.localization)}</script>\n'
+
+ script_js = os.path.join(script_path, "script.js")
+ head += f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n'
+
+ for script in scripts.list_scripts("javascript", ".js"):
+ head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n'
+
+ for script in scripts.list_scripts("javascript", ".mjs"):
+ head += f'<script type="module" src="{webpath(script.path)}"></script>\n'
+
+ if shared.cmd_opts.theme:
+ head += f'<script type="text/javascript">set_theme(\'{shared.cmd_opts.theme}\');</script>\n'
+
+ return head
+
+
+def css_html():
+ head = ""
+
+ def stylesheet(fn):
+ return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">'
+
+ for cssfile in scripts.list_files_with_name("style.css"):
+ if not os.path.isfile(cssfile):
+ continue
+
+ head += stylesheet(cssfile)
+
+ if os.path.exists(os.path.join(data_path, "user.css")):
+ head += stylesheet(os.path.join(data_path, "user.css"))
+
+ return head
+
+
+def reload_javascript():
+ js = javascript_html()
+ css = css_html()
+
+ def template_response(*args, **kwargs):
+ res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
+ res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8"))
+ res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8"))