vaskers5 committed on
Commit caf79e8 · verified · 1 Parent(s): bddf705

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. competitors_inference_code/DemoFusion/LICENSE +21 -0
  2. competitors_inference_code/DemoFusion/README.md +154 -0
  3. competitors_inference_code/DemoFusion/__pycache__/pipeline_demofusion_sdxl.cpython-312.pyc +0 -0
  4. competitors_inference_code/DemoFusion/demo_lowvram.py +34 -0
  5. competitors_inference_code/DemoFusion/generate_demofusion_images.py +176 -0
  6. competitors_inference_code/DemoFusion/gradio_demo.py +46 -0
  7. competitors_inference_code/DemoFusion/gradio_demo_controlnet.py +93 -0
  8. competitors_inference_code/DemoFusion/gradio_demo_controlnet_img2img.py +93 -0
  9. competitors_inference_code/DemoFusion/gradio_demo_img2img.py +81 -0
  10. competitors_inference_code/DemoFusion/pipeline_demofusion_sdxl.py +1446 -0
  11. competitors_inference_code/DemoFusion/pipeline_demofusion_sdxl_controlnet.py +1796 -0
  12. competitors_inference_code/DemoFusion/requirements.txt +11 -0
  13. competitors_inference_code/LSRNA/README.md +59 -0
  14. competitors_inference_code/LSRNA/__pycache__/pipeline_lsrna_demofusion_sdxl.cpython-312.pyc +0 -0
  15. competitors_inference_code/LSRNA/__pycache__/utils.cpython-312.pyc +0 -0
  16. competitors_inference_code/LSRNA/generate_lsrna_images.py +189 -0
  17. competitors_inference_code/LSRNA/lsr/__init__.py +3 -0
  18. competitors_inference_code/LSRNA/lsr/__pycache__/liif.cpython-312.pyc +0 -0
  19. competitors_inference_code/LSRNA/lsr/__pycache__/mlp.cpython-312.pyc +0 -0
  20. competitors_inference_code/LSRNA/lsr/__pycache__/models.cpython-312.pyc +0 -0
  21. competitors_inference_code/LSRNA/lsr/__pycache__/swinir.cpython-312.pyc +0 -0
  22. competitors_inference_code/LSRNA/lsr/liif.py +127 -0
  23. competitors_inference_code/LSRNA/lsr/mlp.py +23 -0
  24. competitors_inference_code/LSRNA/lsr/models.py +23 -0
  25. competitors_inference_code/LSRNA/lsr/swinir-liif-latent-sdxl.yaml +20 -0
  26. competitors_inference_code/LSRNA/lsr/swinir.py +777 -0
  27. competitors_inference_code/LSRNA/lsr_training/configs/swinir-liif-latent-sdxl-v3.yaml +58 -0
  28. competitors_inference_code/LSRNA/lsr_training/datasets/datasets.py +18 -0
  29. competitors_inference_code/LSRNA/lsr_training/datasets/scripts/make_trainset.py +144 -0
  30. competitors_inference_code/LSRNA/lsr_training/datasets/wrappers.py +61 -0
  31. competitors_inference_code/LSRNA/lsr_training/dist.sh +21 -0
  32. competitors_inference_code/LSRNA/lsr_training/find_port.py +11 -0
  33. competitors_inference_code/LSRNA/lsr_training/models/__init__.py +3 -0
  34. competitors_inference_code/LSRNA/lsr_training/models/liif.py +117 -0
  35. competitors_inference_code/LSRNA/lsr_training/models/mlp.py +23 -0
  36. competitors_inference_code/LSRNA/lsr_training/models/models.py +23 -0
  37. competitors_inference_code/LSRNA/lsr_training/models/swinir.py +776 -0
  38. competitors_inference_code/LSRNA/lsr_training/utils/__init__.py +8 -0
  39. competitors_inference_code/LSRNA/lsr_training/utils/utils.py +127 -0
  40. competitors_inference_code/LSRNA/lsr_training/utils/utils_blindsr.py +301 -0
  41. competitors_inference_code/LSRNA/lsr_training/utils/utils_calc.py +64 -0
  42. competitors_inference_code/LSRNA/lsr_training/utils/utils_config.py +12 -0
  43. competitors_inference_code/LSRNA/lsr_training/utils/utils_dist.py +202 -0
  44. competitors_inference_code/LSRNA/lsr_training/utils/utils_image.py +110 -0
  45. competitors_inference_code/LSRNA/lsr_training/utils/utils_io.py +493 -0
  46. competitors_inference_code/LSRNA/lsr_training/utils/utils_state.py +57 -0
  47. competitors_inference_code/LSRNA/main.py +65 -0
  48. competitors_inference_code/LSRNA/pipeline_lsrna_demofusion_sdxl.py +1296 -0
  49. competitors_inference_code/LSRNA/requirements.txt +18 -0
  50. competitors_inference_code/LSRNA/run.sh +13 -0
competitors_inference_code/DemoFusion/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 PRIS-CV: Computer Vision Group
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
competitors_inference_code/DemoFusion/README.md ADDED
@@ -0,0 +1,154 @@
+ # DemoFusion
+ [![Project Page](https://img.shields.io/badge/Project-Page-green.svg)](https://ruoyidu.github.io/demofusion/demofusion.html)
+ [![arXiv](https://img.shields.io/badge/arXiv-2311.16973-b31b1b.svg)](https://arxiv.org/pdf/2311.16973.pdf)
+ [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/lucataco/demofusion)
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_colab.ipynb)
+ [![Hugging Face](https://img.shields.io/badge/i2i-%F0%9F%A4%97%20Hugging%20Face-blue)](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL)
+ [![Page Views Count](https://badges.toozhao.com/badges/01HFMAPCVTA1T32KN2PASNYGYK/blue.svg)](https://badges.toozhao.com/stats/01HFMAPCVTA1T32KN2PASNYGYK "Get your own page views count badge on badges.toozhao.com")
+
+ Code release for "DemoFusion: Democratising High-Resolution Image Generation With No 💰"
+
+ <img src="figures/illustration.jpg" width="800"/>
+
+ **Abstract**: High-resolution image generation with Generative Artificial Intelligence (GenAI) has immense potential but, due to the enormous capital investment required for training, it is increasingly centralised to a few large corporations, and hidden behind paywalls. This paper aims to democratise high-resolution GenAI by advancing the frontier of high-resolution generation while remaining accessible to a broad audience. We demonstrate that existing Latent Diffusion Models (LDMs) possess untapped potential for higher-resolution image generation. Our novel DemoFusion framework seamlessly extends open-source GenAI models, employing Progressive Upscaling, Skip Residual, and Dilated Sampling mechanisms to achieve higher-resolution image generation. The progressive nature of DemoFusion requires more passes, but the intermediate results can serve as "previews", facilitating rapid prompt iteration.
+
+ # News
+ - **2024.02.27**: 🔥 DemoFusion has been accepted to CVPR'24!
+ - **2023.12.15**: 🚀 A [ComfyUI Demofusion Custom Node](https://github.com/deroberon/demofusion-comfyui) is available! Thanks to [Andre](https://github.com/deroberon) for the implementation!
+ - **2023.12.12**: ✨ DemoFusion with ControlNet is available now! Check it out at `pipeline_demofusion_sdxl_controlnet`! The local [Gradio Demo](https://github.com/PRIS-CV/DemoFusion#DemoFusionControlNet-with-local-Gradio-demo) is also available.
+ - **2023.12.10**: ✨ Image2Image is supported by `pipeline_demofusion_sdxl` now! The local [Gradio Demo](https://github.com/PRIS-CV/DemoFusion#Image2Image-with-local-Gradio-demo) is also available.
+ - **2023.12.08**: 🚀 A HuggingFace Demo for Img2Img is now available! [![Hugging Face](https://img.shields.io/badge/i2i-%F0%9F%A4%97%20Hugging%20Face-blue)](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL) Thanks to [Radamés](https://github.com/radames) for the implementation and [![Hugging Face](https://img.shields.io/badge/Hugging%20Face-Diffusers-orange.svg)](https://huggingface.co/docs/diffusers/index) for the support!
+ - **2023.12.07**: 🚀 Added a Colab demo [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_colab.ipynb). Check it out! Thanks to [camenduru](https://github.com/camenduru) for the implementation!
+ - **2023.12.06**: ✨ The local [Gradio Demo](https://github.com/PRIS-CV/DemoFusion#Text2Image-with-local-Gradio-demo) is now available! Better interaction and presentation!
+ - **2023.12.04**: ✨ A [low-vram version](https://github.com/PRIS-CV/DemoFusion#Text2Image-on-Windows-with-8-GB-of-VRAM) of DemoFusion is available! Thanks to [klimaleksus](https://github.com/klimaleksus) for the implementation!
+ - **2023.12.01**: 🚀 Integrated into [Replicate](https://replicate.com/explore). Check out the online demo: [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/lucataco/demofusion) Thanks to [Luis C.](https://github.com/lucataco) for the implementation!
+ - **2023.11.29**: 💰 `pipeline_demofusion_sdxl` is released.
+
+ # Usage
+ ## A quick try with integrated demos
+ - HuggingFace Space: Try Text2Image generation at [![Hugging Face](https://img.shields.io/badge/t2i-%F0%9F%A4%97%20Hugging%20Face-blue)](https://huggingface.co/spaces/fffiloni/DemoFusion) and Image2Image enhancement at [![Hugging Face](https://img.shields.io/badge/i2i-%F0%9F%A4%97%20Hugging%20Face-blue)](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL).
+ - Colab: Try Text2Image generation at [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_colab.ipynb) and Image2Image enhancement at [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/camenduru/DemoFusion-colab/blob/main/DemoFusion_img2img_colab.ipynb).
+ - Replicate: Try Text2Image generation at [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/lucataco/demofusion) and Image2Image enhancement at [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/lucataco/demofusion-enhance).
+
+ ## Starting with our code
+ ### Hyper-parameters
+ - `view_batch_size` (`int`, defaults to 16):
+   The batch size for multiple denoising paths. Typically, a larger batch size results in higher efficiency but increased GPU memory requirements.
+ - `stride` (`int`, defaults to 64):
+   The stride of the moving local patches. A smaller stride alleviates seam issues better, but it also introduces additional computational overhead and inference time.
+ - `cosine_scale_1` (`float`, defaults to 3):
+   Controls the decreasing rate of the skip residual. A smaller value gives better consistency with the low-resolution result, but it may lead to more pronounced upsampling noise. Please refer to Appendix C in the DemoFusion paper.
+ - `cosine_scale_2` (`float`, defaults to 1):
+   Controls the decreasing rate of dilated sampling. A smaller value addresses the repetition issue better, but it may lead to grainy images. For specific impacts, please refer to Appendix C in the DemoFusion paper.
+ - `cosine_scale_3` (`float`, defaults to 1):
+   Controls the decreasing rate of the Gaussian filter. A smaller value results in less grainy images, but it may lead to over-smoothed images. Please refer to Appendix C in the DemoFusion paper.
+ - `sigma` (`float`, defaults to 1):
+   The standard deviation of the Gaussian filter. A larger sigma promotes the global guidance of dilated sampling, but it has the potential of over-smoothing.
+ - `multi_decoder` (`bool`, defaults to True):
+   Determines whether to use a tiled decoder. Generally, a tiled decoder becomes necessary when the resolution exceeds 3072*3072 on an RTX 3090 GPU.
+ - `show_image` (`bool`, defaults to False):
+   Determines whether to show intermediate results during generation.
+
+ ### Text2Image (will take about 17 GB of VRAM)
+ - Set up the dependencies as:
+ ```
+ conda create -n demofusion python=3.9
+ conda activate demofusion
+ pip install -r requirements.txt
+ ```
+ - Download `pipeline_demofusion_sdxl.py` and run it as follows. A use case can be found in `demo.ipynb`.
+ ```
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+ import torch
+
+ model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+ pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda")
+
+ prompt = "Envision a portrait of an elderly woman, her face a canvas of time, framed by a headscarf with muted tones of rust and cream. Her eyes, blue like faded denim. Her attire, simple yet dignified."
+ negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
+
+ images = pipe(prompt, negative_prompt=negative_prompt,
+               height=3072, width=3072, view_batch_size=16, stride=64,
+               num_inference_steps=50, guidance_scale=7.5,
+               cosine_scale_1=3, cosine_scale_2=1, cosine_scale_3=1, sigma=0.8,
+               multi_decoder=True, show_image=True
+               )
+
+ for i, image in enumerate(images):
+     image.save('image_' + str(i) + '.png')
+ ```
+ - ⚠️ When you have enough VRAM (e.g., generating 2048*2048 images on hardware with more than 18 GB of VRAM), you can set `multi_decoder=False`, which makes the decoding process faster.
+ - Please feel free to try different prompts and resolutions.
+ - Default hyper-parameters are recommended, but they may not be optimal for all cases. For specific impacts of each hyper-parameter, please refer to Appendix C in the DemoFusion paper.
+ - The code was cleaned before the release. If you encounter any issues, please contact us.
+
+ ### Text2Image on Windows with 8 GB of VRAM
+
+ - Set up the environment as:
+
+ ```
+ cmd
+ git clone "https://github.com/PRIS-CV/DemoFusion"
+ cd DemoFusion
+ python -m venv venv
+ venv\Scripts\activate
+ pip install -U "xformers==0.0.22.post7+cu118" --index-url https://download.pytorch.org/whl/cu118
+ pip install "diffusers==0.21.4" "matplotlib==3.8.2" "transformers==4.35.2" "accelerate==0.25.0"
+ ```
+
+ - Launch DemoFusion as follows. The use case can be found in `demo_lowvram.py`.
+
+ ```
+ python
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+
+ import torch
+ from diffusers.models import AutoencoderKL
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+
+ model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+ pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16, vae=vae)
+ pipe = pipe.to("cuda")
+
+ prompt = "Envision a portrait of an elderly woman, her face a canvas of time, framed by a headscarf with muted tones of rust and cream. Her eyes, blue like faded denim. Her attire, simple yet dignified."
+ negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
+
+ images = pipe(prompt, negative_prompt=negative_prompt,
+               height=2048, width=2048, view_batch_size=4, stride=64,
+               num_inference_steps=40, guidance_scale=7.5,
+               cosine_scale_1=3, cosine_scale_2=1, cosine_scale_3=1, sigma=0.8,
+               multi_decoder=True, show_image=False, lowvram=True
+               )
+
+ for i, image in enumerate(images):
+     image.save('image_' + str(i) + '.png')
+ ```
+ ### Text2Image with local Gradio demo
+ - Make sure you have installed `gradio` and `gradio_imageslider`.
+ - Launch the DemoFusion Gradio demo with `python gradio_demo.py` -- better interaction and presentation!
+ <img src="figures/gradio_demo.png" width="600"/>
+
+ ### Image2Image with local Gradio demo
+ - Make sure you have installed `gradio` and `gradio_imageslider`.
+ - Launch DemoFusion Image2Image with `python gradio_demo_img2img.py`.
+ <img src="figures/gradio_demo_img2img.png" width="600"/>
+ - ⚠️ Please note that, as a tuning-free framework, DemoFusion's Image2Image capability is strongly correlated with SDXL's training data distribution and will show a significant bias. An accurate prompt describing the content and style of the input also significantly improves performance. Have fun and regard it as a side application of text+image-based generation.
+
+ ### DemoFusion+ControlNet with local Gradio demo
+ - Make sure you have installed `gradio` and `gradio_imageslider`.
+ - Launch DemoFusion+ControlNet Text2Image with `python gradio_demo_controlnet.py`.
+ - <img src="figures/gradio_demo_controlnet.png" width="600"/>
+ - Launch DemoFusion+ControlNet Image2Image with `python gradio_demo_controlnet_img2img.py`.
+ - <img src="figures/gradio_demo_controlnet_img2img.png" width="600"/>
+
+ ## Citation
+ If you find this paper useful in your research, please consider citing:
+ ```
+ @inproceedings{du2024demofusion,
+   title={DemoFusion: Democratising High-Resolution Image Generation With No \$\$\$},
+   author={Du, Ruoyi and Chang, Dongliang and Hospedales, Timothy and Song, Yi-Zhe and Ma, Zhanyu},
+   booktitle={CVPR},
+   year={2024}
+ }
+ ```
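As a back-of-the-envelope view of the `stride` trade-off described in the README's hyper-parameter list, the sketch below estimates how many overlapping local views a denoising step must process. It assumes SDXL's 8x VAE downsampling and a 128x128 latent window (the 1024px base patch size); both constants are illustrative assumptions, not values read from the pipeline.

```
import math

VAE_SCALE = 8   # assumed SDXL pixel-to-latent downsampling factor
WINDOW = 128    # assumed latent window size (a 1024px base patch)

def num_views(height_px, width_px, stride):
    """Rough count of shifted local windows covering the latent canvas."""
    h, w = height_px // VAE_SCALE, width_px // VAE_SCALE
    views_h = math.ceil(max(h - WINDOW, 0) / stride) + 1
    views_w = math.ceil(max(w - WINDOW, 0) / stride) + 1
    return views_h * views_w

for stride in (64, 32, 16):
    print(f"stride={stride}: ~{num_views(3072, 3072, stride)} views per step")
# Halving the stride roughly quadruples the view count, which is why a
# smaller stride softens seams at the cost of inference time.
```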
competitors_inference_code/DemoFusion/__pycache__/pipeline_demofusion_sdxl.cpython-312.pyc ADDED
Binary file (72.4 kB).
 
competitors_inference_code/DemoFusion/demo_lowvram.py ADDED
@@ -0,0 +1,34 @@
+
+ '''
+ Installation on Windows for GPU with 8 GB of VRAM and xformers:
+
+ git clone "https://github.com/PRIS-CV/DemoFusion"
+ cd DemoFusion
+ python -m venv venv
+ venv\Scripts\activate
+ pip install -U "xformers==0.0.22.post7+cu118" --index-url https://download.pytorch.org/whl/cu118
+ pip install "diffusers==0.21.4" "matplotlib==3.8.2" "transformers==4.35.2" "accelerate==0.25.0"
+ '''
+
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+
+ import torch
+ from diffusers.models import AutoencoderKL
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+
+ model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+ pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16, vae=vae)
+ pipe = pipe.to("cuda")
+
+ prompt = "Envision a portrait of an elderly woman, her face a canvas of time, framed by a headscarf with muted tones of rust and cream. Her eyes, blue like faded denim. Her attire, simple yet dignified."
+ negative_prompt = "blurry, ugly, duplicate, poorly drawn, deformed, mosaic"
+
+ images = pipe(prompt, negative_prompt=negative_prompt,
+               height=2048, width=2048, view_batch_size=4, stride=64,
+               num_inference_steps=40, guidance_scale=7.5,
+               cosine_scale_1=3, cosine_scale_2=1, cosine_scale_3=1, sigma=0.8,
+               multi_decoder=True, show_image=False, lowvram=True
+               )
+
+ for i, image in enumerate(images):
+     image.save('image_'+str(i)+'.png')
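Beyond `lowvram=True`, the pipeline class added later in this diff (`pipeline_demofusion_sdxl.py`) also inherits the standard diffusers VAE memory helpers, `enable_vae_slicing` and `enable_vae_tiling`. A minimal sketch of switching them on after a setup like the one above — whether they help further on 8 GB cards is not measured here:

```
import torch
from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline

pipe = DemoFusionSDXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# VAE memory helpers defined on the pipeline class in this same commit
# (see pipeline_demofusion_sdxl.py below):
pipe.enable_vae_slicing()  # decode the batch one slice at a time
pipe.enable_vae_tiling()   # decode large images tile by tile
```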
competitors_inference_code/DemoFusion/generate_demofusion_images.py ADDED
@@ -0,0 +1,176 @@
+ #!/usr/bin/env python3
+ """Generate SDXL images for the selected validation prompts with DemoFusion."""
+
+ from __future__ import annotations
+
+ import csv
+ import json
+ import time
+ from collections.abc import Sequence
+ from pathlib import Path
+ from typing import Any
+
+ import torch
+ from diffusers.models import AutoencoderKL
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+
+
+ NEGATIVE_PROMPT = "blurry, ugly, duplicate, poorly drawn face, deformed, mosaic, artifacts, bad limbs"
+ DEFAULT_CSV = "/data/kazanplova/latent_vae_upscale_train/datasets/new_validation_dataset/original_openim/images/selected_validation_images.csv"
+ DEFAULT_OUTPUT_DIR = "/data/kazanplova/latent_vae_upscale_train/datasets/new_validation_dataset/demofusion/images"
+ STATISTICS_PATH = "/data/kazanplova/latent_vae_upscale_train/datasets/new_validation_dataset/demofusion/statistics.json"
+ PRETRAINED_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
+ VAE_REPO = "madebyollin/sdxl-vae-fp16-fix"
+ CFG_SCALE = 7.5
+ NUM_INFERENCE_STEPS = 40
+ SEED = 42
+ VIEW_BATCH_SIZE = 4
+ STRIDE = 64
+ COSINE_SCALE_1 = 3.0
+ COSINE_SCALE_2 = 1.0
+ COSINE_SCALE_3 = 1.0
+ SIGMA = 0.8
+ MULTI_DECODER = True
+ SHOW_IMAGE = False
+ LOW_VRAM = True
+ RESOLUTIONS: dict[str, tuple[int, int]] = {
+     "4096px": (4096, 4096),
+     "2048px": (2048, 2048),
+     "1024px": (1024, 1024),
+     # "512px": (512, 512),
+ }
+
+
+ def load_prompts(csv_path: Path) -> list[tuple[str, str]]:
+     prompts: list[tuple[str, str]] = []
+     with csv_path.open("r", encoding="utf-8") as handle:
+         reader = csv.DictReader(handle)
+         for row in reader:
+             caption_raw = (row.get("gpt_caption") or "").strip()
+             if not caption_raw:
+                 continue
+             try:
+                 caption = json.loads(caption_raw)
+             except json.JSONDecodeError:
+                 print(f"Skipping row with invalid JSON: {row.get('img_path')}")
+                 continue
+             prompt = caption.get("sdxl")
+             if not prompt:
+                 print(f"Skipping row without 'sdxl' prompt: {row.get('img_path')}")
+                 continue
+             prompts.append((row.get("img_path", ""), prompt))
+     return prompts
+
+
+ def build_pipeline() -> DemoFusionSDXLPipeline:
+     if not torch.cuda.is_available():
+         raise RuntimeError("CUDA is required to run this script.")
+
+     vae = AutoencoderKL.from_pretrained(VAE_REPO, torch_dtype=torch.float16)
+     pipe = DemoFusionSDXLPipeline.from_pretrained(
+         PRETRAINED_MODEL,
+         torch_dtype=torch.float16,
+         vae=vae,
+     ).to("cuda")
+     pipe.set_progress_bar_config(disable=True)
+     return pipe
+
+
+ def get_first_image(result: Any) -> Any:
+     if hasattr(result, "images"):
+         images = result.images
+     elif isinstance(result, Sequence) and not isinstance(result, (str, bytes, bytearray)):
+         images = result
+     else:
+         images = [result]
+     if not images:
+         raise RuntimeError("DemoFusion pipeline returned no images.")
+     return images[0]
+
+
+ def main() -> None:
+     csv_path = Path(DEFAULT_CSV)
+     output_dir = Path(DEFAULT_OUTPUT_DIR)
+     prompts = load_prompts(csv_path)
+     if not prompts:
+         raise SystemExit("No prompts were found in the CSV file.")
+
+     resolution_dirs = {name: output_dir / name for name in RESOLUTIONS}
+     for folder in resolution_dirs.values():
+         folder.mkdir(parents=True, exist_ok=True)
+
+     statistics_path = Path(STATISTICS_PATH)
+     stats_tracker = {
+         name: {"count": 0, "total_time": 0.0, "max_vram_bytes": 0}
+         for name in RESOLUTIONS
+     }
+
+     generator = torch.Generator(device="cuda").manual_seed(SEED)
+     pipe = build_pipeline()
+     device = torch.device("cuda")
+
+     for idx, (img_path, prompt) in enumerate(prompts):
+         filename = f"{idx}.png"
+         written_paths: list[str] = []
+
+         for name, (width, height) in RESOLUTIONS.items():
+             print(prompt)
+             torch.cuda.synchronize(device)
+             torch.cuda.reset_peak_memory_stats(device)
+             start_time = time.perf_counter()
+
+             result = pipe(
+                 prompt,
+                 negative_prompt=NEGATIVE_PROMPT,
+                 guidance_scale=CFG_SCALE,
+                 num_inference_steps=NUM_INFERENCE_STEPS,
+                 width=width,
+                 height=height,
+                 generator=generator,
+                 view_batch_size=VIEW_BATCH_SIZE,
+                 stride=STRIDE,
+                 cosine_scale_1=COSINE_SCALE_1,
+                 cosine_scale_2=COSINE_SCALE_2,
+                 cosine_scale_3=COSINE_SCALE_3,
+                 sigma=SIGMA,
+                 multi_decoder=MULTI_DECODER,
+                 show_image=SHOW_IMAGE,
+                 lowvram=LOW_VRAM,
+             )
+
+             image = get_first_image(result)
+
+             torch.cuda.synchronize(device)
+             elapsed = time.perf_counter() - start_time
+             vram_bytes = torch.cuda.max_memory_allocated(device)
+
+             stats = stats_tracker[name]
+             stats["count"] += 1
+             stats["total_time"] += elapsed
+             stats["max_vram_bytes"] = max(stats["max_vram_bytes"], vram_bytes)
+
+             output_path = resolution_dirs[name] / filename
+             image.save(output_path)
+             written_paths.append(str(output_path))
+
+         print(f"[{idx + 1}/{len(prompts)}] wrote {', '.join(written_paths)}")
+
+     statistics = {
+         "total_prompts": len(prompts),
+         "resolutions": {
+             name: {
+                 "images": metrics["count"],
+                 "mean_time_sec": (metrics["total_time"] / metrics["count"]) if metrics["count"] else 0.0,
+                 "max_vram_mb": metrics["max_vram_bytes"] / (1024**2),
+             }
+             for name, metrics in stats_tracker.items()
+         },
+     }
+
+     statistics_path.parent.mkdir(parents=True, exist_ok=True)
+     statistics_path.write_text(json.dumps(statistics, indent=2))
+     print(f"Saved statistics to {statistics_path}")
+
+
+ if __name__ == "__main__":
+     main()
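A small companion sketch for consuming the `statistics.json` this script writes; the schema (`total_prompts`, then per-resolution `images`, `mean_time_sec`, `max_vram_mb`) mirrors the `statistics` dict above, and the relative path is a placeholder for `STATISTICS_PATH`:

```
import json
from pathlib import Path

stats = json.loads(Path("statistics.json").read_text())  # substitute STATISTICS_PATH
print(f"prompts: {stats['total_prompts']}")
for name, metrics in stats["resolutions"].items():
    print(f"{name}: {metrics['images']} images, "
          f"mean {metrics['mean_time_sec']:.1f}s, "
          f"peak {metrics['max_vram_mb']:.0f} MB VRAM")
```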
competitors_inference_code/DemoFusion/gradio_demo.py ADDED
@@ -0,0 +1,46 @@
+ import gradio as gr
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+ from gradio_imageslider import ImageSlider
+ import torch
+
+ def generate_images(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed):
+     model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+     pipe = DemoFusionSDXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
+     pipe = pipe.to("cuda")
+
+     generator = torch.Generator(device='cuda')
+     generator = generator.manual_seed(int(seed))
+
+     images = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
+                   height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                   num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                   cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                   multi_decoder=True, show_image=False
+                   )
+
+     return (images[0], images[-1])
+
+ iface = gr.Interface(
+     fn=generate_images,
+     inputs=[
+         gr.Textbox(label="Prompt"),
+         gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic"),
+         gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height"),
+         gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width"),
+         gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Num Inference Steps"),
+         gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale"),
+         gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1"),
+         gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2"),
+         gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3"),
+         gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma"),
+         gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size"),
+         gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride"),
+         gr.Number(label="Seed", value=2013)
+     ],
+     # outputs=gr.Gallery(label="Generated Images"),
+     outputs=ImageSlider(label="Comparison of SDXL and DemoFusion"),
+     title="DemoFusion Gradio Demo",
+     description="Generate images with the DemoFusion SDXL Pipeline."
+ )
+
+ iface.launch()
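The pipeline returns a list of images (the other scripts in this commit save every element), so the slider above compares `images[0]` with `images[-1]` -- the base SDXL pass versus the final DemoFusion output. For an offline comparison without Gradio, a minimal PIL sketch (the commented call assumes an `images` list from the pipeline):

```
from PIL import Image

def side_by_side(left: Image.Image, right: Image.Image) -> Image.Image:
    """Paste two images onto one canvas, upscaling the left to match."""
    left = left.resize(right.size)
    canvas = Image.new("RGB", (right.width * 2, right.height))
    canvas.paste(left, (0, 0))
    canvas.paste(right, (right.width, 0))
    return canvas

# side_by_side(images[0], images[-1]).save("comparison.png")
```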
competitors_inference_code/DemoFusion/gradio_demo_controlnet.py ADDED
@@ -0,0 +1,93 @@
+ import gradio as gr
+ from diffusers import ControlNetModel, AutoencoderKL
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+ from pipeline_demofusion_sdxl_controlnet import DemoFusionSDXLControlNetPipeline
+ from gradio_imageslider import ImageSlider
+ import torch, gc
+ from torchvision import transforms
+ from PIL import Image
+ import numpy as np
+ import cv2
+
+ def load_and_process_image(pil_image):
+     transform = transforms.Compose(
+         [
+             transforms.Resize((1024, 1024)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+         ]
+     )
+     image = transform(pil_image)
+     image = image.unsqueeze(0).half()
+     return image
+
+
+ def pad_image(image):
+     w, h = image.size
+     if w == h:
+         return image
+     elif w > h:
+         new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+         pad_w = 0
+         pad_h = (w - h) // 2
+         new_image.paste(image, (0, pad_h))
+         return new_image
+     else:
+         new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+         pad_w = (h - w) // 2
+         pad_h = 0
+         new_image.paste(image, (pad_w, 0))
+         return new_image
+
+ def generate_images(prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, input_image):
+     padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+     image_lr = load_and_process_image(padded_image).to('cuda')
+     controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
+     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+     pipe = DemoFusionSDXLControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+     pipe = pipe.to("cuda")
+     generator = torch.Generator(device='cuda')
+     generator = generator.manual_seed(int(seed))
+     # get canny image
+     canny_image = np.array(padded_image)
+     canny_image = cv2.Canny(canny_image, 100, 200)
+     canny_image = canny_image[:, :, None]
+     canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
+     canny_image = Image.fromarray(canny_image)
+     images = pipe(prompt, negative_prompt=negative_prompt, controlnet_conditioning_scale=controlnet_conditioning_scale,
+                   condition_image=canny_image, generator=generator,
+                   height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                   num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                   cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                   multi_decoder=True, show_image=False, lowvram=False
+                   )
+     for i, image in enumerate(images):
+         image.save('image_'+str(i)+'.png')
+     pipe = None
+     gc.collect()
+     torch.cuda.empty_cache()
+     return (canny_image, images[-1])
+
+ with gr.Blocks(title="DemoFusion") as demo:
+     with gr.Column():
+         with gr.Row():
+             with gr.Group():
+                 image_input = gr.Image(type="pil", label="Input Image")
+                 prompt = gr.Textbox(label="Prompt", value="")
+                 negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic")
+                 controlnet_conditioning_scale = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="ControlNet Conditioning Scale")
+                 width = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width")
+                 height = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height")
+                 num_inference_steps = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Num Inference Steps")
+                 guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale")
+                 cosine_scale_1 = gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1")
+                 cosine_scale_2 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2")
+                 cosine_scale_3 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3")
+                 sigma = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma")
+                 view_batch_size = gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size")
+                 stride = gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride")
+                 seed = gr.Number(label="Seed", value=2013)
+                 button = gr.Button()
+         output_images = ImageSlider(show_label=False)
+     button.click(fn=generate_images, inputs=[prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, image_input], outputs=[output_images], show_progress=True)
+ demo.queue().launch(inline=False, share=True, debug=True)
competitors_inference_code/DemoFusion/gradio_demo_controlnet_img2img.py ADDED
@@ -0,0 +1,93 @@
+ import gradio as gr
+ from diffusers import ControlNetModel, AutoencoderKL
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+ from pipeline_demofusion_sdxl_controlnet import DemoFusionSDXLControlNetPipeline
+ from gradio_imageslider import ImageSlider
+ import torch, gc
+ from torchvision import transforms
+ from PIL import Image
+ import numpy as np
+ import cv2
+
+ def load_and_process_image(pil_image):
+     transform = transforms.Compose(
+         [
+             transforms.Resize((1024, 1024)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+         ]
+     )
+     image = transform(pil_image)
+     image = image.unsqueeze(0).half()
+     return image
+
+
+ def pad_image(image):
+     w, h = image.size
+     if w == h:
+         return image
+     elif w > h:
+         new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+         pad_w = 0
+         pad_h = (w - h) // 2
+         new_image.paste(image, (0, pad_h))
+         return new_image
+     else:
+         new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+         pad_w = (h - w) // 2
+         pad_h = 0
+         new_image.paste(image, (pad_w, 0))
+         return new_image
+
+ def generate_images(prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, input_image):
+     padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+     image_lr = load_and_process_image(padded_image).to('cuda')
+     controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
+     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+     pipe = DemoFusionSDXLControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+     pipe = pipe.to("cuda")
+     generator = torch.Generator(device='cuda')
+     generator = generator.manual_seed(int(seed))
+     # get canny image
+     canny_image = np.array(padded_image)
+     canny_image = cv2.Canny(canny_image, 100, 200)
+     canny_image = canny_image[:, :, None]
+     canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
+     canny_image = Image.fromarray(canny_image)
+     images = pipe(prompt, negative_prompt=negative_prompt, controlnet_conditioning_scale=controlnet_conditioning_scale,
+                   image_lr=image_lr, condition_image=canny_image, generator=generator,
+                   height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                   num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                   cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                   multi_decoder=True, show_image=False, lowvram=False
+                   )
+     for i, image in enumerate(images):
+         image.save('image_'+str(i)+'.png')
+     pipe = None
+     gc.collect()
+     torch.cuda.empty_cache()
+     return (images[0], images[-1])
+
+ with gr.Blocks(title="DemoFusion") as demo:
+     with gr.Column():
+         with gr.Row():
+             with gr.Group():
+                 image_input = gr.Image(type="pil", label="Input Image")
+                 prompt = gr.Textbox(label="Prompt (Note: an accurate prompt to describe the content and style of the input will significantly improve performance.)", value="8k high definition, high details")
+                 negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic")
+                 controlnet_conditioning_scale = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="ControlNet Conditioning Scale")
+                 width = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width")
+                 height = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height")
+                 num_inference_steps = gr.Slider(minimum=10, maximum=100, step=1, value=50, label="Num Inference Steps")
+                 guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale")
+                 cosine_scale_1 = gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1")
+                 cosine_scale_2 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2")
+                 cosine_scale_3 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3")
+                 sigma = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma")
+                 view_batch_size = gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size")
+                 stride = gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride")
+                 seed = gr.Number(label="Seed", value=2013)
+                 button = gr.Button()
+         output_images = ImageSlider(show_label=False)
+     button.click(fn=generate_images, inputs=[prompt, negative_prompt, controlnet_conditioning_scale, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, image_input], outputs=[output_images], show_progress=True)
+ demo.queue().launch(inline=False, share=True, debug=True)
competitors_inference_code/DemoFusion/gradio_demo_img2img.py ADDED
@@ -0,0 +1,81 @@
+ import gradio as gr
+ from diffusers import AutoencoderKL
+ from pipeline_demofusion_sdxl import DemoFusionSDXLPipeline
+ from gradio_imageslider import ImageSlider
+ import torch, gc
+ from torchvision import transforms
+ from PIL import Image
+
+ def load_and_process_image(pil_image):
+     transform = transforms.Compose(
+         [
+             transforms.Resize((1024, 1024)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+         ]
+     )
+     image = transform(pil_image)
+     image = image.unsqueeze(0).half()
+     return image
+
+
+ def pad_image(image):
+     w, h = image.size
+     if w == h:
+         return image
+     elif w > h:
+         new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+         pad_w = 0
+         pad_h = (w - h) // 2
+         new_image.paste(image, (0, pad_h))
+         return new_image
+     else:
+         new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+         pad_w = (h - w) // 2
+         pad_h = 0
+         new_image.paste(image, (pad_w, 0))
+         return new_image
+
+ def generate_images(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, input_image):
+     padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+     image_lr = load_and_process_image(padded_image).to('cuda')
+     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+     pipe = DemoFusionSDXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16)
+     pipe = pipe.to("cuda")
+     generator = torch.Generator(device='cuda')
+     generator = generator.manual_seed(int(seed))
+     images = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
+                   height=int(height), width=int(width), view_batch_size=int(view_batch_size), stride=int(stride),
+                   num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
+                   cosine_scale_1=cosine_scale_1, cosine_scale_2=cosine_scale_2, cosine_scale_3=cosine_scale_3, sigma=sigma,
+                   multi_decoder=True, show_image=False, lowvram=False, image_lr=image_lr
+                   )
+     for i, image in enumerate(images):
+         image.save('image_'+str(i)+'.png')
+     pipe = None
+     gc.collect()
+     torch.cuda.empty_cache()
+     return (images[0], images[-1])
+
+ with gr.Blocks(title="DemoFusion") as demo:
+     with gr.Column():
+         with gr.Row():
+             with gr.Group():
+                 image_input = gr.Image(type="pil", label="Input Image")
+                 prompt = gr.Textbox(label="Prompt (Note: an accurate prompt to describe the content and style of the input will significantly improve performance.)", value="8k high definition, high details")
+                 negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic")
+                 width = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Width")
+                 height = gr.Slider(minimum=1024, maximum=4096, step=1024, value=2048, label="Height")
+                 num_inference_steps = gr.Slider(minimum=5, maximum=100, step=1, value=50, label="Num Inference Steps")
+                 guidance_scale = gr.Slider(minimum=1, maximum=20, step=0.1, value=7.5, label="Guidance Scale")
+                 cosine_scale_1 = gr.Slider(minimum=0, maximum=5, step=0.1, value=3, label="Cosine Scale 1")
+                 cosine_scale_2 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 2")
+                 cosine_scale_3 = gr.Slider(minimum=0, maximum=5, step=0.1, value=1, label="Cosine Scale 3")
+                 sigma = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Sigma")
+                 view_batch_size = gr.Slider(minimum=4, maximum=32, step=4, value=16, label="View Batch Size")
+                 stride = gr.Slider(minimum=8, maximum=96, step=8, value=64, label="Stride")
+                 seed = gr.Number(label="Seed", value=2013)
+                 button = gr.Button()
+         output_images = ImageSlider(show_label=False)
+     button.click(fn=generate_images, inputs=[prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, cosine_scale_1, cosine_scale_2, cosine_scale_3, sigma, view_batch_size, stride, seed, image_input], outputs=[output_images], show_progress=True)
+ demo.queue().launch(inline=False, share=True, debug=True)
competitors_inference_code/DemoFusion/pipeline_demofusion_sdxl.py ADDED
@@ -0,0 +1,1446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ import os
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+ import matplotlib.pyplot as plt
19
+
20
+ import torch
21
+ import torch.nn.functional as F
22
+ import numpy as np
23
+ import random
24
+ import warnings
25
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
26
+
27
+ from diffusers.image_processor import VaeImageProcessor
28
+ from diffusers.loaders import (
29
+ FromSingleFileMixin,
30
+ LoraLoaderMixin,
31
+ TextualInversionLoaderMixin,
32
+ )
33
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
34
+ from diffusers.models.attention_processor import (
35
+ AttnProcessor2_0,
36
+ LoRAAttnProcessor2_0,
37
+ LoRAXFormersAttnProcessor,
38
+ XFormersAttnProcessor,
39
+ )
40
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
41
+ from diffusers.schedulers import KarrasDiffusionSchedulers
42
+ from diffusers.utils import (
43
+ is_accelerate_available,
44
+ is_accelerate_version,
45
+ is_invisible_watermark_available,
46
+ logging,
47
+ replace_example_docstring,
48
+ )
49
+ from diffusers.utils.torch_utils import randn_tensor
50
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
51
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
52
+
53
+
54
+ if is_invisible_watermark_available():
55
+ from .watermark import StableDiffusionXLWatermarker
56
+
57
+
58
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
59
+
60
+ EXAMPLE_DOC_STRING = """
61
+ Examples:
62
+ ```py
63
+ >>> import torch
64
+ >>> from diffusers import StableDiffusionXLPipeline
65
+
66
+ >>> pipe = StableDiffusionXLPipeline.from_pretrained(
67
+ ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
68
+ ... )
69
+ >>> pipe = pipe.to("cuda")
70
+
71
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
72
+ >>> image = pipe(prompt).images[0]
73
+ ```
74
+ """
75
+
76
+ def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
77
+ x_coord = torch.arange(kernel_size)
78
+ gaussian_1d = torch.exp(-(x_coord - (kernel_size - 1) / 2) ** 2 / (2 * sigma ** 2))
79
+ gaussian_1d = gaussian_1d / gaussian_1d.sum()
80
+ gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
81
+ kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
82
+
83
+ return kernel
84
+
85
+ def gaussian_filter(latents, kernel_size=3, sigma=1.0):
86
+ channels = latents.shape[1]
87
+ kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
88
+ blurred_latents = F.conv2d(latents, kernel, padding=kernel_size//2, groups=channels)
89
+
90
+ return blurred_latents
91
+
92
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
93
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
94
+ """
95
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
96
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
97
+ """
98
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
99
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
100
+ # rescale the results from guidance (fixes overexposure)
101
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
102
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
103
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
104
+ return noise_cfg
105
+
106
+
107
+ class DemoFusionSDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin):
108
+ """
109
+ Pipeline for text-to-image generation using Stable Diffusion XL.
110
+
111
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
112
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
113
+
114
+ In addition the pipeline inherits the following loading methods:
115
+ - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
116
+ - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
117
+
118
+ as well as the following saving methods:
119
+ - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
120
+
121
+ Args:
122
+ vae ([`AutoencoderKL`]):
123
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
124
+ text_encoder ([`CLIPTextModel`]):
125
+ Frozen text-encoder. Stable Diffusion XL uses the text portion of
126
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
127
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
128
+ text_encoder_2 ([` CLIPTextModelWithProjection`]):
129
+ Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
130
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
131
+ specifically the
132
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
133
+ variant.
134
+ tokenizer (`CLIPTokenizer`):
135
+ Tokenizer of class
136
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
137
+ tokenizer_2 (`CLIPTokenizer`):
138
+ Second Tokenizer of class
139
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
140
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
141
+ scheduler ([`SchedulerMixin`]):
142
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
143
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
144
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
145
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
146
+ `stabilityai/stable-diffusion-xl-base-1-0`.
147
+ add_watermarker (`bool`, *optional*):
148
+ Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
149
+ watermark output images. If not defined, it will default to True if the package is installed, otherwise no
150
+ watermarker will be used.
151
+ """
152
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
153
+
154
+ def __init__(
155
+ self,
156
+ vae: AutoencoderKL,
157
+ text_encoder: CLIPTextModel,
158
+ text_encoder_2: CLIPTextModelWithProjection,
159
+ tokenizer: CLIPTokenizer,
160
+ tokenizer_2: CLIPTokenizer,
161
+ unet: UNet2DConditionModel,
162
+ scheduler: KarrasDiffusionSchedulers,
163
+ force_zeros_for_empty_prompt: bool = True,
164
+ add_watermarker: Optional[bool] = None,
165
+ ):
166
+ super().__init__()
167
+
168
+ self.register_modules(
169
+ vae=vae,
170
+ text_encoder=text_encoder,
171
+ text_encoder_2=text_encoder_2,
172
+ tokenizer=tokenizer,
173
+ tokenizer_2=tokenizer_2,
174
+ unet=unet,
175
+ scheduler=scheduler,
176
+ )
177
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
178
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
179
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
180
+ self.default_sample_size = self.unet.config.sample_size
181
+
182
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
183
+
184
+ if add_watermarker:
185
+ self.watermark = StableDiffusionXLWatermarker()
186
+ else:
187
+ self.watermark = None
188
+
189
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
190
+ def enable_vae_slicing(self):
191
+ r"""
192
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
193
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
194
+ """
195
+ self.vae.enable_slicing()
196
+
197
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
198
+ def disable_vae_slicing(self):
199
+ r"""
200
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
201
+ computing decoding in one step.
202
+ """
203
+ self.vae.disable_slicing()
204
+
205
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
206
+ def enable_vae_tiling(self):
207
+ r"""
208
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
209
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
210
+ processing larger images.
211
+ """
212
+ self.vae.enable_tiling()
213
+
214
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
215
+ def disable_vae_tiling(self):
216
+ r"""
217
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
218
+ computing decoding in one step.
219
+ """
220
+ self.vae.disable_tiling()
221
+
222
+ def encode_prompt(
223
+ self,
224
+ prompt: str,
225
+ prompt_2: Optional[str] = None,
226
+ device: Optional[torch.device] = None,
227
+ num_images_per_prompt: int = 1,
228
+ do_classifier_free_guidance: bool = True,
229
+ negative_prompt: Optional[str] = None,
230
+ negative_prompt_2: Optional[str] = None,
231
+ prompt_embeds: Optional[torch.FloatTensor] = None,
232
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
233
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
234
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
235
+ lora_scale: Optional[float] = None,
236
+ ):
237
+ r"""
238
+ Encodes the prompt into text encoder hidden states.
239
+
240
+ Args:
241
+ prompt (`str` or `List[str]`, *optional*):
242
+ prompt to be encoded
243
+ prompt_2 (`str` or `List[str]`, *optional*):
244
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
245
+ used in both text-encoders
246
+ device (`torch.device`):
247
+ torch device
248
+ num_images_per_prompt (`int`):
249
+ number of images that should be generated per prompt
250
+ do_classifier_free_guidance (`bool`):
251
+ whether to use classifier free guidance or not
252
+ negative_prompt (`str` or `List[str]`, *optional*):
253
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
254
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
255
+ less than `1`).
256
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
257
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
258
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
259
+ prompt_embeds (`torch.FloatTensor`, *optional*):
260
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
261
+ provided, text embeddings will be generated from `prompt` input argument.
262
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
263
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
264
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
265
+ argument.
266
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
267
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
268
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
269
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
270
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
271
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
272
+ input argument.
273
+ lora_scale (`float`, *optional*):
274
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
275
+ """
276
+ device = device or self._execution_device
277
+
278
+ # set lora scale so that monkey patched LoRA
279
+ # function of text encoder can correctly access it
280
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
281
+ self._lora_scale = lora_scale
282
+
283
+ # dynamically adjust the LoRA scale
284
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
285
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
286
+
287
+ if prompt is not None and isinstance(prompt, str):
288
+ batch_size = 1
289
+ elif prompt is not None and isinstance(prompt, list):
290
+ batch_size = len(prompt)
291
+ else:
292
+ batch_size = prompt_embeds.shape[0]
293
+
294
+ # Define tokenizers and text encoders
295
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
296
+ text_encoders = (
297
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
298
+ )
299
+
300
+ if prompt_embeds is None:
301
+ prompt_2 = prompt_2 or prompt
302
+ # textual inversion: process multi-vector tokens if necessary
303
+ prompt_embeds_list = []
304
+ prompts = [prompt, prompt_2]
305
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
306
+ if isinstance(self, TextualInversionLoaderMixin):
307
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
308
+
309
+ text_inputs = tokenizer(
310
+ prompt,
311
+ padding="max_length",
312
+ max_length=tokenizer.model_max_length,
313
+ truncation=True,
314
+ return_tensors="pt",
315
+ )
316
+
317
+ text_input_ids = text_inputs.input_ids
318
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
319
+
320
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
321
+ text_input_ids, untruncated_ids
322
+ ):
323
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
324
+ logger.warning(
325
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
326
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
327
+ )
328
+
329
+ prompt_embeds = text_encoder(
330
+ text_input_ids.to(device),
331
+ output_hidden_states=True,
332
+ )
333
+
334
+ # We are always only interested in the pooled output of the final text encoder
335
+ pooled_prompt_embeds = prompt_embeds[0]
336
+ prompt_embeds = prompt_embeds.hidden_states[-2]
337
+
338
+ prompt_embeds_list.append(prompt_embeds)
339
+
340
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
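+ # SDXL conditions on both text encoders: the per-token hidden states are
+ # concatenated along the channel dimension (768 + 1280 = 2048 for the base
+ # checkpoints), while only the second encoder's pooled output is kept above.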
341
+
342
+ # get unconditional embeddings for classifier free guidance
343
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
344
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
345
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
346
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
347
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
348
+ negative_prompt = negative_prompt or ""
349
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
350
+
351
+ uncond_tokens: List[str]
352
+ if prompt is not None and type(prompt) is not type(negative_prompt):
353
+ raise TypeError(
354
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
355
+ f" {type(prompt)}."
356
+ )
357
+ elif isinstance(negative_prompt, str):
358
+ uncond_tokens = [negative_prompt, negative_prompt_2]
359
+ elif batch_size != len(negative_prompt):
360
+ raise ValueError(
361
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
362
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
363
+ " the batch size of `prompt`."
364
+ )
365
+ else:
366
+ uncond_tokens = [negative_prompt, negative_prompt_2]
367
+
368
+ negative_prompt_embeds_list = []
369
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
370
+ if isinstance(self, TextualInversionLoaderMixin):
371
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
372
+
373
+ max_length = prompt_embeds.shape[1]
374
+ uncond_input = tokenizer(
375
+ negative_prompt,
376
+ padding="max_length",
377
+ max_length=max_length,
378
+ truncation=True,
379
+ return_tensors="pt",
380
+ )
381
+
382
+ negative_prompt_embeds = text_encoder(
383
+ uncond_input.input_ids.to(device),
384
+ output_hidden_states=True,
385
+ )
386
+ # We are always only interested in the pooled output of the final text encoder
387
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
388
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
389
+
390
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
391
+
392
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
393
+
394
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
395
+ bs_embed, seq_len, _ = prompt_embeds.shape
396
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
397
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
398
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
399
+
400
+ if do_classifier_free_guidance:
401
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
402
+ seq_len = negative_prompt_embeds.shape[1]
403
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
404
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
405
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
406
+
407
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
408
+ bs_embed * num_images_per_prompt, -1
409
+ )
410
+ if do_classifier_free_guidance:
411
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
412
+ bs_embed * num_images_per_prompt, -1
413
+ )
414
+
415
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
416
+
417
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
418
+ def prepare_extra_step_kwargs(self, generator, eta):
419
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
420
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
421
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
422
+ # and should be between [0, 1]
423
+
424
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
425
+ extra_step_kwargs = {}
426
+ if accepts_eta:
427
+ extra_step_kwargs["eta"] = eta
428
+
429
+ # check if the scheduler accepts generator
430
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
431
+ if accepts_generator:
432
+ extra_step_kwargs["generator"] = generator
433
+ return extra_step_kwargs
434
+
435
+ def check_inputs(
436
+ self,
437
+ prompt,
438
+ prompt_2,
439
+ height,
440
+ width,
441
+ callback_steps,
442
+ negative_prompt=None,
443
+ negative_prompt_2=None,
444
+ prompt_embeds=None,
445
+ negative_prompt_embeds=None,
446
+ pooled_prompt_embeds=None,
447
+ negative_pooled_prompt_embeds=None,
448
+ num_images_per_prompt=None,
449
+ ):
450
+ if height % 8 != 0 or width % 8 != 0:
451
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
452
+
453
+ if (callback_steps is None) or (
454
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
455
+ ):
456
+ raise ValueError(
457
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
458
+ f" {type(callback_steps)}."
459
+ )
460
+
461
+ if prompt is not None and prompt_embeds is not None:
462
+ raise ValueError(
463
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
464
+ " only forward one of the two."
465
+ )
466
+ elif prompt_2 is not None and prompt_embeds is not None:
467
+ raise ValueError(
468
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
469
+ " only forward one of the two."
470
+ )
471
+ elif prompt is None and prompt_embeds is None:
472
+ raise ValueError(
473
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
474
+ )
475
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
476
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
477
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
478
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
479
+
480
+ if negative_prompt is not None and negative_prompt_embeds is not None:
481
+ raise ValueError(
482
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
483
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
484
+ )
485
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
486
+ raise ValueError(
487
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
488
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
489
+ )
490
+
491
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
492
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
493
+ raise ValueError(
494
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
495
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
496
+ f" {negative_prompt_embeds.shape}."
497
+ )
498
+
499
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
500
+ raise ValueError(
501
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
502
+ )
503
+
504
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
505
+ raise ValueError(
506
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
507
+ )
508
+
509
+ # DemoFusion specific checks
510
+ if max(height, width) % 1024 != 0:
511
+ raise ValueError(f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}.")
512
+
513
+ if num_images_per_prompt != 1:
514
+ warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.")
515
+ num_images_per_prompt = 1
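+ # Note: this rebinds only the local variable; callers should still pass
+ # num_images_per_prompt=1 themselves, since the value is not returned.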
516
+
517
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
518
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
519
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
520
+ if isinstance(generator, list) and len(generator) != batch_size:
521
+ raise ValueError(
522
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
523
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
524
+ )
525
+
526
+ if latents is None:
527
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
528
+ else:
529
+ latents = latents.to(device)
530
+
531
+ # scale the initial noise by the standard deviation required by the scheduler
532
+ latents = latents * self.scheduler.init_noise_sigma
533
+ return latents
534
+
535
+ def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
536
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
537
+
538
+ passed_add_embed_dim = (
539
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
540
+ )
541
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
542
+
543
+ if expected_add_embed_dim != passed_add_embed_dim:
544
+ raise ValueError(
545
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
546
+ )
547
+
548
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
549
+ return add_time_ids
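+ # The added time ids pack SDXL's micro-conditioning as
+ # (original_h, original_w, crop_top, crop_left, target_h, target_w); the
+ # MultiDiffusion loop below overwrites indices 2 and 3 per patch so each view
+ # is conditioned on its own crop position.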
550
+
551
+ def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
552
+ # Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
553
+ # if panorama's height/width < window_size, num_blocks of height/width should return 1
554
+ height //= self.vae_scale_factor
555
+ width //= self.vae_scale_factor
556
+ num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
557
+ num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
558
+ total_num_blocks = int(num_blocks_height * num_blocks_width)
559
+ views = []
560
+ for i in range(total_num_blocks):
561
+ h_start = int((i // num_blocks_width) * stride)
562
+ h_end = h_start + window_size
563
+ w_start = int((i % num_blocks_width) * stride)
564
+ w_end = w_start + window_size
565
+
566
+ if h_end > height:
567
+ h_start = int(h_start + height - h_end)
568
+ h_end = int(height)
569
+ if w_end > width:
570
+ w_start = int(w_start + width - w_end)
571
+ w_end = int(width)
572
+ if h_start < 0:
573
+ h_end = int(h_end - h_start)
574
+ h_start = 0
575
+ if w_start < 0:
576
+ w_end = int(w_end - w_start)
577
+ w_start = 0
578
+
579
+ if random_jitter:
580
+ jitter_range = (window_size - stride) // 4
581
+ w_jitter = 0
582
+ h_jitter = 0
583
+ if (w_start != 0) and (w_end != width):
584
+ w_jitter = random.randint(-jitter_range, jitter_range)
585
+ elif (w_start == 0) and (w_end != width):
586
+ w_jitter = random.randint(-jitter_range, 0)
587
+ elif (w_start != 0) and (w_end == width):
588
+ w_jitter = random.randint(0, jitter_range)
589
+ if (h_start != 0) and (h_end != height):
590
+ h_jitter = random.randint(-jitter_range, jitter_range)
591
+ elif (h_start == 0) and (h_end != height):
592
+ h_jitter = random.randint(-jitter_range, 0)
593
+ elif (h_start != 0) and (h_end == height):
594
+ h_jitter = random.randint(0, jitter_range)
595
+ h_start += (h_jitter + jitter_range)
596
+ h_end += (h_jitter + jitter_range)
597
+ w_start += (w_jitter + jitter_range)
598
+ w_end += (w_jitter + jitter_range)
599
+
600
+ views.append((h_start, h_end, w_start, w_end))
601
+ return views
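+ # Worked example (illustrative): a 2048x2048 image is a 256x256 latent grid
+ # (vae_scale_factor = 8); window_size=128 with stride=64 yields 3x3 = 9
+ # overlapping views, each an (h_start, h_end, w_start, w_end) tuple in latent
+ # coordinates. With random_jitter, interior windows shift by up to
+ # (window_size - stride) // 4 latents so seams fall in different places each step.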
602
+
603
+ def tiled_decode(self, latents, current_height, current_width):
604
+ sample_size = self.unet.config.sample_size
605
+ core_size = self.unet.config.sample_size // 4
606
+ core_stride = core_size
607
+ pad_size = self.unet.config.sample_size // 8 * 3
608
+ decoder_view_batch_size = 1
609
+
610
+ if self.lowvram:
611
+ core_stride = core_size // 2
612
+ pad_size = core_size
613
+
614
+ views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
615
+ views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
616
+ latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), 'constant', 0)
617
+ image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
618
+ count = torch.zeros_like(image).to(latents.device)
619
+ # get the latents corresponding to the current view coordinates
620
+ with self.progress_bar(total=len(views_batch)) as progress_bar:
621
+ for j, batch_view in enumerate(views_batch):
622
+ vb_size = len(batch_view)
623
+ latents_for_view = torch.cat(
624
+ [
625
+ latents_[:, :, h_start:h_end+pad_size*2, w_start:w_end+pad_size*2]
626
+ for h_start, h_end, w_start, w_end in batch_view
627
+ ]
628
+ ).to(self.vae.device)
629
+ image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
630
+ h_start, h_end, w_start, w_end = views[j]
631
+ h_start, h_end, w_start, w_end = h_start * self.vae_scale_factor, h_end * self.vae_scale_factor, w_start * self.vae_scale_factor, w_end * self.vae_scale_factor
632
+ p_h_start, p_h_end, p_w_start, p_w_end = pad_size * self.vae_scale_factor, image_patch.size(2) - pad_size * self.vae_scale_factor, pad_size * self.vae_scale_factor, image_patch.size(3) - pad_size * self.vae_scale_factor
633
+ image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end].to(latents.device)
634
+ count[:, :, h_start:h_end, w_start:w_end] += 1
635
+ progress_bar.update()
636
+ image = image / count
637
+
638
+ return image
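+ # Each padded latent window is decoded independently and only its un-padded
+ # core is written back, bounding decoder memory by a single tile; `count`
+ # averages pixels covered by more than one tile (overlap occurs in lowvram mode,
+ # where the stride is half the window).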
639
+
640
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
641
+ def upcast_vae(self):
642
+ dtype = self.vae.dtype
643
+ self.vae.to(dtype=torch.float32)
644
+ use_torch_2_0_or_xformers = isinstance(
645
+ self.vae.decoder.mid_block.attentions[0].processor,
646
+ (
647
+ AttnProcessor2_0,
648
+ XFormersAttnProcessor,
649
+ LoRAXFormersAttnProcessor,
650
+ LoRAAttnProcessor2_0,
651
+ ),
652
+ )
653
+ # if xformers or torch_2_0 is used attention block does not need
654
+ # to be in float32 which can save lots of memory
655
+ if use_torch_2_0_or_xformers:
656
+ self.vae.post_quant_conv.to(dtype)
657
+ self.vae.decoder.conv_in.to(dtype)
658
+ self.vae.decoder.mid_block.to(dtype)
659
+
660
+ @torch.no_grad()
661
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
662
+ def __call__(
663
+ self,
664
+ prompt: Union[str, List[str]] = None,
665
+ prompt_2: Optional[Union[str, List[str]]] = None,
666
+ height: Optional[int] = None,
667
+ width: Optional[int] = None,
668
+ num_inference_steps: int = 50,
669
+ denoising_end: Optional[float] = None,
670
+ guidance_scale: float = 5.0,
671
+ negative_prompt: Optional[Union[str, List[str]]] = None,
672
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
673
+ num_images_per_prompt: Optional[int] = 1,
674
+ eta: float = 0.0,
675
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
676
+ latents: Optional[torch.FloatTensor] = None,
677
+ prompt_embeds: Optional[torch.FloatTensor] = None,
678
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
679
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
680
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
681
+ output_type: Optional[str] = "pil",
682
+ return_dict: bool = False,
683
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
684
+ callback_steps: int = 1,
685
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
686
+ guidance_rescale: float = 0.0,
687
+ original_size: Optional[Tuple[int, int]] = None,
688
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
689
+ target_size: Optional[Tuple[int, int]] = None,
690
+ negative_original_size: Optional[Tuple[int, int]] = None,
691
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
692
+ negative_target_size: Optional[Tuple[int, int]] = None,
693
+ ################### DemoFusion specific parameters ####################
694
+ image_lr: Optional[torch.FloatTensor] = None,
695
+ view_batch_size: int = 16,
696
+ multi_decoder: bool = True,
697
+ stride: Optional[int] = 64,
698
+ cosine_scale_1: Optional[float] = 3.,
699
+ cosine_scale_2: Optional[float] = 1.,
700
+ cosine_scale_3: Optional[float] = 1.,
701
+ sigma: Optional[float] = 1.0,
702
+ show_image: bool = False,
703
+ lowvram: bool = False,
704
+ ):
705
+ r"""
706
+ Function invoked when calling the pipeline for generation.
707
+
708
+ Args:
709
+ prompt (`str` or `List[str]`, *optional*):
710
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
711
+ instead.
712
+ prompt_2 (`str` or `List[str]`, *optional*):
713
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
714
+ used in both text-encoders
715
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
716
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
717
+ Anything below 512 pixels won't work well for
718
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
719
+ and checkpoints that are not specifically fine-tuned on low resolutions.
720
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
721
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
722
+ Anything below 512 pixels won't work well for
723
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
724
+ and checkpoints that are not specifically fine-tuned on low resolutions.
725
+ num_inference_steps (`int`, *optional*, defaults to 50):
726
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
727
+ expense of slower inference.
728
+ denoising_end (`float`, *optional*):
729
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
730
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
731
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
732
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
733
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
734
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
735
+ guidance_scale (`float`, *optional*, defaults to 5.0):
736
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
737
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
738
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
739
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
740
+ usually at the expense of lower image quality.
741
+ negative_prompt (`str` or `List[str]`, *optional*):
742
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
743
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
744
+ less than `1`).
745
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
746
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
747
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
748
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
749
+ The number of images to generate per prompt.
750
+ eta (`float`, *optional*, defaults to 0.0):
751
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
752
+ [`schedulers.DDIMScheduler`], will be ignored for others.
753
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
754
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
755
+ to make generation deterministic.
756
+ latents (`torch.FloatTensor`, *optional*):
757
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
758
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
759
+ tensor will be generated by sampling using the supplied random `generator`.
760
+ prompt_embeds (`torch.FloatTensor`, *optional*):
761
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
762
+ provided, text embeddings will be generated from `prompt` input argument.
763
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
764
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
765
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
766
+ argument.
767
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
768
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
769
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
770
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
771
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
772
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
773
+ input argument.
774
+ output_type (`str`, *optional*, defaults to `"pil"`):
775
+ The output format of the generated image. Choose between
776
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
777
+ return_dict (`bool`, *optional*, defaults to `False`):
778
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
779
+ of a plain tuple.
780
+ callback (`Callable`, *optional*):
781
+ A function that will be called every `callback_steps` steps during inference. The function will be
782
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
783
+ callback_steps (`int`, *optional*, defaults to 1):
784
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
785
+ called at every step.
786
+ cross_attention_kwargs (`dict`, *optional*):
787
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
788
+ `self.processor` in
789
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
790
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
791
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
792
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
793
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
794
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
795
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
796
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
797
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
798
+ explained in section 2.2 of
799
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
800
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
801
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
802
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
803
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
804
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
805
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
806
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
807
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
808
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
809
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
810
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
811
+ micro-conditioning as explained in section 2.2 of
812
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
813
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
814
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
815
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
816
+ micro-conditioning as explained in section 2.2 of
817
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
818
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
819
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
820
+ To negatively condition the generation process based on a target image resolution. It should be the same
821
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
822
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
823
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
824
+ ################### DemoFusion specific parameters ####################
825
+ image_lr (`torch.FloatTensor`, *optional*, defaults to None):
826
+ Low-resolution image input for upscaling. If provided, DemoFusion will encode it as the initial latent representation.
827
+ view_batch_size (`int`, defaults to 16):
828
+ The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
829
+ efficiency but comes with increased GPU memory requirements.
830
+ multi_decoder (`bool`, defaults to True):
831
+ Determine whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
832
+ a tiled decoder becomes necessary.
833
+ stride (`int`, defaults to 64):
834
+ The stride of moving local patches. A smaller stride is better for alleviating seam issues,
835
+ but it also introduces additional computational overhead and inference time.
836
+ cosine_scale_1 (`float`, defaults to 3):
837
+ Control the strength of skip-residual. For specific impacts, please refer to Appendix C
838
+ in the DemoFusion paper.
839
+ cosine_scale_2 (`float`, defaults to 1):
840
+ Control the strength of dilated sampling. For specific impacts, please refer to Appendix C
841
+ in the DemoFusion paper.
842
+ cosine_scale_3 (`float`, defaults to 1):
843
+ Control the strength of the Gaussian filter. For specific impacts, please refer to Appendix C
844
+ in the DemoFusion paper.
845
+ sigma (`float`, defaults to 1):
846
+ The standard deviation of the Gaussian filter.
847
+ show_image (`bool`, defaults to False):
848
+ Determine whether to show intermediate results during generation.
849
+ lowvram (`bool`, defaults to False):
850
+ Try to fit the pipeline into 8 GB of VRAM; requires xformers to be installed.
851
+
852
+ Examples:
853
+
854
+ Returns:
855
+ A `list` with the generated images, one for each completed phase.
856
+ """
857
+
858
+ # 0. Default height and width to unet
859
+ height = height or self.default_sample_size * self.vae_scale_factor
860
+ width = width or self.default_sample_size * self.vae_scale_factor
861
+
862
+ x1_size = self.default_sample_size * self.vae_scale_factor
863
+
864
+ height_scale = height / x1_size
865
+ width_scale = width / x1_size
866
+ scale_num = int(max(height_scale, width_scale))
867
+ aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
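+ # scale_num is the number of progressive phases: e.g. height = width = 3072 with
+ # a 1024 base resolution gives 3 phases (1024 -> 2048 -> 3072), while
+ # aspect_ratio shrinks the smaller side for non-square targets.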
868
+
869
+ original_size = original_size or (height, width)
870
+ target_size = target_size or (height, width)
871
+
872
+ # 1. Check inputs. Raise error if not correct
873
+ self.check_inputs(
874
+ prompt,
875
+ prompt_2,
876
+ height,
877
+ width,
878
+ callback_steps,
879
+ negative_prompt,
880
+ negative_prompt_2,
881
+ prompt_embeds,
882
+ negative_prompt_embeds,
883
+ pooled_prompt_embeds,
884
+ negative_pooled_prompt_embeds,
885
+ num_images_per_prompt,
886
+ )
887
+
888
+ # 2. Define call parameters
889
+ if prompt is not None and isinstance(prompt, str):
890
+ batch_size = 1
891
+ elif prompt is not None and isinstance(prompt, list):
892
+ batch_size = len(prompt)
893
+ else:
894
+ batch_size = prompt_embeds.shape[0]
895
+
896
+ device = self._execution_device
897
+ self.lowvram = lowvram
898
+ if self.lowvram:
899
+ self.vae.cpu()
900
+ self.unet.cpu()
901
+ self.text_encoder.to(device)
902
+ self.text_encoder_2.to(device)
903
+ if image_lr is not None:
+ image_lr = image_lr.cpu()  # guard against image_lr=None; .cpu() is not in-place, so reassign
904
+
905
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
906
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
907
+ # corresponds to doing no classifier free guidance.
908
+ do_classifier_free_guidance = guidance_scale > 1.0
909
+
910
+ # 3. Encode input prompt
911
+ text_encoder_lora_scale = (
912
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
913
+ )
914
+ (
915
+ prompt_embeds,
916
+ negative_prompt_embeds,
917
+ pooled_prompt_embeds,
918
+ negative_pooled_prompt_embeds,
919
+ ) = self.encode_prompt(
920
+ prompt=prompt,
921
+ prompt_2=prompt_2,
922
+ device=device,
923
+ num_images_per_prompt=num_images_per_prompt,
924
+ do_classifier_free_guidance=do_classifier_free_guidance,
925
+ negative_prompt=negative_prompt,
926
+ negative_prompt_2=negative_prompt_2,
927
+ prompt_embeds=prompt_embeds,
928
+ negative_prompt_embeds=negative_prompt_embeds,
929
+ pooled_prompt_embeds=pooled_prompt_embeds,
930
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
931
+ lora_scale=text_encoder_lora_scale,
932
+ )
933
+
934
+ # 4. Prepare timesteps
935
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
936
+
937
+ timesteps = self.scheduler.timesteps
938
+
939
+ # 5. Prepare latent variables
940
+ num_channels_latents = self.unet.config.in_channels
941
+ latents = self.prepare_latents(
942
+ batch_size * num_images_per_prompt,
943
+ num_channels_latents,
944
+ height // scale_num,
945
+ width // scale_num,
946
+ prompt_embeds.dtype,
947
+ device,
948
+ generator,
949
+ latents,
950
+ )
951
+
952
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
953
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
954
+
955
+ # 7. Prepare added time ids & embeddings
956
+ add_text_embeds = pooled_prompt_embeds
957
+ add_time_ids = self._get_add_time_ids(
958
+ original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
959
+ )
960
+ if negative_original_size is not None and negative_target_size is not None:
961
+ negative_add_time_ids = self._get_add_time_ids(
962
+ negative_original_size,
963
+ negative_crops_coords_top_left,
964
+ negative_target_size,
965
+ dtype=prompt_embeds.dtype,
966
+ )
967
+ else:
968
+ negative_add_time_ids = add_time_ids
969
+
970
+ if do_classifier_free_guidance:
971
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
972
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
973
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
974
+ del negative_prompt_embeds, negative_pooled_prompt_embeds, negative_add_time_ids
975
+
976
+ prompt_embeds = prompt_embeds.to(device)
977
+ add_text_embeds = add_text_embeds.to(device)
978
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
979
+
980
+ # 8. Denoising loop
981
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
982
+
983
+ # 7.1 Apply denoising_end
984
+ if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
985
+ discrete_timestep_cutoff = int(
986
+ round(
987
+ self.scheduler.config.num_train_timesteps
988
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
989
+ )
990
+ )
991
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
992
+ timesteps = timesteps[:num_inference_steps]
993
+
994
+ output_images = []
995
+
996
+ ###################################################### Phase Initialization ########################################################
997
+
998
+ if self.lowvram:
999
+ self.text_encoder.cpu()
1000
+ self.text_encoder_2.cpu()
1001
+
1002
+ if image_lr is None:
1003
+ print("### Phase 1 Denoising ###")
1004
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1005
+ for i, t in enumerate(timesteps):
1006
+
1007
+ if self.lowvram:
1008
+ self.vae.cpu()
1009
+ self.unet.to(device)
1010
+
1011
+ latents_for_view = latents
1012
+
1013
+ # expand the latents if we are doing classifier free guidance
1014
+ latent_model_input = (
1015
+ latents.repeat_interleave(2, dim=0)
1016
+ if do_classifier_free_guidance
1017
+ else latents
1018
+ )
1019
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1020
+
1021
+ # predict the noise residual
1022
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1023
+ noise_pred = self.unet(
1024
+ latent_model_input,
1025
+ t,
1026
+ encoder_hidden_states=prompt_embeds,
1027
+ cross_attention_kwargs=cross_attention_kwargs,
1028
+ added_cond_kwargs=added_cond_kwargs,
1029
+ return_dict=False,
1030
+ )[0]
1031
+
1032
+ # perform guidance
1033
+ if do_classifier_free_guidance:
1034
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1035
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1036
+
1037
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1038
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1039
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1040
+
1041
+ # compute the previous noisy sample x_t -> x_t-1
1042
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1043
+
1044
+ # call the callback, if provided
1045
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1046
+ progress_bar.update()
1047
+ if callback is not None and i % callback_steps == 0:
1048
+ step_idx = i // getattr(self.scheduler, "order", 1)
1049
+ callback(step_idx, t, latents)
1050
+ del latents_for_view, latent_model_input, noise_pred, noise_pred_text, noise_pred_uncond
1051
+ else:
1052
+ print("### Encoding Real Image ###")
1053
+ latents = self.vae.encode(image_lr)
1054
+ latents = latents.latent_dist.sample() * self.vae.config.scaling_factor
1055
+
1056
+ anchor_mean = latents.mean()
1057
+ anchor_std = latents.std()
1058
+ if self.lowvram:
1059
+ latents = latents.cpu()
1060
+ torch.cuda.empty_cache()
1061
+ if not output_type == "latent":
1062
+ # make sure the VAE is in float32 mode, as it overflows in float16
1063
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1064
+
1065
+ if self.lowvram:
1066
+ needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
1067
+ self.unet.cpu()
1068
+ self.vae.to(device)
1069
+
1070
+ if needs_upcasting:
1071
+ self.upcast_vae()
1072
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1073
+ if self.lowvram and multi_decoder:
1074
+ current_width_height = self.unet.config.sample_size * self.vae_scale_factor
1075
+ image = self.tiled_decode(latents, current_width_height, current_width_height)
1076
+ else:
1077
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1078
+ # cast back to fp16 if needed
1079
+ if needs_upcasting:
1080
+ self.vae.to(dtype=torch.float16)
1081
+
1082
+ image = self.image_processor.postprocess(image, output_type=output_type)
1083
+ if show_image:
1084
+ plt.figure(figsize=(10, 10))
1085
+ plt.imshow(image[0])
1086
+ plt.axis('off') # Turn off axis numbers and ticks
1087
+ plt.show()
1088
+ output_images.append(image[0])
1089
+
1090
+ ####################################################### Phase Upscaling #####################################################
1091
+ if image_lr is None:
1092
+ starting_scale = 2
1093
+ else:
1094
+ starting_scale = 1
1095
+ for current_scale_num in range(starting_scale, scale_num + 1):
1096
+ if self.lowvram:
1097
+ latents = latents.to(device)
1098
+ self.unet.to(device)
1099
+ torch.cuda.empty_cache()
1100
+ print("### Phase {} Denoising ###".format(current_scale_num))
1101
+ current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1102
+ current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1103
+ if height > width:
1104
+ current_width = int(current_width * aspect_ratio)
1105
+ else:
1106
+ current_height = int(current_height * aspect_ratio)
1107
+
1108
+ latents = F.interpolate(latents.to(device), size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)), mode='bicubic')
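+ # Upsample the previous phase's latents bicubically to the new resolution; they
+ # carry the low-frequency structure that this phase will refine.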
1109
+
1110
+ noise_latents = []
1111
+ noise = torch.randn_like(latents)
1112
+ for timestep in timesteps:
1113
+ noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
1114
+ noise_latents.append(noise_latent)
1115
+ latents = noise_latents[0]
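+ # Pre-compute noised copies of the upsampled latents for every timestep; these
+ # act as the skip-residual reference injected during this phase's denoising.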
1116
+
1117
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1118
+ for i, t in enumerate(timesteps):
1119
+ count = torch.zeros_like(latents)
1120
+ value = torch.zeros_like(latents)
1121
+ cosine_factor = 0.5 * (1 + torch.cos(torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps)).cpu()
1122
+
1123
+ c1 = cosine_factor ** cosine_scale_1
1124
+ latents = latents * (1 - c1) + noise_latents[i] * c1
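+ # Skip-residual mixing: cosine_factor starts near 1 and decays to 0, so early
+ # steps follow the noised upsampled latents while later steps trust the current
+ # denoised estimate (strength controlled by cosine_scale_1).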
1125
+
1126
+ ############################################# MultiDiffusion #############################################
1127
+
1128
+ views = self.get_views(current_height, current_width, stride=stride, window_size=self.unet.config.sample_size, random_jitter=True)
1129
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1130
+
1131
+ jitter_range = (self.unet.config.sample_size - stride) // 4
1132
+ latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), 'constant', 0)
1133
+
1134
+ count_local = torch.zeros_like(latents_)
1135
+ value_local = torch.zeros_like(latents_)
1136
+
1137
+ for j, batch_view in enumerate(views_batch):
1138
+ vb_size = len(batch_view)
1139
+
1140
+ # get the latents corresponding to the current view coordinates
1141
+ latents_for_view = torch.cat(
1142
+ [
1143
+ latents_[:, :, h_start:h_end, w_start:w_end]
1144
+ for h_start, h_end, w_start, w_end in batch_view
1145
+ ]
1146
+ )
1147
+
1148
+ # expand the latents if we are doing classifier free guidance
1149
+ latent_model_input = latents_for_view
1150
+ latent_model_input = (
1151
+ latent_model_input.repeat_interleave(2, dim=0)
1152
+ if do_classifier_free_guidance
1153
+ else latent_model_input
1154
+ )
1155
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1156
+
1157
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1158
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1159
+ add_time_ids_input = []
1160
+ for h_start, h_end, w_start, w_end in batch_view:
1161
+ add_time_ids_ = add_time_ids.clone()
1162
+ add_time_ids_[:, 2] = h_start * self.vae_scale_factor
1163
+ add_time_ids_[:, 3] = w_start * self.vae_scale_factor
1164
+ add_time_ids_input.append(add_time_ids_)
1165
+ add_time_ids_input = torch.cat(add_time_ids_input)
1166
+
1167
+ # predict the noise residual
1168
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1169
+ noise_pred = self.unet(
1170
+ latent_model_input,
1171
+ t,
1172
+ encoder_hidden_states=prompt_embeds_input,
1173
+ cross_attention_kwargs=cross_attention_kwargs,
1174
+ added_cond_kwargs=added_cond_kwargs,
1175
+ return_dict=False,
1176
+ )[0]
1177
+
1178
+ if do_classifier_free_guidance:
1179
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1180
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1181
+
1182
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1183
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1184
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1185
+
1186
+ # compute the previous noisy sample x_t -> x_t-1
1187
+ self.scheduler._init_step_index(t)
1188
+ latents_denoised_batch = self.scheduler.step(
1189
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
1190
+
1191
+ # extract value from batch
1192
+ for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
1193
+ latents_denoised_batch.chunk(vb_size), batch_view
1194
+ ):
1195
+ value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
1196
+ count_local[:, :, h_start:h_end, w_start:w_end] += 1
1197
+
1198
+ value_local = value_local[:, :, jitter_range: jitter_range + current_height // self.vae_scale_factor, jitter_range: jitter_range + current_width // self.vae_scale_factor]
1199
+ count_local = count_local[:, :, jitter_range: jitter_range + current_height // self.vae_scale_factor, jitter_range: jitter_range + current_width // self.vae_scale_factor]
1200
+
1201
+ c2 = cosine_factor ** cosine_scale_2
1202
+
1203
+ value += value_local / count_local * (1 - c2)
1204
+ count += torch.ones_like(value_local) * (1 - c2)
1205
+
1206
+ ############################################# Dilated Sampling #############################################
1207
+
1208
+ views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
1209
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1210
+
1211
+ h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
1212
+ w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
1213
+ latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), 'constant', 0)
1214
+
1215
+ count_global = torch.zeros_like(latents_)
1216
+ value_global = torch.zeros_like(latents_)
1217
+
1218
+ c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2
1219
+ std_, mean_ = latents_.std(), latents_.mean()
1220
+ latents_gaussian = gaussian_filter(latents_, kernel_size=(2*current_scale_num-1), sigma=sigma*c3)
1221
+ latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_
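+ # Dilated sampling denoises a Gaussian-smoothed copy of the latents,
+ # renormalized to the original mean/std; the smoothing strength decays over the
+ # schedule via c3, which suppresses graininess in the global paths early on.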
1222
+
1223
+ for j, batch_view in enumerate(views_batch):
1224
+ latents_for_view = torch.cat(
1225
+ [
1226
+ latents_[:, :, h::current_scale_num, w::current_scale_num]
1227
+ for h, w in batch_view
1228
+ ]
1229
+ )
1230
+ latents_for_view_gaussian = torch.cat(
1231
+ [
1232
+ latents_gaussian[:, :, h::current_scale_num, w::current_scale_num]
1233
+ for h, w in batch_view
1234
+ ]
1235
+ )
1236
+
1237
+ vb_size = latents_for_view.size(0)
1238
+
1239
+ # expand the latents if we are doing classifier free guidance
1240
+ latent_model_input = latents_for_view_gaussian
1241
+ latent_model_input = (
1242
+ latent_model_input.repeat_interleave(2, dim=0)
1243
+ if do_classifier_free_guidance
1244
+ else latent_model_input
1245
+ )
1246
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1247
+
1248
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1249
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1250
+ add_time_ids_input = torch.cat([add_time_ids] * vb_size)
1251
+
1252
+ # predict the noise residual
1253
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1254
+ noise_pred = self.unet(
1255
+ latent_model_input,
1256
+ t,
1257
+ encoder_hidden_states=prompt_embeds_input,
1258
+ cross_attention_kwargs=cross_attention_kwargs,
1259
+ added_cond_kwargs=added_cond_kwargs,
1260
+ return_dict=False,
1261
+ )[0]
1262
+
1263
+ if do_classifier_free_guidance:
1264
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1265
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1266
+
1267
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1268
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1269
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1270
+
1271
+ # compute the previous noisy sample x_t -> x_t-1
1272
+ self.scheduler._init_step_index(t)
1273
+ latents_denoised_batch = self.scheduler.step(
1274
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
1275
+
1276
+ # extract value from batch
1277
+ for latents_view_denoised, (h, w) in zip(
1278
+ latents_denoised_batch.chunk(vb_size), batch_view
1279
+ ):
1280
+ value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
1281
+ count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
1282
+
1283
+ c2 = cosine_factor ** cosine_scale_2
1284
+
1285
+ value_global = value_global[:, :, h_pad:, w_pad:]
1286
+
1287
+ value += value_global * c2
1288
+ count += torch.ones_like(value_global) * c2
1289
+
1290
+ ###########################################################
1291
+
1292
+ latents = torch.where(count > 0, value / count, value)
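+ # Fuse the two denoising paths: MultiDiffusion patches were accumulated with
+ # weight (1 - c2) and the dilated global paths with weight c2; dividing by
+ # `count` yields their cosine-scheduled weighted average.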
1293
+
1294
+ # call the callback, if provided
1295
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1296
+ progress_bar.update()
1297
+ if callback is not None and i % callback_steps == 0:
1298
+ step_idx = i // getattr(self.scheduler, "order", 1)
1299
+ callback(step_idx, t, latents)
1300
+
1301
+ #########################################################################################################################################
1302
+
1303
+ latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
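+ # Re-anchor the latent statistics to those recorded after phase 1
+ # (anchor_mean / anchor_std) to prevent color and contrast drift across phases.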
1304
+ if self.lowvram:
1305
+ latents = latents.cpu()
1306
+ torch.cuda.empty_cache()
1307
+ if not output_type == "latent":
1308
+ # make sure the VAE is in float32 mode, as it overflows in float16
1309
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1310
+
1311
+ if self.lowvram:
1312
+ needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
1313
+ self.unet.cpu()
1314
+ self.vae.to(device)
1315
+
1316
+ if needs_upcasting:
1317
+ self.upcast_vae()
1318
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1319
+
1320
+ print("### Phase {} Decoding ###".format(current_scale_num))
1321
+ if multi_decoder:
1322
+ image = self.tiled_decode(latents, current_height, current_width)
1323
+ else:
1324
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1325
+
1326
+ # cast back to fp16 if needed
1327
+ if needs_upcasting:
1328
+ self.vae.to(dtype=torch.float16)
1329
+ else:
1330
+ image = latents
1331
+
1332
+ if not output_type == "latent":
1333
+ image = self.image_processor.postprocess(image, output_type=output_type)
1334
+ if show_image:
1335
+ plt.figure(figsize=(10, 10))
1336
+ plt.imshow(image[0])
1337
+ plt.axis('off') # Turn off axis numbers and ticks
1338
+ plt.show()
1339
+ output_images.append(image[0])
1340
+
1341
+ # Offload all models
1342
+ self.maybe_free_model_hooks()
1343
+
1344
+ return output_images
1345
+
1346
+ # Override to properly handle the loading and unloading of the additional text encoder.
1347
+ def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
1348
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1349
+ # it here explicitly to be able to tell that it's coming from an SDXL
1350
+ # pipeline.
1351
+
1352
+ # Remove any existing hooks.
1353
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
1354
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
1355
+ else:
1356
+ raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
1357
+
1358
+ is_model_cpu_offload = False
1359
+ is_sequential_cpu_offload = False
1360
+ recursive = False
1361
+ for _, component in self.components.items():
1362
+ if isinstance(component, torch.nn.Module):
1363
+ if hasattr(component, "_hf_hook"):
1364
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
1365
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
1366
+ logger.info(
1367
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
1368
+ )
1369
+ recursive = is_sequential_cpu_offload
1370
+ remove_hook_from_module(component, recurse=recursive)
1371
+ state_dict, network_alphas = self.lora_state_dict(
1372
+ pretrained_model_name_or_path_or_dict,
1373
+ unet_config=self.unet.config,
1374
+ **kwargs,
1375
+ )
1376
+ self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
1377
+
1378
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1379
+ if len(text_encoder_state_dict) > 0:
1380
+ self.load_lora_into_text_encoder(
1381
+ text_encoder_state_dict,
1382
+ network_alphas=network_alphas,
1383
+ text_encoder=self.text_encoder,
1384
+ prefix="text_encoder",
1385
+ lora_scale=self.lora_scale,
1386
+ )
1387
+
1388
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1389
+ if len(text_encoder_2_state_dict) > 0:
1390
+ self.load_lora_into_text_encoder(
1391
+ text_encoder_2_state_dict,
1392
+ network_alphas=network_alphas,
1393
+ text_encoder=self.text_encoder_2,
1394
+ prefix="text_encoder_2",
1395
+ lora_scale=self.lora_scale,
1396
+ )
1397
+
1398
+ # Offload back.
1399
+ if is_model_cpu_offload:
1400
+ self.enable_model_cpu_offload()
1401
+ elif is_sequential_cpu_offload:
1402
+ self.enable_sequential_cpu_offload()
1403
+
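+ # Illustrative sketch (added; the repo id below is an assumption, not from this
+ # repository):
+ #   pipe.load_lora_weights("some-user/some-sdxl-lora")
+ # Any accelerate offload hooks removed above are re-applied after loading.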
1404
+ @classmethod
1405
+ def save_lora_weights(
1406
+ cls,
1407
+ save_directory: Union[str, os.PathLike],
1408
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1409
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1410
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1411
+ is_main_process: bool = True,
1412
+ weight_name: str = None,
1413
+ save_function: Callable = None,
1414
+ safe_serialization: bool = True,
1415
+ ):
1416
+ state_dict = {}
1417
+
1418
+ def pack_weights(layers, prefix):
1419
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1420
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1421
+ return layers_state_dict
1422
+
1423
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1424
+ raise ValueError(
1425
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1426
+ )
1427
+
1428
+ if unet_lora_layers:
1429
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1430
+
1431
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1432
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1433
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1434
+
1435
+ cls.write_lora_layers(
1436
+ state_dict=state_dict,
1437
+ save_directory=save_directory,
1438
+ is_main_process=is_main_process,
1439
+ weight_name=weight_name,
1440
+ save_function=save_function,
1441
+ safe_serialization=safe_serialization,
1442
+ )
1443
+
1444
+ def _remove_text_encoder_monkey_patch(self):
1445
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1446
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
competitors_inference_code/DemoFusion/pipeline_demofusion_sdxl_controlnet.py ADDED
@@ -0,0 +1,1796 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ import os
18
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
19
+ import matplotlib.pyplot as plt
20
+
21
+ import numpy as np
22
+ import PIL.Image
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import random
26
+ import warnings
27
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
28
+
29
+ from diffusers.utils.import_utils import is_invisible_watermark_available
30
+
31
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
32
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
33
+ from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
34
+ from diffusers.models.attention_processor import (
35
+ AttnProcessor2_0,
36
+ LoRAAttnProcessor2_0,
37
+ LoRAXFormersAttnProcessor,
38
+ XFormersAttnProcessor,
39
+ )
40
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
41
+ from diffusers.schedulers import KarrasDiffusionSchedulers
42
+ from diffusers.utils import (
43
+ is_accelerate_available,
44
+ is_accelerate_version,
45
+ logging,
46
+ replace_example_docstring,
47
+ )
48
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
49
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
50
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
51
+
52
+
53
+ if is_invisible_watermark_available():
54
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
55
+
56
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
57
+
58
+
59
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
60
+
61
+
62
+ EXAMPLE_DOC_STRING = """
63
+ Examples:
64
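+ ```py
+ >>> # Minimal usage sketch (added for illustration). The model ids below are
+ >>> # assumptions, not pinned by this repository.
+ >>> import torch
+ >>> from PIL import Image
+ >>> from diffusers import ControlNetModel
+ >>> controlnet = ControlNetModel.from_pretrained(
+ ...     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
+ ... )
+ >>> pipe = DemoFusionSDXLControlNetPipeline.from_pretrained(
+ ...     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
+ ... ).to("cuda")
+ >>> canny_image = Image.new("RGB", (1024, 1024))  # stand-in edge map; use a real one
+ >>> images = pipe(
+ ...     prompt="a photo of an astronaut riding a horse",
+ ...     condition_image=canny_image,
+ ...     height=2048,
+ ...     width=2048,
+ ... )
+ ```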
+ """
65
+
66
+ def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
67
+ x_coord = torch.arange(kernel_size)
68
+ gaussian_1d = torch.exp(-(x_coord - (kernel_size - 1) / 2) ** 2 / (2 * sigma ** 2))
69
+ gaussian_1d = gaussian_1d / gaussian_1d.sum()
70
+ gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
71
+ kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
72
+
73
+ return kernel
74
+
75
+ def gaussian_filter(latents, kernel_size=3, sigma=1.0):
76
+ channels = latents.shape[1]
77
+ kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
78
+ blurred_latents = F.conv2d(latents, kernel, padding=kernel_size//2, groups=channels)
79
+
80
+ return blurred_latents
81
+
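+ # Illustrative note (added): `gaussian_filter` is a depthwise convolution with
+ # padding=kernel_size // 2, so for odd kernel sizes it preserves the latent
+ # shape, e.g. (assumed SDXL latent shape for a 1024px image):
+ #   latents = torch.randn(1, 4, 128, 128)
+ #   assert gaussian_filter(latents, kernel_size=3, sigma=1.0).shape == latents.shape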
82
+ class DemoFusionSDXLControlNetPipeline(
83
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
84
+ ):
85
+ r"""
86
+ Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.
87
+
88
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
89
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
90
+
91
+ The pipeline also inherits the following loading methods:
92
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
93
+ - [`loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
94
+ - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
95
+
96
+ Args:
97
+ vae ([`AutoencoderKL`]):
98
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
99
+ text_encoder ([`~transformers.CLIPTextModel`]):
100
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
101
+ text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
102
+ Second frozen text-encoder
103
+ ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
104
+ tokenizer ([`~transformers.CLIPTokenizer`]):
105
+ A `CLIPTokenizer` to tokenize text.
106
+ tokenizer_2 ([`~transformers.CLIPTokenizer`]):
107
+ A `CLIPTokenizer` to tokenize text.
108
+ unet ([`UNet2DConditionModel`]):
109
+ A `UNet2DConditionModel` to denoise the encoded image latents.
110
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
111
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
112
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
113
+ additional conditioning.
114
+ scheduler ([`SchedulerMixin`]):
115
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
116
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
117
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
118
+ Whether the negative prompt embeddings should always be set to 0. Also see the config of
119
+ `stabilityai/stable-diffusion-xl-base-1.0`.
120
+ add_watermarker (`bool`, *optional*):
121
+ Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
122
+ watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
123
+ watermarker is used.
124
+ """
125
+ model_cpu_offload_seq = (
126
+ "text_encoder->text_encoder_2->unet->vae" # leave controlnet out on purpose because it iterates with unet
127
+ )
128
+
129
+ def __init__(
130
+ self,
131
+ vae: AutoencoderKL,
132
+ text_encoder: CLIPTextModel,
133
+ text_encoder_2: CLIPTextModelWithProjection,
134
+ tokenizer: CLIPTokenizer,
135
+ tokenizer_2: CLIPTokenizer,
136
+ unet: UNet2DConditionModel,
137
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
138
+ scheduler: KarrasDiffusionSchedulers,
139
+ force_zeros_for_empty_prompt: bool = True,
140
+ add_watermarker: Optional[bool] = None,
141
+ ):
142
+ super().__init__()
143
+
144
+ if isinstance(controlnet, (list, tuple)):
145
+ controlnet = MultiControlNetModel(controlnet)
146
+
147
+ self.register_modules(
148
+ vae=vae,
149
+ text_encoder=text_encoder,
150
+ text_encoder_2=text_encoder_2,
151
+ tokenizer=tokenizer,
152
+ tokenizer_2=tokenizer_2,
153
+ unet=unet,
154
+ controlnet=controlnet,
155
+ scheduler=scheduler,
156
+ )
157
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
158
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
159
+ self.control_image_processor = VaeImageProcessor(
160
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
161
+ )
162
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
163
+
164
+ if add_watermarker:
165
+ self.watermark = StableDiffusionXLWatermarker()
166
+ else:
167
+ self.watermark = None
168
+
169
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
170
+
171
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
172
+ def enable_vae_slicing(self):
173
+ r"""
174
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
175
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
176
+ """
177
+ self.vae.enable_slicing()
178
+
179
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
180
+ def disable_vae_slicing(self):
181
+ r"""
182
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
183
+ computing decoding in one step.
184
+ """
185
+ self.vae.disable_slicing()
186
+
187
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
188
+ def enable_vae_tiling(self):
189
+ r"""
190
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
191
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
192
+ processing larger images.
193
+ """
194
+ self.vae.enable_tiling()
195
+
196
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
197
+ def disable_vae_tiling(self):
198
+ r"""
199
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
200
+ computing decoding in one step.
201
+ """
202
+ self.vae.disable_tiling()
203
+
204
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
205
+ def encode_prompt(
206
+ self,
207
+ prompt: str,
208
+ prompt_2: Optional[str] = None,
209
+ device: Optional[torch.device] = None,
210
+ num_images_per_prompt: int = 1,
211
+ do_classifier_free_guidance: bool = True,
212
+ negative_prompt: Optional[str] = None,
213
+ negative_prompt_2: Optional[str] = None,
214
+ prompt_embeds: Optional[torch.FloatTensor] = None,
215
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
216
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
217
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
218
+ lora_scale: Optional[float] = None,
219
+ ):
220
+ r"""
221
+ Encodes the prompt into text encoder hidden states.
222
+
223
+ Args:
224
+ prompt (`str` or `List[str]`, *optional*):
225
+ prompt to be encoded
226
+ prompt_2 (`str` or `List[str]`, *optional*):
227
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
228
+ used in both text-encoders
229
+ device: (`torch.device`):
230
+ torch device
231
+ num_images_per_prompt (`int`):
232
+ number of images that should be generated per prompt
233
+ do_classifier_free_guidance (`bool`):
234
+ whether to use classifier free guidance or not
235
+ negative_prompt (`str` or `List[str]`, *optional*):
236
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
237
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
238
+ less than `1`).
239
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
240
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
241
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
242
+ prompt_embeds (`torch.FloatTensor`, *optional*):
243
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
244
+ provided, text embeddings will be generated from `prompt` input argument.
245
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
246
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
247
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
248
+ argument.
249
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
250
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
251
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
252
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
253
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
254
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
255
+ input argument.
256
+ lora_scale (`float`, *optional*):
257
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
258
+ """
259
+ device = device or self._execution_device
260
+
261
+ # set lora scale so that monkey patched LoRA
262
+ # function of text encoder can correctly access it
263
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
264
+ self._lora_scale = lora_scale
265
+
266
+ # dynamically adjust the LoRA scale
267
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
268
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
269
+
270
+ if prompt is not None and isinstance(prompt, str):
271
+ batch_size = 1
272
+ elif prompt is not None and isinstance(prompt, list):
273
+ batch_size = len(prompt)
274
+ else:
275
+ batch_size = prompt_embeds.shape[0]
276
+
277
+ # Define tokenizers and text encoders
278
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
279
+ text_encoders = (
280
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
281
+ )
282
+
283
+ if prompt_embeds is None:
284
+ prompt_2 = prompt_2 or prompt
285
+ # textual inversion: process multi-vector tokens if necessary
286
+ prompt_embeds_list = []
287
+ prompts = [prompt, prompt_2]
288
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
289
+ if isinstance(self, TextualInversionLoaderMixin):
290
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
291
+
292
+ text_inputs = tokenizer(
293
+ prompt,
294
+ padding="max_length",
295
+ max_length=tokenizer.model_max_length,
296
+ truncation=True,
297
+ return_tensors="pt",
298
+ )
299
+
300
+ text_input_ids = text_inputs.input_ids
301
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
302
+
303
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
304
+ text_input_ids, untruncated_ids
305
+ ):
306
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
307
+ logger.warning(
308
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
309
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
310
+ )
311
+
312
+ prompt_embeds = text_encoder(
313
+ text_input_ids.to(device),
314
+ output_hidden_states=True,
315
+ )
316
+
317
+ # We are always interested only in the pooled output of the final text encoder
318
+ pooled_prompt_embeds = prompt_embeds[0]
319
+ prompt_embeds = prompt_embeds.hidden_states[-2]
320
+
321
+ prompt_embeds_list.append(prompt_embeds)
322
+
323
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
324
+
325
+ # get unconditional embeddings for classifier free guidance
326
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
327
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
328
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
329
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
330
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
331
+ negative_prompt = negative_prompt or ""
332
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
333
+
334
+ uncond_tokens: List[str]
335
+ if prompt is not None and type(prompt) is not type(negative_prompt):
336
+ raise TypeError(
337
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
338
+ f" {type(prompt)}."
339
+ )
340
+ elif isinstance(negative_prompt, str):
341
+ uncond_tokens = [negative_prompt, negative_prompt_2]
342
+ elif batch_size != len(negative_prompt):
343
+ raise ValueError(
344
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
345
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
346
+ " the batch size of `prompt`."
347
+ )
348
+ else:
349
+ uncond_tokens = [negative_prompt, negative_prompt_2]
350
+
351
+ negative_prompt_embeds_list = []
352
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
353
+ if isinstance(self, TextualInversionLoaderMixin):
354
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
355
+
356
+ max_length = prompt_embeds.shape[1]
357
+ uncond_input = tokenizer(
358
+ negative_prompt,
359
+ padding="max_length",
360
+ max_length=max_length,
361
+ truncation=True,
362
+ return_tensors="pt",
363
+ )
364
+
365
+ negative_prompt_embeds = text_encoder(
366
+ uncond_input.input_ids.to(device),
367
+ output_hidden_states=True,
368
+ )
369
+ # We are always interested only in the pooled output of the final text encoder
370
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
371
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
372
+
373
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
374
+
375
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
376
+
377
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
378
+ bs_embed, seq_len, _ = prompt_embeds.shape
379
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
380
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
381
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
382
+
383
+ if do_classifier_free_guidance:
384
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
385
+ seq_len = negative_prompt_embeds.shape[1]
386
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
387
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
388
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
389
+
390
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
391
+ bs_embed * num_images_per_prompt, -1
392
+ )
393
+ if do_classifier_free_guidance:
394
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
395
+ bs_embed * num_images_per_prompt, -1
396
+ )
397
+
398
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
399
+
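+ # Illustrative note (added): the hidden states of the two text encoders are
+ # concatenated along the channel axis, so SDXL prompt embeddings have shape
+ # (batch, 77, 768 + 1280) = (batch, 77, 2048).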
400
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
401
+ def prepare_extra_step_kwargs(self, generator, eta):
402
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
403
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
404
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
405
+ # and should be between [0, 1]
406
+
407
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
408
+ extra_step_kwargs = {}
409
+ if accepts_eta:
410
+ extra_step_kwargs["eta"] = eta
411
+
412
+ # check if the scheduler accepts generator
413
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
414
+ if accepts_generator:
415
+ extra_step_kwargs["generator"] = generator
416
+ return extra_step_kwargs
417
+
418
+ def check_inputs(
419
+ self,
420
+ prompt,
421
+ prompt_2,
422
+ image,
423
+ callback_steps,
424
+ negative_prompt=None,
425
+ negative_prompt_2=None,
426
+ prompt_embeds=None,
427
+ negative_prompt_embeds=None,
428
+ pooled_prompt_embeds=None,
429
+ negative_pooled_prompt_embeds=None,
430
+ controlnet_conditioning_scale=1.0,
431
+ control_guidance_start=0.0,
432
+ control_guidance_end=1.0,
433
+ ):
434
+ if (callback_steps is None) or (
435
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
436
+ ):
437
+ raise ValueError(
438
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
439
+ f" {type(callback_steps)}."
440
+ )
441
+
442
+ if prompt is not None and prompt_embeds is not None:
443
+ raise ValueError(
444
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
445
+ " only forward one of the two."
446
+ )
447
+ elif prompt_2 is not None and prompt_embeds is not None:
448
+ raise ValueError(
449
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
450
+ " only forward one of the two."
451
+ )
452
+ elif prompt is None and prompt_embeds is None:
453
+ raise ValueError(
454
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
455
+ )
456
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
457
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
458
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
459
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
460
+
461
+ if negative_prompt is not None and negative_prompt_embeds is not None:
462
+ raise ValueError(
463
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
464
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
465
+ )
466
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
467
+ raise ValueError(
468
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
469
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
470
+ )
471
+
472
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
473
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
474
+ raise ValueError(
475
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
476
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
477
+ f" {negative_prompt_embeds.shape}."
478
+ )
479
+
480
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
481
+ raise ValueError(
482
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
483
+ )
484
+
485
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
486
+ raise ValueError(
487
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
488
+ )
489
+
490
+ # `prompt` needs more sophisticated handling when there are multiple
491
+ # conditionings.
492
+ if isinstance(self.controlnet, MultiControlNetModel):
493
+ if isinstance(prompt, list):
494
+ logger.warning(
495
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
496
+ " prompts. The conditionings will be fixed across the prompts."
497
+ )
498
+
499
+ # Check `image`
500
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
501
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
502
+ )
503
+ if (
504
+ isinstance(self.controlnet, ControlNetModel)
505
+ or is_compiled
506
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
507
+ ):
508
+ self.check_image(image, prompt, prompt_embeds)
509
+ elif (
510
+ isinstance(self.controlnet, MultiControlNetModel)
511
+ or is_compiled
512
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
513
+ ):
514
+ if not isinstance(image, list):
515
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
516
+
517
+ # When `image` is a nested list:
518
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
519
+ elif any(isinstance(i, list) for i in image):
520
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
521
+ elif len(image) != len(self.controlnet.nets):
522
+ raise ValueError(
523
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
524
+ )
525
+
526
+ for image_ in image:
527
+ self.check_image(image_, prompt, prompt_embeds)
528
+ else:
529
+ raise TypeError(f"Unsupported ControlNet type: {type(self.controlnet)}")
530
+
531
+ # Check `controlnet_conditioning_scale`
532
+ if (
533
+ isinstance(self.controlnet, ControlNetModel)
534
+ or is_compiled
535
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
536
+ ):
537
+ if not isinstance(controlnet_conditioning_scale, float):
538
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
539
+ elif (
540
+ isinstance(self.controlnet, MultiControlNetModel)
541
+ or is_compiled
542
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
543
+ ):
544
+ if isinstance(controlnet_conditioning_scale, list):
545
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
546
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
547
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
548
+ self.controlnet.nets
549
+ ):
550
+ raise ValueError(
551
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
552
+ " the same length as the number of controlnets"
553
+ )
554
+ else:
555
+ raise TypeError(f"Unsupported ControlNet type: {type(self.controlnet)}")
556
+
557
+ if not isinstance(control_guidance_start, (tuple, list)):
558
+ control_guidance_start = [control_guidance_start]
559
+
560
+ if not isinstance(control_guidance_end, (tuple, list)):
561
+ control_guidance_end = [control_guidance_end]
562
+
563
+ if len(control_guidance_start) != len(control_guidance_end):
564
+ raise ValueError(
565
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
566
+ )
567
+
568
+ if isinstance(self.controlnet, MultiControlNetModel):
569
+ if len(control_guidance_start) != len(self.controlnet.nets):
570
+ raise ValueError(
571
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
572
+ )
573
+
574
+ for start, end in zip(control_guidance_start, control_guidance_end):
575
+ if start >= end:
576
+ raise ValueError(
577
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
578
+ )
579
+ if start < 0.0:
580
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
581
+ if end > 1.0:
582
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
583
+
584
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
585
+ def check_image(self, image, prompt, prompt_embeds):
586
+ image_is_pil = isinstance(image, PIL.Image.Image)
587
+ image_is_tensor = isinstance(image, torch.Tensor)
588
+ image_is_np = isinstance(image, np.ndarray)
589
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
590
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
591
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
592
+
593
+ if (
594
+ not image_is_pil
595
+ and not image_is_tensor
596
+ and not image_is_np
597
+ and not image_is_pil_list
598
+ and not image_is_tensor_list
599
+ and not image_is_np_list
600
+ ):
601
+ raise TypeError(
602
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
603
+ )
604
+
605
+ if image_is_pil:
606
+ image_batch_size = 1
607
+ else:
608
+ image_batch_size = len(image)
609
+
610
+ if prompt is not None and isinstance(prompt, str):
611
+ prompt_batch_size = 1
612
+ elif prompt is not None and isinstance(prompt, list):
613
+ prompt_batch_size = len(prompt)
614
+ elif prompt_embeds is not None:
615
+ prompt_batch_size = prompt_embeds.shape[0]
616
+
617
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
618
+ raise ValueError(
619
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
620
+ )
621
+
622
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
623
+ def prepare_image(
624
+ self,
625
+ image,
626
+ width,
627
+ height,
628
+ batch_size,
629
+ num_images_per_prompt,
630
+ device,
631
+ dtype,
632
+ do_classifier_free_guidance=False,
633
+ guess_mode=False,
634
+ ):
635
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
636
+ image_batch_size = image.shape[0]
637
+
638
+ if image_batch_size == 1:
639
+ repeat_by = batch_size
640
+ else:
641
+ # image batch size is the same as prompt batch size
642
+ repeat_by = num_images_per_prompt
643
+
644
+ image = image.repeat_interleave(repeat_by, dim=0)
645
+
646
+ image = image.to(device=device, dtype=dtype)
647
+
648
+ if do_classifier_free_guidance and not guess_mode:
649
+ image = torch.cat([image] * 2)
650
+
651
+ return image
652
+
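+ # Illustrative note (added): under classifier-free guidance (with guess_mode
+ # off) the conditioning image is duplicated along the batch axis so a single
+ # forward pass serves both the conditional and unconditional branches.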
653
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
654
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
655
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
656
+ if isinstance(generator, list) and len(generator) != batch_size:
657
+ raise ValueError(
658
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
659
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
660
+ )
661
+
662
+ if latents is None:
663
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
664
+ else:
665
+ latents = latents.to(device)
666
+
667
+ # scale the initial noise by the standard deviation required by the scheduler
668
+ latents = latents * self.scheduler.init_noise_sigma
669
+ return latents
670
+
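+ # Illustrative note (added): fresh latents are scaled by
+ # `scheduler.init_noise_sigma` (1.0 for DDIM-like schedulers, larger for
+ # Karras-style ones) so the first step sees noise at the scale the scheduler expects.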
671
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
672
+ def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
673
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
674
+
675
+ passed_add_embed_dim = (
676
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
677
+ )
678
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
679
+
680
+ if expected_add_embed_dim != passed_add_embed_dim:
681
+ raise ValueError(
682
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
683
+ )
684
+
685
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
686
+ return add_time_ids
687
+
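+ # Illustrative note (added): the packed tuple is
+ # (orig_h, orig_w, crop_top, crop_left, target_h, target_w); with SDXL's
+ # addition_time_embed_dim of 256 this gives 6 * 256 + projection_dim features.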
688
+ def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
689
+ # Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
690
+ # if panorama's height/width < window_size, num_blocks of height/width should return 1
691
+ height //= self.vae_scale_factor
692
+ width //= self.vae_scale_factor
693
+ num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
694
+ num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
695
+ total_num_blocks = int(num_blocks_height * num_blocks_width)
696
+ views = []
697
+ for i in range(total_num_blocks):
698
+ h_start = int((i // num_blocks_width) * stride)
699
+ h_end = h_start + window_size
700
+ w_start = int((i % num_blocks_width) * stride)
701
+ w_end = w_start + window_size
702
+
703
+ if h_end > height:
704
+ h_start = int(h_start + height - h_end)
705
+ h_end = int(height)
706
+ if w_end > width:
707
+ w_start = int(w_start + width - w_end)
708
+ w_end = int(width)
709
+ if h_start < 0:
710
+ h_end = int(h_end - h_start)
711
+ h_start = 0
712
+ if w_start < 0:
713
+ w_end = int(w_end - w_start)
714
+ w_start = 0
715
+
716
+ if random_jitter:
717
+ jitter_range = (window_size - stride) // 4
718
+ w_jitter = 0
719
+ h_jitter = 0
720
+ if (w_start != 0) and (w_end != width):
721
+ w_jitter = random.randint(-jitter_range, jitter_range)
722
+ elif (w_start == 0) and (w_end != width):
723
+ w_jitter = random.randint(-jitter_range, 0)
724
+ elif (w_start != 0) and (w_end == width):
725
+ w_jitter = random.randint(0, jitter_range)
726
+ if (h_start != 0) and (h_end != height):
727
+ h_jitter = random.randint(-jitter_range, jitter_range)
728
+ elif (h_start == 0) and (h_end != height):
729
+ h_jitter = random.randint(-jitter_range, 0)
730
+ elif (h_start != 0) and (h_end == height):
731
+ h_jitter = random.randint(0, jitter_range)
732
+ h_start += (h_jitter + jitter_range)
733
+ h_end += (h_jitter + jitter_range)
734
+ w_start += (w_jitter + jitter_range)
735
+ w_end += (w_jitter + jitter_range)
736
+
737
+ views.append((h_start, h_end, w_start, w_end))
738
+ return views
739
+
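+ # Illustrative note (added): e.g. a 2048x2048 image maps to a 256x256 latent,
+ # and the defaults window_size=128, stride=64 yield a 3x3 grid of overlapping
+ # (h_start, h_end, w_start, w_end) views such as (0, 128, 0, 128) and
+ # (0, 128, 64, 192); random_jitter shifts interior views by up to
+ # (window_size - stride) // 4 latent pixels to reduce visible seams.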
740
+ def tiled_decode(self, latents, current_height, current_width):
741
+ sample_size = self.unet.config.sample_size
742
+ core_size = self.unet.config.sample_size // 4
743
+ core_stride = core_size
744
+ pad_size = self.unet.config.sample_size // 8 * 3
745
+ decoder_view_batch_size = 1
746
+
747
+ if self.lowvram:
748
+ core_stride = core_size // 2
749
+ pad_size = core_size
750
+
751
+ views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
752
+ views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
753
+ latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), 'constant', 0)
754
+ image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
755
+ count = torch.zeros_like(image).to(latents.device)
756
+ # get the latents corresponding to the current view coordinates
757
+ with self.progress_bar(total=len(views_batch)) as progress_bar:
758
+ for j, batch_view in enumerate(views_batch):
759
+ vb_size = len(batch_view)
760
+ latents_for_view = torch.cat(
761
+ [
762
+ latents_[:, :, h_start:h_end+pad_size*2, w_start:w_end+pad_size*2]
763
+ for h_start, h_end, w_start, w_end in batch_view
764
+ ]
765
+ ).to(self.vae.device)
766
+ image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
767
+ h_start, h_end, w_start, w_end = views[j]
768
+ h_start, h_end, w_start, w_end = h_start * self.vae_scale_factor, h_end * self.vae_scale_factor, w_start * self.vae_scale_factor, w_end * self.vae_scale_factor
769
+ p_h_start, p_h_end, p_w_start, p_w_end = pad_size * self.vae_scale_factor, image_patch.size(2) - pad_size * self.vae_scale_factor, pad_size * self.vae_scale_factor, image_patch.size(3) - pad_size * self.vae_scale_factor
770
+ image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end].to(latents.device)
771
+ count[:, :, h_start:h_end, w_start:w_end] += 1
772
+ progress_bar.update()
773
+ image = image / count
774
+
775
+ return image
776
+
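+ # Illustrative note (added): with SDXL's sample_size of 128 the defaults give
+ # core_size=32, core_stride=32 and pad_size=48, so each decoded tile spans
+ # 32 + 2 * 48 = 128 latent pixels while only its central 32x32 core is written
+ # back, hiding VAE border artifacts at tile boundaries.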
777
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
778
+ def upcast_vae(self):
779
+ dtype = self.vae.dtype
780
+ self.vae.to(dtype=torch.float32)
781
+ use_torch_2_0_or_xformers = isinstance(
782
+ self.vae.decoder.mid_block.attentions[0].processor,
783
+ (
784
+ AttnProcessor2_0,
785
+ XFormersAttnProcessor,
786
+ LoRAXFormersAttnProcessor,
787
+ LoRAAttnProcessor2_0,
788
+ ),
789
+ )
790
+ # if xformers or torch_2_0 is used attention block does not need
791
+ # to be in float32 which can save lots of memory
792
+ if use_torch_2_0_or_xformers:
793
+ self.vae.post_quant_conv.to(dtype)
794
+ self.vae.decoder.conv_in.to(dtype)
795
+ self.vae.decoder.mid_block.to(dtype)
796
+
797
+ @torch.no_grad()
798
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
799
+ def __call__(
800
+ self,
801
+ prompt: Union[str, List[str]] = None,
802
+ prompt_2: Optional[Union[str, List[str]]] = None,
803
+ condition_image: PipelineImageInput = None,
804
+ height: Optional[int] = None,
805
+ width: Optional[int] = None,
806
+ num_inference_steps: int = 50,
807
+ guidance_scale: float = 5.0,
808
+ negative_prompt: Optional[Union[str, List[str]]] = None,
809
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
810
+ num_images_per_prompt: Optional[int] = 1,
811
+ eta: float = 0.0,
812
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
813
+ latents: Optional[torch.FloatTensor] = None,
814
+ prompt_embeds: Optional[torch.FloatTensor] = None,
815
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
816
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
817
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
818
+ output_type: Optional[str] = "pil",
819
+ return_dict: bool = True,
820
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
821
+ callback_steps: int = 1,
822
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
823
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
824
+ guess_mode: bool = False,
825
+ control_guidance_start: Union[float, List[float]] = 0.0,
826
+ control_guidance_end: Union[float, List[float]] = 1.0,
827
+ original_size: Tuple[int, int] = None,
828
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
829
+ target_size: Tuple[int, int] = None,
830
+ negative_original_size: Optional[Tuple[int, int]] = None,
831
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
832
+ negative_target_size: Optional[Tuple[int, int]] = None,
833
+ ################### DemoFusion specific parameters ####################
834
+ image_lr: Optional[torch.FloatTensor] = None,
835
+ view_batch_size: int = 16,
836
+ multi_decoder: bool = True,
837
+ stride: Optional[int] = 64,
838
+ cosine_scale_1: Optional[float] = 3.,
839
+ cosine_scale_2: Optional[float] = 1.,
840
+ cosine_scale_3: Optional[float] = 1.,
841
+ sigma: Optional[float] = 1.0,
842
+ show_image: bool = False,
843
+ lowvram: bool = False,
844
+ ):
845
+ r"""
846
+ The call function to the pipeline for generation.
847
+
848
+ Args:
849
+ prompt (`str` or `List[str]`, *optional*):
850
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
851
+ prompt_2 (`str` or `List[str]`, *optional*):
852
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
853
+ used in both text-encoders.
854
+ condition_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
855
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
856
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
857
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
858
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
859
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
860
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
861
+ input to a single ControlNet.
862
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
863
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
864
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
865
+ and checkpoints that are not specifically fine-tuned on low resolutions.
866
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
867
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
868
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
869
+ and checkpoints that are not specifically fine-tuned on low resolutions.
870
+ num_inference_steps (`int`, *optional*, defaults to 50):
871
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
872
+ expense of slower inference.
873
+ guidance_scale (`float`, *optional*, defaults to 5.0):
874
+ A higher guidance scale value encourages the model to generate images closely linked to the text
875
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
876
+ negative_prompt (`str` or `List[str]`, *optional*):
877
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
878
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
879
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
880
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
881
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
882
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
883
+ The number of images to generate per prompt.
884
+ eta (`float`, *optional*, defaults to 0.0):
885
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
886
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
887
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
888
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
889
+ generation deterministic.
890
+ latents (`torch.FloatTensor`, *optional*):
891
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
892
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
893
+ tensor is generated by sampling using the supplied random `generator`.
894
+ prompt_embeds (`torch.FloatTensor`, *optional*):
895
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
896
+ provided, text embeddings are generated from the `prompt` input argument.
897
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
898
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
899
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
900
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
901
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
902
+ not provided, pooled text embeddings are generated from `prompt` input argument.
903
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
904
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
905
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
906
+ argument.
907
+ output_type (`str`, *optional*, defaults to `"pil"`):
908
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
909
+ return_dict (`bool`, *optional*, defaults to `True`):
910
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
911
+ plain tuple.
912
+ callback (`Callable`, *optional*):
913
+ A function that calls every `callback_steps` steps during inference. The function is called with the
914
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
915
+ callback_steps (`int`, *optional*, defaults to 1):
916
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
917
+ every step.
918
+ cross_attention_kwargs (`dict`, *optional*):
919
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
920
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
921
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
922
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
923
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
924
+ the corresponding scale as a list.
925
+ guess_mode (`bool`, *optional*, defaults to `False`):
926
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
927
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
928
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
929
+ The percentage of total steps at which the ControlNet starts applying.
930
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
931
+ The percentage of total steps at which the ControlNet stops applying.
932
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
933
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
934
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
935
+ explained in section 2.2 of
936
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
937
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
938
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
939
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
940
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
941
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
942
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
943
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
944
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
945
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
946
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
947
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
948
+ micro-conditioning as explained in section 2.2 of
949
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
950
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
951
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
952
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
953
+ micro-conditioning as explained in section 2.2 of
954
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
955
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
956
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
957
+ To negatively condition the generation process based on a target image resolution. It should usually
958
+ be the same as `target_size`. Part of SDXL's micro-conditioning as explained in section 2.2 of
959
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
960
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
961
+ ################### DemoFusion specific parameters ####################
962
+ image_lr (`torch.FloatTensor`, *optional*, defaults to None):
963
+ Low-resolution image input for upscaling. If provided, DemoFusion will encode it as the initial latent representation.
964
+ view_batch_size (`int`, defaults to 16):
965
+ The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
966
+ efficiency but comes with increased GPU memory requirements.
967
+ multi_decoder (`bool`, defaults to True):
968
+ Determines whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
969
+ a tiled decoder becomes necessary.
970
+ stride (`int`, defaults to 64):
971
+ The stride of the moving local patches. A smaller stride is better for alleviating seam issues,
972
+ but it also introduces additional computational overhead and inference time.
973
+ cosine_scale_1 (`float`, defaults to 3):
974
+ Control the strength of skip-residual. For specific impacts, please refer to Appendix C
975
+ in the DemoFusion paper.
976
+ cosine_scale_2 (`float`, defaults to 1):
977
+ Controls the strength of dilated sampling. For its specific impact, please refer to Appendix C
978
+ in the DemoFusion paper.
979
+ cosine_scale_3 (`float`, defaults to 1):
980
+ Controls the strength of the Gaussian filter. For its specific impact, please refer to Appendix C
981
+ in the DemoFusion paper.
982
+ sigma (`float`, defaults to 1):
983
+ The standard deviation of the Gaussian filter.
984
+ show_image (`bool`, defaults to False):
985
+ Determines whether to show intermediate results during generation.
986
+ lowvram (`bool`, defaults to False):
987
+ Try to fit within 8 GB of VRAM (requires xformers to be installed).
988
+ Examples:
989
+
990
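+ A minimal usage sketch (illustrative only: `DemoFusionSDXLControlNetPipeline` is assumed
+ to be the pipeline class defined in this file, and the checkpoints and the pre-computed
+ `canny_image` condition are placeholders):
+ 
+ ```py
+ >>> import torch
+ >>> from diffusers import ControlNetModel
+ >>> controlnet = ControlNetModel.from_pretrained(
+ ...     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
+ ... )
+ >>> pipe = DemoFusionSDXLControlNetPipeline.from_pretrained(
+ ...     "stabilityai/stable-diffusion-xl-base-1.0",
+ ...     controlnet=controlnet,
+ ...     torch_dtype=torch.float16,
+ ... ).to("cuda")
+ >>> images = pipe(
+ ...     prompt="a photo of a medieval castle, highly detailed",
+ ...     condition_image=canny_image,  # placeholder: a pre-computed Canny edge map
+ ...     height=2048,
+ ...     width=2048,
+ ...     view_batch_size=16,
+ ...     stride=64,
+ ...     multi_decoder=True,
+ ... )
+ ```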
+ Returns:
991
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
992
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
993
+ otherwise a `tuple` is returned containing the output images.
994
+ """
995
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
996
+
997
+ # align format for control guidance
998
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
999
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1000
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1001
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1002
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1003
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1004
+ control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
1005
+ control_guidance_end
1006
+ ]
1007
+
1008
+ # 0. Default height and width to unet
1009
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
1010
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
1011
+
1012
+ x1_size = self.unet.config.sample_size * self.vae_scale_factor
1013
+
1014
+ height_scale = height / x1_size
1015
+ width_scale = width / x1_size
1016
+ scale_num = int(max(height_scale, width_scale))
1017
+ aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
1018
+
1019
+ original_size = original_size or (height, width)
1020
+ target_size = target_size or (height, width)
1021
+
1022
+ # 1. Check inputs. Raise error if not correct
1023
+ self.check_inputs(
1024
+ prompt,
1025
+ prompt_2,
1026
+ condition_image,
1027
+ callback_steps,
1028
+ negative_prompt,
1029
+ negative_prompt_2,
1030
+ prompt_embeds,
1031
+ negative_prompt_embeds,
1032
+ pooled_prompt_embeds,
1033
+ negative_pooled_prompt_embeds,
1034
+ controlnet_conditioning_scale,
1035
+ control_guidance_start,
1036
+ control_guidance_end,
1037
+ )
1038
+
1039
+ # 2. Define call parameters
1040
+ if prompt is not None and isinstance(prompt, str):
1041
+ batch_size = 1
1042
+ elif prompt is not None and isinstance(prompt, list):
1043
+ batch_size = len(prompt)
1044
+ else:
1045
+ batch_size = prompt_embeds.shape[0]
1046
+
1047
+ device = self._execution_device
1048
+ self.lowvram = lowvram
1049
+ if self.lowvram:
1050
+ self.vae.cpu()
1051
+ self.unet.cpu()
1052
+ self.text_encoder.to(device)
1053
+ self.text_encoder_2.to(device)
1054
+ if image_lr is not None:
1055
+ image_lr = image_lr.cpu()  # Tensor.cpu() is not in-place; rebind the result
1056
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1057
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1058
+ # corresponds to doing no classifier free guidance.
1059
+ do_classifier_free_guidance = guidance_scale > 1.0
1060
+
1061
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1062
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1063
+
1064
+ global_pool_conditions = (
1065
+ controlnet.config.global_pool_conditions
1066
+ if isinstance(controlnet, ControlNetModel)
1067
+ else controlnet.nets[0].config.global_pool_conditions
1068
+ )
1069
+ guess_mode = guess_mode or global_pool_conditions
1070
+
1071
+ # 3. Encode input prompt
1072
+ text_encoder_lora_scale = (
1073
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
1074
+ )
1075
+ (
1076
+ prompt_embeds,
1077
+ negative_prompt_embeds,
1078
+ pooled_prompt_embeds,
1079
+ negative_pooled_prompt_embeds,
1080
+ ) = self.encode_prompt(
1081
+ prompt,
1082
+ prompt_2,
1083
+ device,
1084
+ num_images_per_prompt,
1085
+ do_classifier_free_guidance,
1086
+ negative_prompt,
1087
+ negative_prompt_2,
1088
+ prompt_embeds=prompt_embeds,
1089
+ negative_prompt_embeds=negative_prompt_embeds,
1090
+ pooled_prompt_embeds=pooled_prompt_embeds,
1091
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1092
+ lora_scale=text_encoder_lora_scale,
1093
+ )
1094
+
1095
+ # 4. Prepare image
1096
+ if isinstance(controlnet, ControlNetModel):
1097
+ condition_image = self.prepare_image(
1098
+ image=condition_image,
1099
+ width=width // scale_num,
1100
+ height=height // scale_num,
1101
+ batch_size=batch_size * num_images_per_prompt,
1102
+ num_images_per_prompt=num_images_per_prompt,
1103
+ device=device,
1104
+ dtype=controlnet.dtype,
1105
+ do_classifier_free_guidance=do_classifier_free_guidance,
1106
+ guess_mode=guess_mode,
1107
+ )
1108
+ # height, width = condition_image.shape[-2:]
1109
+ # condition_image.shape ([2, 3, 1024, 1024])
1110
+ elif isinstance(controlnet, MultiControlNetModel):
1111
+ condition_images = []
1112
+
1113
+ for image_ in condition_image:
1114
+ image_ = self.prepare_image(
1115
+ image=image_,
1116
+ width=width // scale_num,
1117
+ height=height // scale_num,
1118
+ batch_size=batch_size * num_images_per_prompt,
1119
+ num_images_per_prompt=num_images_per_prompt,
1120
+ device=device,
1121
+ dtype=controlnet.dtype,
1122
+ do_classifier_free_guidance=do_classifier_free_guidance,
1123
+ guess_mode=guess_mode,
1124
+ )
1125
+
1126
+ condition_images.append(image_)
1127
+
1128
+ condition_image = condition_images
1129
+ # height, width = condition_image[0].shape[-2:]
1130
+ else:
1131
+ assert False, "`controlnet` must be a `ControlNetModel` or a `MultiControlNetModel`"
1132
+
1133
+ # 5. Prepare timesteps
1134
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1135
+ timesteps = self.scheduler.timesteps
1136
+
1137
+ # 6. Prepare latent variables
1138
+ num_channels_latents = self.unet.config.in_channels
1139
+ latents = self.prepare_latents(
1140
+ batch_size * num_images_per_prompt,
1141
+ num_channels_latents,
1142
+ height // scale_num,
1143
+ width // scale_num,
1144
+ prompt_embeds.dtype,
1145
+ device,
1146
+ generator,
1147
+ latents,
1148
+ )
1149
+
1150
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1151
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1152
+
1153
+ # 7.1 Create tensor stating which controlnets to keep
1154
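+ # keep is 1.0 only while the normalized step index i / len(timesteps) lies inside a
+ # controlnet's [control_guidance_start, control_guidance_end] window, and 0.0 otherwise.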
+ controlnet_keep = []
1155
+ for i in range(len(timesteps)):
1156
+ keeps = [
1157
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1158
+ for s, e in zip(control_guidance_start, control_guidance_end)
1159
+ ]
1160
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1161
+
1162
+ # 7.2 Prepare added time ids & embeddings
1163
+ if isinstance(condition_image, list):
1164
+ original_size = original_size or condition_image[0].shape[-2:]
1165
+ else:
1166
+ original_size = original_size or condition_image.shape[-2:]
1167
+ target_size = target_size or (height, width)
1168
+
1169
+ add_text_embeds = pooled_prompt_embeds
1170
+ add_time_ids = self._get_add_time_ids(
1171
+ original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
1172
+ )
1173
+
1174
+ if negative_original_size is not None and negative_target_size is not None:
1175
+ negative_add_time_ids = self._get_add_time_ids(
1176
+ negative_original_size,
1177
+ negative_crops_coords_top_left,
1178
+ negative_target_size,
1179
+ dtype=prompt_embeds.dtype,
1180
+ )
1181
+ else:
1182
+ negative_add_time_ids = add_time_ids
1183
+
1184
+ if do_classifier_free_guidance:
1185
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1186
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1187
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1188
+
1189
+ prompt_embeds = prompt_embeds.to(device)
1190
+ add_text_embeds = add_text_embeds.to(device)
1191
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1192
+
1193
+
1194
+
1195
+ # 8. Denoising loop
1196
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1197
+
1198
+ output_images = []
1199
+
1200
+ ###################################################### Phase Initialization ########################################################
1201
+
1202
+ if self.lowvram:
1203
+ self.text_encoder.cpu()
1204
+ self.text_encoder_2.cpu()
1205
+
1206
+ if image_lr is None:
1207
+ print("### Phase 1 Denoising ###")
1208
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1209
+ for i, t in enumerate(timesteps):
1210
+
1211
+ if self.lowvram:
1212
+ self.vae.cpu()
1213
+ self.unet.to(device)
1214
+
1215
+ latents_for_view = latents
1216
+
1217
+ # expand the latents if we are doing classifier free guidance
1218
+ latent_model_input = (
1219
+ latents.repeat_interleave(2, dim=0)
1220
+ if do_classifier_free_guidance
1221
+ else latents
1222
+ )
1223
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1224
+
1225
+
1226
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1227
+
1228
+ # controlnet(s) inference
1229
+ if guess_mode and do_classifier_free_guidance:
1230
+ # Infer ControlNet only for the conditional batch.
1231
+ control_model_input = latents
1232
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1233
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1234
+ controlnet_added_cond_kwargs = {
1235
+ "text_embeds": add_text_embeds.chunk(2)[1],
1236
+ "time_ids": add_time_ids.chunk(2)[1],
1237
+ }
1238
+ else:
1239
+ control_model_input = latent_model_input
1240
+ controlnet_prompt_embeds = prompt_embeds
1241
+ controlnet_added_cond_kwargs = added_cond_kwargs
1242
+
1243
+ if isinstance(controlnet_keep[i], list):
1244
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1245
+ else:
1246
+ controlnet_cond_scale = controlnet_conditioning_scale
1247
+ if isinstance(controlnet_cond_scale, list):
1248
+ controlnet_cond_scale = controlnet_cond_scale[0]
1249
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1250
+
1251
+ # print(condition_image.shape, control_model_input.shape, controlnet_prompt_embeds.shape, t, cond_scale, guess_mode)
1252
+ # print(controlnet_added_cond_kwargs["text_embeds"].shape, controlnet_added_cond_kwargs["time_ids"].shape)
1253
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1254
+ control_model_input,
1255
+ t,
1256
+ encoder_hidden_states=controlnet_prompt_embeds,
1257
+ controlnet_cond=condition_image,
1258
+ conditioning_scale=cond_scale,
1259
+ guess_mode=guess_mode,
1260
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1261
+ return_dict=False,
1262
+ )
1263
+
1264
+ if guess_mode and do_classifier_free_guidance:
1265
+ # Inferred ControlNet only for the conditional batch.
1266
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1267
+ # add 0 to the unconditional batch to keep it unchanged.
1268
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1269
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1270
+
1271
+ # predict the noise residual
1272
+ noise_pred = self.unet(
1273
+ latent_model_input,
1274
+ t,
1275
+ encoder_hidden_states=prompt_embeds,
1276
+ cross_attention_kwargs=cross_attention_kwargs,
1277
+ down_block_additional_residuals=down_block_res_samples,
1278
+ mid_block_additional_residual=mid_block_res_sample,
1279
+ added_cond_kwargs=added_cond_kwargs,
1280
+ return_dict=False,
1281
+ )[0]
1282
+
1283
+ # perform guidance
1284
+ if do_classifier_free_guidance:
1285
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1286
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1287
+
1288
+ # compute the previous noisy sample x_t -> x_t-1
1289
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1290
+
1291
+ # call the callback, if provided
1292
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1293
+ progress_bar.update()
1294
+ if callback is not None and i % callback_steps == 0:
1295
+ step_idx = i // getattr(self.scheduler, "order", 1)
1296
+ callback(step_idx, t, latents)
1297
+ else:
1298
+ print("### Encoding Real Image ###")
1299
+ latents = self.vae.encode(image_lr)
1300
+ latents = latents.latent_dist.sample() * self.vae.config.scaling_factor
1301
+
1302
+ anchor_mean = latents.mean()
1303
+ anchor_std = latents.std()
1304
+ if self.lowvram:
1305
+ latents = latents.cpu()
1306
+ torch.cuda.empty_cache()
1307
+ if not output_type == "latent":
1308
+ # make sure the VAE is in float32 mode, as it overflows in float16
1309
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1310
+
1311
+ if self.lowvram:
1312
+ needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
1313
+ self.unet.cpu()
1314
+ self.vae.to(device)
1315
+
1316
+ if needs_upcasting:
1317
+ self.upcast_vae()
1318
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1319
+ if self.lowvram and multi_decoder:
1320
+ current_width_height = self.unet.config.sample_size * self.vae_scale_factor
1321
+ image = self.tiled_decode(latents, current_width_height, current_width_height)
1322
+ else:
1323
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1324
+ # cast back to fp16 if needed
1325
+ if needs_upcasting:
1326
+ self.vae.to(dtype=torch.float16)
1327
+
1328
+ image = self.image_processor.postprocess(image, output_type=output_type)
1329
+ if show_image:
1330
+ plt.figure(figsize=(10, 10))
1331
+ plt.imshow(image[0])
1332
+ plt.axis('off') # Turn off axis numbers and ticks
1333
+ plt.show()
1334
+ output_images.append(image[0])
1335
+
1336
+ ####################################################### Phase Upscaling #####################################################
1337
+ if image_lr is None:
1338
+ starting_scale = 2
1339
+ else:
1340
+ starting_scale = 1
1341
+ for current_scale_num in range(starting_scale, scale_num + 1):
1342
+ if self.lowvram:
1343
+ latents = latents.to(device)
1344
+ self.unet.to(device)
1345
+ torch.cuda.empty_cache()
1346
+ print("### Phase {} Denoising ###".format(current_scale_num))
1347
+ current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1348
+ current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1349
+ if height > width:
1350
+ current_width = int(current_width * aspect_ratio)
1351
+ else:
1352
+ current_height = int(current_height * aspect_ratio)
1353
+
1354
+ latents = F.interpolate(latents, size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)), mode='bicubic')
1355
+ condition_image = F.interpolate(condition_image, size=(current_height, current_width), mode='bicubic')
1356
+
1357
+ noise_latents = []
1358
+ noise = torch.randn_like(latents)
1359
+ for timestep in timesteps:
1360
+ noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
1361
+ noise_latents.append(noise_latent)
1362
+ latents = noise_latents[0]
1363
+
1364
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1365
+ for i, t in enumerate(timesteps):
1366
+ count = torch.zeros_like(latents)
1367
+ value = torch.zeros_like(latents)
1368
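+ # cosine_factor decays from ~1 at the first (high-noise) steps to 0 at the last step,
+ # so the skip-residual injection of noise_latents[i] (the re-noised upsampled latents)
+ # fades out as denoising progresses.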
+ cosine_factor = 0.5 * (1 + torch.cos(torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps)).cpu()
1369
+
1370
+ c1 = cosine_factor ** cosine_scale_1
1371
+ latents = latents * (1 - c1) + noise_latents[i] * c1
1372
+
1373
+ ############################################# MultiDiffusion #############################################
1374
+
1375
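+ # Random jitter shifts the patch grid at every step so window seams never align
+ # across steps; the latents (and the condition image, scaled by vae_scale_factor)
+ # are zero-padded by the jitter range and cropped back after fusion.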
+ views = self.get_views(current_height, current_width, stride=stride, window_size=self.unet.config.sample_size, random_jitter=True)
1376
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1377
+
1378
+ jitter_range = (self.unet.config.sample_size - stride) // 4
1379
+ latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), 'constant', 0)
1380
+ condition_image_ = F.pad(condition_image, (jitter_range * self.vae_scale_factor, jitter_range * self.vae_scale_factor, jitter_range * self.vae_scale_factor, jitter_range * self.vae_scale_factor), 'constant', 0)
1381
+
1382
+ count_local = torch.zeros_like(latents_)
1383
+ value_local = torch.zeros_like(latents_)
1384
+
1385
+ for j, batch_view in enumerate(views_batch):
1386
+ vb_size = len(batch_view)
1387
+
1388
+ # get the latents corresponding to the current view coordinates
1389
+ latents_for_view = torch.cat(
1390
+ [
1391
+ latents_[:, :, h_start:h_end, w_start:w_end]
1392
+ for h_start, h_end, w_start, w_end in batch_view
1393
+ ]
1394
+ )
1395
+ condition_image_for_view = torch.cat(
1396
+ [
1397
+ condition_image_[0:1, :, h_start * self.vae_scale_factor:h_end * self.vae_scale_factor, w_start * self.vae_scale_factor:w_end * self.vae_scale_factor]
1398
+ for h_start, h_end, w_start, w_end in batch_view
1399
+ ]
1400
+ )
1401
+
1402
+ # expand the latents if we are doing classifier free guidance
1403
+ latent_model_input = latents_for_view
1404
+ latent_model_input = (
1405
+ latent_model_input.repeat_interleave(2, dim=0)
1406
+ if do_classifier_free_guidance
1407
+ else latent_model_input
1408
+ )
1409
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1410
+
1411
+ condition_image_input = condition_image_for_view
1412
+ condition_image_input = (
1413
+ condition_image_input.repeat_interleave(2, dim=0)
1414
+ if do_classifier_free_guidance
1415
+ else condition_image_input
1416
+ )
1417
+
1418
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1419
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1420
+ add_time_ids_input = []
1421
+ for h_start, h_end, w_start, w_end in batch_view:
1422
+ add_time_ids_ = add_time_ids.clone()
1423
+ add_time_ids_[:, 2] = h_start * self.vae_scale_factor
1424
+ add_time_ids_[:, 3] = w_start * self.vae_scale_factor
1425
+ add_time_ids_input.append(add_time_ids_)
1426
+ add_time_ids_input = torch.cat(add_time_ids_input)
1427
+
1428
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1429
+
1430
+ # controlnet(s) inference
1431
+ if guess_mode and do_classifier_free_guidance:
1432
+ # Infer ControlNet only for the conditional batch.
1433
+ control_model_input = latent_model_input
1434
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1435
+ controlnet_prompt_embeds = prompt_embeds_input.chunk(2)[1]
1436
+ controlnet_added_cond_kwargs = {
1437
+ "text_embeds": add_text_embeds_input.chunk(2)[1],
1438
+ "time_ids": add_time_ids_input.chunk(2)[1],
1439
+ }
1440
+ else:
1441
+ control_model_input = latent_model_input
1442
+ controlnet_prompt_embeds = prompt_embeds_input
1443
+ controlnet_added_cond_kwargs = added_cond_kwargs
1444
+
1445
+ if isinstance(controlnet_keep[i], list):
1446
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1447
+ else:
1448
+ controlnet_cond_scale = controlnet_conditioning_scale
1449
+ if isinstance(controlnet_cond_scale, list):
1450
+ controlnet_cond_scale = controlnet_cond_scale[0]
1451
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1452
+
1453
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1454
+ control_model_input,
1455
+ t,
1456
+ encoder_hidden_states=controlnet_prompt_embeds,
1457
+ controlnet_cond=condition_image_input,
1458
+ conditioning_scale=cond_scale,
1459
+ guess_mode=guess_mode,
1460
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1461
+ return_dict=False,
1462
+ )
1463
+
1464
+ if guess_mode and do_classifier_free_guidance:
1465
+ # Inferred ControlNet only for the conditional batch.
1466
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1467
+ # add 0 to the unconditional batch to keep it unchanged.
1468
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1469
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1470
+
1471
+ # predict the noise residual
1472
+ noise_pred = self.unet(
1473
+ latent_model_input,
1474
+ t,
1475
+ encoder_hidden_states=prompt_embeds_input,
1476
+ cross_attention_kwargs=cross_attention_kwargs,
1477
+ down_block_additional_residuals=down_block_res_samples,
1478
+ mid_block_additional_residual=mid_block_res_sample,
1479
+ added_cond_kwargs=added_cond_kwargs,
1480
+ return_dict=False,
1481
+ )[0]
1482
+
1483
+ if do_classifier_free_guidance:
1484
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1485
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1486
+
1487
+ # compute the previous noisy sample x_t -> x_t-1
1488
+ self.scheduler._init_step_index(t)
1489
+ latents_denoised_batch = self.scheduler.step(
1490
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
1491
+
1492
+ # extract value from batch
1493
+ for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
1494
+ latents_denoised_batch.chunk(vb_size), batch_view
1495
+ ):
1496
+ value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
1497
+ count_local[:, :, h_start:h_end, w_start:w_end] += 1
1498
+
1499
+ value_local = value_local[:, :, jitter_range : jitter_range + current_height // self.vae_scale_factor, jitter_range : jitter_range + current_width // self.vae_scale_factor]
1500
+ count_local = count_local[:, :, jitter_range : jitter_range + current_height // self.vae_scale_factor, jitter_range : jitter_range + current_width // self.vae_scale_factor]
1501
+
1502
+ c2 = cosine_factor ** cosine_scale_2
1503
+
1504
+ value += value_local / count_local * (1 - c2)
1505
+ count += torch.ones_like(value_local) * (1 - c2)
1506
+
1507
+ ############################################# Dilated Sampling #############################################
1508
+
1509
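+ # Zero-pad so the latent height/width are divisible by current_scale_num; each dilated
+ # view below is then a strided sub-grid latents_[:, :, h::current_scale_num, w::current_scale_num].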
+ h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
1510
+ w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
1511
+ latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), 'constant', 0)
1512
+
1513
+ count_global = torch.zeros_like(latents_)
1514
+ value_global = torch.zeros_like(latents_)
1515
+
1516
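+ # The blur strength follows the cosine schedule via c3; the blurred latents are then
+ # re-standardized to the pre-blur mean/std so only spatial structure, not scale, is smoothed.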
+ c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2
1517
+ std_, mean_ = latents_.std(), latents_.mean()
1518
+ latents_gaussian = gaussian_filter(latents_, kernel_size=(2*current_scale_num-1), sigma=sigma*c3)
1519
+ latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_
1520
+
1521
+ latents_for_view = []
1522
+ for h in range(current_scale_num):
1523
+ for w in range(current_scale_num):
1524
+ latents_for_view.append(latents_[:, :, h::current_scale_num, w::current_scale_num])
1525
+ latents_for_view = torch.cat(latents_for_view)
1526
+
1527
+ latents_for_view_gaussian = []
1528
+ for h in range(current_scale_num):
1529
+ for w in range(current_scale_num):
1530
+ latents_for_view_gaussian.append(latents_gaussian[:, :, h::current_scale_num, w::current_scale_num])
1531
+ latents_for_view_gaussian = torch.cat(latents_for_view_gaussian)
1532
+
1533
+ condition_image_for_view = []
1534
+ for h in range(current_scale_num):
1535
+ for w in range(current_scale_num):
1536
+ condition_image_ = F.pad(condition_image, (w_pad * self.vae_scale_factor, w * self.vae_scale_factor, h_pad * self.vae_scale_factor, h * self.vae_scale_factor), 'constant', 0)
1537
+ condition_image_for_view.append(condition_image_[0:1, :, h * self.vae_scale_factor::current_scale_num, w * self.vae_scale_factor::current_scale_num])
1538
+ condition_image_for_view = torch.cat(condition_image_for_view)
1539
+
1540
+ vb_size = latents_for_view.size(0)
1541
+
1542
+ # expand the latents if we are doing classifier free guidance
1543
+ latent_model_input = latents_for_view_gaussian
1544
+ latent_model_input = (
1545
+ latent_model_input.repeat_interleave(2, dim=0)
1546
+ if do_classifier_free_guidance
1547
+ else latent_model_input
1548
+ )
1549
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1550
+
1551
+ condition_image_input = condition_image_for_view
1552
+ condition_image_input = (
1553
+ condition_image_input.repeat_interleave(2, dim=0)
1554
+ if do_classifier_free_guidance
1555
+ else condition_image_input
1556
+ )
1557
+
1558
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1559
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1560
+ add_time_ids_input = torch.cat([add_time_ids] * vb_size)
1561
+
1562
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1563
+
1564
+ # controlnet(s) inference
1565
+ if guess_mode and do_classifier_free_guidance:
1566
+ # Infer ControlNet only for the conditional batch.
1567
+ control_model_input = latent_model_input
1568
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1569
+ controlnet_prompt_embeds = prompt_embeds_input.chunk(2)[1]
1570
+ controlnet_added_cond_kwargs = {
1571
+ "text_embeds": add_text_embeds_input.chunk(2)[1],
1572
+ "time_ids": add_time_ids_input.chunk(2)[1],
1573
+ }
1574
+ else:
1575
+ control_model_input = latent_model_input
1576
+ controlnet_prompt_embeds = prompt_embeds_input
1577
+ controlnet_added_cond_kwargs = added_cond_kwargs
1578
+
1579
+ if isinstance(controlnet_keep[i], list):
1580
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1581
+ else:
1582
+ controlnet_cond_scale = controlnet_conditioning_scale
1583
+ if isinstance(controlnet_cond_scale, list):
1584
+ controlnet_cond_scale = controlnet_cond_scale[0]
1585
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1586
+
1587
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1588
+ control_model_input,
1589
+ t,
1590
+ encoder_hidden_states=controlnet_prompt_embeds,
1591
+ controlnet_cond=condition_image_input,
1592
+ conditioning_scale=cond_scale,
1593
+ guess_mode=guess_mode,
1594
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1595
+ return_dict=False,
1596
+ )
1597
+
1598
+ if guess_mode and do_classifier_free_guidance:
1599
+ # Inferred ControlNet only for the conditional batch.
1600
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1601
+ # add 0 to the unconditional batch to keep it unchanged.
1602
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1603
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1604
+
1605
+ # predict the noise residual
1606
+ noise_pred = self.unet(
1607
+ latent_model_input,
1608
+ t,
1609
+ encoder_hidden_states=prompt_embeds_input,
1610
+ cross_attention_kwargs=cross_attention_kwargs,
1611
+ down_block_additional_residuals=down_block_res_samples,
1612
+ mid_block_additional_residual=mid_block_res_sample,
1613
+ added_cond_kwargs=added_cond_kwargs,
1614
+ return_dict=False,
1615
+ )[0]
1616
+
1617
+ if do_classifier_free_guidance:
1618
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1619
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1620
+
1621
+ # extract value from batch
1622
+ for h in range(current_scale_num):
1623
+ for w in range(current_scale_num):
1624
+ noise_pred_ = noise_pred.chunk(vb_size)[h*current_scale_num+w]
1625
+ value_global[:, :, h::current_scale_num, w::current_scale_num] += noise_pred_
1626
+ count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
1627
+
1628
+ # compute the previous noisy sample x_t -> x_t-1
1629
+ self.scheduler._init_step_index(t)
1630
+ value_global = self.scheduler.step(
1631
+ value_global, t, latents_, **extra_step_kwargs, return_dict=False)[0]
1632
+
1633
+ c2 = cosine_factor ** cosine_scale_2
1634
+
1635
+ value_global = value_global[:, :, h_pad:, w_pad:]
1636
+
1637
+ value += value_global * c2
1638
+ count += torch.ones_like(value_global) * c2
1639
+
1640
+ ###########################################################
1641
+
1642
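+ # Fuse the two branches: MultiDiffusion patches were accumulated with weight (1 - c2)
+ # and dilated sampling with weight c2; dividing by count normalizes the blend.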
+ latents = torch.where(count > 0, value / count, value)
1643
+
1644
+ # call the callback, if provided
1645
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1646
+ progress_bar.update()
1647
+ if callback is not None and i % callback_steps == 0:
1648
+ step_idx = i // getattr(self.scheduler, "order", 1)
1649
+ callback(step_idx, t, latents)
1650
+
1651
+ #########################################################################################################################################
1652
+
1653
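+ # Re-match the latent statistics to the Phase-1 anchor mean/std so brightness and
+ # contrast do not drift across the progressive upscaling phases.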
+ latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
1654
+ if self.lowvram:
1655
+ latents = latents.cpu()
1656
+ torch.cuda.empty_cache()
1657
+ if not output_type == "latent":
1658
+ # make sure the VAE is in float32 mode, as it overflows in float16
1659
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1660
+
1661
+ if self.lowvram:
1662
+ needs_upcasting = False # use madebyollin/sdxl-vae-fp16-fix in lowvram mode!
1663
+ self.unet.cpu()
1664
+ self.vae.to(device)
1665
+
1666
+ if needs_upcasting:
1667
+ self.upcast_vae()
1668
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1669
+
1670
+ print("### Phase {} Decoding ###".format(current_scale_num))
1671
+ if multi_decoder:
1672
+ image = self.tiled_decode(latents, current_height, current_width)
1673
+ else:
1674
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1675
+
1676
+ # cast back to fp16 if needed
1677
+ if needs_upcasting:
1678
+ self.vae.to(dtype=torch.float16)
1679
+ else:
1680
+ image = latents
1681
+
1682
+ if not output_type == "latent":
1683
+ image = self.image_processor.postprocess(image, output_type=output_type)
1684
+ if show_image:
1685
+ plt.figure(figsize=(10, 10))
1686
+ plt.imshow(image[0])
1687
+ plt.axis('off') # Turn off axis numbers and ticks
1688
+ plt.show()
1689
+ output_images.append(image[0])
1690
+
1691
+ # Offload all models
1692
+ self.maybe_free_model_hooks()
1693
+
1694
+ return output_images
1695
+
1696
+ # Override to properly handle the loading and unloading of the additional text encoder.
1697
+ def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
1698
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1699
+ # it here explicitly to be able to tell that it's coming from an SDXL
1700
+ # pipeline.
1701
+
1702
+ # Remove any existing hooks.
1703
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
1704
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
1705
+ else:
1706
+ raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
1707
+
1708
+ is_model_cpu_offload = False
1709
+ is_sequential_cpu_offload = False
1710
+ recursive = False
1711
+ for _, component in self.components.items():
1712
+ if isinstance(component, torch.nn.Module):
1713
+ if hasattr(component, "_hf_hook"):
1714
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
1715
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
1716
+ logger.info(
1717
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
1718
+ )
1719
+ recursive = is_sequential_cpu_offload
1720
+ remove_hook_from_module(component, recurse=recursive)
1721
+ state_dict, network_alphas = self.lora_state_dict(
1722
+ pretrained_model_name_or_path_or_dict,
1723
+ unet_config=self.unet.config,
1724
+ **kwargs,
1725
+ )
1726
+ self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
1727
+
1728
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1729
+ if len(text_encoder_state_dict) > 0:
1730
+ self.load_lora_into_text_encoder(
1731
+ text_encoder_state_dict,
1732
+ network_alphas=network_alphas,
1733
+ text_encoder=self.text_encoder,
1734
+ prefix="text_encoder",
1735
+ lora_scale=self.lora_scale,
1736
+ )
1737
+
1738
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1739
+ if len(text_encoder_2_state_dict) > 0:
1740
+ self.load_lora_into_text_encoder(
1741
+ text_encoder_2_state_dict,
1742
+ network_alphas=network_alphas,
1743
+ text_encoder=self.text_encoder_2,
1744
+ prefix="text_encoder_2",
1745
+ lora_scale=self.lora_scale,
1746
+ )
1747
+
1748
+ # Offload back.
1749
+ if is_model_cpu_offload:
1750
+ self.enable_model_cpu_offload()
1751
+ elif is_sequential_cpu_offload:
1752
+ self.enable_sequential_cpu_offload()
1753
+
1754
+ @classmethod
1755
+ def save_lora_weights(
1756
+ cls,
1757
+ save_directory: Union[str, os.PathLike],
1758
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1759
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1760
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1761
+ is_main_process: bool = True,
1762
+ weight_name: str = None,
1763
+ save_function: Callable = None,
1764
+ safe_serialization: bool = True,
1765
+ ):
1766
+ state_dict = {}
1767
+
1768
+ def pack_weights(layers, prefix):
1769
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1770
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1771
+ return layers_state_dict
1772
+
1773
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1774
+ raise ValueError(
1775
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1776
+ )
1777
+
1778
+ if unet_lora_layers:
1779
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1780
+
1781
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1782
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1783
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1784
+
1785
+ cls.write_lora_layers(
1786
+ state_dict=state_dict,
1787
+ save_directory=save_directory,
1788
+ is_main_process=is_main_process,
1789
+ weight_name=weight_name,
1790
+ save_function=save_function,
1791
+ safe_serialization=safe_serialization,
1792
+ )
1793
+
1794
+ def _remove_text_encoder_monkey_patch(self):
1795
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1796
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
competitors_inference_code/DemoFusion/requirements.txt ADDED
@@ -0,0 +1,11 @@
1
+ diffusers~=0.21.4
2
+ torch~=2.1.0
3
+ scipy~=1.11.3
4
+ omegaconf~=2.3.0
5
+ accelerate~=0.23.0
6
+ transformers~=4.34.0
7
+ tqdm
8
+ einops
9
+ matplotlib
10
+ gradio
11
+ gradio_imageslider
competitors_inference_code/LSRNA/README.md ADDED
@@ -0,0 +1,59 @@
1
+ # LSRNA
2
+ [![Project Page](https://img.shields.io/badge/Project-Page-green)](https://3587jjh.github.io/LSRNA/)
3
+ [![arXiv](https://img.shields.io/badge/arXiv-2503.18446-b31b1b)](https://arxiv.org/abs/2503.18446)
4
+
5
+ Official code for "Latent Space Super-Resolution for Higher-Resolution Image Generation with Diffusion Models".
6
+
7
+ <img src="figures/teaser.jpg" alt="Teaser" width="80%" />
8
+
9
+ <div align="justify">
10
+ <b>Abstract</b>: In this paper, we propose LSRNA, a novel framework for higher-resolution (exceeding 1K) image generation using diffusion models by leveraging super-resolution directly in the latent space. Existing diffusion models struggle with scaling beyond their training resolutions, often leading to structural distortions or content repetition. Reference-based methods address the issues by upsampling a low-resolution reference to guide higher-resolution generation. However, they face significant challenges: upsampling in latent space often causes manifold deviation, which degrades output quality. On the other hand, upsampling in RGB space tends to produce overly smoothed outputs. To overcome these limitations, LSRNA combines Latent space Super-Resolution (LSR) for manifold alignment and Region-wise Noise Addition (RNA) to enhance high-frequency details. Our extensive experiments demonstrate that integrating LSRNA outperforms state-of-the-art reference-based methods across various resolutions and metrics, while showing the critical role of latent space upsampling in preserving detail and sharpness.
11
+ </div>
12
+
13
+ ## Environment (Inference)
14
+ ```
15
+ conda create -n lsrna python=3.10
16
+ conda activate lsrna
17
+ pip install -r requirements.txt
18
+ ```
19
+
20
+ ## Text-to-Image Generation
21
+ > **Note:**
22
+ > Although our LSRNA framework is designed to be compatible with any reference-based method,
23
+ > this repo provides example code for LSRNA-DemoFusion, as DemoFusion is a pioneering reference-based approach.
24
+ ```
25
+ CUDA_VISIBLE_DEVICES=0 python main.py \
26
+ --prompt "A well-worn baseball glove and ball sitting on fresh-cut grass." \
27
+ --negative_prompt "blurry, ugly, duplicate, poorly drawn, deformed, mosaic" \
28
+ --height 2048 \
29
+ --width 2048 \
30
+ --seed 0 \
31
+ --lsr_path "lsr/swinir-liif-latent-sdxl.pth" \
32
+ --rna_min_std 0.0 \
33
+ --rna_max_std 1.2 \
34
+ --inversion_depth 30 \
35
+ --save_dir "results" \
36
+ #--low_vram
37
+ ```
38
+ Feel free to tune the RNA hyperparameters (e.g., `--rna_max_std`) to control the level of detail in the generated images.
39
+ If you’re running out of VRAM, enable the low-VRAM mode with `--low_vram`.
40
+ We also provide a `run.sh` script for the generation.
41
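+ 
+ For programmatic use, here is a minimal Python sketch mirroring the pipeline call in
+ `generate_lsrna_images.py` (the prompt and resolution are placeholders):
+ 
+ ```
+ import torch
+ from diffusers import DDIMScheduler
+ from pipeline_lsrna_demofusion_sdxl import DemoFusionLSRNASDXLPipeline
+ 
+ base = "stabilityai/stable-diffusion-xl-base-1.0"
+ pipe = DemoFusionLSRNASDXLPipeline.from_pretrained(
+     base,
+     scheduler=DDIMScheduler.from_pretrained(base, subfolder="scheduler"),
+     torch_dtype=torch.float16,
+ ).to("cuda")
+ 
+ # The pipeline returns the progressively generated images; the last one is the
+ # full-resolution result.
+ images = pipe(
+     "A well-worn baseball glove and ball sitting on fresh-cut grass.",
+     height=2048, width=2048,
+     lsr_path="lsr/swinir-liif-latent-sdxl.pth",
+     rna_min_std=0.0, rna_max_std=1.2, inversion_depth=30,
+ )
+ ```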
+
42
+ ## Visual Comparison
43
+ <img src="figures/comparison.jpg" alt="Comparison" width="80%" />
44
+
45
+ Additional results can be found on the [project page](https://3587jjh.github.io/LSRNA/).
46
+
47
+ ## Citation
48
+ ```
49
+ @inproceedings{jeong2025latent,
50
+ title={Latent space super-resolution for higher-resolution image generation with diffusion models},
51
+ author={Jeong, Jinho and Han, Sangmin and Kim, Jinwoo and Kim, Seon Joo},
52
+ booktitle={Proceedings of the Computer Vision and Pattern Recognition Conference},
53
+ pages={2355--2365},
54
+ year={2025}
55
+ }
56
+ ```
57
+
58
+ ## Acknowledgement
59
+ This repo is based on [DemoFusion](https://github.com/PRIS-CV/DemoFusion) and [LIIF](https://github.com/yinboc/liif).
competitors_inference_code/LSRNA/__pycache__/pipeline_lsrna_demofusion_sdxl.cpython-312.pyc ADDED
Binary file (64.9 kB).
 
competitors_inference_code/LSRNA/__pycache__/utils.cpython-312.pyc ADDED
Binary file (2.74 kB).
 
competitors_inference_code/LSRNA/generate_lsrna_images.py ADDED
@@ -0,0 +1,189 @@
1
+ #!/usr/bin/env python3
2
+ """Generate SDXL images for the selected validation prompts with LSRNA."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import csv
7
+ import json
8
+ import sys
9
+ import time
10
+ from collections.abc import Sequence
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ import torch
15
+ from diffusers import DDIMScheduler
16
+
17
+ ROOT_DIR = Path(__file__).resolve().parent
18
+ LSRNA_DIR = ROOT_DIR / "LSRNA"
19
+ if str(LSRNA_DIR) not in sys.path:
20
+ sys.path.insert(0, str(LSRNA_DIR))
21
+
22
+ from pipeline_lsrna_demofusion_sdxl import DemoFusionLSRNASDXLPipeline # noqa: E402
23
+
24
+ NEGATIVE_PROMPT = "blurry, ugly, duplicate, poorly drawn face, deformed, mosaic, artifacts, bad limbs"
25
+ DEFAULT_CSV = "/data/kazanplova/latent_vae_upscale_train/datasets/new_validation_dataset/original_openim/images/selected_validation_images.csv"
26
+ DEFAULT_OUTPUT_DIR = "/data/kazanplova/latent_vae_upscale_train/datasets/new_validation_dataset/lsrna/images"
27
+ STATISTICS_PATH = "/data/kazanplova/latent_vae_upscale_train/datasets/new_validation_dataset/lsrna/statistics.json"
28
+ PRETRAINED_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
29
+ CFG_SCALE = 7.5
30
+ NUM_INFERENCE_STEPS = 50
31
+ SEED = 42
32
+ VIEW_BATCH_SIZE = 8
33
+ STRIDE_RATIO = 0.5
34
+ COSINE_SCALE_1 = 3.0
35
+ COSINE_SCALE_2 = 1.0
36
+ COSINE_SCALE_3 = 1.0
37
+ SIGMA = 0.8
38
+ RNA_MIN_STD = 0.0
39
+ RNA_MAX_STD = 1.2
40
+ INVERSION_DEPTH = 30
41
+ LOW_VRAM = False
42
+ DEFAULT_LSR_PATH = Path("lsr") / "swinir-liif-latent-sdxl.pth"
43
+ RESOLUTIONS: dict[str, tuple[int, int]] = {
44
+ "4096px": (4096, 4096),
45
+ "2048px": (2048, 2048),
46
+ "1024px": (1024, 1024),
47
+ }
48
+
49
+
50
+ def load_prompts(csv_path: Path) -> list[tuple[str, str]]:
51
+ prompts: list[tuple[str, str]] = []
52
+ with csv_path.open("r", encoding="utf-8") as handle:
53
+ reader = csv.DictReader(handle)
54
+ for row in reader:
55
+ caption_raw = (row.get("gpt_caption") or "").strip()
56
+ if not caption_raw:
57
+ continue
58
+ try:
59
+ caption = json.loads(caption_raw)
60
+ except json.JSONDecodeError:
61
+ print(f"Skipping row with invalid JSON: {row.get('img_path')}")
62
+ continue
63
+ prompt = caption.get("sdxl")
64
+ if not prompt:
65
+ print(f"Skipping row without 'sdxl' prompt: {row.get('img_path')}")
66
+ continue
67
+ prompts.append((row.get("img_path", ""), prompt))
68
+ return prompts
69
+
70
+
71
+ def build_pipeline() -> DemoFusionLSRNASDXLPipeline:
72
+ if not torch.cuda.is_available():
73
+ raise RuntimeError("CUDA is required to run this script.")
74
+
75
+ scheduler = DDIMScheduler.from_pretrained(PRETRAINED_MODEL, subfolder="scheduler")
76
+ pipe = DemoFusionLSRNASDXLPipeline.from_pretrained(
77
+ PRETRAINED_MODEL,
78
+ scheduler=scheduler,
79
+ torch_dtype=torch.float16,
80
+ ).to("cuda")
81
+ pipe.vae.enable_tiling()
82
+ pipe.set_progress_bar_config(disable=True)
83
+ return pipe
84
+
85
+
86
+ def get_target_image(result: Any) -> Any:
87
+ if hasattr(result, "images"):
88
+ images = result.images
89
+ elif isinstance(result, Sequence) and not isinstance(result, (str, bytes, bytearray)):
90
+ images = list(result)
91
+ else:
92
+ images = [result]
93
+ if not images:
94
+ raise RuntimeError("LSRNA pipeline returned no images.")
95
+ return images[-1]
96
+
97
+
98
+ def main() -> None:
99
+ csv_path = Path(DEFAULT_CSV)
100
+ output_dir = Path(DEFAULT_OUTPUT_DIR)
101
+ lsr_path = DEFAULT_LSR_PATH
102
+ if not lsr_path.exists():
103
+ raise SystemExit(f"LSR checkpoint not found at {lsr_path}")
104
+
105
+ prompts = load_prompts(csv_path)
106
+ if not prompts:
107
+ raise SystemExit("No prompts were found in the CSV file.")
108
+
109
+ resolution_dirs = {name: output_dir / name for name in RESOLUTIONS}
110
+ for folder in resolution_dirs.values():
111
+ folder.mkdir(parents=True, exist_ok=True)
112
+
113
+ statistics_path = Path(STATISTICS_PATH)
114
+ stats_tracker = {
115
+ name: {"count": 0, "total_time": 0.0, "max_vram_bytes": 0}
116
+ for name in RESOLUTIONS
117
+ }
118
+
119
+ generator = torch.Generator(device="cuda").manual_seed(SEED)
120
+ pipe = build_pipeline()
121
+ device = torch.device("cuda")
122
+
123
+ for idx, (img_path, prompt) in enumerate(prompts):
124
+ filename = f"{idx}.png"
125
+ written_paths: list[str] = []
126
+
127
+ for name, (width, height) in RESOLUTIONS.items():
128
+ print(prompt)
129
+ torch.cuda.synchronize(device)
130
+ torch.cuda.reset_peak_memory_stats(device)
131
+ start_time = time.perf_counter()
132
+
133
+ result = pipe(
134
+ prompt,
135
+ negative_prompt=NEGATIVE_PROMPT,
136
+ guidance_scale=CFG_SCALE,
137
+ num_inference_steps=NUM_INFERENCE_STEPS,
138
+ width=width,
139
+ height=height,
140
+ generator=generator,
141
+ view_batch_size=VIEW_BATCH_SIZE,
142
+ stride_ratio=STRIDE_RATIO,
143
+ lsr_path=str(lsr_path),
144
+ cosine_scale_1=COSINE_SCALE_1,
145
+ cosine_scale_2=COSINE_SCALE_2,
146
+ cosine_scale_3=COSINE_SCALE_3,
147
+ sigma=SIGMA,
148
+ rna_min_std=RNA_MIN_STD,
149
+ rna_max_std=RNA_MAX_STD,
150
+ inversion_depth=INVERSION_DEPTH,
151
+ low_vram=LOW_VRAM,
152
+ )
153
+
154
+ image = get_target_image(result)
155
+
156
+ torch.cuda.synchronize(device)
157
+ elapsed = time.perf_counter() - start_time
158
+ vram_bytes = torch.cuda.max_memory_allocated(device)
159
+
160
+ stats = stats_tracker[name]
161
+ stats["count"] += 1
162
+ stats["total_time"] += elapsed
163
+ stats["max_vram_bytes"] = max(stats["max_vram_bytes"], vram_bytes)
164
+
165
+ output_path = resolution_dirs[name] / filename
166
+ image.save(output_path)
167
+ written_paths.append(str(output_path))
168
+
169
+ print(f"[{idx + 1}/{len(prompts)}] wrote {', '.join(written_paths)}")
170
+
171
+ statistics = {
172
+ "total_prompts": len(prompts),
173
+ "resolutions": {
174
+ name: {
175
+ "images": metrics["count"],
176
+ "mean_time_sec": (metrics["total_time"] / metrics["count"]) if metrics["count"] else 0.0,
177
+ "max_vram_mb": metrics["max_vram_bytes"] / (1024**2),
178
+ }
179
+ for name, metrics in stats_tracker.items()
180
+ },
181
+ }
182
+
183
+ statistics_path.parent.mkdir(parents=True, exist_ok=True)
184
+ statistics_path.write_text(json.dumps(statistics, indent=2))
185
+ print(f"Saved statistics to {statistics_path}")
186
+
187
+
188
+ if __name__ == "__main__":
189
+ main()
competitors_inference_code/LSRNA/lsr/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from . import models
2
+ from . import swinir
3
+ from . import liif, mlp
competitors_inference_code/LSRNA/lsr/__pycache__/liif.cpython-312.pyc ADDED
Binary file (7.48 kB).
 
competitors_inference_code/LSRNA/lsr/__pycache__/mlp.cpython-312.pyc ADDED
Binary file (1.58 kB).
 
competitors_inference_code/LSRNA/lsr/__pycache__/models.cpython-312.pyc ADDED
Binary file (1 kB).
 
competitors_inference_code/LSRNA/lsr/__pycache__/swinir.cpython-312.pyc ADDED
Binary file (42.1 kB).
 
competitors_inference_code/LSRNA/lsr/liif.py ADDED
@@ -0,0 +1,127 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from .models import register
6
+ from .models import make as make_model
7
+
8
+ def make_coord(shape, ranges=None, flatten=True, device='cpu'):
9
+ # Make coordinates at grid centers.
10
+ coord_seqs = []
11
+ for i, n in enumerate(shape):
12
+ if ranges is None:
13
+ v0, v1 = -1, 1
14
+ else:
15
+ v0, v1 = ranges[i]
16
+ r = (v1 - v0) / (2 * n)
17
+ seq = v0 + r + (2 * r) * torch.arange(n, device=device).float()
18
+ coord_seqs.append(seq)
19
+ ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
20
+ if flatten:
21
+ ret = ret.view(-1, ret.shape[-1])
22
+ return ret
23
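+ # e.g., make_coord((2, 2)) returns the cell centers of a 2x2 grid over [-1, 1]^2:
+ # tensor([[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]])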
+
24
+ @register('liif')
25
+ class LIIF(nn.Module):
26
+
27
+ def __init__(self, encoder_spec, imnet_spec, feat_unfold=True, local_ensemble=True):
28
+ super().__init__()
29
+ self.local_ensemble = local_ensemble
30
+ self.feat_unfold = feat_unfold
31
+ self.encoder = make_model(encoder_spec)
32
+
33
+ imnet_in_dim = self.encoder.out_dim
34
+ if self.feat_unfold:
35
+ imnet_in_dim *= 9
36
+ imnet_in_dim += 4 # attach coord, cell
37
+ self.imnet = make_model(imnet_spec, args={'in_dim': imnet_in_dim})
38
+
39
+ def gen_feat(self, inp):
40
+ self.inp = inp
41
+ feat = self.encoder(inp)
42
+ if self.feat_unfold:
43
+ feat = F.unfold(feat, 3, padding=1).view(
44
+ feat.shape[0], feat.shape[1] * 9, feat.shape[2], feat.shape[3])
45
+ self.feat = feat
46
+ self.feat_coord = make_coord(feat.shape[-2:], flatten=False, device=feat.device) \
47
+ .permute(2, 0, 1) \
48
+ .unsqueeze(0).expand(feat.shape[0], 2, *feat.shape[-2:])
49
+
50
+ def query_rgb(self, coord, cell):
51
+ # coord, cell: (b,h,w,c)
52
+ feat = self.feat
53
+ feat_coord = self.feat_coord
54
+ if self.local_ensemble:
55
+ vx_lst = [-1, 1]
56
+ vy_lst = [-1, 1]
57
+ eps_shift = 1e-6
58
+ else:
59
+ vx_lst, vy_lst, eps_shift = [0], [0], 0
60
+
61
+ # field radius (global: [-1, 1])
62
+ rx = 2 / feat.shape[-2] / 2
63
+ ry = 2 / feat.shape[-1] / 2
64
+
65
+ preds = []
66
+ areas = []
67
+ for vx in vx_lst:
68
+ for vy in vy_lst:
69
+ coord_ = coord.clone()
70
+ coord_[:, :, :, 0] += vx * rx + eps_shift
71
+ coord_[:, :, :, 1] += vy * ry + eps_shift
72
+ coord_.clamp_(-1 + 1e-6, 1 - 1e-6)
73
+
74
+ q_feat = F.grid_sample(feat, coord_.flip(-1),
75
+ mode='nearest', align_corners=False).permute(0, 2, 3, 1) # (b,h,w,c)
76
+ q_coord = F.grid_sample(feat_coord, coord_.flip(-1),
77
+ mode='nearest', align_corners=False).permute(0, 2, 3, 1)
78
+
79
+ rel_coord = coord - q_coord
80
+ rel_coord[:, :, :, 0] *= feat.shape[-2]
81
+ rel_coord[:, :, :, 1] *= feat.shape[-1]
82
+ inp = torch.cat([q_feat, rel_coord], dim=-1)
83
+
84
+ rel_cell = cell.clone()
85
+ rel_cell[:, :, :, 0] *= feat.shape[-2]
86
+ rel_cell[:, :, :, 1] *= feat.shape[-1]
87
+ inp = torch.cat([inp, rel_cell], dim=-1) # (b,h,w,c)
88
+
89
+ pred = self.imnet(inp.contiguous())
90
+ preds.append(pred)
91
+
92
+ area = torch.abs(rel_coord[:, :, :, 0] * rel_coord[:, :, :, 1]) # (b,h,w)
93
+ areas.append(area + 1e-9)
94
+
95
+ tot_area = torch.stack(areas).sum(dim=0) # (b,h,w)
96
+ if self.local_ensemble:
97
+ t = areas[0]; areas[0] = areas[3]; areas[3] = t
98
+ t = areas[1]; areas[1] = areas[2]; areas[2] = t
99
+ ret = 0
100
+ for pred, area in zip(preds, areas):
101
+ ret = ret + pred * (area / tot_area).unsqueeze(-1)
102
+ ret = ret.permute(0,3,1,2)
103
+ if ret.shape[1] != self.inp.shape[1]:
104
+ ret[:,:-1,:,:] += F.grid_sample(self.inp, coord.flip(-1), mode='bicubic',\
105
+ padding_mode='border', align_corners=False)
106
+ else:
107
+ ret += F.grid_sample(self.inp, coord.flip(-1), mode='bicubic',\
108
+ padding_mode='border', align_corners=False)
109
+ return ret
110
+
111
+ def forward(self, inp, coord, cell):
112
+ self.gen_feat(inp)
113
+ #return self.query_rgb(coord, cell)
114
+ H,W = coord.shape[1:3]
115
+ n = H*W
116
+ coord = coord.view(1,1,n,2)
117
+ cell = cell.view(1,1,n,2)
118
+
119
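+ # Query at most 512*512 coordinates per chunk to bound peak memory; per-chunk
+ # predictions are concatenated along the flattened pixel axis and reshaped below.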
+ ql = 0
120
+ preds = None
121
+ while ql < n:
122
+ qr = min(ql + 512*512, n)
123
+ pred = self.query_rgb(coord[:,:,ql:qr,:], cell[:,:,ql:qr,:])
124
+ preds = pred if preds is None else torch.cat([preds, pred], dim=-1)
125
+ ql = qr
126
+ preds = preds.view(1,-1,H,W)
127
+ return preds
competitors_inference_code/LSRNA/lsr/mlp.py ADDED
@@ -0,0 +1,23 @@
1
+ import torch.nn as nn
2
+
3
+ from .models import register
4
+
5
+
6
+ @register('mlp')
7
+ class MLP(nn.Module):
8
+
9
+ def __init__(self, in_dim, out_dim, hidden_list):
10
+ super().__init__()
11
+ layers = []
12
+ lastv = in_dim
13
+ for hidden in hidden_list:
14
+ layers.append(nn.Linear(lastv, hidden))
15
+ layers.append(nn.ReLU())
16
+ lastv = hidden
17
+ layers.append(nn.Linear(lastv, out_dim))
18
+ self.layers = nn.Sequential(*layers)
19
+
20
+ def forward(self, x):
21
+ shape = x.shape[:-1]
22
+ x = self.layers(x.view(-1, x.shape[-1]))
23
+ return x.view(*shape, -1)
competitors_inference_code/LSRNA/lsr/models.py ADDED
@@ -0,0 +1,23 @@
1
+ import copy
2
+
3
+
4
+ models = {}
5
+
6
+
7
+ def register(name):
8
+ def decorator(cls):
9
+ models[name] = cls
10
+ return cls
11
+ return decorator
12
+
13
+
14
+ def make(model_spec, args=None, load_sd=False):
15
+ if args is not None:
16
+ model_args = copy.deepcopy(model_spec['args'])
17
+ model_args.update(args)
18
+ else:
19
+ model_args = model_spec['args']
20
+ model = models[model_spec['name']](**model_args)
21
+ if load_sd:
22
+ model.load_state_dict(model_spec['sd'])
23
+ return model
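+ 
+ # Usage sketch (illustrative values, not shipped defaults):
+ # spec = {'name': 'mlp', 'args': {'in_dim': 64, 'out_dim': 4, 'hidden_list': [256, 256]}}
+ # model = make(spec) # instantiates the class registered under spec['name'] with spec['args']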
competitors_inference_code/LSRNA/lsr/swinir-liif-latent-sdxl.yaml ADDED
@@ -0,0 +1,20 @@
1
+ model:
2
+ name: liif
3
+ args:
4
+ feat_unfold: true
5
+ local_ensemble: true
6
+ encoder_spec:
7
+ name: swinir
8
+ args:
9
+ img_size: 32 # inp_size
10
+ in_chans: 4
11
+ embed_dim: 60
12
+ depths: [6,6,6,6]
13
+ num_heads: [6,6,6,6]
14
+ window_size: 8
15
+ upsampler: none
16
+ imnet_spec:
17
+ name: mlp
18
+ args:
19
+ out_dim: 4
20
+ hidden_list: [256,256,256,256]
competitors_inference_code/LSRNA/lsr/swinir.py ADDED
@@ -0,0 +1,777 @@
1
+ # -----------------------------------------------------------------------------------
2
+ # SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
3
+ # Originally Written by Ze Liu, Modified by Jingyun Liang.
4
+ # ----------------------------------------------------------------------------------
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ import torch.utils.checkpoint as checkpoint
11
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
12
+
13
+ from argparse import Namespace
14
+
15
+ from .models import register
16
+
17
+ class Mlp(nn.Module):
18
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
19
+ super().__init__()
20
+ out_features = out_features or in_features
21
+ hidden_features = hidden_features or in_features
22
+ self.fc1 = nn.Linear(in_features, hidden_features)
23
+ self.act = act_layer()
24
+ self.fc2 = nn.Linear(hidden_features, out_features)
25
+ self.drop = nn.Dropout(drop)
26
+
27
+ def forward(self, x):
28
+ x = self.fc1(x)
29
+ x = self.act(x)
30
+ x = self.drop(x)
31
+ x = self.fc2(x)
32
+ x = self.drop(x)
33
+ return x
34
+
35
+
36
+ def window_partition(x, window_size):
37
+ """
38
+ Args:
39
+ x: (B, H, W, C)
40
+ window_size (int): window size
41
+
42
+ Returns:
43
+ windows: (num_windows*B, window_size, window_size, C)
44
+ """
45
+ B, H, W, C = x.shape
46
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
47
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
48
+ return windows
49
+
50
+
51
+ def window_reverse(windows, window_size, H, W):
52
+ """
53
+ Args:
54
+ windows: (num_windows*B, window_size, window_size, C)
55
+ window_size (int): Window size
56
+ H (int): Height of image
57
+ W (int): Width of image
58
+
59
+ Returns:
60
+ x: (B, H, W, C)
61
+ """
62
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
63
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
64
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
65
+ return x
66
+
67
+
68
+ class WindowAttention(nn.Module):
69
+ r""" Window based multi-head self attention (W-MSA) module with relative position bias.
70
+ It supports both shifted and non-shifted windows.
71
+
72
+ Args:
73
+ dim (int): Number of input channels.
74
+ window_size (tuple[int]): The height and width of the window.
75
+ num_heads (int): Number of attention heads.
76
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
77
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
78
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
79
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
80
+ """
81
+
82
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
83
+
84
+ super().__init__()
85
+ self.dim = dim
86
+ self.window_size = window_size # Wh, Ww
87
+ self.num_heads = num_heads
88
+ head_dim = dim // num_heads
89
+ self.scale = qk_scale or head_dim ** -0.5
90
+
91
+ # define a parameter table of relative position bias
92
+ self.relative_position_bias_table = nn.Parameter(
93
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
94
+
95
+ # get pair-wise relative position index for each token inside the window
96
+ coords_h = torch.arange(self.window_size[0])
97
+ coords_w = torch.arange(self.window_size[1])
98
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
99
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
100
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
101
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
102
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
103
+ relative_coords[:, :, 1] += self.window_size[1] - 1
104
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
105
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
106
+ self.register_buffer("relative_position_index", relative_position_index)
107
+
108
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
109
+ self.attn_drop = nn.Dropout(attn_drop)
110
+ self.proj = nn.Linear(dim, dim)
111
+
112
+ self.proj_drop = nn.Dropout(proj_drop)
113
+
114
+ trunc_normal_(self.relative_position_bias_table, std=.02)
115
+ self.softmax = nn.Softmax(dim=-1)
116
+
117
+ def forward(self, x, mask=None):
118
+ """
119
+ Args:
120
+ x: input features with shape of (num_windows*B, N, C)
121
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
122
+ """
123
+ B_, N, C = x.shape
124
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
125
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
126
+
127
+ q = q * self.scale
128
+ attn = (q @ k.transpose(-2, -1))
129
+
130
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
131
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
132
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
133
+ attn = attn + relative_position_bias.unsqueeze(0)
134
+
135
+ if mask is not None:
136
+ nW = mask.shape[0]
137
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
138
+ attn = attn.view(-1, self.num_heads, N, N)
139
+ attn = self.softmax(attn)
140
+ else:
141
+ attn = self.softmax(attn)
142
+
143
+ attn = self.attn_drop(attn)
144
+
145
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
146
+ x = self.proj(x)
147
+ x = self.proj_drop(x)
148
+ return x
149
+
150
+ def extra_repr(self) -> str:
151
+ return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
152
+
153
+
154
+ class SwinTransformerBlock(nn.Module):
155
+ r""" Swin Transformer Block.
156
+
157
+ Args:
158
+ dim (int): Number of input channels.
159
+ input_resolution (tuple[int]): Input resolution.
160
+ num_heads (int): Number of attention heads.
161
+ window_size (int): Window size.
162
+ shift_size (int): Shift size for SW-MSA.
163
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
164
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
165
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
166
+ drop (float, optional): Dropout rate. Default: 0.0
167
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
168
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
169
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
170
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
171
+ """
172
+
173
+ def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
174
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
175
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
176
+ super().__init__()
177
+ self.dim = dim
178
+ self.input_resolution = input_resolution
179
+ self.num_heads = num_heads
180
+ self.window_size = window_size
181
+ self.shift_size = shift_size
182
+ self.mlp_ratio = mlp_ratio
183
+ if min(self.input_resolution) <= self.window_size:
184
+ # if window size is larger than input resolution, we don't partition windows
185
+ self.shift_size = 0
186
+ self.window_size = min(self.input_resolution)
187
+ assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
188
+
189
+ self.norm1 = norm_layer(dim)
190
+ self.attn = WindowAttention(
191
+ dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
192
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
193
+
194
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
195
+ self.norm2 = norm_layer(dim)
196
+ mlp_hidden_dim = int(dim * mlp_ratio)
197
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
198
+
199
+ if self.shift_size > 0:
200
+ attn_mask = self.calculate_mask(self.input_resolution)
201
+ else:
202
+ attn_mask = None
203
+
204
+ self.register_buffer("attn_mask", attn_mask)
205
+
206
+ def calculate_mask(self, x_size):
207
+ # calculate attention mask for SW-MSA
208
+ H, W = x_size
209
+ img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
210
+ h_slices = (slice(0, -self.window_size),
211
+ slice(-self.window_size, -self.shift_size),
212
+ slice(-self.shift_size, None))
213
+ w_slices = (slice(0, -self.window_size),
214
+ slice(-self.window_size, -self.shift_size),
215
+ slice(-self.shift_size, None))
216
+ cnt = 0
217
+ for h in h_slices:
218
+ for w in w_slices:
219
+ img_mask[:, h, w, :] = cnt
220
+ cnt += 1
221
+
222
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
223
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
224
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
225
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
226
+
227
+ return attn_mask
228
+
229
+ def forward(self, x, x_size):
230
+ H, W = x_size
231
+ B, L, C = x.shape
232
+ # assert L == H * W, "input feature has wrong size"
233
+
234
+ shortcut = x
235
+ x = self.norm1(x)
236
+ x = x.view(B, H, W, C)
237
+
238
+ # cyclic shift
239
+ if self.shift_size > 0:
240
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
241
+ else:
242
+ shifted_x = x
243
+
244
+ # partition windows
245
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
246
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
247
+
248
+ # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
249
+ if self.input_resolution == x_size:
250
+ attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
251
+ else:
252
+ attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
253
+
254
+ # merge windows
255
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
256
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
257
+
258
+ # reverse cyclic shift
259
+ if self.shift_size > 0:
260
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
261
+ else:
262
+ x = shifted_x
263
+ x = x.view(B, H * W, C)
264
+
265
+ # FFN
266
+ x = shortcut + self.drop_path(x)
267
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
268
+
269
+ return x
270
+
271
+ def extra_repr(self) -> str:
272
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
273
+ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
274
+
275
+
276
+ class PatchMerging(nn.Module):
277
+ r""" Patch Merging Layer.
278
+
279
+ Args:
280
+ input_resolution (tuple[int]): Resolution of input feature.
281
+ dim (int): Number of input channels.
282
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
283
+ """
284
+
285
+ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
286
+ super().__init__()
287
+ self.input_resolution = input_resolution
288
+ self.dim = dim
289
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
290
+ self.norm = norm_layer(4 * dim)
291
+
292
+ def forward(self, x):
293
+ """
294
+ x: B, H*W, C
295
+ """
296
+ H, W = self.input_resolution
297
+ B, L, C = x.shape
298
+ assert L == H * W, "input feature has wrong size"
299
+ assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
300
+
301
+ x = x.view(B, H, W, C)
302
+
303
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
304
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
305
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
306
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
307
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
308
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
309
+
310
+ x = self.norm(x)
311
+ x = self.reduction(x)
312
+
313
+ return x
314
+
315
+ def extra_repr(self) -> str:
316
+ return f"input_resolution={self.input_resolution}, dim={self.dim}"
317
+
318
+
319
+ class BasicLayer(nn.Module):
320
+ """ A basic Swin Transformer layer for one stage.
321
+
322
+ Args:
323
+ dim (int): Number of input channels.
324
+ input_resolution (tuple[int]): Input resolution.
325
+ depth (int): Number of blocks.
326
+ num_heads (int): Number of attention heads.
327
+ window_size (int): Local window size.
328
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
329
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
330
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
331
+ drop (float, optional): Dropout rate. Default: 0.0
332
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
333
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
334
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
335
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
336
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
337
+ """
338
+
339
+ def __init__(self, dim, input_resolution, depth, num_heads, window_size,
340
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
341
+ drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
342
+
343
+ super().__init__()
344
+ self.dim = dim
345
+ self.input_resolution = input_resolution
346
+ self.depth = depth
347
+ self.use_checkpoint = use_checkpoint
348
+
349
+ # build blocks
350
+ self.blocks = nn.ModuleList([
351
+ SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
352
+ num_heads=num_heads, window_size=window_size,
353
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
354
+ mlp_ratio=mlp_ratio,
355
+ qkv_bias=qkv_bias, qk_scale=qk_scale,
356
+ drop=drop, attn_drop=attn_drop,
357
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
358
+ norm_layer=norm_layer)
359
+ for i in range(depth)])
360
+
361
+ # patch merging layer
362
+ if downsample is not None:
363
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
364
+ else:
365
+ self.downsample = None
366
+
367
+ def forward(self, x, x_size):
368
+ for blk in self.blocks:
369
+ if self.use_checkpoint:
370
+ x = checkpoint.checkpoint(blk, x, x_size)
371
+ else:
372
+ x = blk(x, x_size)
373
+ if self.downsample is not None:
374
+ x = self.downsample(x)
375
+ return x
376
+
377
+ def extra_repr(self) -> str:
378
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
379
+
380
+
381
+ class RSTB(nn.Module):
382
+ """Residual Swin Transformer Block (RSTB).
383
+
384
+ Args:
385
+ dim (int): Number of input channels.
386
+ input_resolution (tuple[int]): Input resolution.
387
+ depth (int): Number of blocks.
388
+ num_heads (int): Number of attention heads.
389
+ window_size (int): Local window size.
390
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
391
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
392
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
393
+ drop (float, optional): Dropout rate. Default: 0.0
394
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
395
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
396
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
397
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
398
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
399
+ img_size: Input image size.
400
+ patch_size: Patch size.
401
+ resi_connection: The convolutional block before residual connection.
402
+ """
403
+
404
+ def __init__(self, dim, input_resolution, depth, num_heads, window_size,
405
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
406
+ drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
407
+ img_size=224, patch_size=4, resi_connection='1conv'):
408
+ super(RSTB, self).__init__()
409
+
410
+ self.dim = dim
411
+ self.input_resolution = input_resolution
412
+
413
+ self.residual_group = BasicLayer(dim=dim,
414
+ input_resolution=input_resolution,
415
+ depth=depth,
416
+ num_heads=num_heads,
417
+ window_size=window_size,
418
+ mlp_ratio=mlp_ratio,
419
+ qkv_bias=qkv_bias, qk_scale=qk_scale,
420
+ drop=drop, attn_drop=attn_drop,
421
+ drop_path=drop_path,
422
+ norm_layer=norm_layer,
423
+ downsample=downsample,
424
+ use_checkpoint=use_checkpoint)
425
+
426
+ if resi_connection == '1conv':
427
+ self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
428
+ elif resi_connection == '3conv':
429
+ # to save parameters and memory
430
+ self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
431
+ nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
432
+ nn.LeakyReLU(negative_slope=0.2, inplace=True),
433
+ nn.Conv2d(dim // 4, dim, 3, 1, 1))
434
+
435
+ self.patch_embed = PatchEmbed(
436
+ img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
437
+ norm_layer=None)
438
+
439
+ self.patch_unembed = PatchUnEmbed(
440
+ img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
441
+ norm_layer=None)
442
+
443
+ def forward(self, x, x_size):
444
+ return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
445
+
446
+
447
+ class PatchEmbed(nn.Module):
448
+ r""" Image to Patch Embedding
449
+
450
+ Args:
451
+ img_size (int): Image size. Default: 224.
452
+ patch_size (int): Patch token size. Default: 4.
453
+ in_chans (int): Number of input image channels. Default: 3.
454
+ embed_dim (int): Number of linear projection output channels. Default: 96.
455
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
456
+ """
457
+
458
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
459
+ super().__init__()
460
+ img_size = to_2tuple(img_size)
461
+ patch_size = to_2tuple(patch_size)
462
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
463
+ self.img_size = img_size
464
+ self.patch_size = patch_size
465
+ self.patches_resolution = patches_resolution
466
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
467
+
468
+ self.in_chans = in_chans
469
+ self.embed_dim = embed_dim
470
+
471
+ if norm_layer is not None:
472
+ self.norm = norm_layer(embed_dim)
473
+ else:
474
+ self.norm = None
475
+
476
+ def forward(self, x):
477
+ x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
478
+ if self.norm is not None:
479
+ x = self.norm(x)
480
+ return x
481
+
482
+
483
+ class PatchUnEmbed(nn.Module):
484
+ r""" Image to Patch Unembedding
485
+
486
+ Args:
487
+ img_size (int): Image size. Default: 224.
488
+ patch_size (int): Patch token size. Default: 4.
489
+ in_chans (int): Number of input image channels. Default: 3.
490
+ embed_dim (int): Number of linear projection output channels. Default: 96.
491
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
492
+ """
493
+
494
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
495
+ super().__init__()
496
+ img_size = to_2tuple(img_size)
497
+ patch_size = to_2tuple(patch_size)
498
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
499
+ self.img_size = img_size
500
+ self.patch_size = patch_size
501
+ self.patches_resolution = patches_resolution
502
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
503
+
504
+ self.in_chans = in_chans
505
+ self.embed_dim = embed_dim
506
+
507
+ def forward(self, x, x_size):
508
+ B, HW, C = x.shape
509
+ x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C
510
+ return x
511
+
512
+
513
+ class Upsample(nn.Sequential):
514
+ """Upsample module.
515
+
516
+ Args:
517
+ scale (int): Scale factor. Supported scales: 2^n and 3.
518
+ num_feat (int): Channel number of intermediate features.
519
+ """
520
+
521
+ def __init__(self, scale, num_feat):
522
+ m = []
523
+ if (scale & (scale - 1)) == 0: # scale = 2^n
524
+ for _ in range(int(math.log(scale, 2))):
525
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
526
+ m.append(nn.PixelShuffle(2))
527
+ elif scale == 3:
528
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
529
+ m.append(nn.PixelShuffle(3))
530
+ else:
531
+ raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
532
+ super(Upsample, self).__init__(*m)
533
+
534
+
535
+ class UpsampleOneStep(nn.Sequential):
536
+ """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
537
+ Used in lightweight SR to save parameters.
538
+
539
+ Args:
540
+ scale (int): Scale factor. Supported scales: 2^n and 3.
541
+ num_feat (int): Channel number of intermediate features.
542
+
543
+ """
544
+
545
+ def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
546
+ self.num_feat = num_feat
547
+ self.input_resolution = input_resolution
548
+ m = []
549
+ m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
550
+ m.append(nn.PixelShuffle(scale))
551
+ super(UpsampleOneStep, self).__init__(*m)
552
+
553
+
554
+ @register('swinir')
555
+ class SwinIR(nn.Module):
556
+ r""" SwinIR
557
+ A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
558
+
559
+ Args:
560
+ img_size (int | tuple(int)): Input image size. Default 64
561
+ patch_size (int | tuple(int)): Patch size. Default: 1
562
+ in_chans (int): Number of input image channels. Default: 3
563
+ embed_dim (int): Patch embedding dimension. Default: 96
564
+ depths (tuple(int)): Depth of each Swin Transformer layer.
565
+ num_heads (tuple(int)): Number of attention heads in different layers.
566
+ window_size (int): Window size. Default: 7
567
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
568
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
569
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
570
+ drop_rate (float): Dropout rate. Default: 0
571
+ attn_drop_rate (float): Attention dropout rate. Default: 0
572
+ drop_path_rate (float): Stochastic depth rate. Default: 0.1
573
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
574
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
575
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True
576
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
577
+ upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
578
+ img_range: Image range. 1. or 255.
579
+ upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
580
+ resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
581
+ """
582
+
583
+ def __init__(self, img_size=64, patch_size=1, in_chans=4,
584
+ embed_dim=180, depths=[6,6,6,6,6,6], num_heads=[6,6,6,6,6,6],
585
+ window_size=8, mlp_ratio=2., qkv_bias=True, qk_scale=None,
586
+ drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
587
+ norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
588
+ use_checkpoint=False, upscale=2, img_range=1., upsampler='none', resi_connection='1conv',
589
+ **kwargs):
590
+ super(SwinIR, self).__init__()
591
+ num_in_ch = in_chans
592
+ num_out_ch = in_chans
593
+ num_feat = 64
594
+ self.img_range = img_range
595
+
596
+ self.upscale = upscale
597
+ self.upsampler = upsampler
598
+ self.window_size = window_size
599
+ self.out_dim = num_feat
600
+ #####################################################################################################
601
+ ################################### 1, shallow feature extraction ###################################
602
+ self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
603
+
604
+ #####################################################################################################
605
+ ################################### 2, deep feature extraction ######################################
606
+ self.num_layers = len(depths)
607
+ self.embed_dim = embed_dim
608
+ self.ape = ape
609
+ self.patch_norm = patch_norm
610
+ self.num_features = embed_dim
611
+ self.mlp_ratio = mlp_ratio
612
+
613
+ # split image into non-overlapping patches
614
+ self.patch_embed = PatchEmbed(
615
+ img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
616
+ norm_layer=norm_layer if self.patch_norm else None)
617
+ num_patches = self.patch_embed.num_patches
618
+ patches_resolution = self.patch_embed.patches_resolution
619
+ self.patches_resolution = patches_resolution
620
+
621
+ # merge non-overlapping patches into image
622
+ self.patch_unembed = PatchUnEmbed(
623
+ img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
624
+ norm_layer=norm_layer if self.patch_norm else None)
625
+
626
+ # absolute position embedding
627
+ if self.ape:
628
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
629
+ trunc_normal_(self.absolute_pos_embed, std=.02)
630
+
631
+ self.pos_drop = nn.Dropout(p=drop_rate)
632
+
633
+ # stochastic depth
634
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
635
+
636
+ # build Residual Swin Transformer blocks (RSTB)
637
+ self.layers = nn.ModuleList()
638
+ for i_layer in range(self.num_layers):
639
+ layer = RSTB(dim=embed_dim,
640
+ input_resolution=(patches_resolution[0],
641
+ patches_resolution[1]),
642
+ depth=depths[i_layer],
643
+ num_heads=num_heads[i_layer],
644
+ window_size=window_size,
645
+ mlp_ratio=self.mlp_ratio,
646
+ qkv_bias=qkv_bias, qk_scale=qk_scale,
647
+ drop=drop_rate, attn_drop=attn_drop_rate,
648
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
649
+ norm_layer=norm_layer,
650
+ downsample=None,
651
+ use_checkpoint=use_checkpoint,
652
+ img_size=img_size,
653
+ patch_size=patch_size,
654
+ resi_connection=resi_connection
655
+ )
656
+ self.layers.append(layer)
657
+ self.norm = norm_layer(self.num_features)
658
+
659
+ # build the last conv layer in deep feature extraction
660
+ if resi_connection == '1conv':
661
+ self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
662
+ elif resi_connection == '3conv':
663
+ # to save parameters and memory
664
+ self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
665
+ nn.LeakyReLU(negative_slope=0.2, inplace=True),
666
+ nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
667
+ nn.LeakyReLU(negative_slope=0.2, inplace=True),
668
+ nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
669
+
670
+ #####################################################################################################
671
+ ################################ 3, high quality image reconstruction ################################
672
+ if self.upsampler == 'none':
673
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
674
+ nn.LeakyReLU(inplace=True))
675
+ elif self.upsampler == 'pixelshuffle':
676
+ # for classical SR
677
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
678
+ nn.LeakyReLU(inplace=True))
679
+ self.upsample = Upsample(upscale, num_feat)
680
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
681
+ elif self.upsampler == 'pixelshuffledirect':
682
+ # for lightweight SR (to save parameters)
683
+ self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
684
+ (patches_resolution[0], patches_resolution[1]))
685
+ elif self.upsampler == 'nearest+conv':
686
+ # for real-world SR (less artifacts)
687
+ assert self.upscale == 4, 'only support x4 now.'
688
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
689
+ nn.LeakyReLU(inplace=True))
690
+ self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
691
+ self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
692
+ self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
693
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
694
+ self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
695
+ else:
696
+ # for image denoising and JPEG compression artifact reduction
697
+ self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
698
+
699
+ self.apply(self._init_weights)
700
+
701
+ def _init_weights(self, m):
702
+ if isinstance(m, nn.Linear):
703
+ trunc_normal_(m.weight, std=.02)
704
+ if isinstance(m, nn.Linear) and m.bias is not None:
705
+ nn.init.constant_(m.bias, 0)
706
+ elif isinstance(m, nn.LayerNorm):
707
+ nn.init.constant_(m.bias, 0)
708
+ nn.init.constant_(m.weight, 1.0)
709
+
710
+ @torch.jit.ignore
711
+ def no_weight_decay(self):
712
+ return {'absolute_pos_embed'}
713
+
714
+ @torch.jit.ignore
715
+ def no_weight_decay_keywords(self):
716
+ return {'relative_position_bias_table'}
717
+
718
+ def check_image_size(self, x):
719
+ _, _, h, w = x.size()
720
+ mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
721
+ mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
722
+ x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
723
+ return x
724
+
725
+ def forward_features(self, x):
726
+ x_size = (x.shape[2], x.shape[3])
727
+ x = self.patch_embed(x)
728
+ if self.ape:
729
+ x = x + self.absolute_pos_embed
730
+ x = self.pos_drop(x)
731
+
732
+ for layer in self.layers:
733
+ x = layer(x, x_size)
734
+
735
+ x = self.norm(x) # B L C
736
+ x = self.patch_unembed(x, x_size)
737
+
738
+ return x
739
+
740
+ def forward(self, x):
741
+ H,W = x.shape[2:]
742
+ x = self.check_image_size(x)
743
+
744
+ # self.mean = self.mean.type_as(x)
745
+ # x = (x - self.mean) * self.img_range
746
+
747
+ if self.upsampler == 'none':
748
+ x = self.conv_first(x)
749
+ x = self.conv_after_body(self.forward_features(x)) + x
750
+ x = self.conv_before_upsample(x)
751
+ elif self.upsampler == 'pixelshuffle':
752
+ # for classical SR
753
+ x = self.conv_first(x)
754
+ x = self.conv_after_body(self.forward_features(x)) + x
755
+ x = self.conv_before_upsample(x)
756
+ x = self.conv_last(self.upsample(x))
757
+ elif self.upsampler == 'pixelshuffledirect':
758
+ # for lightweight SR
759
+ x = self.conv_first(x)
760
+ x = self.conv_after_body(self.forward_features(x)) + x
761
+ x = self.upsample(x)
762
+ elif self.upsampler == 'nearest+conv':
763
+ # for real-world SR
764
+ x = self.conv_first(x)
765
+ x = self.conv_after_body(self.forward_features(x)) + x
766
+ x = self.conv_before_upsample(x)
767
+ x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
768
+ x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
769
+ x = self.conv_last(self.lrelu(self.conv_hr(x)))
770
+ else:
771
+ # for image denoising and JPEG compression artifact reduction
772
+ x_first = self.conv_first(x)
773
+ res = self.conv_after_body(self.forward_features(x_first)) + x_first
774
+ x = x + self.conv_last(res)
775
+
776
+ # x = x / self.img_range + self.mean
777
+ return x[:,:,:H,:W]
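
With `upsampler: none` (as in the YAML above), this SwinIR variant is a shape-preserving feature encoder over 4-channel latents: inputs are reflect-padded to a multiple of `window_size`, processed, and cropped back, and the channel count becomes `out_dim` (`num_feat = 64`). A quick shape check, assuming the package import path:

```python
# Sketch: SwinIR with upsampler='none' preserves H and W and maps
# 4 latent channels to out_dim=64 feature channels.
import torch
from lsr.swinir import SwinIR  # assumes the lsr package is importable

encoder = SwinIR(img_size=32, in_chans=4, embed_dim=60,
                 depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=8, upsampler='none').eval()

latent = torch.randn(1, 4, 37, 53)   # deliberately not a multiple of window_size
with torch.no_grad():
    features = encoder(latent)
print(features.shape)                # torch.Size([1, 64, 37, 53])
```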
competitors_inference_code/LSRNA/lsr_training/configs/swinir-liif-latent-sdxl-v3.yaml ADDED
@@ -0,0 +1,58 @@
1
+ # use datasets/scripts/make_trainset.py
2
+ train_dataset:
3
+ dataset:
4
+ name: image-folder
5
+ args:
6
+ hr_path: ../datasets/train/OpenImages/HR_sdxl_latent # shared
7
+ lr_path: ../datasets/train/OpenImages/LR_sdxl_latent
8
+ scales: [2,3,4]
9
+ wrapper:
10
+ name: sr-explicit-paired
11
+ args:
12
+ inp_size: 32 # lr
13
+ augment: []
14
+ sample_size: 64 # hr | should be at most min(scales)*inp_size
15
+ num_workers: 4 # total
16
+ batch_size: 32 # total
17
+
18
+ valid_path: ../datasets/test/SDXL/original
19
+ sd_ckpt: stabilityai/stable-diffusion-xl-base-1.0 # fixed
20
+
21
+ model:
22
+ name: liif
23
+ args:
24
+ feat_unfold: true
25
+ local_ensemble: true
26
+ encoder_spec:
27
+ name: swinir
28
+ args:
29
+ img_size: 32 # inp_size
30
+ in_chans: 4
31
+ embed_dim: 60
32
+ depths: [6,6,6,6]
33
+ num_heads: [6,6,6,6]
34
+ window_size: 8
35
+ upsampler: none
36
+ imnet_spec:
37
+ name: mlp
38
+ args:
39
+ out_dim: 4
40
+ hidden_list: [256,256,256,256]
41
+
42
+ optimizer:
43
+ name: adam
44
+ args:
45
+ lr: 2.e-4
46
+
47
+ lr_scheduler:
48
+ name: CosineAnnealingLR_Restart
49
+ args:
50
+ T_period: [1000000]
51
+ restarts: [1000000]
52
+ weights: [1]
53
+ eta_min: 1.e-7
54
+
55
+ iter_max: 1000000
56
+ iter_print: 2000
57
+ iter_val: 50000
58
+ iter_save: 200000
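
With `T_period` and `restarts` both equal to `iter_max`, no restart fires within training, so the schedule reduces to a single cosine decay from the base learning rate to `eta_min`. A sketch of the resulting curve, assuming `CosineAnnealingLR_Restart` follows the standard cosine-annealing formula:

```python
# Sketch of the effective schedule under this config, assuming the scheduler
# implements standard cosine annealing; its actual code is not shown here.
import math

def lr_at(t, lr0=2e-4, eta_min=1e-7, T=1_000_000):
    return eta_min + 0.5 * (lr0 - eta_min) * (1 + math.cos(math.pi * t / T))

for t in (0, 250_000, 500_000, 1_000_000):
    print(t, f"{lr_at(t):.2e}")   # 2.00e-04, 1.71e-04, 1.00e-04, 1.00e-07
```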
competitors_inference_code/LSRNA/lsr_training/datasets/datasets.py ADDED
@@ -0,0 +1,18 @@
1
+ import copy
2
+
3
+ datasets = {}
4
+
5
+ def register(name):
6
+ def decorator(cls):
7
+ datasets[name] = cls
8
+ return cls
9
+ return decorator
10
+
11
+ def make(dataset_spec, args=None):
12
+ if args is not None:
13
+ dataset_args = copy.deepcopy(dataset_spec['args'])
14
+ dataset_args.update(args)
15
+ else:
16
+ dataset_args = dataset_spec['args']
17
+ dataset = datasets[dataset_spec['name']](**dataset_args)
18
+ return dataset
competitors_inference_code/LSRNA/lsr_training/datasets/scripts/make_trainset.py ADDED
@@ -0,0 +1,144 @@
1
+ import random
2
+ import os
3
+ import pandas as pd
4
+ import requests
5
+ from PIL import Image
6
+ from io import BytesIO
7
+ from tqdm import tqdm
8
+ import argparse
9
+ import pickle
10
+
11
+ import numpy as np
12
+ import torch
13
+ import torchvision.transforms as transforms
14
+ from diffusers import StableDiffusionXLPipeline
15
+
16
+ import sys
17
+ sys.path.append('../..')
18
+ import core
19
+
20
+ #random.seed(0)
21
+ #np.random.seed(0)
22
+ #torch.manual_seed(0)
23
+ #torch.cuda.manual_seed_all(0)
24
+
25
+ parser = argparse.ArgumentParser(description='OpenImages downloader')
26
+ parser.add_argument('--max_sample', type=int, default=1560000) # per part
27
+ parser.add_argument('--part', type=str, default='1/1')
28
+ args = parser.parse_args()
29
+
30
+ down_scales = [2,3,4] # fixed
31
+ base_dir = '/workspace/datasets/train/OpenImages' # fixed
32
+ count = 0
33
+
34
+ annotation_path = f'{base_dir}/image_ids_and_rotation.csv' # metadata of OpenImages
35
+ print('loading annotation file...')
36
+ a,b=map(int, args.part.split('/'))
37
+ urls = list(pd.read_csv(annotation_path)['OriginalURL'])[(a-1)::b]
38
+
39
+ processed_info = {}
40
+ processed_info_path = f'{base_dir}/process_info_{a}_{b}.pkl'
41
+ if os.path.exists(processed_info_path):
42
+ with open(f'{base_dir}/process_info_{a}_{b}.pkl', 'rb') as f:
43
+ processed_info = pickle.load(f)
44
+
45
+ def get_image(url):
46
+ global count, processed_info
47
+ session = requests.Session()
48
+ try:
49
+ img_name = url.split('/')[-1].split('?')[0]
50
+ if img_name[-4:].lower() not in ['.jpg', 'jpeg']:
51
+ return None, None
52
+ assert img_name.count('.') == 1
53
+ img_name = img_name.split('.')[0] # w/o extension
54
+
55
+ key = f'{base_dir}/HR/{img_name}_s000.jpg'
56
+ if key in processed_info:
57
+ count += processed_info[key]
58
+ print(f'[skip] files already exists for {img_name} | count: {count}')
59
+ return None, None
60
+
61
+ response = session.get(url, timeout=2)
62
+ response.raise_for_status()
63
+ img = Image.open(BytesIO(response.content))
64
+
65
+ width, height = img.size
66
+ if height >= 1440 and width >= 1440 and img.mode == 'RGB':
67
+ return img, img_name
68
+ return None, None
69
+
70
+ except requests.exceptions.RequestException as e:
71
+ print(f"Request failed: {e}")
72
+ return None, None
73
+ except Exception as e:
74
+ print(f"Other error occurred: {e}")
75
+ return None, None
76
+ finally:
77
+ session.close()
78
+
79
+ os.makedirs(f'{base_dir}/HR', exist_ok=True)
80
+ os.makedirs(f'{base_dir}/HR_sdxl_latent', exist_ok=True)
81
+ for down_scale in down_scales:
82
+ os.makedirs(f'{base_dir}/LR/X{down_scale}', exist_ok=True)
83
+ os.makedirs(f'{base_dir}/LR_sdxl_latent/X{down_scale}', exist_ok=True)
84
+
85
+ sd_ckpt = 'stabilityai/stable-diffusion-xl-base-1.0'
86
+ pipeline = StableDiffusionXLPipeline.from_pretrained(sd_ckpt)
87
+ vae = pipeline.vae.cuda() # eval mode, float32, i/o range [-1,1]
88
+
89
+ for url in urls:
90
+ if count >= args.max_sample:
91
+ print(f'count ({count}) reached the max_sample={args.max_sample}')
92
+ break
93
+ img, base_name = get_image(url)
94
+ if img is None: continue
95
+
96
+ # found new HR image
97
+ crop_size = random.randint(1056,1440)//96*96
98
+ step = crop_size
99
+ w,h = img.size
100
+
101
+ h_space = np.arange(0, h-crop_size+1, step)
102
+ if h > h_space[-1] + crop_size:
103
+ h_space = np.append(h_space, h-crop_size)
104
+ w_space = np.arange(0, w-crop_size+1, step)
105
+ if w > w_space[-1] + crop_size:
106
+ w_space = np.append(w_space, w-crop_size)
107
+
108
+ hrs = []
109
+ for x in h_space:
110
+ for y in w_space:
111
+ hr = img.crop((y, x, y+crop_size, x+crop_size))
112
+ hrs.append(hr)
113
+ hrs = hrs[::-1]
114
+
115
+ for i, hr in enumerate(tqdm(hrs)):
116
+ index = len(hrs)-i-1
117
+ name = f'{base_name}_s{index:03d}' # w/o extension
118
+ hr = transforms.ToTensor()(hr).unsqueeze(0).cuda() # (1,3,csz,csz), range [0,1]
119
+ with torch.no_grad():
120
+ hr_latent = vae.encode((hr-0.5)*2).latent_dist.mode() * vae.config.scaling_factor
121
+
122
+ # bicubic degradation & conv_latent
123
+ for down_scale in down_scales:
124
+ lr = core.imresize(hr, sizes=(crop_size//down_scale, crop_size//down_scale))
125
+ lr = (lr*255).clip(0,255).to(torch.uint8).float() / 255 # discretized [0,1]
126
+ transforms.ToPILImage()(lr.squeeze(0)).save(f'{base_dir}/LR/X{down_scale}/{name}.jpg')
127
+
128
+ with torch.no_grad():
129
+ lr_latent = vae.encode((lr-0.5)*2).latent_dist.mode() * vae.config.scaling_factor
130
+ np.save(f'{base_dir}/LR_sdxl_latent/X{down_scale}/{name}.npy',
131
+ lr_latent.squeeze(0).permute(1,2,0).detach().cpu().numpy())
132
+
133
+ np.save(f'{base_dir}/HR_sdxl_latent/{name}.npy',
134
+ hr_latent.squeeze(0).permute(1,2,0).detach().cpu().numpy())
135
+ transforms.ToPILImage()(hr.squeeze(0)).save(f'{base_dir}/HR/{name}.jpg')
136
+
137
+ if index == 0:
138
+ key = f'{base_dir}/HR/{name}.jpg'
139
+ assert key not in processed_info
140
+ processed_info[key] = len(hrs)
141
+ with open(processed_info_path, 'wb') as f:
142
+ pickle.dump(processed_info, f)
143
+ count += len(hrs)
144
+ print(f'count: {count} / {args.max_sample} | successfully processed {base_name}')
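
Since training pairs are stored as SDXL latents, a quick sanity check is to decode a saved latent back through the VAE. A sketch that mirrors the encode convention above (divide by `scaling_factor` before `vae.decode`); the `.npy` filename is illustrative:

```python
# Sketch: round-trip a saved HR latent back to RGB to verify the dataset.
import numpy as np
import torch
import torchvision.transforms as transforms
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0')
vae = pipe.vae.cuda()

latent = np.load('HR_sdxl_latent/example_s000.npy')              # (h,w,4), illustrative name
latent = torch.from_numpy(latent).permute(2, 0, 1).unsqueeze(0).cuda()
with torch.no_grad():
    img = vae.decode(latent / vae.config.scaling_factor).sample  # (1,3,H,W), range [-1,1]
img = (img / 2 + 0.5).clamp(0, 1)
transforms.ToPILImage()(img.squeeze(0).cpu()).save('decoded_check.jpg')
```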
competitors_inference_code/LSRNA/lsr_training/datasets/wrappers.py ADDED
@@ -0,0 +1,61 @@
1
+ import numpy as np
2
+ from torch.utils.data import Dataset
3
+ from datasets import register
4
+ from utils import *
5
+
6
+
7
+ @register('sr-explicit-paired')
8
+ class SRExplicitPaired(Dataset):
9
+
10
+ def __init__(self, dataset, inp_size, augment=[], sample_size=None, num_channels=None):
11
+ self.dataset = dataset
12
+ self.inp_size = inp_size
13
+ self.augment = augment
14
+ self.sample_size = inp_size if sample_size is None else sample_size
15
+ self.num_channels = num_channels
16
+
17
+ def __len__(self):
18
+ return len(self.dataset)
19
+
20
+ def __getitem__(self, idx):
21
+ hr_path, lr_paths = self.dataset[idx]
22
+ lr_path = lr_paths[np.random.randint(len(lr_paths))]
23
+
24
+ # img: (H,W,C), numpy, range [-3,3] or [0,1]
25
+ hr, lr = read_img(hr_path), read_img(lr_path)
26
+ if self.num_channels:
27
+ assert hr.shape[-1] == lr.shape[-1] == self.num_channels
28
+ hr, lr = random_crop_together(hr, lr, self.inp_size)
29
+
30
+ # augmentation
31
+ hflip = (np.random.random() < 0.5) if 'hflip' in self.augment else False
32
+ vflip = (np.random.random() < 0.5) if 'vflip' in self.augment else False
33
+ dflip = (np.random.random() < 0.5) if 'dflip' in self.augment else False
34
+
35
+ def base_augment(img):
36
+ if hflip:
37
+ img = img[::-1, :, :]
38
+ if vflip:
39
+ img = img[:, ::-1, :]
40
+ if dflip:
41
+ img = np.transpose(img, (1, 0, 2))
42
+ return img.copy()
43
+ hr = torch.from_numpy(base_augment(hr)).permute(2,0,1).float() # (C,H,W)
44
+ lr = torch.from_numpy(base_augment(lr)).permute(2,0,1).float() # (C,h,w)
45
+
46
+ coord = make_coord(hr.shape[-2:], flatten=False) # (H,W,2)
47
+ cell = torch.ones_like(coord) # (H,W,2)
48
+ cell[:,:,0] *= 2 / hr.shape[-2]
49
+ cell[:,:,1] *= 2 / hr.shape[-1]
50
+
51
+ P = self.sample_size
52
+ hr, pos = random_crop(hr, P, return_pos=True) # (C,P,P)
53
+ coord = coord[pos[0]:pos[0]+P, pos[1]:pos[1]+P] # (P,P,2)
54
+ cell = cell[pos[0]:pos[0]+P, pos[1]:pos[1]+P] # (P,P,2)
55
+
56
+ return {
57
+ 'lr': lr,
58
+ 'coord': coord,
59
+ 'cell': cell,
60
+ 'hr': hr
61
+ }
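
Both this wrapper and `liif.py` below lean on `make_coord` from `utils`; for reference, a sketch of what it conventionally computes in LIIF-style code, under the assumption that this repo follows the reference LIIF implementation: pixel-center coordinates normalized to [-1, 1] along each axis.

```python
# Sketch of make_coord as used above, assuming the standard LIIF definition.
import torch

def make_coord(shape, flatten=True):
    seqs = []
    for n in shape:
        r = 1 / n                                      # half a cell width
        seq = -1 + r + (2 * r) * torch.arange(n).float()
        seqs.append(seq)
    coord = torch.stack(torch.meshgrid(*seqs, indexing='ij'), dim=-1)
    return coord.view(-1, coord.shape[-1]) if flatten else coord

print(make_coord((2, 2), flatten=False))
# [[[-0.5, -0.5], [-0.5, 0.5]],
#  [[ 0.5, -0.5], [ 0.5, 0.5]]]
```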
competitors_inference_code/LSRNA/lsr_training/dist.sh ADDED
@@ -0,0 +1,21 @@
1
+ #!/bin/bash
2
+ # Usage | ./dist.sh train.py --config configs/swinir-liif-latent-sdxl-v3.yaml --gpu 0,1
3
+ SCRIPT=$1
4
+ shift
5
+ ARGS=("$@")
6
+
7
+ for ((i=0; i<${#ARGS[@]}; i++)); do
8
+ if [[ ${ARGS[i]} == "--gpu" ]]; then
9
+ GPU=${ARGS[i+1]}
10
+ unset ARGS[i]
11
+ unset ARGS[i+1]
12
+ break
13
+ fi
14
+ done
15
+
16
+ ARGS=("${ARGS[@]}")
17
+ NPROC_PER_NODE=$(echo $GPU | tr -cd ',' | wc -c)
18
+ let NPROC_PER_NODE+=1
19
+ FREE_PORT=$(python find_port.py)
20
+ echo free port: $FREE_PORT
21
+ CUDA_VISIBLE_DEVICES=$GPU python -m torch.distributed.launch --nproc_per_node=$NPROC_PER_NODE --master_port=$FREE_PORT $SCRIPT ${ARGS[@]}
competitors_inference_code/LSRNA/lsr_training/find_port.py ADDED
@@ -0,0 +1,11 @@
1
+ import socket
2
+ from contextlib import closing
3
+
4
+ def find_free_port():
5
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
6
+ s.bind(('', 0))
7
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
8
+ return s.getsockname()[1]
9
+
10
+ if __name__ == '__main__':
11
+ print(find_free_port())
competitors_inference_code/LSRNA/lsr_training/models/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .models import register, make
2
+ from . import swinir
3
+ from . import liif, mlp
competitors_inference_code/LSRNA/lsr_training/models/liif.py ADDED
@@ -0,0 +1,117 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ import models
6
+ from models import register
7
+ from utils import make_coord
8
+
9
+
10
+ @register('liif')
11
+ class LIIF(nn.Module):
12
+
13
+ def __init__(self, encoder_spec, imnet_spec, feat_unfold=True, local_ensemble=True):
14
+ super().__init__()
15
+ self.local_ensemble = local_ensemble
16
+ self.feat_unfold = feat_unfold
17
+ self.encoder = models.make(encoder_spec)
18
+
19
+ imnet_in_dim = self.encoder.out_dim
20
+ if self.feat_unfold:
21
+ imnet_in_dim *= 9
22
+ imnet_in_dim += 4 # attach coord, cell
23
+ self.imnet = models.make(imnet_spec, args={'in_dim': imnet_in_dim})
24
+
25
+ def gen_feat(self, inp):
26
+ self.inp = inp
27
+ feat = self.encoder(inp)
28
+ if self.feat_unfold:
29
+ feat = F.unfold(feat, 3, padding=1).view(
30
+ feat.shape[0], feat.shape[1] * 9, feat.shape[2], feat.shape[3])
31
+ self.feat = feat
32
+ self.feat_coord = make_coord(feat.shape[-2:], flatten=False).cuda() \
33
+ .permute(2, 0, 1) \
34
+ .unsqueeze(0).expand(feat.shape[0], 2, *feat.shape[-2:])
35
+
36
+ def query_rgb(self, coord, cell):
37
+ # coord, cell: (b,h,w,c)
38
+ feat = self.feat
39
+ feat_coord = self.feat_coord
40
+ if self.local_ensemble:
41
+ vx_lst = [-1, 1]
42
+ vy_lst = [-1, 1]
43
+ eps_shift = 1e-6
44
+ else:
45
+ vx_lst, vy_lst, eps_shift = [0], [0], 0
46
+
47
+ # field radius (global: [-1, 1])
48
+ rx = 2 / feat.shape[-2] / 2
49
+ ry = 2 / feat.shape[-1] / 2
50
+
51
+ preds = []
52
+ areas = []
53
+ for vx in vx_lst:
54
+ for vy in vy_lst:
55
+ coord_ = coord.clone()
56
+ coord_[:, :, :, 0] += vx * rx + eps_shift
57
+ coord_[:, :, :, 1] += vy * ry + eps_shift
58
+ coord_.clamp_(-1 + 1e-6, 1 - 1e-6)
59
+
60
+ q_feat = F.grid_sample(feat, coord_.flip(-1),
61
+ mode='nearest', align_corners=False).permute(0, 2, 3, 1) # (b,h,w,c)
62
+ q_coord = F.grid_sample(feat_coord, coord_.flip(-1),
63
+ mode='nearest', align_corners=False).permute(0, 2, 3, 1)
64
+
65
+ rel_coord = coord - q_coord
66
+ rel_coord[:, :, :, 0] *= feat.shape[-2]
67
+ rel_coord[:, :, :, 1] *= feat.shape[-1]
68
+ inp = torch.cat([q_feat, rel_coord], dim=-1)
69
+
70
+ rel_cell = cell.clone()
71
+ rel_cell[:, :, :, 0] *= feat.shape[-2]
72
+ rel_cell[:, :, :, 1] *= feat.shape[-1]
73
+ inp = torch.cat([inp, rel_cell], dim=-1) # (b,h,w,c)
74
+
75
+ pred = self.imnet(inp.contiguous())
76
+ preds.append(pred)
77
+
78
+ area = torch.abs(rel_coord[:, :, :, 0] * rel_coord[:, :, :, 1]) # (b,h,w)
79
+ areas.append(area + 1e-9)
80
+
81
+ tot_area = torch.stack(areas).sum(dim=0) # (b,h,w)
82
+ if self.local_ensemble:
83
+ t = areas[0]; areas[0] = areas[3]; areas[3] = t
84
+ t = areas[1]; areas[1] = areas[2]; areas[2] = t
85
+ ret = 0
86
+ for pred, area in zip(preds, areas):
87
+ ret = ret + pred * (area / tot_area).unsqueeze(-1)
88
+ ret = ret.permute(0,3,1,2)
89
+
90
+ if ret.shape[1] != self.inp.shape[1]:
91
+ ret[:,:-1,:,:] += F.grid_sample(self.inp, coord.flip(-1), mode='bicubic',\
92
+ padding_mode='border', align_corners=False)
93
+ else:
94
+ ret += F.grid_sample(self.inp, coord.flip(-1), mode='bicubic',\
95
+ padding_mode='border', align_corners=False)
96
+ return ret
97
+
98
+ def forward(self, inp, coord, cell):
99
+ self.gen_feat(inp)
100
+ return self.query_rgb(coord, cell)
101
+
102
+ def batched_predict(self, inp, coord, cell, bsize=512*512):
103
+ self.gen_feat(inp)
104
+ H,W = coord.shape[1:3]
105
+ n = H*W
106
+ coord = coord.view(1,1,n,2)
107
+ cell = cell.view(1,1,n,2)
108
+
109
+ ql = 0
110
+ preds = []
111
+ while ql < n:
112
+ qr = min(ql + bsize, n)
113
+ pred = self.query_rgb(coord[:,:,ql:qr,:], cell[:,:,ql:qr,:])
114
+ preds.append(pred)
115
+ ql = qr
116
+ pred = torch.cat(preds, dim=-1).view(1,-1,H,W)
117
+ return pred
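
A sketch of continuous-scale inference with this module, assuming `model` is a trained `LIIF` instance on CUDA (`gen_feat` places `feat_coord` on CUDA) and `make_coord` is the `utils` helper used above:

```python
# Sketch: upsample a 4-channel latent to an arbitrary target size with
# batched_predict; `model` is assumed to be a trained LIIF instance on CUDA.
import torch
from utils import make_coord

H, W = 128, 128                                                 # target resolution
coord = make_coord((H, W), flatten=False).cuda().unsqueeze(0)   # (1,H,W,2)
cell = torch.ones_like(coord)
cell[..., 0] *= 2 / H                                           # cell height in [-1,1] units
cell[..., 1] *= 2 / W

lr_latent = torch.randn(1, 4, 32, 32).cuda()
with torch.no_grad():
    hr_latent = model.batched_predict(lr_latent, coord, cell)   # (1,4,H,W)
```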
competitors_inference_code/LSRNA/lsr_training/models/mlp.py ADDED
@@ -0,0 +1,23 @@
1
+ import torch.nn as nn
2
+
3
+ from models import register
4
+
5
+
6
+ @register('mlp')
7
+ class MLP(nn.Module):
8
+
9
+ def __init__(self, in_dim, out_dim, hidden_list):
10
+ super().__init__()
11
+ layers = []
12
+ lastv = in_dim
13
+ for hidden in hidden_list:
14
+ layers.append(nn.Linear(lastv, hidden))
15
+ layers.append(nn.ReLU())
16
+ lastv = hidden
17
+ layers.append(nn.Linear(lastv, out_dim))
18
+ self.layers = nn.Sequential(*layers)
19
+
20
+ def forward(self, x):
21
+ shape = x.shape[:-1]
22
+ x = self.layers(x.view(-1, x.shape[-1]))
23
+ return x.view(*shape, -1)
competitors_inference_code/LSRNA/lsr_training/models/models.py ADDED
@@ -0,0 +1,23 @@
1
+ import copy
2
+
3
+
4
+ models = {}
5
+
6
+
7
+ def register(name):
8
+ def decorator(cls):
9
+ models[name] = cls
10
+ return cls
11
+ return decorator
12
+
13
+
14
+ def make(model_spec, args=None, load_sd=False):
15
+ if args is not None:
16
+ model_args = copy.deepcopy(model_spec['args'])
17
+ model_args.update(args)
18
+ else:
19
+ model_args = model_spec['args']
20
+ model = models[model_spec['name']](**model_args)
21
+ if load_sd:
22
+ model.load_state_dict(model_spec['sd'])
23
+ return model
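
This registry is the glue between the YAML specs and the model classes: `@register` stores a class under a string key, and `make` instantiates it from a `{'name', 'args'}` spec. A self-contained sketch (the `'identity'` name is illustrative, not part of the repo; run from `lsr_training/` so the `models` package resolves as in `liif.py`):

```python
# Sketch of the registry pattern defined above.
import torch.nn as nn
from models import register, make

@register('identity')           # illustrative model name
class Identity(nn.Module):
    def forward(self, x):
        return x

model = make({'name': 'identity', 'args': {}})
```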
competitors_inference_code/LSRNA/lsr_training/models/swinir.py ADDED
@@ -0,0 +1,776 @@
1
+ # -----------------------------------------------------------------------------------
2
+ # SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
3
+ # Originally Written by Ze Liu, Modified by Jingyun Liang.
4
+ # ----------------------------------------------------------------------------------
5
+ # modified from: https://github.com/JingyunLiang/SwinIR
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint as checkpoint
12
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
13
+ from models import register
14
+
15
+ class Mlp(nn.Module):
16
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
17
+ super().__init__()
18
+ out_features = out_features or in_features
19
+ hidden_features = hidden_features or in_features
20
+ self.fc1 = nn.Linear(in_features, hidden_features)
21
+ self.act = act_layer()
22
+ self.fc2 = nn.Linear(hidden_features, out_features)
23
+ self.drop = nn.Dropout(drop)
24
+
25
+ def forward(self, x):
26
+ x = self.fc1(x)
27
+ x = self.act(x)
28
+ x = self.drop(x)
29
+ x = self.fc2(x)
30
+ x = self.drop(x)
31
+ return x
32
+
33
+
34
+ def window_partition(x, window_size):
35
+ """
36
+ Args:
37
+ x: (B, H, W, C)
38
+ window_size (int): window size
39
+
40
+ Returns:
41
+ windows: (num_windows*B, window_size, window_size, C)
42
+ """
43
+ B, H, W, C = x.shape
44
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
45
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
46
+ return windows
47
+
48
+
49
+ def window_reverse(windows, window_size, H, W):
50
+ """
51
+ Args:
52
+ windows: (num_windows*B, window_size, window_size, C)
53
+ window_size (int): Window size
54
+ H (int): Height of image
55
+ W (int): Width of image
56
+
57
+ Returns:
58
+ x: (B, H, W, C)
59
+ """
60
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
61
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
62
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
63
+ return x
64
+
65
+
66
+ class WindowAttention(nn.Module):
67
+ r""" Window based multi-head self attention (W-MSA) module with relative position bias.
68
+ It supports both shifted and non-shifted windows.
69
+
70
+ Args:
71
+ dim (int): Number of input channels.
72
+ window_size (tuple[int]): The height and width of the window.
73
+ num_heads (int): Number of attention heads.
74
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
75
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
76
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
77
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
78
+ """
79
+
80
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
81
+
82
+ super().__init__()
83
+ self.dim = dim
84
+ self.window_size = window_size # Wh, Ww
85
+ self.num_heads = num_heads
86
+ head_dim = dim // num_heads
87
+ self.scale = qk_scale or head_dim ** -0.5
88
+
89
+ # define a parameter table of relative position bias
90
+ self.relative_position_bias_table = nn.Parameter(
91
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
92
+
93
+ # get pair-wise relative position index for each token inside the window
94
+ coords_h = torch.arange(self.window_size[0])
95
+ coords_w = torch.arange(self.window_size[1])
96
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
97
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
98
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
99
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
100
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
101
+ relative_coords[:, :, 1] += self.window_size[1] - 1
102
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
103
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
104
+ self.register_buffer("relative_position_index", relative_position_index)
105
+
106
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
107
+ self.attn_drop = nn.Dropout(attn_drop)
108
+ self.proj = nn.Linear(dim, dim)
109
+
110
+ self.proj_drop = nn.Dropout(proj_drop)
111
+
112
+ trunc_normal_(self.relative_position_bias_table, std=.02)
113
+ self.softmax = nn.Softmax(dim=-1)
114
+
115
+ def forward(self, x, mask=None):
116
+ """
117
+ Args:
118
+ x: input features with shape of (num_windows*B, N, C)
119
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
120
+ """
121
+ B_, N, C = x.shape
122
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
123
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
124
+
125
+ q = q * self.scale
126
+ attn = (q @ k.transpose(-2, -1))
127
+
128
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
129
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
130
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
131
+ attn = attn + relative_position_bias.unsqueeze(0)
132
+
133
+ if mask is not None:
134
+ nW = mask.shape[0]
135
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
136
+ attn = attn.view(-1, self.num_heads, N, N)
137
+ attn = self.softmax(attn)
138
+ else:
139
+ attn = self.softmax(attn)
140
+
141
+ attn = self.attn_drop(attn)
142
+
143
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
144
+ x = self.proj(x)
145
+ x = self.proj_drop(x)
146
+ return x
147
+
148
+ def extra_repr(self) -> str:
149
+ return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
150
+
151
+
152
+ class SwinTransformerBlock(nn.Module):
153
+ r""" Swin Transformer Block.
154
+
155
+ Args:
156
+ dim (int): Number of input channels.
157
+ input_resolution (tuple[int]): Input resolution.
158
+ num_heads (int): Number of attention heads.
159
+ window_size (int): Window size.
160
+ shift_size (int): Shift size for SW-MSA.
161
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
162
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
163
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
164
+ drop (float, optional): Dropout rate. Default: 0.0
165
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
166
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
167
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
168
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
169
+ """
170
+
171
+ def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
172
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
173
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
174
+ super().__init__()
175
+ self.dim = dim
176
+ self.input_resolution = input_resolution
177
+ self.num_heads = num_heads
178
+ self.window_size = window_size
179
+ self.shift_size = shift_size
180
+ self.mlp_ratio = mlp_ratio
181
+ if min(self.input_resolution) <= self.window_size:
182
+ # if window size is larger than input resolution, we don't partition windows
183
+ self.shift_size = 0
184
+ self.window_size = min(self.input_resolution)
185
+ assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
186
+
187
+ self.norm1 = norm_layer(dim)
188
+ self.attn = WindowAttention(
189
+ dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
190
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
191
+
192
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
193
+ self.norm2 = norm_layer(dim)
194
+ mlp_hidden_dim = int(dim * mlp_ratio)
195
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
196
+
197
+ if self.shift_size > 0:
198
+ attn_mask = self.calculate_mask(self.input_resolution)
199
+ else:
200
+ attn_mask = None
201
+
202
+ self.register_buffer("attn_mask", attn_mask)
203
+
204
+ def calculate_mask(self, x_size):
205
+ # calculate attention mask for SW-MSA
206
+ H, W = x_size
207
+ img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
208
+ h_slices = (slice(0, -self.window_size),
209
+ slice(-self.window_size, -self.shift_size),
210
+ slice(-self.shift_size, None))
211
+ w_slices = (slice(0, -self.window_size),
212
+ slice(-self.window_size, -self.shift_size),
213
+ slice(-self.shift_size, None))
214
+ cnt = 0
215
+ for h in h_slices:
216
+ for w in w_slices:
217
+ img_mask[:, h, w, :] = cnt
218
+ cnt += 1
219
+
220
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
221
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
222
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
223
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
224
+
225
+ return attn_mask
226
+
227
+ def forward(self, x, x_size):
228
+ H, W = x_size
229
+ B, L, C = x.shape
230
+ # assert L == H * W, "input feature has wrong size"
231
+
232
+ shortcut = x
233
+ x = self.norm1(x)
234
+ x = x.view(B, H, W, C)
235
+
236
+ # cyclic shift
237
+ if self.shift_size > 0:
238
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
239
+ else:
240
+ shifted_x = x
241
+
242
+ # partition windows
243
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
244
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
245
+
246
+ # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
247
+ if self.input_resolution == x_size:
248
+ attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
249
+ else:
250
+ attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
251
+
252
+ # merge windows
253
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
254
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
255
+
256
+ # reverse cyclic shift
257
+ if self.shift_size > 0:
258
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
259
+ else:
260
+ x = shifted_x
261
+ x = x.view(B, H * W, C)
262
+
263
+ # FFN
264
+ x = shortcut + self.drop_path(x)
265
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
266
+
267
+ return x
268
+
269
+ def extra_repr(self) -> str:
270
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
271
+ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
272
+
273
+
274
+class PatchMerging(nn.Module):
+    r""" Patch Merging Layer.
+
+    Args:
+        input_resolution (tuple[int]): Resolution of input feature.
+        dim (int): Number of input channels.
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+    """
+
+    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.input_resolution = input_resolution
+        self.dim = dim
+        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+        self.norm = norm_layer(4 * dim)
+
+    def forward(self, x):
+        """
+        x: B, H*W, C
+        """
+        H, W = self.input_resolution
+        B, L, C = x.shape
+        assert L == H * W, "input feature has wrong size"
+        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
+
+        x = x.view(B, H, W, C)
+
+        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
+        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
+        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
+        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
+        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
+        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
+
+        x = self.norm(x)
+        x = self.reduction(x)
+
+        return x
+
+    def extra_repr(self) -> str:
+        return f"input_resolution={self.input_resolution}, dim={self.dim}"
+
+
+class BasicLayer(nn.Module):
+    """ A basic Swin Transformer layer for one stage.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        depth (int): Number of blocks.
+        num_heads (int): Number of attention heads.
+        window_size (int): Local window size.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+    """
+
+    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
+                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
+
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.depth = depth
+        self.use_checkpoint = use_checkpoint
+
+        # build blocks
+        self.blocks = nn.ModuleList([
+            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
+                                 num_heads=num_heads, window_size=window_size,
+                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
+                                 mlp_ratio=mlp_ratio,
+                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                 drop=drop, attn_drop=attn_drop,
+                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+                                 norm_layer=norm_layer)
+            for i in range(depth)])
+
+        # patch merging layer
+        if downsample is not None:
+            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
+        else:
+            self.downsample = None
+
+    def forward(self, x, x_size):
+        for blk in self.blocks:
+            if self.use_checkpoint:
+                x = checkpoint.checkpoint(blk, x, x_size)
+            else:
+                x = blk(x, x_size)
+        if self.downsample is not None:
+            x = self.downsample(x)
+        return x
+
+    def extra_repr(self) -> str:
+        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
+
+
+class RSTB(nn.Module):
+    """Residual Swin Transformer Block (RSTB).
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        depth (int): Number of blocks.
+        num_heads (int): Number of attention heads.
+        window_size (int): Local window size.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+        img_size: Input image size.
+        patch_size: Patch size.
+        resi_connection: The convolutional block before residual connection.
+    """
+
+    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
+                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
+                 img_size=224, patch_size=4, resi_connection='1conv'):
+        super(RSTB, self).__init__()
+
+        self.dim = dim
+        self.input_resolution = input_resolution
+
+        self.residual_group = BasicLayer(dim=dim,
+                                         input_resolution=input_resolution,
+                                         depth=depth,
+                                         num_heads=num_heads,
+                                         window_size=window_size,
+                                         mlp_ratio=mlp_ratio,
+                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                         drop=drop, attn_drop=attn_drop,
+                                         drop_path=drop_path,
+                                         norm_layer=norm_layer,
+                                         downsample=downsample,
+                                         use_checkpoint=use_checkpoint)
+
+        if resi_connection == '1conv':
+            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
+        elif resi_connection == '3conv':
+            # to save parameters and memory
+            self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                      nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
+                                      nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                      nn.Conv2d(dim // 4, dim, 3, 1, 1))
+
+        self.patch_embed = PatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
+            norm_layer=None)
+
+        self.patch_unembed = PatchUnEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
+            norm_layer=None)
+
+    def forward(self, x, x_size):
+        return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
+
+
+class PatchEmbed(nn.Module):
+    r""" Image to Patch Embedding
+
+    Args:
+        img_size (int): Image size. Default: 224.
+        patch_size (int): Patch token size. Default: 4.
+        in_chans (int): Number of input image channels. Default: 3.
+        embed_dim (int): Number of linear projection output channels. Default: 96.
+        norm_layer (nn.Module, optional): Normalization layer. Default: None
+    """
+
+    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.patches_resolution = patches_resolution
+        self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+        self.in_chans = in_chans
+        self.embed_dim = embed_dim
+
+        if norm_layer is not None:
+            self.norm = norm_layer(embed_dim)
+        else:
+            self.norm = None
+
+    def forward(self, x):
+        x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
+        if self.norm is not None:
+            x = self.norm(x)
+        return x
+
+
+class PatchUnEmbed(nn.Module):
+    r""" Image to Patch Unembedding
+
+    Args:
+        img_size (int): Image size. Default: 224.
+        patch_size (int): Patch token size. Default: 4.
+        in_chans (int): Number of input image channels. Default: 3.
+        embed_dim (int): Number of linear projection output channels. Default: 96.
+        norm_layer (nn.Module, optional): Normalization layer. Default: None
+    """
+
+    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.patches_resolution = patches_resolution
+        self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+        self.in_chans = in_chans
+        self.embed_dim = embed_dim
+
+    def forward(self, x, x_size):
+        B, HW, C = x.shape
+        x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
+        return x
+
+
+class Upsample(nn.Sequential):
+    """Upsample module.
+
+    Args:
+        scale (int): Scale factor. Supported scales: 2^n and 3.
+        num_feat (int): Channel number of intermediate features.
+    """
+
+    def __init__(self, scale, num_feat):
+        m = []
+        if (scale & (scale - 1)) == 0:  # scale = 2^n
+            for _ in range(int(math.log(scale, 2))):
+                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
+                m.append(nn.PixelShuffle(2))
+        elif scale == 3:
+            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
+            m.append(nn.PixelShuffle(3))
+        else:
+            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
+        super(Upsample, self).__init__(*m)
+
+
+class UpsampleOneStep(nn.Sequential):
+    """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle).
+    Used in lightweight SR to save parameters.
+
+    Args:
+        scale (int): Scale factor. Supported scales: 2^n and 3.
+        num_feat (int): Channel number of intermediate features.
+    """
+
+    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
+        self.num_feat = num_feat
+        self.input_resolution = input_resolution
+        m = []
+        m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
+        m.append(nn.PixelShuffle(scale))
+        super(UpsampleOneStep, self).__init__(*m)
+
+
+@register('swinir')
+class SwinIR(nn.Module):
+    r""" SwinIR
+        A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
+
+    Args:
+        img_size (int | tuple(int)): Input image size. Default 64
+        patch_size (int | tuple(int)): Patch size. Default: 1
+        in_chans (int): Number of input image channels. Default: 3
+        embed_dim (int): Patch embedding dimension. Default: 96
+        depths (tuple(int)): Depth of each Swin Transformer layer.
+        num_heads (tuple(int)): Number of attention heads in different layers.
+        window_size (int): Window size. Default: 8
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
+        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
+        drop_rate (float): Dropout rate. Default: 0
+        attn_drop_rate (float): Attention dropout rate. Default: 0
+        drop_path_rate (float): Stochastic depth rate. Default: 0.1
+        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
+        patch_norm (bool): If True, add normalization after patch embedding. Default: True
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
+        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
+        img_range: Image range. 1. or 255.
+        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
+        resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
+    """
+
+    def __init__(self, img_size=64, patch_size=1, in_chans=4,
+                 embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6],
+                 window_size=8, mlp_ratio=2., qkv_bias=True, qk_scale=None,
+                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
+                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
+                 use_checkpoint=False, upscale=2, img_range=1., upsampler='none', resi_connection='1conv',
+                 **kwargs):
+        super(SwinIR, self).__init__()
+        num_in_ch = in_chans
+        num_out_ch = in_chans
+        num_feat = 64
+        self.img_range = img_range
+
+        self.upscale = upscale
+        self.upsampler = upsampler
+        self.window_size = window_size
+        self.out_dim = num_feat
+        #####################################################################################################
+        ################################### 1, shallow feature extraction ###################################
+        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
+
+        #####################################################################################################
+        ################################### 2, deep feature extraction ######################################
+        self.num_layers = len(depths)
+        self.embed_dim = embed_dim
+        self.ape = ape
+        self.patch_norm = patch_norm
+        self.num_features = embed_dim
+        self.mlp_ratio = mlp_ratio
+
+        # split image into non-overlapping patches
+        self.patch_embed = PatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
+            norm_layer=norm_layer if self.patch_norm else None)
+        num_patches = self.patch_embed.num_patches
+        patches_resolution = self.patch_embed.patches_resolution
+        self.patches_resolution = patches_resolution
+
+        # merge non-overlapping patches into image
+        self.patch_unembed = PatchUnEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
+            norm_layer=norm_layer if self.patch_norm else None)
+
+        # absolute position embedding
+        if self.ape:
+            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
+            trunc_normal_(self.absolute_pos_embed, std=.02)
+
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        # stochastic depth
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
+
+        # build Residual Swin Transformer blocks (RSTB)
+        self.layers = nn.ModuleList()
+        for i_layer in range(self.num_layers):
+            layer = RSTB(dim=embed_dim,
+                         input_resolution=(patches_resolution[0],
+                                           patches_resolution[1]),
+                         depth=depths[i_layer],
+                         num_heads=num_heads[i_layer],
+                         window_size=window_size,
+                         mlp_ratio=self.mlp_ratio,
+                         qkv_bias=qkv_bias, qk_scale=qk_scale,
+                         drop=drop_rate, attn_drop=attn_drop_rate,
+                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
+                         norm_layer=norm_layer,
+                         downsample=None,
+                         use_checkpoint=use_checkpoint,
+                         img_size=img_size,
+                         patch_size=patch_size,
+                         resi_connection=resi_connection)
+            self.layers.append(layer)
+        self.norm = norm_layer(self.num_features)
+
+        # build the last conv layer in deep feature extraction
+        if resi_connection == '1conv':
+            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
+        elif resi_connection == '3conv':
+            # to save parameters and memory
+            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
+                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
+                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
+
+        #####################################################################################################
+        ################################ 3, high quality image reconstruction ################################
+        if self.upsampler == 'none':
+            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
+                                                      nn.LeakyReLU(inplace=True))
+        elif self.upsampler == 'pixelshuffle':
+            # for classical SR
+            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
+                                                      nn.LeakyReLU(inplace=True))
+            self.upsample = Upsample(upscale, num_feat)
+            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
+        elif self.upsampler == 'pixelshuffledirect':
+            # for lightweight SR (to save parameters)
+            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
+                                            (patches_resolution[0], patches_resolution[1]))
+        elif self.upsampler == 'nearest+conv':
+            # for real-world SR (less artifacts)
+            assert self.upscale == 4, 'only support x4 now.'
+            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
+                                                      nn.LeakyReLU(inplace=True))
+            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+            self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
+            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
+        else:
+            # for image denoising and JPEG compression artifact reduction
+            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'absolute_pos_embed'}
+
+    @torch.jit.ignore
+    def no_weight_decay_keywords(self):
+        return {'relative_position_bias_table'}
+
+    def check_image_size(self, x):
+        _, _, h, w = x.size()
+        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
+        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
+        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
+        return x
+
+    def forward_features(self, x):
+        x_size = (x.shape[2], x.shape[3])
+        x = self.patch_embed(x)
+        if self.ape:
+            x = x + self.absolute_pos_embed
+        x = self.pos_drop(x)
+
+        for layer in self.layers:
+            x = layer(x, x_size)
+
+        x = self.norm(x)  # B L C
+        x = self.patch_unembed(x, x_size)
+
+        return x
+
+    def forward(self, x):
+        H, W = x.shape[2:]
+        x = self.check_image_size(x)
+
+        # self.mean = self.mean.type_as(x)
+        # x = (x - self.mean) * self.img_range
+
+        if self.upsampler == 'none':
+            x = self.conv_first(x)
+            x = self.conv_after_body(self.forward_features(x)) + x
+            x = self.conv_before_upsample(x)
+        elif self.upsampler == 'pixelshuffle':
+            # for classical SR
+            x = self.conv_first(x)
+            x = self.conv_after_body(self.forward_features(x)) + x
+            x = self.conv_before_upsample(x)
+            x = self.conv_last(self.upsample(x))
+        elif self.upsampler == 'pixelshuffledirect':
+            # for lightweight SR
+            x = self.conv_first(x)
+            x = self.conv_after_body(self.forward_features(x)) + x
+            x = self.upsample(x)
+        elif self.upsampler == 'nearest+conv':
+            # for real-world SR
+            x = self.conv_first(x)
+            x = self.conv_after_body(self.forward_features(x)) + x
+            x = self.conv_before_upsample(x)
+            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+            x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+            x = self.conv_last(self.lrelu(self.conv_hr(x)))
+        else:
+            # for image denoising and JPEG compression artifact reduction
+            x_first = self.conv_first(x)
+            res = self.conv_after_body(self.forward_features(x_first)) + x_first
+            x = x + self.conv_last(res)
+
+        # x = x / self.img_range + self.mean
+        return x[:, :, :H, :W]
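
A minimal smoke-test sketch for the model above (an illustrative sketch, not a committed file), assuming the class is importable as `models.swinir.SwinIR` from the `lsr_training` directory and that `in_chans=4` corresponds to SDXL latent channels. With the default `upsampler='none'`, the network returns a 64-channel feature map at the input resolution, padding internally to a multiple of `window_size`:

import torch
from models.swinir import SwinIR  # assumed import path for this checkout

model = SwinIR(img_size=48, window_size=8, upsampler='none')
latent = torch.randn(1, 4, 50, 70)   # H and W need not be multiples of 8
feat = model(latent)                 # padded to 56x72 internally, cropped back at the end
print(feat.shape)                    # torch.Size([1, 64, 50, 70])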
competitors_inference_code/LSRNA/lsr_training/utils/__init__.py ADDED
@@ -0,0 +1,8 @@
+from utils.utils_config import *
+from utils.utils_state import *
+from utils.utils_image import *
+from utils.utils_calc import *
+from utils.utils_io import *
+from utils.utils_dist import *
+from utils.utils_blindsr import *
+from utils.utils import *
competitors_inference_code/LSRNA/lsr_training/utils/utils.py ADDED
@@ -0,0 +1,127 @@
+import os, sys
+import shutil
+import time
+
+import numpy as np
+import random
+import torch
+import torch.backends.cudnn as cudnn
+
+
+def compute_num_params(model, text=False):
+    tot = int(sum([np.prod(p.shape) for p in model.parameters()]))
+    if text:
+        if tot >= 1e6:
+            return '{:.3f}M'.format(tot / 1e6)
+        elif tot >= 1e3:
+            return '{:.2f}K'.format(tot / 1e3)
+        else:
+            return '{}'.format(tot)
+    else:
+        return tot
+
+
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    os.environ["PYTHONHASHSEED"] = str(seed)
+    cudnn.benchmark = False  # slower training
+    cudnn.deterministic = True  # slower training
+
+
+class Logger:
+    def __init__(self, log_path=None):
+        self.log_path = log_path
+        self.ignore = False
+
+    def set_log_path(self, path):
+        self.log_path = path
+
+    def disable(self):
+        self.ignore = True
+
+    def log(self, obj, filename='log.txt'):
+        if not self.ignore:
+            print(obj)
+            if self.log_path is not None:
+                with open(os.path.join(self.log_path, filename), 'a') as f:
+                    print(obj, file=f)
+
+    @staticmethod
+    def ensure_path(path, remove=True):
+        basename = os.path.basename(path.rstrip('/'))
+        if os.path.exists(path):
+            if remove and (basename.startswith('_') or input('{} exists, remove? (y/[n]): '.format(path)).lower() == 'y'):
+                shutil.rmtree(path)
+                os.makedirs(path)
+        else:
+            os.makedirs(path)
+
+    def set_save_path(self, save_path, remove=True):
+        self.ensure_path(save_path, remove=remove)
+        self.set_log_path(save_path)
+        return self.log
+
+
+def make_coord(shape, ranges=None, flatten=True, device='cpu'):
+    # Make coordinates at grid centers.
+    coord_seqs = []
+    for i, n in enumerate(shape):
+        if ranges is None:
+            v0, v1 = -1, 1
+        else:
+            v0, v1 = ranges[i]
+        r = (v1 - v0) / (2 * n)
+        seq = v0 + r + (2 * r) * torch.arange(n, device=device).float()
+        coord_seqs.append(seq)
+    ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
+    if flatten:
+        ret = ret.view(-1, ret.shape[-1])
+    return ret
+
+def to_pixel_samples(img, flatten=True, device='cpu'):
+    """
+    Convert the image to coord-val pairs.
+    img: Tensor, (C, H, W)
+    """
+    assert img.ndim == 3
+    coord = make_coord(img.shape[-2:], flatten=flatten, device=device)
+    if flatten:
+        val = img.flatten(1).transpose(0, 1)
+    else:
+        val = img.permute(1, 2, 0)
+    return coord, val
+
+
+class Averager():
+    def __init__(self):
+        self.n = 0.0
+        self.v = 0.0
+
+    def add(self, v, n=1.0):
+        self.v = (self.v * self.n + v * n) / (self.n + n)
+        self.n += n
+
+    def item(self):
+        return self.v
+
+class Timer():
+    def __init__(self):
+        self.v = time.time()
+
+    def s(self):
+        self.v = time.time()
+
+    def t(self):
+        return time.time() - self.v
+
+def time_text(t):
+    if t >= 3600:
+        return '{:.1f}h'.format(t / 3600)
+    elif t >= 60:
+        return '{:.1f}m'.format(t / 60)
+    else:
+        return '{:.1f}s'.format(t)
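
A quick illustration of the coordinate helpers above (a sketch, not a committed file): `make_coord` places one (row, col) coordinate in [-1, 1] at every pixel centre, and `to_pixel_samples` pairs those coordinates with pixel values, which is the form a LIIF-style decoder consumes.

import torch

img = torch.arange(6.).view(1, 2, 3)   # one-channel 2x3 image
coord, val = to_pixel_samples(img)     # coord: (6, 2), val: (6, 1)
print(coord[0], val[0])                # first centre ~ (-0.5, -0.667), value 0.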
competitors_inference_code/LSRNA/lsr_training/utils/utils_blindsr.py ADDED
@@ -0,0 +1,301 @@
+# https://github.com/cszn/KAIR/blob/master/utils/utils_blindsr.py
+# -*- coding: utf-8 -*-
+import numpy as np
+import cv2
+import torch
+import torch.nn.functional as F
+
+import random
+from scipy import ndimage
+import scipy
+import scipy.stats as ss
+from scipy.linalg import orth
+
+
+def uint2single(img):
+    return np.float32(img/255.)
+
+def single2uint(img):
+    return np.uint8((img.clip(0, 1)*255.).round())
+
+"""
+# --------------------------------------------
+# anisotropic Gaussian kernels
+# --------------------------------------------
+"""
+def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
+    """ generate an anisotropic Gaussian kernel
+    Args:
+        ksize : e.g., 15, kernel size
+        theta : [0, pi], rotation angle range
+        l1    : [0.1, 50], scaling of eigenvalues
+        l2    : [0.1, l1], scaling of eigenvalues
+        If l1 = l2, will get an isotropic Gaussian kernel.
+
+    Returns:
+        k     : kernel
+    """
+
+    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
+    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
+    D = np.array([[l1, 0], [0, l2]])
+    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
+    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
+
+    return k
+
+
+def gm_blur_kernel(mean, cov, size=15):
+    center = size / 2.0 + 0.5
+    k = np.zeros([size, size])
+    for y in range(size):
+        for x in range(size):
+            cy = y - center + 1
+            cx = x - center + 1
+            k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
+
+    k = k / np.sum(k)
+    return k
+
+
+def fspecial_gaussian(hsize, sigma):
+    hsize = [hsize, hsize]
+    siz = [(hsize[0]-1.0)/2.0, (hsize[1]-1.0)/2.0]
+    std = sigma
+    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1]+1), np.arange(-siz[0], siz[0]+1))
+    arg = -(x*x + y*y)/(2*std*std)
+    h = np.exp(arg)
+    h[h < np.finfo(float).eps * h.max()] = 0
+    sumh = h.sum()
+    if sumh != 0:
+        h = h/sumh
+    return h
+
+
+def fspecial_laplacian(alpha):
+    alpha = max([0, min([alpha, 1])])
+    h1 = alpha/(alpha+1)
+    h2 = (1-alpha)/(alpha+1)
+    h = [[h1, h2, h1], [h2, -4/(alpha+1), h2], [h1, h2, h1]]
+    h = np.array(h)
+    return h
+
+
+def fspecial(filter_type, *args, **kwargs):
+    '''
+    python code from:
+    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
+    '''
+    if filter_type == 'gaussian':
+        return fspecial_gaussian(*args, **kwargs)
+    if filter_type == 'laplacian':
+        return fspecial_laplacian(*args, **kwargs)
+
+"""
+# --------------------------------------------
+# degradation models
+# --------------------------------------------
+"""
+
+def add_sharpening(img, weight=0.5, radius=50, threshold=10):
+    """USM sharpening. borrowed from real-ESRGAN
+    Input image: I; Blurry image: B.
+    1. K = I + weight * (I - B)
+    2. Mask = 1 if abs(I - B) > threshold, else: 0
+    3. Blur mask:
+    4. Out = Mask * K + (1 - Mask) * I
+    Args:
+        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
+        weight (float): Sharp weight. Default: 0.5.
+        radius (float): Kernel size of Gaussian blur. Default: 50.
+        threshold (int):
+    """
+    if radius % 2 == 0:
+        radius += 1
+    blur = cv2.GaussianBlur(img, (radius, radius), 0)
+    residual = img - blur
+    mask = np.abs(residual) * 255 > threshold
+    mask = mask.astype('float32')
+    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
+
+    K = img + weight * residual
+    K = np.clip(K, 0, 1)
+    return soft_mask * K + (1 - soft_mask) * img
+
+
+def torch_convolve(img, k):
+    img_tensor = torch.tensor(img, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)  # (1,3,h,w)
+    k_tensor = torch.tensor(k, dtype=torch.float32).unsqueeze(0).unsqueeze(0)  # (1,1,p,p)
+    k_tensor = k_tensor.expand(3, 1, -1, -1)  # (3,1,p,p)
+    k_height, k_width = k_tensor.shape[-2:]
+
+    pad_height = k_height // 2
+    pad_width = k_width // 2
+    img_padded = F.pad(img_tensor, (pad_width, pad_width, pad_height, pad_height), mode='reflect')
+
+    output = F.conv2d(img_padded, k_tensor, groups=3)
+    output = output.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()
+    return output
+
+def add_blur(img, sf=4):
+    wd2 = 4.0 + sf
+    wd = 2.0 + 0.2*sf
+    if random.random() < 0.5:
+        l1 = wd2*random.random()
+        l2 = wd2*random.random()
+        k = anisotropic_Gaussian(ksize=2*random.randint(2, 11)+3, theta=random.random()*np.pi, l1=l1, l2=l2)
+    else:
+        k = fspecial('gaussian', 2*random.randint(2, 11)+3, wd*random.random())
+    # img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')  # too heavy for high-resolution images
+    img = torch_convolve(img, k)
+    return img
+
+
+def add_resize(img, sf=4):
+    rnum = np.random.rand()
+    if rnum > 0.8:  # up
+        sf1 = random.uniform(1, 2)
+    elif rnum < 0.7:  # down
+        sf1 = random.uniform(0.5/sf, 1)
+    else:
+        sf1 = 1.0
+    img = cv2.resize(img, (int(sf1*img.shape[1]), int(sf1*img.shape[0])), interpolation=random.choice([1, 2, 3]))
+    img = np.clip(img, 0.0, 1.0)
+
+    return img
+
+
+def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
+    noise_level = random.randint(noise_level1, noise_level2)
+    rnum = np.random.rand()
+    if rnum > 0.6:  # add color Gaussian noise
+        img += np.random.normal(0, noise_level/255.0, img.shape).astype(np.float32)
+    elif rnum < 0.4:  # add grayscale Gaussian noise
+        img += np.random.normal(0, noise_level/255.0, (*img.shape[:2], 1)).astype(np.float32)
+    else:  # add noise
+        L = noise_level2/255.
+        D = np.diag(np.random.rand(3))
+        U = orth(np.random.rand(3, 3))
+        conv = np.dot(np.dot(np.transpose(U), D), U)
+        img += np.random.multivariate_normal([0, 0, 0], np.abs(L**2*conv), img.shape[:2]).astype(np.float32)
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_speckle_noise(img, noise_level1=2, noise_level2=25):
+    noise_level = random.randint(noise_level1, noise_level2)
+    img = np.clip(img, 0.0, 1.0)
+    rnum = random.random()
+    if rnum > 0.6:
+        img += img*np.random.normal(0, noise_level/255.0, img.shape).astype(np.float32)
+    elif rnum < 0.4:
+        img += img*np.random.normal(0, noise_level/255.0, (*img.shape[:2], 1)).astype(np.float32)
+    else:
+        L = noise_level2/255.
+        D = np.diag(np.random.rand(3))
+        U = orth(np.random.rand(3, 3))
+        conv = np.dot(np.dot(np.transpose(U), D), U)
+        img += img*np.random.multivariate_normal([0, 0, 0], np.abs(L**2*conv), img.shape[:2]).astype(np.float32)
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_Poisson_noise(img):
+    img = np.clip((img * 255.0).round(), 0, 255) / 255.
+    vals = 10**(2*random.random()+2.0)  # [2, 4]
+    if random.random() < 0.5:
+        img = np.random.poisson(img * vals).astype(np.float32) / vals
+    else:
+        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
+        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
+        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
+        img += noise_gray[:, :, np.newaxis]
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_JPEG_noise(img):
+    quality_factor = random.randint(30, 95)
+    img = cv2.cvtColor(single2uint(img), cv2.COLOR_RGB2BGR)
+    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
+    img = cv2.imdecode(encimg, 1)
+    img = cv2.cvtColor(uint2single(img), cv2.COLOR_BGR2RGB)
+    return img
+
+
+def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.1, use_sharp=True, isp_model=None):
+    """
+    This is an extended degradation model combining
+    the degradation models of BSRGAN and Real-ESRGAN
+    ----------
+    img: HxWxC, [0, 1]
+    sf: scale factor
+    shuffle_prob: probability of globally shuffling the degradation order
+    use_sharp: sharpen the img first
+
+    Returns
+    -------
+    img: low-quality patch, range: [0, 1]
+    """
+    original_h, original_w = img.shape[:2]
+    h1, w1 = img.shape[:2]
+    if use_sharp:
+        img = add_sharpening(img)
+
+    if random.random() < shuffle_prob:
+        shuffle_order = random.sample(range(13), 13)
+    else:
+        shuffle_order = list(range(13))
+        # local shuffle for noise, JPEG is always the last one
+        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
+        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
+
+    poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
+
+    for i in shuffle_order:
+        if i == 0:
+            img = add_blur(img, sf=sf)
+        elif i == 1:
+            img = add_resize(img, sf=sf)
+        elif i == 2:
+            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
+        elif i == 3:
+            if random.random() < poisson_prob:
+                img = add_Poisson_noise(img)
+        elif i == 4:
+            if random.random() < speckle_prob:
+                img = add_speckle_noise(img)
+        elif i == 5:
+            continue
+            # if random.random() < isp_prob and isp_model is not None:
+            #     with torch.no_grad():
+            #         img, hq = isp_model.forward(img.copy(), hq)
+        elif i == 6:
+            img = add_JPEG_noise(img)
+        elif i == 7:
+            img = add_blur(img, sf=sf)
+        elif i == 8:
+            img = add_resize(img, sf=sf)
+        elif i == 9:
+            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
+        elif i == 10:
+            if random.random() < poisson_prob:
+                img = add_Poisson_noise(img)
+        elif i == 11:
+            if random.random() < speckle_prob:
+                img = add_speckle_noise(img)
+        elif i == 12:
+            continue
+            # if random.random() < isp_prob and isp_model is not None:
+            #     with torch.no_grad():
+            #         img, hq = isp_model.forward(img.copy(), hq)
+        else:
+            print('check the shuffle!')
+
+    # resize to desired size
+    img = cv2.resize(img, (int(1/sf*original_w), int(1/sf*original_h)), interpolation=random.choice([1, 2, 3]))
+
+    # add final JPEG compression noise
+    img = add_JPEG_noise(img)
+    return img
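
A sketch of applying the degradation pipeline above to a single patch (illustrative, not a committed file). The input is assumed to be float32 RGB in [0, 1]; the output is the degraded counterpart at 1/sf resolution, always finished with a JPEG pass:

import numpy as np

hq = np.random.rand(256, 256, 3).astype(np.float32)  # stand-in HR patch
lq = degradation_bsrgan_plus(hq, sf=4)               # shape (64, 64, 3), range [0, 1]
print(lq.shape)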
competitors_inference_code/LSRNA/lsr_training/utils/utils_calc.py ADDED
@@ -0,0 +1,64 @@
+import numpy as np
+import torch
+from utils.utils_image import tensor2numpy
+
+# https://github.com/cszn/KAIR
+def rgb2ycbcr(img, only_y=True):
+    """same as matlab rgb2ycbcr
+    only_y: only return Y channel
+    Input: (h,w,3) np array
+        uint8, [0, 255]
+        float, [0, 1]
+    """
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # work on a float copy instead of mutating the input
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    if only_y:
+        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
+    else:
+        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
+                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
+    return rlt.astype(in_img_type)
+
+
+def psnr_measure(src, tar, y_channel=False, shave_border=0):
+    # np arrays must be 0-255, (h,w,3); tensors must be 0-1, (3,h,w)
+    if isinstance(src, torch.Tensor):
+        assert isinstance(tar, torch.Tensor)
+        if src.ndim == 4:
+            src = src.squeeze(0)
+        if tar.ndim == 4:
+            tar = tar.squeeze(0)
+        if y_channel:
+            src = tensor2numpy(src)
+            tar = tensor2numpy(tar)
+            src = rgb2ycbcr(src).astype(np.float32, copy=False)
+            tar = rgb2ycbcr(tar).astype(np.float32, copy=False)
+        else:
+            src = (src*255).clamp_(0, 255).round().permute(1, 2, 0)
+            tar = (tar*255).clamp_(0, 255).round().permute(1, 2, 0)
+    else:
+        if y_channel:
+            src = rgb2ycbcr(src)
+            tar = rgb2ycbcr(tar)
+        src = src.astype(np.float32, copy=False)
+        tar = tar.astype(np.float32, copy=False)
+    diff = tar - src
+    if shave_border > 0:
+        diff = diff[shave_border:-shave_border, shave_border:-shave_border]
+
+    if isinstance(diff, torch.Tensor):
+        err = torch.mean(torch.pow(diff, 2)).item()
+    else:
+        err = np.mean(np.power(diff, 2))
+    # if err < 0.6502:
+    #     return 50
+    # else:
+    return 10 * np.log10((255. ** 2) / err)
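
A worked example for psnr_measure (illustrative, not a committed file). For numpy inputs the function expects values in [0, 255]; a uniform error of 5 grey levels gives 10 * log10(255^2 / 25), about 34.15 dB:

import numpy as np

src = np.zeros((32, 32, 3), dtype=np.float32)
tar = src + 5.0
print(psnr_measure(src, tar))  # ~34.15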
competitors_inference_code/LSRNA/lsr_training/utils/utils_config.py ADDED
@@ -0,0 +1,12 @@
+import yaml
+import os
+
+def load_config(config_path):
+    with open(config_path, 'r') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    if not config.get('seed'):
+        config['seed'] = None
+    save_path = os.path.join('save', config_path.split('/')[-1][:-len('.yaml')])
+    config['save_path'] = save_path
+    config['resume_path'] = os.path.join(save_path, 'iter_last.pth')
+    return config
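
A usage sketch (illustrative, not a committed file), assuming it is run from the lsr_training directory so the shipped config is reachable at configs/swinir-liif-latent-sdxl-v3.yaml:

config = load_config('configs/swinir-liif-latent-sdxl-v3.yaml')
print(config['save_path'])    # save/swinir-liif-latent-sdxl-v3
print(config['resume_path'])  # save/swinir-liif-latent-sdxl-v3/iter_last.pth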
competitors_inference_code/LSRNA/lsr_training/utils/utils_dist.py ADDED
@@ -0,0 +1,202 @@
+# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py  # noqa: E501
+import functools
+import os
+import subprocess
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+import pickle
+
+
+# ----------------------------------
+# init
+# ----------------------------------
+def init_dist(launcher, backend='nccl', **kwargs):
+    if mp.get_start_method(allow_none=True) is None:
+        mp.set_start_method('spawn')
+
+    if launcher == 'pytorch':
+        _init_dist_pytorch(backend, **kwargs)
+    elif launcher == 'slurm':
+        _init_dist_slurm(backend, **kwargs)
+    else:
+        raise ValueError(f'Invalid launcher type: {launcher}')
+
+
+def _init_dist_pytorch(backend, **kwargs):
+    rank = int(os.environ['RANK'])
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(rank % num_gpus)
+    dist.init_process_group(backend=backend, **kwargs)
+
+
+def _init_dist_slurm(backend, port=None):
+    """Initialize slurm distributed training environment.
+    If argument ``port`` is not specified, then the master port will be system
+    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
+    environment variable, then a default port ``29500`` will be used.
+    Args:
+        backend (str): Backend of torch.distributed.
+        port (int, optional): Master port. Defaults to None.
+    """
+    proc_id = int(os.environ['SLURM_PROCID'])
+    ntasks = int(os.environ['SLURM_NTASKS'])
+    node_list = os.environ['SLURM_NODELIST']
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(proc_id % num_gpus)
+    addr = subprocess.getoutput(
+        f'scontrol show hostname {node_list} | head -n1')
+    # specify master port
+    if port is not None:
+        os.environ['MASTER_PORT'] = str(port)
+    elif 'MASTER_PORT' in os.environ:
+        pass  # use MASTER_PORT in the environment variable
+    else:
+        # 29500 is torch.distributed default port
+        os.environ['MASTER_PORT'] = '29500'
+    os.environ['MASTER_ADDR'] = addr
+    os.environ['WORLD_SIZE'] = str(ntasks)
+    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
+    os.environ['RANK'] = str(proc_id)
+    dist.init_process_group(backend=backend)
+
+
+# ----------------------------------
+# get rank and world_size
+# ----------------------------------
+def get_dist_info():
+    if dist.is_available():
+        initialized = dist.is_initialized()
+    else:
+        initialized = False
+    if initialized:
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+    else:
+        rank = 0
+        world_size = 1
+    return rank, world_size
+
+
+def get_rank():
+    if not dist.is_available():
+        return 0
+
+    if not dist.is_initialized():
+        return 0
+
+    return dist.get_rank()
+
+
+def get_world_size():
+    if not dist.is_available():
+        return 1
+
+    if not dist.is_initialized():
+        return 1
+
+    return dist.get_world_size()
+
+
+def master_only(func):
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        rank, _ = get_dist_info()
+        if rank == 0:
+            return func(*args, **kwargs)
+
+    return wrapper
+
+
+# ----------------------------------
+# operation across ranks
+# ----------------------------------
+def reduce_sum(tensor):
+    if not dist.is_available():
+        return tensor
+
+    if not dist.is_initialized():
+        return tensor
+
+    tensor = tensor.clone()
+    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
+
+    return tensor
+
+
+def gather_grad(params):
+    world_size = get_world_size()
+
+    if world_size == 1:
+        return
+
+    for param in params:
+        if param.grad is not None:
+            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
+            param.grad.data.div_(world_size)
+
+
+def all_gather(data):
+    world_size = get_world_size()
+
+    if world_size == 1:
+        return [data]
+
+    buffer = pickle.dumps(data)
+    storage = torch.ByteStorage.from_buffer(buffer)
+    tensor = torch.ByteTensor(storage).to('cuda')
+
+    local_size = torch.IntTensor([tensor.numel()]).to('cuda')
+    size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
+    dist.all_gather(size_list, local_size)
+    size_list = [int(size.item()) for size in size_list]
+    max_size = max(size_list)
+
+    tensor_list = []
+    for _ in size_list:
+        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
+
+    if local_size != max_size:
+        padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
+        tensor = torch.cat((tensor, padding), 0)
+
+    dist.all_gather(tensor_list, tensor)
+
+    data_list = []
+
+    for size, tensor in zip(size_list, tensor_list):
+        buffer = tensor.cpu().numpy().tobytes()[:size]
+        data_list.append(pickle.loads(buffer))
+
+    return data_list
+
+
+def reduce_loss_dict(loss_dict):
+    world_size = get_world_size()
+
+    if world_size < 2:
+        return loss_dict
+
+    with torch.no_grad():
+        keys = []
+        losses = []
+
+        for k in sorted(loss_dict.keys()):
+            keys.append(k)
+            losses.append(loss_dict[k])
+
+        losses = torch.stack(losses, 0)
+        dist.reduce(losses, dst=0)
+
+        if dist.get_rank() == 0:
+            losses /= world_size
+
+        reduced_losses = {k: v for k, v in zip(keys, losses)}
+
+    return reduced_losses
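
A sketch of how the rank helpers above behave (illustrative, not a committed file): without an initialized process group they fall back to rank 0 / world size 1, so master_only-decorated functions still run in plain single-GPU scripts.

@master_only
def log_from_master(msg):
    print(msg)

rank, world_size = get_dist_info()  # (0, 1) when torch.distributed is uninitialized
log_from_master(f'rank {rank} / world size {world_size}')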
competitors_inference_code/LSRNA/lsr_training/utils/utils_image.py ADDED
@@ -0,0 +1,110 @@
+import os
+from os import path as osp
+import numpy as np
+from PIL import Image
+from tqdm import tqdm
+
+import torch
+import torch.nn.functional as F
+from torchvision import transforms
+
+IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
+
+
+def get_resolution(img_path):
+    img = Image.open(img_path).convert('RGB')
+    w, h = img.size
+    return (h, w)
+
+def get_image_np(img_path):
+    img = Image.open(img_path).convert('RGB')
+    return np.array(img)
+
+def get_image_tensor(img_path):
+    img = Image.open(img_path).convert('RGB')
+    return transforms.ToTensor()(img)
+
+
+def get_resolutions(folder_path):
+    assert os.path.isdir(folder_path), '{:s} is not a valid directory'.format(folder_path)
+    resols = []
+    for fname in tqdm(sorted(os.listdir(folder_path))):
+        if any(fname.endswith(extension) for extension in IMG_EXTENSIONS):
+            img_path = os.path.join(folder_path, fname)
+            resols.append(get_resolution(img_path))
+    return resols
+
+def get_images_np(folder_path):
+    assert os.path.isdir(folder_path), '{:s} is not a valid directory'.format(folder_path)
+    imgs = []
+    for fname in tqdm(sorted(os.listdir(folder_path))):
+        if any(fname.endswith(extension) for extension in IMG_EXTENSIONS):
+            img_path = os.path.join(folder_path, fname)
+            img = get_image_np(img_path)
+            imgs.append(img)
+    return imgs
+
+def get_images_tensor(folder_path):
+    assert os.path.isdir(folder_path), '{:s} is not a valid directory'.format(folder_path)
+    imgs = []
+    for fname in tqdm(sorted(os.listdir(folder_path))):
+        if any(fname.endswith(extension) for extension in IMG_EXTENSIONS):
+            img_path = os.path.join(folder_path, fname)
+            img = get_image_tensor(img_path)
+            imgs.append(img)
+    return imgs
+
+
+def read_img(img_path):
+    if img_path.split('.')[-1] == 'npy':
+        img = np.load(img_path)
+    else:
+        img = np.array(Image.open(img_path).convert('RGB')) / 255.
+    return img
+
+def random_crop(img, size, return_pos=False):
+    assert img.ndim == 3
+    if img.shape[0] in [3, 4, 8] and img.shape[0] < img.shape[1]:  # (c,h,w)
+        x0 = np.random.randint(0, img.shape[1]-size+1)
+        y0 = np.random.randint(0, img.shape[2]-size+1)
+        img = img[:, x0: x0+size, y0: y0+size]
+    else:  # (h,w,c)
+        x0 = np.random.randint(0, img.shape[0]-size+1)
+        y0 = np.random.randint(0, img.shape[1]-size+1)
+        img = img[x0: x0+size, y0: y0+size, :]
+    if return_pos:
+        return img, (x0, y0)
+    else:
+        return img
+
+def random_crop_together(hr, lr, lsize, return_pos=False):
+    # img: (h,w,c), range independent
+    assert lr.shape[0] > lr.shape[-1]
+    s = hr.shape[0] // lr.shape[0]
+    x0 = np.random.randint(0, lr.shape[0]-lsize+1)
+    y0 = np.random.randint(0, lr.shape[1]-lsize+1)
+    lr = lr[x0: x0+lsize, y0: y0+lsize, :]
+    hr = hr[x0*s: (x0+lsize)*s, y0*s: (y0+lsize)*s, :]
+    if return_pos:
+        return hr, lr, (x0, y0)
+    else:
+        return hr, lr
+
+def center_crop(img, size):
+    # img: (h,w,3), range independent
+    h, w = img.shape[:2]
+    cut_h, cut_w = h-size[0], w-size[1]
+
+    lh = cut_h // 2
+    rh = h - (cut_h - lh)
+    lw = cut_w // 2
+    rw = w - (cut_w - lw)
+
+    img = img[lh:rh, lw:rw, :]
+    return img
+
+def tensor2numpy(tensor, rgb_range=1.):
+    rgb_coefficient = 255 / rgb_range
+    img = tensor.mul(rgb_coefficient).clamp(0, 255).round()
+    img = img[0].data if img.ndim == 4 else img.data
+    img = np.transpose(img.cpu().numpy(), (1, 2, 0)).astype(np.uint8)
+    return img
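
A sketch for random_crop_together (illustrative, not a committed file): it crops an aligned HR/LR pair, scaling the crop window by the integer ratio of the two heights.

import numpy as np

hr = np.zeros((128, 128, 3))
lr = np.zeros((32, 32, 3))
hr_patch, lr_patch = random_crop_together(hr, lr, lsize=16)
print(hr_patch.shape, lr_patch.shape)  # (64, 64, 3) (16, 16, 3)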
competitors_inference_code/LSRNA/lsr_training/utils/utils_io.py ADDED
@@ -0,0 +1,493 @@
1
+ #https://github.com/cszn/KAIR
2
+ import os
3
+ import cv2
4
+ import numpy as np
5
+ import torch
6
+ import random
7
+ from os import path as osp
8
+ from torch.nn import functional as F
9
+ from abc import ABCMeta, abstractmethod
10
+
11
+
12
+ def scandir(dir_path, suffix=None, recursive=False, full_path=False):
13
+ """Scan a directory to find the interested files.
14
+
15
+ Args:
16
+ dir_path (str): Path of the directory.
17
+ suffix (str | tuple(str), optional): File suffix that we are
18
+ interested in. Default: None.
19
+ recursive (bool, optional): If set to True, recursively scan the
20
+ directory. Default: False.
21
+ full_path (bool, optional): If set to True, include the dir_path.
22
+ Default: False.
23
+
24
+ Returns:
25
+ A generator for all the interested files with relative paths.
26
+ """
27
+
28
+ if (suffix is not None) and not isinstance(suffix, (str, tuple)):
29
+ raise TypeError('"suffix" must be a string or tuple of strings')
30
+
31
+ root = dir_path
32
+
33
+ def _scandir(dir_path, suffix, recursive):
34
+ for entry in os.scandir(dir_path):
35
+ if not entry.name.startswith('.') and entry.is_file():
36
+ if full_path:
37
+ return_path = entry.path
38
+ else:
39
+ return_path = osp.relpath(entry.path, root)
40
+
41
+ if suffix is None:
42
+ yield return_path
43
+ elif return_path.endswith(suffix):
44
+ yield return_path
45
+ else:
46
+ if recursive:
47
+ yield from _scandir(entry.path, suffix=suffix, recursive=recursive)
48
+ else:
49
+ continue
50
+
51
+ return _scandir(dir_path, suffix=suffix, recursive=recursive)
52
+
53
+
54
+ def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False):
55
+ """Read a sequence of images from a given folder path.
56
+
57
+ Args:
58
+ path (list[str] | str): List of image paths or image folder path.
59
+ require_mod_crop (bool): Require mod crop for each image.
60
+ Default: False.
61
+ scale (int): Scale factor for mod_crop. Default: 1.
62
+ return_imgname(bool): Whether return image names. Default False.
63
+
64
+ Returns:
65
+ Tensor: size (t, c, h, w), RGB, [0, 1].
66
+ list[str]: Returned image name list.
67
+ """
68
+ if isinstance(path, list):
69
+ img_paths = path
70
+ else:
71
+ img_paths = sorted(list(scandir(path, full_path=True)))
72
+ imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths]
73
+
74
+ if require_mod_crop:
75
+ imgs = [mod_crop(img, scale) for img in imgs]
76
+ imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
77
+ imgs = torch.stack(imgs, dim=0)
78
+
79
+ if return_imgname:
80
+ imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths]
81
+ return imgs, imgnames
82
+ else:
83
+ return imgs
84
+
85
+
86
+ def img2tensor(imgs, bgr2rgb=True, float32=True):
87
+ """Numpy array to tensor.
88
+
89
+ Args:
90
+ imgs (list[ndarray] | ndarray): Input images.
91
+ bgr2rgb (bool): Whether to change bgr to rgb.
92
+ float32 (bool): Whether to change to float32.
93
+
94
+ Returns:
95
+ list[tensor] | tensor: Tensor images. If returned results only have
96
+ one element, just return tensor.
97
+ """
98
+
99
+ def _totensor(img, bgr2rgb, float32):
100
+ if img.shape[2] == 3 and bgr2rgb:
101
+ if img.dtype == 'float64':
102
+ img = img.astype('float32')
103
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
104
+ img = torch.from_numpy(img.transpose(2, 0, 1))
105
+ if float32:
106
+ img = img.float()
107
+ return img
108
+
109
+ if isinstance(imgs, list):
110
+ return [_totensor(img, bgr2rgb, float32) for img in imgs]
111
+ else:
112
+ return _totensor(imgs, bgr2rgb, float32)
113
+
114
+
115
+ def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
116
+ """Convert torch Tensors into image numpy arrays.
117
+
118
+ After clamping to [min, max], values will be normalized to [0, 1].
119
+
120
+ Args:
121
+ tensor (Tensor or list[Tensor]): Accept shapes:
122
+ 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
123
+ 2) 3D Tensor of shape (3/1 x H x W);
124
+ 3) 2D Tensor of shape (H x W).
125
+ Tensor channel should be in RGB order.
126
+ rgb2bgr (bool): Whether to change rgb to bgr.
127
+ out_type (numpy type): output types. If ``np.uint8``, transform outputs
128
+ to uint8 type with range [0, 255]; otherwise, float type with
129
+ range [0, 1]. Default: ``np.uint8``.
130
+ min_max (tuple[int]): min and max values for clamp.
131
+
132
+ Returns:
133
+ (ndarray or list[ndarray]): 3D ndarray of shape (H x W x C) OR 2D ndarray of
134
+ shape (H x W). The channel order is BGR.
135
+ """
136
+ if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
137
+ raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
138
+
139
+ if torch.is_tensor(tensor):
140
+ tensor = [tensor]
141
+ result = []
142
+ for _tensor in tensor:
143
+ _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
144
+ _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
145
+
146
+ n_dim = _tensor.dim()
147
+ if n_dim == 4:
148
+ img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
149
+ img_np = img_np.transpose(1, 2, 0)
150
+ if rgb2bgr:
151
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
152
+ elif n_dim == 3:
153
+ img_np = _tensor.numpy()
154
+ img_np = img_np.transpose(1, 2, 0)
155
+ if img_np.shape[2] == 1: # gray image
156
+ img_np = np.squeeze(img_np, axis=2)
157
+ else:
158
+ if rgb2bgr:
159
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
160
+ elif n_dim == 2:
161
+ img_np = _tensor.numpy()
162
+ else:
163
+ raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')
164
+ if out_type == np.uint8:
165
+ # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
166
+ img_np = (img_np * 255.0).round()
167
+ img_np = img_np.astype(out_type)
168
+ result.append(img_np)
169
+ if len(result) == 1:
170
+ result = result[0]
171
+ return result
172
+
173
+
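+ # Round-trip sketch for the two converters above (shapes illustrative):
+ #   img = np.random.rand(64, 64, 3).astype(np.float32)    # BGR, [0, 1]
+ #   t = img2tensor(img, bgr2rgb=True, float32=True)       # (3, 64, 64), RGB
+ #   back = tensor2img(t, rgb2bgr=True, out_type=np.uint8) # (64, 64, 3), BGR uint8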
174
+ def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
175
+ """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
176
+
177
+ Rotation is implemented as a vertical flip followed by a transpose.
178
+ All the images in the list use the same augmentation.
179
+
180
+ Args:
181
+ imgs (list[ndarray] | ndarray): Images to be augmented. If the input
182
+ is an ndarray, it will be transformed to a list.
183
+ hflip (bool): Horizontal flip. Default: True.
184
+ rotation (bool): Rotation. Default: True.
185
+ flows (list[ndarray], optional): Flows to be augmented. If the input is an
186
+ ndarray, it will be transformed to a list.
187
+ Dimension is (h, w, 2). Default: None.
188
+ return_status (bool): Return the status of flip and rotation.
189
+ Default: False.
190
+
191
+ Returns:
192
+ list[ndarray] | ndarray: Augmented images and flows. If returned
193
+ results only have one element, just return ndarray.
194
+
195
+ """
196
+ hflip = hflip and random.random() < 0.5
197
+ vflip = rotation and random.random() < 0.5
198
+ rot90 = rotation and random.random() < 0.5
199
+
200
+ def _augment(img):
201
+ if hflip: # horizontal
202
+ cv2.flip(img, 1, img)
203
+ if vflip: # vertical
204
+ cv2.flip(img, 0, img)
205
+ if rot90:
206
+ img = img.transpose(1, 0, 2)
207
+ return img
208
+
209
+ def _augment_flow(flow):
210
+ if hflip: # horizontal
211
+ cv2.flip(flow, 1, flow)
212
+ flow[:, :, 0] *= -1
213
+ if vflip: # vertical
214
+ cv2.flip(flow, 0, flow)
215
+ flow[:, :, 1] *= -1
216
+ if rot90:
217
+ flow = flow.transpose(1, 0, 2)
218
+ flow = flow[:, :, [1, 0]]
219
+ return flow
220
+
221
+ if not isinstance(imgs, list):
222
+ imgs = [imgs]
223
+ imgs = [_augment(img) for img in imgs]
224
+ if len(imgs) == 1:
225
+ imgs = imgs[0]
226
+
227
+ if flows is not None:
228
+ if not isinstance(flows, list):
229
+ flows = [flows]
230
+ flows = [_augment_flow(flow) for flow in flows]
231
+ if len(flows) == 1:
232
+ flows = flows[0]
233
+ return imgs, flows
234
+ else:
235
+ if return_status:
236
+ return imgs, (hflip, vflip, rot90)
237
+ else:
238
+ return imgs
239
+
240
+
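+ # Usage sketch for `augment` above: one shared random flip/rotation is applied
+ # to every image in the list, and consistently to optical flow when given.
+ #   (lq, gt), flow = augment([lq, gt], hflip=True, rotation=True, flows=flow)
+ #   imgs, (hflip, vflip, rot90) = augment(imgs, return_status=True)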
241
+ def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None):
242
+ """Paired random crop. Support Numpy array and Tensor inputs.
243
+
244
+ It crops lists of lq and gt images with corresponding locations.
245
+
246
+ Args:
247
+ img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images
248
+ should have the same shape. If the input is an ndarray, it will
249
+ be transformed to a list containing itself.
250
+ img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
251
+ should have the same shape. If the input is an ndarray, it will
252
+ be transformed to a list containing itself.
253
+ gt_patch_size (int): GT patch size.
254
+ scale (int): Scale factor.
255
+ gt_path (str): Path to ground-truth. Default: None.
256
+
257
+ Returns:
258
+ list[ndarray] | ndarray: GT images and LQ images. If returned results
259
+ only have one element, just return ndarray.
260
+ """
261
+
262
+ if not isinstance(img_gts, list):
263
+ img_gts = [img_gts]
264
+ if not isinstance(img_lqs, list):
265
+ img_lqs = [img_lqs]
266
+
267
+ # determine input type: Numpy array or Tensor
268
+ input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'
269
+
270
+ if input_type == 'Tensor':
271
+ h_lq, w_lq = img_lqs[0].size()[-2:]
272
+ h_gt, w_gt = img_gts[0].size()[-2:]
273
+ else:
274
+ h_lq, w_lq = img_lqs[0].shape[0:2]
275
+ h_gt, w_gt = img_gts[0].shape[0:2]
276
+ lq_patch_size = gt_patch_size // scale
277
+
278
+ if h_gt != h_lq * scale or w_gt != w_lq * scale:
279
+ raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
280
+ f'multiplication of LQ ({h_lq}, {w_lq}).')
281
+ if h_lq < lq_patch_size or w_lq < lq_patch_size:
282
+ raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
283
+ f'({lq_patch_size}, {lq_patch_size}). '
284
+ f'Please remove {gt_path}.')
285
+
286
+ # randomly choose top and left coordinates for lq patch
287
+ top = random.randint(0, h_lq - lq_patch_size)
288
+ left = random.randint(0, w_lq - lq_patch_size)
289
+
290
+ # crop lq patch
291
+ if input_type == 'Tensor':
292
+ img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]
293
+ else:
294
+ img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]
295
+
296
+ # crop corresponding gt patch
297
+ top_gt, left_gt = int(top * scale), int(left * scale)
298
+ if input_type == 'Tensor':
299
+ img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]
300
+ else:
301
+ img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]
302
+ if len(img_gts) == 1:
303
+ img_gts = img_gts[0]
304
+ if len(img_lqs) == 1:
305
+ img_lqs = img_lqs[0]
306
+ return img_gts, img_lqs
307
+
308
+
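+ # Usage sketch for `paired_random_crop` above, for 4x SR (shapes illustrative):
+ #   gt = np.zeros((480, 480, 3), np.float32)
+ #   lq = np.zeros((120, 120, 3), np.float32)
+ #   gt_patch, lq_patch = paired_random_crop(gt, lq, gt_patch_size=256, scale=4)
+ #   # gt_patch: (256, 256, 3), spatially aligned with lq_patch: (64, 64, 3)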
309
+ # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501
310
+ class BaseStorageBackend(metaclass=ABCMeta):
311
+ """Abstract class of storage backends.
312
+
313
+ All backends need to implement two apis: ``get()`` and ``get_text()``.
314
+ ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
315
+ as texts.
316
+ """
317
+
318
+ @abstractmethod
319
+ def get(self, filepath):
320
+ pass
321
+
322
+ @abstractmethod
323
+ def get_text(self, filepath):
324
+ pass
325
+
326
+
327
+ class MemcachedBackend(BaseStorageBackend):
328
+ """Memcached storage backend.
329
+
330
+ Attributes:
331
+ server_list_cfg (str): Config file for memcached server list.
332
+ client_cfg (str): Config file for memcached client.
333
+ sys_path (str | None): Additional path to be appended to `sys.path`.
334
+ Default: None.
335
+ """
336
+
337
+ def __init__(self, server_list_cfg, client_cfg, sys_path=None):
338
+ if sys_path is not None:
339
+ import sys
340
+ sys.path.append(sys_path)
341
+ try:
342
+ import mc
343
+ except ImportError:
344
+ raise ImportError('Please install memcached to enable MemcachedBackend.')
345
+
346
+ self.server_list_cfg = server_list_cfg
347
+ self.client_cfg = client_cfg
348
+ self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg)
349
+ # mc.pyvector serves as a pointer to a memory cache
350
+ self._mc_buffer = mc.pyvector()
351
+
352
+ def get(self, filepath):
353
+ filepath = str(filepath)
354
+ import mc
355
+ self._client.Get(filepath, self._mc_buffer)
356
+ value_buf = mc.ConvertBuffer(self._mc_buffer)
357
+ return value_buf
358
+
359
+ def get_text(self, filepath):
360
+ raise NotImplementedError
361
+
362
+
363
+ class HardDiskBackend(BaseStorageBackend):
364
+ """Raw hard disks storage backend."""
365
+
366
+ def get(self, filepath):
367
+ filepath = str(filepath)
368
+ with open(filepath, 'rb') as f:
369
+ value_buf = f.read()
370
+ return value_buf
371
+
372
+ def get_text(self, filepath):
373
+ filepath = str(filepath)
374
+ with open(filepath, 'r') as f:
375
+ value_buf = f.read()
376
+ return value_buf
377
+
378
+
379
+ class LmdbBackend(BaseStorageBackend):
380
+ """Lmdb storage backend.
381
+
382
+ Args:
383
+ db_paths (str | list[str]): Lmdb database paths.
384
+ client_keys (str | list[str]): Lmdb client keys. Default: 'default'.
385
+ readonly (bool, optional): Lmdb environment parameter. If True,
386
+ disallow any write operations. Default: True.
387
+ lock (bool, optional): Lmdb environment parameter. If False, when
388
+ concurrent access occurs, do not lock the database. Default: False.
389
+ readahead (bool, optional): Lmdb environment parameter. If False,
390
+ disable the OS filesystem readahead mechanism, which may improve
391
+ random read performance when a database is larger than RAM.
392
+ Default: False.
393
+
394
+ Attributes:
395
+ db_paths (list): Lmdb database path.
396
+ _client (list): A list of several lmdb envs.
397
+ """
398
+
399
+ def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs):
400
+ try:
401
+ import lmdb
402
+ except ImportError:
403
+ raise ImportError('Please install lmdb to enable LmdbBackend.')
404
+
405
+ if isinstance(client_keys, str):
406
+ client_keys = [client_keys]
407
+
408
+ if isinstance(db_paths, list):
409
+ self.db_paths = [str(v) for v in db_paths]
410
+ elif isinstance(db_paths, str):
411
+ self.db_paths = [str(db_paths)]
412
+ assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, '
413
+ f'but received {len(client_keys)} and {len(self.db_paths)}.')
414
+
415
+ self._client = {}
416
+ for client, path in zip(client_keys, self.db_paths):
417
+ self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs)
418
+
419
+ def get(self, filepath, client_key):
420
+ """Get values according to the filepath from one lmdb named client_key.
421
+
422
+ Args:
423
+ filepath (str | obj:`Path`): Here, filepath is the lmdb key.
424
+ client_key (str): Used for distinguishing different lmdb envs.
425
+ """
426
+ filepath = str(filepath)
427
+ assert client_key in self._client, (f'client_key {client_key} is not ' 'in lmdb clients.')
428
+ client = self._client[client_key]
429
+ with client.begin(write=False) as txn:
430
+ value_buf = txn.get(filepath.encode('ascii'))
431
+ return value_buf
432
+
433
+ def get_text(self, filepath):
434
+ raise NotImplementedError
435
+
436
+
437
+ class FileClient(object):
438
+ """A general file client to access files in different backend.
439
+
440
+ The client loads a file or text from its path via the specified backend
441
+ and returns it as a binary buffer. It can also register other backend
442
+ accessors with a given name and backend class.
443
+
444
+ Attributes:
445
+ backend (str): The storage backend type. Options are "disk",
446
+ "memcached" and "lmdb".
447
+ client (:obj:`BaseStorageBackend`): The backend object.
448
+ """
449
+
450
+ _backends = {
451
+ 'disk': HardDiskBackend,
452
+ 'memcached': MemcachedBackend,
453
+ 'lmdb': LmdbBackend,
454
+ }
455
+
456
+ def __init__(self, backend='disk', **kwargs):
457
+ if backend not in self._backends:
458
+ raise ValueError(f'Backend {backend} is not supported. Currently supported ones'
459
+ f' are {list(self._backends.keys())}')
460
+ self.backend = backend
461
+ self.client = self._backends[backend](**kwargs)
462
+
463
+ def get(self, filepath, client_key='default'):
464
+ # client_key is used only for lmdb, where different fileclients have
465
+ # different lmdb environments.
466
+ if self.backend == 'lmdb':
467
+ return self.client.get(filepath, client_key)
468
+ else:
469
+ return self.client.get(filepath)
470
+
471
+ def get_text(self, filepath):
472
+ return self.client.get_text(filepath)
473
+
474
+
475
+ def imfrombytes(content, flag='color', float32=False):
476
+ """Read an image from bytes.
477
+
478
+ Args:
479
+ content (bytes): Image bytes got from files or other streams.
480
+ flag (str): Flags specifying the color type of a loaded image,
481
+ candidates are `color`, `grayscale` and `unchanged`.
482
+ float32 (bool): Whether to convert to float32. If True, values are also
483
+ normalized to [0, 1]. Default: False.
484
+
485
+ Returns:
486
+ ndarray: Loaded image array.
487
+ """
488
+ img_np = np.frombuffer(content, np.uint8)
489
+ imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}
490
+ img = cv2.imdecode(img_np, imread_flags[flag])
491
+ if float32:
492
+ img = img.astype(np.float32) / 255.
493
+ return img
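+
+
+ # End-to-end read sketch (path is illustrative): fetch raw bytes through a
+ # storage backend, then decode them to a float32 BGR image in [0, 1].
+ #   client = FileClient(backend='disk')
+ #   img = imfrombytes(client.get('datasets/DIV2K_HR/0001.png'), float32=True)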
competitors_inference_code/LSRNA/lsr_training/utils/utils_state.py ADDED
@@ -0,0 +1,57 @@
1
+ import math
2
+ import torch
3
+ from torch.optim import Adam
4
+ from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR
5
+
6
+
7
+ # https://github.com/XPixelGroup/ClassSR
8
+ class CosineAnnealingLR_Restart(_LRScheduler):
9
+ def __init__(self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1):
10
+ self.T_period = T_period
11
+ self.T_max = self.T_period[0] # current T period
12
+ self.eta_min = eta_min
13
+ self.restarts = restarts if restarts else [0]
14
+ self.restarts = [v + 1 for v in self.restarts]
15
+ self.restart_weights = weights if weights else [1]
16
+ self.last_restart = 0
17
+ assert len(self.restarts) == len(
18
+ self.restart_weights), 'restarts and their weights do not match.'
19
+ super(CosineAnnealingLR_Restart, self).__init__(optimizer, last_epoch)
20
+
21
+ def get_lr(self):
22
+ if self.last_epoch == 0:
23
+ return self.base_lrs
24
+ elif self.last_epoch in self.restarts:
25
+ self.last_restart = self.last_epoch
26
+ if self.restarts.index(self.last_epoch) + 1 == len(self.T_period):
27
+ print('Already trained.')
28
+ exit()
29
+ self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]
30
+ weight = self.restart_weights[self.restarts.index(self.last_epoch)]
31
+ return [group['initial_lr'] * weight for group in self.optimizer.param_groups]
32
+ elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (2 * self.T_max) == 0:
33
+ return [
34
+ group['lr'] + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2
35
+ for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
36
+ ]
37
+ return [(1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max)) /
38
+ (1 + math.cos(math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max)) *
39
+ (group['lr'] - self.eta_min) + self.eta_min
40
+ for group in self.optimizer.param_groups]
41
+
42
+
43
+ def make_optim_sched(param_list, optimizer_spec, lr_scheduler_spec, load_sd=False):
44
+ Optimizer = {
45
+ 'adam': Adam
46
+ }[optimizer_spec['name']]
47
+ Scheduler = {
48
+ 'CosineAnnealingLR_Restart': CosineAnnealingLR_Restart,
49
+ 'CosineAnnealingLR': CosineAnnealingLR
50
+ }[lr_scheduler_spec['name']]
51
+
52
+ optimizer = Optimizer(param_list, **optimizer_spec['args'])
53
+ lr_scheduler = Scheduler(optimizer, **lr_scheduler_spec['args'])
54
+ if load_sd:  # load both state_dicts only after optimizer and scheduler are initialized
55
+ optimizer.load_state_dict(optimizer_spec['sd'])
56
+ lr_scheduler.load_state_dict(lr_scheduler_spec['sd'])
57
+ return optimizer, lr_scheduler
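+
+
+ # Usage sketch (spec values illustrative; `model` is a hypothetical nn.Module):
+ #   optim_spec = {'name': 'adam', 'args': {'lr': 1e-4}}
+ #   sched_spec = {'name': 'CosineAnnealingLR', 'args': {'T_max': 1000}}
+ #   optimizer, scheduler = make_optim_sched(list(model.parameters()), optim_spec, sched_spec)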
competitors_inference_code/LSRNA/main.py ADDED
@@ -0,0 +1,65 @@
1
+ import os
2
+ import argparse
3
+ import random
4
+ import numpy as np
5
+ import torch
6
+
7
+ from diffusers import DDIMScheduler
8
+ from pipeline_lsrna_demofusion_sdxl import DemoFusionLSRNASDXLPipeline
9
+
10
+
11
+ def main():
12
+ parser = argparse.ArgumentParser()
13
+ parser.add_argument('--prompt', type=str, required=True)
14
+ parser.add_argument('--negative_prompt', type=str)
15
+ parser.add_argument('--height', type=int, default=2048, help='target height')
16
+ parser.add_argument('--width', type=int, default=2048, help='target width')
17
+ parser.add_argument('--seed', type=int)
18
+ parser.add_argument('--lsr_path', type=str, default='lsr/checkpoints/swinir-liif-latent-sdxl.pth')
19
+ parser.add_argument('--rna_min_std', type=float, default=0.0)
20
+ parser.add_argument('--rna_max_std', type=float, default=1.2)
21
+ parser.add_argument('--inversion_depth', type=int, default=30)
22
+ parser.add_argument('--save_dir', type=str, default='results')
23
+ parser.add_argument('--low_vram', action='store_true')
24
+ args = parser.parse_args()
25
+
26
+ # load pipeline
27
+ model_ckpt = 'stabilityai/stable-diffusion-xl-base-1.0'
28
+ scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
29
+ pipe = DemoFusionLSRNASDXLPipeline.from_pretrained(model_ckpt, scheduler=scheduler, torch_dtype=torch.float16).to('cuda')
30
+ pipe.vae.enable_tiling()
31
+
32
+ # fix seed
33
+ if args.seed is not None:
34
+ seed = args.seed
35
+ random.seed(seed)
36
+ np.random.seed(seed)
37
+ torch.manual_seed(seed)
38
+ torch.cuda.manual_seed_all(seed)
39
+ torch.backends.cudnn.deterministic = True
40
+ torch.backends.cudnn.benchmark = False
41
+
42
+ # generate image (with default setting of DemoFusion)
43
+ images = pipe(
44
+ args.prompt,
45
+ negative_prompt=args.negative_prompt,
46
+ height=args.height, width=args.width,
47
+ view_batch_size=8,
48
+ stride_ratio=0.5, # 1-overlap_ratio
49
+ lsr_path=args.lsr_path,
50
+ cosine_scale_1=3,
51
+ cosine_scale_2=1,
52
+ cosine_scale_3=1,
53
+ sigma=0.8,
54
+ rna_min_std=args.rna_min_std,
55
+ rna_max_std=args.rna_max_std,
56
+ inversion_depth=args.inversion_depth,
57
+ low_vram=args.low_vram
58
+ )
59
+ os.makedirs(args.save_dir, exist_ok=True)
60
+ images[0].save(os.path.join(args.save_dir, 'ref.png'))
61
+ images[1].save(os.path.join(args.save_dir, 'trg.png'))
62
+
63
+
64
+ if __name__ == '__main__':
65
+ main()
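+
+ # Example invocation (argument values are illustrative):
+ #   python main.py --prompt "a photo of a medieval castle" \
+ #       --height 2048 --width 2048 --seed 0 --save_dir results --low_vram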
competitors_inference_code/LSRNA/pipeline_lsrna_demofusion_sdxl.py ADDED
@@ -0,0 +1,1296 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Modified from https://github.com/PRIS-CV/DemoFusion/blob/main/pipeline_demofusion_sdxl.py
16
+ import warnings
17
+ warnings.filterwarnings("ignore")
18
+
19
+ import os
20
+ import random
21
+ import numpy as np
22
+ import torch
23
+ import torch.nn.functional as F
24
+
25
+ import inspect
26
+ import functools
27
+ import operator
28
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
29
+ import matplotlib.pyplot as plt
30
+ from PIL import Image
31
+ from tqdm import tqdm
32
+
33
+ import lsr  # local latent super-resolution (LSR) package
34
+ from utils import *  # local helper utilities
35
+
36
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
37
+ from diffusers.image_processor import VaeImageProcessor
38
+ from diffusers.loaders import (
39
+ FromSingleFileMixin,
40
+ LoraLoaderMixin,
41
+ TextualInversionLoaderMixin,
42
+ )
43
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
44
+ from diffusers.models.attention_processor import (
45
+ AttnProcessor2_0,
46
+ LoRAAttnProcessor2_0,
47
+ LoRAXFormersAttnProcessor,
48
+ XFormersAttnProcessor,
49
+ )
50
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
51
+ from diffusers.schedulers import KarrasDiffusionSchedulers
52
+ from diffusers.utils import (
53
+ is_accelerate_available,
54
+ is_accelerate_version,
55
+ logging,
56
+ )
57
+ from diffusers.utils.torch_utils import randn_tensor
58
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
59
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
60
+
61
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
62
+
63
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
64
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
65
+ """
66
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
67
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
68
+ """
69
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
70
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
71
+ # rescale the results from guidance (fixes overexposure)
72
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
73
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
74
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
75
+ return noise_cfg
76
+
77
+
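+ # Usage sketch: guidance_rescale=0.0 leaves the CFG output unchanged, while
+ # 1.0 fully matches its per-sample std to the text-conditioned branch.
+ #   noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)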
78
+ class DemoFusionLSRNASDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin):
79
+ """
80
+ Pipeline for text-to-image generation using Stable Diffusion XL.
81
+
82
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
83
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
84
+
85
+ In addition the pipeline inherits the following loading methods:
86
+ - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
87
+ - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
88
+
89
+ as well as the following saving methods:
90
+ - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
91
+
92
+ Args:
93
+ vae ([`AutoencoderKL`]):
94
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
95
+ text_encoder ([`CLIPTextModel`]):
96
+ Frozen text-encoder. Stable Diffusion XL uses the text portion of
97
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
98
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
99
+ text_encoder_2 ([` CLIPTextModelWithProjection`]):
100
+ Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
101
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
102
+ specifically the
103
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
104
+ variant.
105
+ tokenizer (`CLIPTokenizer`):
106
+ Tokenizer of class
107
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
108
+ tokenizer_2 (`CLIPTokenizer`):
109
+ Second Tokenizer of class
110
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
111
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
112
+ scheduler ([`SchedulerMixin`]):
113
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
114
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
115
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
116
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
117
+ `stabilityai/stable-diffusion-xl-base-1-0`.
118
+ """
119
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
120
+
121
+ def __init__(
122
+ self,
123
+ vae: AutoencoderKL,
124
+ text_encoder: CLIPTextModel,
125
+ text_encoder_2: CLIPTextModelWithProjection,
126
+ tokenizer: CLIPTokenizer,
127
+ tokenizer_2: CLIPTokenizer,
128
+ unet: UNet2DConditionModel,
129
+ scheduler: KarrasDiffusionSchedulers,
130
+ force_zeros_for_empty_prompt: bool = True,
131
+ ):
132
+ super().__init__()
133
+
134
+ self.register_modules(
135
+ vae=vae,
136
+ text_encoder=text_encoder,
137
+ text_encoder_2=text_encoder_2,
138
+ tokenizer=tokenizer,
139
+ tokenizer_2=tokenizer_2,
140
+ unet=unet,
141
+ scheduler=scheduler,
142
+ )
143
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
144
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
145
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
146
+ self.default_sample_size = self.unet.config.sample_size # 1024//8 = 128
147
+
148
+ def encode_prompt(
149
+ self,
150
+ prompt: str,
151
+ prompt_2: Optional[str] = None,
152
+ device: Optional[torch.device] = None,
153
+ num_images_per_prompt: int = 1,
154
+ do_classifier_free_guidance: bool = True,
155
+ negative_prompt: Optional[str] = None,
156
+ negative_prompt_2: Optional[str] = None,
157
+ prompt_embeds: Optional[torch.FloatTensor] = None,
158
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
159
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
160
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
161
+ lora_scale: Optional[float] = None,
162
+ ):
163
+ r"""
164
+ Encodes the prompt into text encoder hidden states.
165
+
166
+ Args:
167
+ prompt (`str` or `List[str]`, *optional*):
168
+ prompt to be encoded
169
+ prompt_2 (`str` or `List[str]`, *optional*):
170
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
171
+ used in both text-encoders
172
+ device: (`torch.device`):
173
+ torch device
174
+ num_images_per_prompt (`int`):
175
+ number of images that should be generated per prompt
176
+ do_classifier_free_guidance (`bool`):
177
+ whether to use classifier free guidance or not
178
+ negative_prompt (`str` or `List[str]`, *optional*):
179
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
180
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
181
+ less than `1`).
182
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
183
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
184
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
185
+ prompt_embeds (`torch.FloatTensor`, *optional*):
186
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
187
+ provided, text embeddings will be generated from `prompt` input argument.
188
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
189
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
190
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
191
+ argument.
192
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
193
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
194
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
195
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
196
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
197
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
198
+ input argument.
199
+ lora_scale (`float`, *optional*):
200
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
201
+ """
202
+ device = device or self._execution_device
203
+
204
+ # set lora scale so that monkey patched LoRA
205
+ # function of text encoder can correctly access it
206
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
207
+ self._lora_scale = lora_scale
208
+
209
+ # dynamically adjust the LoRA scale
210
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
211
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
212
+
213
+ if prompt is not None and isinstance(prompt, str):
214
+ batch_size = 1
215
+ elif prompt is not None and isinstance(prompt, list):
216
+ batch_size = len(prompt)
217
+ else:
218
+ batch_size = prompt_embeds.shape[0]
219
+
220
+ # Define tokenizers and text encoders
221
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
222
+ text_encoders = (
223
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
224
+ )
225
+
226
+ if prompt_embeds is None:
227
+ prompt_2 = prompt_2 or prompt
228
+ # textual inversion: process multi-vector tokens if necessary
229
+ prompt_embeds_list = []
230
+ prompts = [prompt, prompt_2]
231
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
232
+ if isinstance(self, TextualInversionLoaderMixin):
233
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
234
+
235
+ text_inputs = tokenizer(
236
+ prompt,
237
+ padding="max_length",
238
+ max_length=tokenizer.model_max_length,
239
+ truncation=True,
240
+ return_tensors="pt",
241
+ )
242
+
243
+ text_input_ids = text_inputs.input_ids
244
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
245
+
246
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
247
+ text_input_ids, untruncated_ids
248
+ ):
249
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
250
+ logger.warning(
251
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
252
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
253
+ )
254
+
255
+ prompt_embeds = text_encoder(
256
+ text_input_ids.to(device),
257
+ output_hidden_states=True,
258
+ )
259
+
260
+ # We are always interested only in the pooled output of the final text encoder
261
+ pooled_prompt_embeds = prompt_embeds[0]
262
+ prompt_embeds = prompt_embeds.hidden_states[-2]
263
+
264
+ prompt_embeds_list.append(prompt_embeds)
265
+
266
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
267
+
268
+ # get unconditional embeddings for classifier free guidance
269
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
270
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
271
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
272
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
273
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
274
+ negative_prompt = negative_prompt or ""
275
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
276
+
277
+ uncond_tokens: List[str]
278
+ if prompt is not None and type(prompt) is not type(negative_prompt):
279
+ raise TypeError(
280
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
281
+ f" {type(prompt)}."
282
+ )
283
+ elif isinstance(negative_prompt, str):
284
+ uncond_tokens = [negative_prompt, negative_prompt_2]
285
+ elif batch_size != len(negative_prompt):
286
+ raise ValueError(
287
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
288
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
289
+ " the batch size of `prompt`."
290
+ )
291
+ else:
292
+ uncond_tokens = [negative_prompt, negative_prompt_2]
293
+
294
+ negative_prompt_embeds_list = []
295
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
296
+ if isinstance(self, TextualInversionLoaderMixin):
297
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
298
+
299
+ max_length = prompt_embeds.shape[1]
300
+ uncond_input = tokenizer(
301
+ negative_prompt,
302
+ padding="max_length",
303
+ max_length=max_length,
304
+ truncation=True,
305
+ return_tensors="pt",
306
+ )
307
+
308
+ negative_prompt_embeds = text_encoder(
309
+ uncond_input.input_ids.to(device),
310
+ output_hidden_states=True,
311
+ )
312
+ # We are always interested only in the pooled output of the final text encoder
313
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
314
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
315
+
316
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
317
+
318
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
319
+
320
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
321
+ bs_embed, seq_len, _ = prompt_embeds.shape
322
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
323
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
324
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
325
+
326
+ if do_classifier_free_guidance:
327
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
328
+ seq_len = negative_prompt_embeds.shape[1]
329
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
330
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
331
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
332
+
333
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
334
+ bs_embed * num_images_per_prompt, -1
335
+ )
336
+ if do_classifier_free_guidance:
337
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
338
+ bs_embed * num_images_per_prompt, -1
339
+ )
340
+
341
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
342
+
343
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
344
+ def prepare_extra_step_kwargs(self, generator, eta):
345
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
346
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
347
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
348
+ # and should be between [0, 1]
349
+
350
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
351
+ extra_step_kwargs = {}
352
+ if accepts_eta:
353
+ extra_step_kwargs["eta"] = eta
354
+
355
+ # check if the scheduler accepts generator
356
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
357
+ if accepts_generator:
358
+ extra_step_kwargs["generator"] = generator
359
+ return extra_step_kwargs
360
+
361
+ def check_inputs(
362
+ self,
363
+ prompt,
364
+ prompt_2,
365
+ height,
366
+ width,
367
+ callback_steps,
368
+ negative_prompt=None,
369
+ negative_prompt_2=None,
370
+ prompt_embeds=None,
371
+ negative_prompt_embeds=None,
372
+ pooled_prompt_embeds=None,
373
+ negative_pooled_prompt_embeds=None,
374
+ num_images_per_prompt=None,
375
+ ):
376
+ if height % 8 != 0 or width % 8 != 0:
377
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
378
+
379
+ if (callback_steps is None) or (
380
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
381
+ ):
382
+ raise ValueError(
383
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
384
+ f" {type(callback_steps)}."
385
+ )
386
+
387
+ if prompt is not None and prompt_embeds is not None:
388
+ raise ValueError(
389
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
390
+ " only forward one of the two."
391
+ )
392
+ elif prompt_2 is not None and prompt_embeds is not None:
393
+ raise ValueError(
394
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
395
+ " only forward one of the two."
396
+ )
397
+ elif prompt is None and prompt_embeds is None:
398
+ raise ValueError(
399
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
400
+ )
401
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
402
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
403
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
404
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
405
+
406
+ if negative_prompt is not None and negative_prompt_embeds is not None:
407
+ raise ValueError(
408
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
409
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
410
+ )
411
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
412
+ raise ValueError(
413
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
414
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
415
+ )
416
+
417
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
418
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
419
+ raise ValueError(
420
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
421
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
422
+ f" {negative_prompt_embeds.shape}."
423
+ )
424
+
425
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
426
+ raise ValueError(
427
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
428
+ )
429
+
430
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
431
+ raise ValueError(
432
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
433
+ )
434
+ assert num_images_per_prompt == 1
435
+
436
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
437
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
438
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
439
+ if isinstance(generator, list) and len(generator) != batch_size:
440
+ raise ValueError(
441
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
442
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
443
+ )
444
+
445
+ if latents is None:
446
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
447
+ else:
448
+ latents = latents.to(device)
449
+
450
+ # scale the initial noise by the standard deviation required by the scheduler
451
+ latents = latents * self.scheduler.init_noise_sigma
452
+ return latents
453
+
454
+ def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
455
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
456
+
457
+ passed_add_embed_dim = (
458
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
459
+ )
460
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
461
+
462
+ if expected_add_embed_dim != passed_add_embed_dim:
463
+ raise ValueError(
464
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
465
+ )
466
+
467
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
468
+ return add_time_ids
469
+
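+ # Value sketch: the SDXL micro-conditioning vector is
+ # (orig_h, orig_w, crop_top, crop_left, target_h, target_w), e.g.
+ #   self._get_add_time_ids((1024, 1024), (0, 0), (2048, 2048), torch.float16)
+ #   # -> tensor([[1024., 1024., 0., 0., 2048., 2048.]], dtype=torch.float16)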
470
+ def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
471
+ # Define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
472
+ # if panorama's height/width < window_size, num_blocks of height/width should return 1
473
+ num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
474
+ num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
475
+ total_num_blocks = int(num_blocks_height * num_blocks_width)
476
+ views = []
477
+ for i in range(total_num_blocks):
478
+ h_start = int((i // num_blocks_width) * stride)
479
+ h_end = h_start + window_size
480
+ w_start = int((i % num_blocks_width) * stride)
481
+ w_end = w_start + window_size
482
+
483
+ if h_end > height:
484
+ h_start = int(h_start + height - h_end)
485
+ h_end = int(height)
486
+ if w_end > width:
487
+ w_start = int(w_start + width - w_end)
488
+ w_end = int(width)
489
+ if h_start < 0:
490
+ h_end = int(h_end - h_start)
491
+ h_start = 0
492
+ if w_start < 0:
493
+ w_end = int(w_end - w_start)
494
+ w_start = 0
495
+
496
+ if random_jitter:
497
+ jitter_range = (window_size - stride) // 4
498
+ w_jitter = 0
499
+ h_jitter = 0
500
+ if (w_start != 0) and (w_end != width):
501
+ w_jitter = random.randint(-jitter_range, jitter_range)
502
+ elif (w_start == 0) and (w_end != width):
503
+ w_jitter = random.randint(-jitter_range, 0)
504
+ elif (w_start != 0) and (w_end == width):
505
+ w_jitter = random.randint(0, jitter_range)
506
+ if (h_start != 0) and (h_end != height):
507
+ h_jitter = random.randint(-jitter_range, jitter_range)
508
+ elif (h_start == 0) and (h_end != height):
509
+ h_jitter = random.randint(-jitter_range, 0)
510
+ elif (h_start != 0) and (h_end == height):
511
+ h_jitter = random.randint(0, jitter_range)
512
+ h_start += (h_jitter + jitter_range)
513
+ h_end += (h_jitter + jitter_range)
514
+ w_start += (w_jitter + jitter_range)
515
+ w_end += (w_jitter + jitter_range)
516
+
517
+ views.append((h_start, h_end, w_start, w_end))
518
+ return views
519
+
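+ # Worked example (no jitter): a 256x256 latent with window_size=128 and
+ # stride=64 gives int((256 - 128) / 64 - 1e-6) + 2 = 3 blocks per axis, i.e. a
+ # 3x3 grid of nine overlapping views, the first being (0, 128, 0, 128).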
520
+ def tiled_decode(self, latents):
521
+ h, w = latents.shape[-2:]
522
+ H, W = h * self.vae_scale_factor, w * self.vae_scale_factor
523
+ core_size = self.unet.config.sample_size // 4  # 32 for sample_size 128
524
+ core_stride = core_size  # 32
525
+ pad_size = self.unet.config.sample_size // 8 * 3  # 48 for sample_size 128
526
+ decoder_view_batch_size = 1  # must remain 1
527
+
528
+ views = self.get_views(h, w, stride=core_stride, window_size=core_size)
529
+ views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
530
+ latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), 'constant', 0)
531
+ image = torch.zeros(latents.size(0), 3, H, W).to(latents.device)
532
+ count = torch.zeros_like(image).to(latents.device)
533
+ # get the latents corresponding to the current view coordinates
534
+ with self.progress_bar(total=len(views_batch)) as progress_bar:
535
+ for j, batch_view in enumerate(views_batch):
536
+ latents_for_view = torch.cat(
537
+ [
538
+ latents_[:, :, h_start:h_end+pad_size*2, w_start:w_end+pad_size*2]
539
+ for h_start, h_end, w_start, w_end in batch_view
540
+ ]
541
+ ).to(self.vae.device)
542
+ image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
543
+ h_start, h_end, w_start, w_end = views[j]
544
+ h_start, h_end, w_start, w_end = h_start * self.vae_scale_factor, h_end * self.vae_scale_factor, w_start * self.vae_scale_factor, w_end * self.vae_scale_factor
545
+ p_h_start, p_h_end, p_w_start, p_w_end = pad_size * self.vae_scale_factor, image_patch.size(2) - pad_size * self.vae_scale_factor, pad_size * self.vae_scale_factor, image_patch.size(3) - pad_size * self.vae_scale_factor
546
+
547
+ image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end].to(latents.device)
548
+ count[:, :, h_start:h_end, w_start:w_end] += 1
549
+ progress_bar.update()
550
+ image = image / count
551
+ return image
552
+
553
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
554
+ def upcast_vae(self):
555
+ dtype = self.vae.dtype
556
+ self.vae.to(dtype=torch.float32)
557
+ use_torch_2_0_or_xformers = isinstance(
558
+ self.vae.decoder.mid_block.attentions[0].processor,
559
+ (
560
+ AttnProcessor2_0,
561
+ XFormersAttnProcessor,
562
+ LoRAXFormersAttnProcessor,
563
+ LoRAAttnProcessor2_0,
564
+ ),
565
+ )
566
+ # if xformers or torch_2_0 is used attention block does not need
567
+ # to be in float32 which can save lots of memory
568
+ if use_torch_2_0_or_xformers:
569
+ self.vae.post_quant_conv.to(dtype)
570
+ self.vae.decoder.conv_in.to(dtype)
571
+ self.vae.decoder.mid_block.to(dtype)
572
+
573
+ def latent2image(self, latents, advanced_decode=False):
574
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
575
+ if self.low_vram:
576
+ self.unet.cpu()
577
+ self.vae.cuda()
578
+ if needs_upcasting:
579
+ self.upcast_vae()
580
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
581
+
582
+ if advanced_decode:
583
+ image = self.tiled_decode(latents)
584
+ else:
585
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
586
+
587
+ if needs_upcasting:
588
+ self.vae.to(dtype=torch.float16)
589
+ latents = latents.to(dtype=torch.float16)
590
+ image = self.image_processor.postprocess(image, output_type='pil')[0] # unnormalize
591
+ return image
592
+
593
+ @torch.no_grad()
594
+ def __call__(
595
+ self,
596
+ prompt: Union[str, List[str]] = None,
597
+ prompt_2: Optional[Union[str, List[str]]] = None,
598
+ height: int = 1024,
599
+ width: int = 1024,
600
+ num_inference_steps: int = 50,
601
+ denoising_end: Optional[float] = None,
602
+ guidance_scale: float = 7.5,
603
+ negative_prompt: Optional[Union[str, List[str]]] = None,
604
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
605
+ num_images_per_prompt: Optional[int] = 1,
606
+ eta: float = 0.0,
607
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
608
+ latents: Optional[torch.FloatTensor] = None,
609
+ prompt_embeds: Optional[torch.FloatTensor] = None,
610
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
611
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
612
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
613
+ return_dict: bool = False,
614
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
615
+ callback_steps: int = 1,
616
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
617
+ guidance_rescale: float = 0.0,
618
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
619
+ negative_original_size: Optional[Tuple[int, int]] = None,
620
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
621
+ negative_target_size: Optional[Tuple[int, int]] = None,
622
+ ################### Added parameters (including DemoFusion) ####################
623
+ view_batch_size: int = 8,
624
+ stride_ratio: float = 0.5,
625
+ lsr_path: str = 'lsr/checkpoints/swinir-liif-latent-sdxl.pth',
626
+ cosine_scale_1: float = 3.,
627
+ cosine_scale_2: float = 1.,
628
+ cosine_scale_3: float = 1.,
629
+ sigma: float = 0.8,
630
+ rna_min_std: float = 0.,
631
+ rna_max_std: float = 1.2,
632
+ inversion_depth: int = 30,
633
+ low_vram: bool = False,
634
+ ):
635
+ r"""
636
+ Function invoked when calling the pipeline for generation.
637
+
638
+ Args:
639
+ prompt (`str` or `List[str]`, *optional*):
640
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
641
+ instead.
642
+ prompt_2 (`str` or `List[str]`, *optional*):
643
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
644
+ used in both text-encoders
645
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
646
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
647
+ Anything below 512 pixels won't work well for
648
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
649
+ and checkpoints that are not specifically fine-tuned on low resolutions.
650
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
651
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
652
+ Anything below 512 pixels won't work well for
653
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
654
+ and checkpoints that are not specifically fine-tuned on low resolutions.
655
+ num_inference_steps (`int`, *optional*, defaults to 50):
656
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
657
+ expense of slower inference.
658
+ denoising_end (`float`, *optional*):
659
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
660
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
661
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
662
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
663
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
664
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
665
+ guidance_scale (`float`, *optional*, defaults to 7.5):
666
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
667
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
668
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
669
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
670
+ usually at the expense of lower image quality.
671
+ negative_prompt (`str` or `List[str]`, *optional*):
672
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
673
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
674
+ less than `1`).
675
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
676
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
677
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
678
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
679
+ The number of images to generate per prompt.
680
+ eta (`float`, *optional*, defaults to 0.0):
681
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
682
+ [`schedulers.DDIMScheduler`], will be ignored for others.
683
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
684
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
685
+ to make generation deterministic.
686
+ latents (`torch.FloatTensor`, *optional*):
687
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
688
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
689
+ tensor will be generated by sampling using the supplied random `generator`.
690
+ prompt_embeds (`torch.FloatTensor`, *optional*):
691
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
692
+ provided, text embeddings will be generated from `prompt` input argument.
693
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
694
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
695
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
696
+ argument.
697
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
698
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
699
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
700
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
701
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
702
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
703
+ input argument.
704
+ return_dict (`bool`, *optional*, defaults to `True`):
705
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
706
+ of a plain tuple.
707
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ guidance_rescale (`float`, *optional*, defaults to 0.7):
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in Equation 16 of
+ that paper. The guidance rescale factor should fix overexposure when using zero terminal SNR.
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ If `original_size` is not the same as `target_size`, the image will appear to be down- or upsampled.
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
+ explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
+ not specified, it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a target image resolution. It should be the
+ same as `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+
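+ Examples:
+ A minimal, illustrative call. The class name `LSRNADemoFusionSDXLPipeline` is an assumption
+ (use whatever name `pipeline_lsrna_demofusion_sdxl.py` actually exports); the argument values
+ mirror `run.sh`:
+
+ ```py
+ >>> import torch
+ >>> pipe = LSRNADemoFusionSDXLPipeline.from_pretrained(
+ ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+ ... ).to("cuda")
+ >>> images = pipe(
+ ...     prompt="A well-worn baseball glove and ball sitting on fresh-cut grass.",
+ ...     height=2048,
+ ...     width=2048,
+ ...     lsr_path="lsr/swinir-liif-latent-sdxl.pth",
+ ...     rna_min_std=0.0,
+ ...     rna_max_std=1.2,
+ ...     inversion_depth=30,
+ ... )
+ >>> images[-1].save("lsrna_2048.png")  # last entry is the high-resolution phase
+ ```
+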
+ Returns:
+ A `list` with the generated images at each phase: the 1X reference first, then the high-resolution result.
+ """
755
+ # 0. Default height and width to unet
756
+ assert self.default_sample_size * self.vae_scale_factor == 1024
757
+ if max(height, width) % 1024 != 0:
758
+ raise ValueError(f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}.")
759
+ scale_num = max(height, width) // 1024
760
+ original_size = target_size = (height, width)
761
+ stride = int(self.unet.config.sample_size * stride_ratio)
762
+ self.low_vram = low_vram
763
+
764
+ # load LSR model
765
+ print('LSR model loaded from ...', lsr_path)
766
+ sv_file = torch.load(lsr_path)
767
+ lsr_model = lsr.models.make(sv_file['model'], load_sd=True).cuda()
768
+
769
+ # 1. Check inputs. Raise error if not correct
770
+ self.check_inputs(
771
+ prompt,
772
+ prompt_2,
773
+ height,
774
+ width,
775
+ callback_steps,
776
+ negative_prompt,
777
+ negative_prompt_2,
778
+ prompt_embeds,
779
+ negative_prompt_embeds,
780
+ pooled_prompt_embeds,
781
+ negative_pooled_prompt_embeds,
782
+ num_images_per_prompt,
783
+ )
784
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ if low_vram:
+ self.vae.cpu()
+ self.unet.cpu()
+ self.text_encoder.to(device)
+ self.text_encoder_2.to(device)
+ lsr_model.cpu()
+
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier-free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+ )
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = self.encode_prompt(
+ prompt=prompt,
+ prompt_2=prompt_2,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ negative_prompt=negative_prompt,
+ negative_prompt_2=negative_prompt_2,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ )
+
+ # 4. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+ # NOTE: the noise-inversion bookkeeping below builds one noised latent per
+ # timestep and currently assumes the default 50-step schedule
+ assert len(timesteps) == 50
+
+ # 5. Prepare latent variables
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height // scale_num,
+ width // scale_num,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 6. Prepare extra step kwargs.
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 7. Prepare added time ids & embeddings
+ add_text_embeds = pooled_prompt_embeds
+
+ # use the 1X size for micro-conditioning to maintain scene consistency across scale_num
+ # add_time_ids = self._get_add_time_ids(
+ # original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
+ # )
+ size = (height // scale_num, width // scale_num)
+ add_time_ids = self._get_add_time_ids(
+ size, crops_coords_top_left, size, dtype=prompt_embeds.dtype
+ )
+
+ if negative_original_size is not None and negative_target_size is not None:
+ negative_add_time_ids = self._get_add_time_ids(
+ negative_original_size,
+ negative_crops_coords_top_left,
+ negative_target_size,
+ dtype=prompt_embeds.dtype,
+ )
+ else:
+ negative_add_time_ids = add_time_ids
+
+ if do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
+ del negative_prompt_embeds, negative_pooled_prompt_embeds, negative_add_time_ids
+
+ prompt_embeds = prompt_embeds.to(device)
+ add_text_embeds = add_text_embeds.to(device)
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
+
+ # 7.1 Apply denoising_end
+ if denoising_end is not None and isinstance(denoising_end, float) and 0 < denoising_end < 1:
+ discrete_timestep_cutoff = int(
+ round(
+ self.scheduler.config.num_train_timesteps
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
+ )
+ )
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
+ timesteps = timesteps[:num_inference_steps]
+
+ ############### Phase Initialization ###############
+ output_images = []
+
+ if low_vram:
+ self.text_encoder.cpu()
+ self.text_encoder_2.cpu()
+ self.unet.to(device)
+
+ print("### Denoising 1X Reference ###")
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ # expand the latents if doing classifier-free guidance
+ latent_model_input = (
+ latents.repeat_interleave(2, dim=0)
+ if do_classifier_free_guidance
+ else latents
+ )
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # predict the noise residual
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or (i + 1) % self.scheduler.order == 0:
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+ del latent_model_input, noise_pred
+ if do_classifier_free_guidance:
+ del noise_pred_text, noise_pred_uncond
+
+ anchor_mean = latents.mean()
+ anchor_std = latents.std()
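+ # Record the mean/std of the 1X reference latent; the target-denoising loop
+ # renormalizes each fused latent back to these anchor statistics to keep
+ # them from drifting at high resolution.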
+ image = self.latent2image(latents) # rgb (discretized), pil
+
+ output_images.append(image)
+ if scale_num == 1:
+ output_images.append(image) # no upsampling to do; the reference is also the final phase
+ return output_images
+
+ ########### latent super resolution (LSR) ###########
+ # single jump to the target scale (no progressive upsampling)
+ current_height = height // scale_num * scale_num
+ current_width = width // scale_num * scale_num
+ current_scale_num = scale_num
+
+ # define new add_time_ids
+ add_time_ids = self._get_add_time_ids(
+ (current_height, current_width), crops_coords_top_left, (current_height, current_width), dtype=prompt_embeds.dtype
+ )
+ negative_add_time_ids = add_time_ids
+ if do_classifier_free_guidance:
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
+
+ print(f"### Upsampling latent to {current_scale_num}X ###")
+ if low_vram:
+ self.unet.cpu()
+ lsr_model.to(device)
+
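+ # Build the LIIF query grid at the target latent resolution: `coord` holds
+ # per-pixel centers in [-1, 1] x [-1, 1], and `cell` is the query pixel size
+ # (2/H, 2/W) in that normalized system, so the LSR model decodes one latent
+ # value per target cell.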
+ H = current_height // self.vae_scale_factor
+ W = current_width // self.vae_scale_factor
+ coord = make_coord((H, W), flatten=False, device=device).unsqueeze(0)
+ cell = torch.ones_like(coord)
+ cell[:, :, :, 0] *= 2 / H
+ cell[:, :, :, 1] *= 2 / W
+
+ dtype = latents.dtype
+ latents = latents.to(torch.float32)
+ latents = lsr_model(latents, coord, cell)
+ latents = latents.to(dtype) # upsampled latent, back to the working dtype (e.g. float16)
+
+ ########### region-wise noise addition (RNA) ###########
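+ # Canny edges of the 1X reference act as a detail map: after min-max
+ # normalization, edge-dense regions receive noise with std up to rna_max_std
+ # while flat regions get as little as rna_min_std, i.e.
+ # std = norm(edges) * (rna_max_std - rna_min_std) + rna_min_std.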
+ image_ref = np.array(output_images[0])
+ diff = apply_canny_detection(image_ref, low_threshold=0, high_threshold=255).astype(np.float32)
+ diff = torch.tensor(diff).to(device).unsqueeze(0).unsqueeze(0)
+ diff = torch.nn.AdaptiveAvgPool2d((H, W))(diff)
+ std = ((diff - diff.min()) / (diff.max() - diff.min())) * (rna_max_std - rna_min_std) + rna_min_std
+ latents += torch.randn_like(latents) * std
+
+ ########### target denoising ###########
+ if low_vram:
+ self.unet.to(device)
+ lsr_model.cpu()
+
+ # noise inversion for noise initialization & skip residual
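+ # Forward-noise the upsampled latent once per scheduler timestep, so that
+ # noise_latents[k] is the latent noised to timesteps[k]. Denoising then
+ # starts from noise_latents[num_inference_steps - inversion_depth], i.e.
+ # only the final `inversion_depth` steps are actually run.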
+ noise_latents = []
+ noise = torch.randn_like(latents)
+ for timestep in timesteps:
+ noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
+ noise_latents.append(noise_latent)
+ assert 0 < inversion_depth <= num_inference_steps and num_inference_steps == len(timesteps)
+ latents = noise_latents[num_inference_steps - inversion_depth]
+
+ print(f"### Denoising {current_scale_num}X Target ###")
+ with self.progress_bar(total=inversion_depth) as progress_bar:
+ for i, t in enumerate(timesteps):
+ if i < num_inference_steps - inversion_depth:
+ continue
+ count = torch.zeros_like(latents)
+ value = torch.zeros_like(latents)
+
+ # Skip Residual (from DemoFusion)
+ cosine_factor = 0.5 * (1 + torch.cos(torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps)).cpu()
+ c1 = cosine_factor ** cosine_scale_1
+ latents = latents * (1 - c1) + noise_latents[i] * c1
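+ # cosine_factor decays from 1 to 0 as t runs from num_train_timesteps to 0,
+ # so c1 = cosine_factor ** cosine_scale_1 pulls early steps toward the
+ # noised upsampled latent and lets later steps trust the denoised estimate.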
+
+ # patch-wise denoising (MultiDiffusion)
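+ # Split the latent into overlapping sample_size windows (MultiDiffusion).
+ # random_jitter shifts the window grid each step; padding by jitter_range
+ # keeps the jittered windows in bounds, and the pad is cropped off again
+ # after the local values are accumulated.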
+ views = self.get_views(H, W, window_size=self.unet.config.sample_size, stride=stride, random_jitter=True)
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
+ jitter_range = (self.unet.config.sample_size - stride) // 4
+ latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), 'constant', 0)
+ count_local = torch.zeros_like(latents_)
+ value_local = torch.zeros_like(latents_)
+
+ for j, batch_view in enumerate(views_batch):
+ vb_size = len(batch_view)
+ # get the latents corresponding to the current view coordinates
+ latents_for_view = torch.cat(
+ [
+ latents_[:, :, h_start:h_end, w_start:w_end]
+ for h_start, h_end, w_start, w_end in batch_view
+ ]
+ )
+ # expand the latents if doing classifier-free guidance
+ latent_model_input = latents_for_view
+ latent_model_input = (
+ latent_model_input.repeat_interleave(2, dim=0)
+ if do_classifier_free_guidance
+ else latent_model_input
+ )
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
+ add_time_ids_input = []
+ for h_start, h_end, w_start, w_end in batch_view:
+ add_time_ids_ = add_time_ids.clone()
+ add_time_ids_[:, 2] = h_start * self.vae_scale_factor
+ add_time_ids_[:, 3] = w_start * self.vae_scale_factor
+ add_time_ids_input.append(add_time_ids_)
+ add_time_ids_input = torch.cat(add_time_ids_input)
+
+ # predict the noise residual
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds_input,
+ cross_attention_kwargs=cross_attention_kwargs,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ if hasattr(self.scheduler, '_init_step_index'):
+ self.scheduler._init_step_index(t)
+ latents_denoised_batch = self.scheduler.step(
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
+
+ # extract value from batch
+ for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
+ latents_denoised_batch.chunk(vb_size), batch_view
+ ):
+ value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
+ count_local[:, :, h_start:h_end, w_start:w_end] += 1
+ value_local = value_local[:, :, jitter_range: jitter_range + H, jitter_range: jitter_range + W]
+ count_local = count_local[:, :, jitter_range: jitter_range + H, jitter_range: jitter_range + W]
+
+ # Dilated Sampling (from DemoFusion)
+ c2 = cosine_factor ** cosine_scale_2
+ value += value_local / count_local * (1 - c2)
+ count += torch.ones_like(value_local) * (1 - c2)
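+ # c2 = cosine_factor ** cosine_scale_2 balances the two denoising paths:
+ # the patch-wise (local) estimate is weighted by (1 - c2) here and the
+ # dilated (global) estimate by c2 below, shifting emphasis from global
+ # structure early on to local detail later.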
+
+ views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
+
+ h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
+ w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
+ latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), 'constant', 0)
+
+ count_global = torch.zeros_like(latents_)
+ value_global = torch.zeros_like(latents_)
+
+ c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2
+ std_, mean_ = latents_.std(), latents_.mean()
+ latents_gaussian = gaussian_filter(latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3)
+ latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_
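+ # Smooth the dilated-sampling input with a Gaussian whose sigma decays via
+ # c3, then re-standardize to the pre-filter mean/std so the filtering
+ # stabilizes global structure without changing the latent statistics.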
+
+ for j, batch_view in enumerate(views_batch):
+ latents_for_view = torch.cat(
+ [
+ latents_[:, :, h::current_scale_num, w::current_scale_num]
+ for h, w in batch_view
+ ]
+ )
+ latents_for_view_gaussian = torch.cat(
+ [
+ latents_gaussian[:, :, h::current_scale_num, w::current_scale_num]
+ for h, w in batch_view
+ ]
+ )
+
+ # the last batch may hold fewer views than view_batch_size
+ vb_size = latents_for_view.size(0)
+
+ # expand the latents if doing classifier-free guidance
+ latent_model_input = latents_for_view_gaussian
+ latent_model_input = (
+ latent_model_input.repeat_interleave(2, dim=0)
+ if do_classifier_free_guidance
+ else latent_model_input
+ )
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
+ add_time_ids_input = torch.cat([add_time_ids] * vb_size)
+
+ # predict the noise residual
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds_input,
+ cross_attention_kwargs=cross_attention_kwargs,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ # (the step uses the unfiltered latents; the Gaussian-filtered version only conditions the UNet input)
+ if hasattr(self.scheduler, '_init_step_index'):
+ self.scheduler._init_step_index(t)
+ latents_denoised_batch = self.scheduler.step(
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0]
+
+ # extract value from batch
+ for latents_view_denoised, (h, w) in zip(
+ latents_denoised_batch.chunk(vb_size), batch_view
+ ):
+ value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
+ count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
+
+ value_global = value_global[:, :, h_pad:, w_pad:]
+ value += value_global * c2
+ count += torch.ones_like(value_global) * c2
+
+ latents = torch.where(count > 0, value / count, value)
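+ # Fuse the accumulated local and global estimates by weighted averaging;
+ # `count` carries the per-pixel (1 - c2) / c2 weights, and the torch.where
+ # guard only matters where nothing was accumulated.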
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or (i + 1) % self.scheduler.order == 0:
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+ # renormalize to the 1X anchor statistics to prevent drift
+ latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
+
+ # reconstruct target image
+ print(f"### Reconstructing Target ({scale_num}X) ###")
+ image = self.latent2image(latents, advanced_decode=False)
+ output_images.append(image)
+
+ # offload all models
+ self.maybe_free_model_hooks()
+ return output_images
+
+
+ # Override to properly handle the loading and unloading of the additional text encoder.
+ def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
+ # it here explicitly to be able to tell that it's coming from an SDXL
+ # pipeline.
+
+ # Remove any existing hooks.
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
+ else:
+ raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
+
+ is_model_cpu_offload = False
+ is_sequential_cpu_offload = False
+ recursive = False
+ for _, component in self.components.items():
+ if isinstance(component, torch.nn.Module):
+ if hasattr(component, "_hf_hook"):
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
+ logger.info(
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
+ )
+ recursive = is_sequential_cpu_offload
+ remove_hook_from_module(component, recurse=recursive)
+ state_dict, network_alphas = self.lora_state_dict(
+ pretrained_model_name_or_path_or_dict,
+ unet_config=self.unet.config,
+ **kwargs,
+ )
+ self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
+
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
+ if len(text_encoder_state_dict) > 0:
+ self.load_lora_into_text_encoder(
+ text_encoder_state_dict,
+ network_alphas=network_alphas,
+ text_encoder=self.text_encoder,
+ prefix="text_encoder",
+ lora_scale=self.lora_scale,
+ )
+
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
+ if len(text_encoder_2_state_dict) > 0:
+ self.load_lora_into_text_encoder(
+ text_encoder_2_state_dict,
+ network_alphas=network_alphas,
+ text_encoder=self.text_encoder_2,
+ prefix="text_encoder_2",
+ lora_scale=self.lora_scale,
+ )
+
+ # Offload back.
+ if is_model_cpu_offload:
+ self.enable_model_cpu_offload()
+ elif is_sequential_cpu_offload:
+ self.enable_sequential_cpu_offload()
+
+ @classmethod
+ def save_lora_weights(
+ cls,
+ save_directory: Union[str, os.PathLike],
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
+ is_main_process: bool = True,
+ weight_name: str = None,
+ save_function: Callable = None,
+ safe_serialization: bool = True,
+ ):
+ state_dict = {}
+
+ def pack_weights(layers, prefix):
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
+ return layers_state_dict
+
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
+ raise ValueError(
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
+ )
+
+ if unet_lora_layers:
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
+
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
+
+ cls.write_lora_layers(
+ state_dict=state_dict,
+ save_directory=save_directory,
+ is_main_process=is_main_process,
+ weight_name=weight_name,
+ save_function=save_function,
+ safe_serialization=safe_serialization,
+ )
+
+ def _remove_text_encoder_monkey_patch(self):
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
competitors_inference_code/LSRNA/requirements.txt ADDED
@@ -0,0 +1,18 @@
+ torch==2.3.1
+ accelerate==0.31.0
+ diffusers==0.29.1
+ einops==0.8.0
+ gradio==4.38.1
+ huggingface-hub==0.24.0
+ MarkupSafe==2.1.5
+ matplotlib==3.9.1
+ numpy==1.26.4
+ omegaconf==2.3.0
+ pandas==2.2.2
+ safetensors==0.4.3
+ scipy==1.11.4
+ timm==1.0.7
+ transformers==4.41.2
+ triton==2.3.1
+ xformers==0.0.27
+ opencv-python
competitors_inference_code/LSRNA/run.sh ADDED
@@ -0,0 +1,13 @@
+ #!/usr/bin/env bash
+ CUDA_VISIBLE_DEVICES=0 python main.py \
+ --prompt "A well-worn baseball glove and ball sitting on fresh-cut grass." \
+ --negative_prompt "blurry, ugly, duplicate, poorly drawn, deformed, mosaic" \
+ --height 2048 \
+ --width 2048 \
+ --seed 0 \
+ --lsr_path "lsr/swinir-liif-latent-sdxl.pth" \
+ --rna_min_std 0.0 \
+ --rna_max_std 1.2 \
+ --inversion_depth 30 \
+ --save_dir "results" \
+ #--low_vram
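+ # Note: the trailing backslash above splices `#--low_vram` onto the command
+ # line, where bash treats it as a comment; to enable CPU offloading on
+ # low-VRAM GPUs, drop the leading `#` so the flag reads `--low_vram`.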