comfyri commited on
Commit
70f94c6
·
verified ·
1 Parent(s): e29412d

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +214 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import spaces
3
+ from gradio_imageslider import ImageSlider
4
+ from image_gen_aux import UpscaleWithModel
5
+ from image_gen_aux.utils import load_image
6
+ import tempfile
7
+ from PIL import Image
8
+ import traceback
9
+ import torch
10
+
11
# --- Model Dictionary ---
# Maps a display name (shown in the dropdown) to its Hugging Face repo id.
# Invariant: every key equals the repo name after the "Phips/" prefix.
# Fix: removed the typo duplicate key "4x4xTextures_GTAV_rgt-s", which pointed
# at the same repo as "4xTextures_GTAV_rgt-s" and produced a bogus dropdown entry.
MODELS = {
    "1xDeH264_realplksr": "Phips/1xDeH264_realplksr",
    "1xDeJPG_HAT": "Phips/1xDeJPG_HAT",
    "1xDeJPG_OmniSR": "Phips/1xDeJPG_OmniSR",
    "1xDeJPG_realplksr_otf": "Phips/1xDeJPG_realplksr_otf",
    "1xDeJPG_SRFormer_light": "Phips/1xDeJPG_SRFormer_light",
    "1xDeNoise_realplksr_otf": "Phips/1xDeNoise_realplksr_otf",
    "1xExposureCorrection_compact": "Phips/1xExposureCorrection_compact",
    "1xOverExposureCorrection_compact": "Phips/1xOverExposureCorrection_compact",
    "1xUnderExposureCorrection_compact": "Phips/1xUnderExposureCorrection_compact",
    "2xAoMR_mosr": "Phips/2xAoMR_mosr",
    "2xEvangelion_compact": "Phips/2xEvangelion_compact",
    "2xEvangelion_dat2": "Phips/2xEvangelion_dat2",
    "2xEvangelion_omnisr": "Phips/2xEvangelion_omnisr",
    "2xHFA2k_compact_multijpg": "Phips/2xHFA2k_compact_multijpg",
    "2xHFA2k_LUDVAE_compact": "Phips/2xHFA2k_LUDVAE_compact",
    "2xHFA2k_LUDVAE_SPAN": "Phips/2xHFA2k_LUDVAE_SPAN",
    "2xHFA2kAVCCompact": "Phips/2xHFA2kAVCCompact",
    "2xHFA2kAVCOmniSR": "Phips/2xHFA2kAVCOmniSR",
    "2xHFA2kAVCSRFormer_light": "Phips/2xHFA2kAVCSRFormer_light",
    "2xHFA2kCompact": "Phips/2xHFA2kCompact",
    "2xHFA2kOmniSR": "Phips/2xHFA2kOmniSR",
    "2xHFA2kReal-CUGAN": "Phips/2xHFA2kReal-CUGAN",
    "2xHFA2kShallowESRGAN": "Phips/2xHFA2kShallowESRGAN",
    "2xHFA2kSPAN": "Phips/2xHFA2kSPAN",
    "2xHFA2kSwinIR-S": "Phips/2xHFA2kSwinIR-S",
    "2xLexicaRRDBNet": "Phips/2xLexicaRRDBNet",
    "2xLexicaRRDBNet_Sharp": "Phips/2xLexicaRRDBNet_Sharp",
    "2xNomosUni_compact_multijpg": "Phips/2xNomosUni_compact_multijpg",
    "2xNomosUni_compact_multijpg_ldl": "Phips/2xNomosUni_compact_multijpg_ldl",
    "2xNomosUni_compact_otf_medium": "Phips/2xNomosUni_compact_otf_medium",
    "2xNomosUni_esrgan_multijpg": "Phips/2xNomosUni_esrgan_multijpg",
    "2xNomosUni_span_multijpg": "Phips/2xNomosUni_span_multijpg",
    "2xNomosUni_span_multijpg_ldl": "Phips/2xNomosUni_span_multijpg_ldl",
    "2xParimgCompact": "Phips/2xParimgCompact",
    "4xArtFaces_realplksr_dysample": "Phips/4xArtFaces_realplksr_dysample",
    "4xBHI_dat2_multiblur": "Phips/4xBHI_dat2_multiblur",
    "4xBHI_dat2_multiblurjpg": "Phips/4xBHI_dat2_multiblurjpg",
    "4xBHI_dat2_otf": "Phips/4xBHI_dat2_otf",
    "4xBHI_dat2_real": "Phips/4xBHI_dat2_real",
    "4xBHI_realplksr_dysample_multi": "Phips/4xBHI_realplksr_dysample_multi",
    "4xBHI_realplksr_dysample_multiblur": "Phips/4xBHI_realplksr_dysample_multiblur",
    "4xBHI_realplksr_dysample_otf": "Phips/4xBHI_realplksr_dysample_otf",
    "4xBHI_realplksr_dysample_otf_nn": "Phips/4xBHI_realplksr_dysample_otf_nn",
    "4xBHI_realplksr_dysample_real": "Phips/4xBHI_realplksr_dysample_real",
    "4xFaceUpDAT": "Phips/4xFaceUpDAT",
    "4xFaceUpLDAT": "Phips/4xFaceUpLDAT",
    "4xFaceUpSharpDAT": "Phips/4xFaceUpSharpDAT",
    "4xFaceUpSharpLDAT": "Phips/4xFaceUpSharpLDAT",
    "4xFFHQDAT": "Phips/4xFFHQDAT",
    "4xFFHQLDAT": "Phips/4xFFHQLDAT",
    "4xHFA2k": "Phips/4xHFA2k",
    "4xHFA2k_ludvae_realplksr_dysample": "Phips/4xHFA2k_ludvae_realplksr_dysample",
    "4xHFA2kLUDVAEGRL_small": "Phips/4xHFA2kLUDVAEGRL_small",
    "4xHFA2kLUDVAESRFormer_light": "Phips/4xHFA2kLUDVAESRFormer_light",
    "4xHFA2kLUDVAESwinIR_light": "Phips/4xHFA2kLUDVAESwinIR_light",
    "4xLexicaDAT2_otf": "Phips/4xLexicaDAT2_otf",
    "4xLSDIRCompact2": "Phips/4xLSDIRCompact2",
    "4xLSDIRCompact": "Phips/4xLSDIRCompact",
    "4xLSDIRCompactC3": "Phips/4xLSDIRCompactC3",
    "4xLSDIRCompactC": "Phips/4xLSDIRCompactC",
    "4xLSDIRCompactCR3": "Phips/4xLSDIRCompactCR3",
    "4xLSDIRCompactN3": "Phips/4xLSDIRCompactN3",
    "4xLSDIRCompactR3": "Phips/4xLSDIRCompactR3",
    "4xLSDIRCompactR": "Phips/4xLSDIRCompactR",
    "4xLSDIRDAT": "Phips/4xLSDIRDAT",
    "4xNature_realplksr_dysample": "Phips/4xNature_realplksr_dysample",
    "4xNomos2_hq_atd": "Phips/4xNomos2_hq_atd",
    "4xNomos2_hq_dat2": "Phips/4xNomos2_hq_dat2",
    "4xNomos2_hq_drct-l": "Phips/4xNomos2_hq_drct-l",
    "4xNomos2_hq_mosr": "Phips/4xNomos2_hq_mosr",
    "4xNomos2_otf_esrgan": "Phips/4xNomos2_otf_esrgan",
    "4xNomos2_realplksr_dysample": "Phips/4xNomos2_realplksr_dysample",
    "4xNomos8k_atd_jpg": "Phips/4xNomos8k_atd_jpg",
    "4xNomos8kDAT": "Phips/4xNomos8kDAT",
    "4xNomos8kHAT-L_bokeh_jpg": "Phips/4xNomos8kHAT-L_bokeh_jpg",
    "4xNomos8kHAT-L_otf": "Phips/4xNomos8kHAT-L_otf",
    "4xNomos8kSC": "Phips/4xNomos8kSC",
    "4xNomos8kSCHAT-L": "Phips/4xNomos8kSCHAT-L",
    "4xNomos8kSCHAT-S": "Phips/4xNomos8kSCHAT-S",
    "4xNomos8kSCSRFormer": "Phips/4xNomos8kSCSRFormer",
    "4xNomosUni_rgt_multijpg": "Phips/4xNomosUni_rgt_multijpg",
    "4xNomosUni_rgt_s_multijpg": "Phips/4xNomosUni_rgt_s_multijpg",
    "4xNomosUni_span_multijpg": "Phips/4xNomosUni_span_multijpg",
    "4xNomosUniDAT2_box": "Phips/4xNomosUniDAT2_box",
    "4xNomosUniDAT2_multijpg_ldl": "Phips/4xNomosUniDAT2_multijpg_ldl",
    "4xNomosUniDAT2_multijpg_ldl_sharp": "Phips/4xNomosUniDAT2_multijpg_ldl_sharp",
    "4xNomosUniDAT_bokeh_jpg": "Phips/4xNomosUniDAT_bokeh_jpg",
    "4xNomosUniDAT_otf": "Phips/4xNomosUniDAT_otf",
    "4xNomosWebPhoto_atd": "Phips/4xNomosWebPhoto_atd",
    "4xNomosWebPhoto_esrgan": "Phips/4xNomosWebPhoto_esrgan",
    "4xNomosWebPhoto_RealPLKSR": "Phips/4xNomosWebPhoto_RealPLKSR",
    "4xReal_SSDIR_DAT_GAN": "Phips/4xReal_SSDIR_DAT_GAN",
    "4xRealWebPhoto_v3_atd": "Phips/4xRealWebPhoto_v3_atd",
    "4xRealWebPhoto_v4_dat2": "Phips/4xRealWebPhoto_v4_dat2",
    "4xRealWebPhoto_v4_drct-l": "Phips/4xRealWebPhoto_v4_drct-l",
    "4xSSDIRDAT": "Phips/4xSSDIRDAT",
    "4xTextureDAT2_otf": "Phips/4xTextureDAT2_otf",
    "4xTextures_GTAV_rgt-s": "Phips/4xTextures_GTAV_rgt-s",
    "4xTextures_GTAV_rgt-s_dither": "Phips/4xTextures_GTAV_rgt-s_dither",
}
115
+
116
# --- Efficient Model Loading and Caching ---
# Process-wide cache: model display name -> loaded upscaler pipeline.
LOADED_MODELS_CACHE = {}

def get_upscaler(model_name: str):
    """Return the upscaler for ``model_name``, loading it onto CUDA once.

    Subsequent calls for the same name hit the in-memory cache, so each
    model's weights are downloaded/loaded at most once per process.
    """
    try:
        return LOADED_MODELS_CACHE[model_name]
    except KeyError:
        print(f"Loading model: {model_name}")
        upscaler = UpscaleWithModel.from_pretrained(MODELS[model_name]).to("cuda")
        LOADED_MODELS_CACHE[model_name] = upscaler
        return upscaler
126
+
127
# --- Core Upscaling Function ---
@spaces.GPU
def upscale_image(image, model_selection: str, progress=gr.Progress(track_tqdm=True)):
    """Upscale ``image`` with the model chosen in the dropdown.

    Returns a ``(original, upscaled)`` pair for the comparison slider and the
    path of a lossless PNG written to a temp file for the download widget.
    Raises ``gr.Error`` when no image was supplied or processing fails.
    """
    if image is None:
        raise gr.Error("No image uploaded. Please upload an image to upscale.")

    try:
        progress(0, desc="Loading image and model...")
        source = load_image(image)
        upscaler = get_upscaler(model_selection)

        progress(0.5, desc="Upscaling image... (this may take a moment)")
        # Tile size is fixed at 1024x1024 (matches the note in the UI header).
        result = upscaler(source, tiling=True, tile_width=1024, tile_height=1024)

        progress(0.9, desc="Saving result...")
        # delete=False: the file must outlive this context manager so Gradio
        # can serve it through the download component.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            result.save(tmp.name, "PNG")
            saved_path = tmp.name

        return (source, result), saved_path
    except Exception as e:
        # Full traceback to the server log; short message to the user.
        print(f"An error occurred: {traceback.format_exc()}")
        raise gr.Error(f"An error occurred during processing: {e}")
151
+
152
def clear_outputs():
    """Blank out the comparison slider and the download file before a new run."""
    return (None, None)
154
+
155
# --- Gradio Interface Definition ---
title = """<h1 align="center">Image Upscaler</h1>
<div align="center">
Use this Space to upscale your images with a collection of custom-trained models.<br>
This app uses the <a href="https://github.com/asomoza/image_gen_aux">Image Generation Auxiliary Tools</a> library and <a href="https://github.com/Phhofm/models">my models</a>.<br>
Tiling is fixed at 1024x1024 for optimal performance. An <a href="https://huggingface.co/spaces/Phips/Upscaler/resolve/main/input_example1.png">example input image</a> is available to try.
</div>
"""

# Single source of truth for the pre-selected model. Fix: the pre-load step
# previously warmed "4xNomosWebPhoto_RealPLKSR" while the dropdown defaulted
# to "4xBHI_dat2_real", so the first click still paid the full load cost.
DEFAULT_MODEL = "4xBHI_dat2_real"

with gr.Blocks(delete_cache=(3600, 3600)) as demo:
    gr.HTML(title)
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(type="pil", label="Input Image")
            model_selection = gr.Dropdown(
                choices=list(MODELS.keys()),
                value=DEFAULT_MODEL,
                label="Model (alphabetically sorted)",
            )
            run_button = gr.Button("Upscale", variant="primary")

        with gr.Column(scale=2):
            result_slider = ImageSlider(
                interactive=False,
                label="Compare Original vs. Upscaled",
                show_label=True,
                show_download_button=False,
            )

    # Guide the user: the slider is a web-optimized preview; the file below
    # is the lossless result.
    gr.Markdown(
        "<center><i>Note: The slider above shows a web-optimized preview. For the full-quality, lossless PNG, please use the download button below.</i></center>"
    )

    download_output = gr.File(label="Download Full-Quality Upscaled Image (Lossless PNG)")

    # --- Event Handling ---
    # Step 1 clears stale outputs immediately (queue=False skips the queue),
    # then step 2 runs the actual upscale on the GPU worker.
    run_button.click(
        fn=clear_outputs,
        inputs=None,
        outputs=[result_slider, download_output],
        queue=False,
    ).then(
        fn=upscale_image,
        inputs=[input_image, model_selection],
        outputs=[result_slider, download_output],
    )

# --- Pre-load the default model for a faster first-time user experience ---
try:
    print("Pre-loading default model...")
    get_upscaler(DEFAULT_MODEL)
    print("Default model loaded successfully.")
except Exception as e:
    print(f"Could not pre-load the default model. The app will still work. Error: {e}")

# Queueing is essential for public-facing apps to handle concurrent users.
demo.queue()
demo.launch(share=False)
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Core deep learning library
2
+ torch
3
+
4
+ # Gradio for the web interface
5
+ gradio
6
+ gradio-imageslider
7
+
8
+ # The upscaling library
9
+ git+https://github.com/asomoza/image_gen_aux.git