Commit: "Update app.py" — diff view for the changed file `app.py`.
|
```diff
@@ -115,7 +115,6 @@ def inference(image, upscale, large_input_flag, color_fix):
 # img2tensor
 y = np.array(image).astype(np.float32) / 255.
 y = torch.from_numpy(np.transpose(y[:, :, [2, 1, 0]], (2, 0, 1))).float()
-# y = torch.from_numpy(np.transpose(y, (2, 0, 1))).float()
 y = y.unsqueeze(0).to(device)

 # inference
```
```diff
@@ -183,18 +182,18 @@ description = ''' ### Spatially-Adaptive Feature Modulation for Efficient Image

 article = "<p style='text-align: center'><a href='https://github.com/sunny2109/SAFMN/tree/main' target='_blank'>Spatially-Adaptive Feature Modulation for Efficient Image Super-Resolution</a></p>"

-#### Image
+#### Image examples
 examples = [
 ['real_testdata/060.png'],
-
-
-
-
-
-
-
-
-
+['real_testdata/004.png'],
+['real_testdata/013.png'],
+['real_testdata/014.png'],
+['real_testdata/015.png'],
+['real_testdata/021.png'],
+['real_testdata/032.png'],
+['real_testdata/045.png'],
+['real_testdata/036.png'],
+['real_testdata/058.png'],
 ]

 css = """
```
NOTE(review): the nine removed lines (old lines 189–197) render blank in this capture; their original content was not preserved by the extraction — confirm against the repository history. Added lines are taken from the "after" pane of the same diff. Original indentation was stripped by the extraction.
```diff
@@ -205,26 +204,6 @@ css = """
 }
 """

-# demo = gr.Interface(
-# fn=inference,
-# inputs=[
-# gr.Image(value="real_testdata/060.png", type="pil", label="Input"),
-# gr.Number(minimum=2, maximum=4, label="Upscaling factor (up to 4)"),
-# gr.Checkbox(value=False, label="Memory-efficient inference"),
-# gr.Checkbox(value=False, label="Color correction"),
-# ],
-
-# outputs = ImageSlider(label="Super-Resolved Image",
-# type="pil",
-# show_download_button=True,
-# ),
-# title=title,
-# description=description,
-# article=article,
-# examples=examples,
-# css=css,
-# )
-
 demo = gr.Interface(
 fn=inference,
 inputs=[
```
NOTE(review): this hunk deletes a commented-out, earlier version of the `gr.Interface` call. Indentation was stripped by the extraction.
```diff
@@ -243,9 +222,6 @@ demo = gr.Interface(
 label="Download Output",
 type='filepath',
 ),
-# gr.Image(
-# label="Download Output"
-# ),
 ],
 title=title,
 description=description,
```
Rendered "after" pane of the diff (file content following the change; `+` marks added lines; indentation was stripped by the extraction):

```
115 | # img2tensor
116 | y = np.array(image).astype(np.float32) / 255.
117 | y = torch.from_numpy(np.transpose(y[:, :, [2, 1, 0]], (2, 0, 1))).float()
118 | y = y.unsqueeze(0).to(device)
119 |
120 | # inference
⋮
182 |
183 | article = "<p style='text-align: center'><a href='https://github.com/sunny2109/SAFMN/tree/main' target='_blank'>Spatially-Adaptive Feature Modulation for Efficient Image Super-Resolution</a></p>"
184 |
185 | + #### Image examples
186 | examples = [
187 | ['real_testdata/060.png'],
188 | + ['real_testdata/004.png'],
189 | + ['real_testdata/013.png'],
190 | + ['real_testdata/014.png'],
191 | + ['real_testdata/015.png'],
192 | + ['real_testdata/021.png'],
193 | + ['real_testdata/032.png'],
194 | + ['real_testdata/045.png'],
195 | + ['real_testdata/036.png'],
196 | + ['real_testdata/058.png'],
197 | ]
198 |
199 | css = """
⋮
204 | }
205 | """
206 |
207 | demo = gr.Interface(
208 | fn=inference,
209 | inputs=[
⋮
222 | label="Download Output",
223 | type='filepath',
224 | ),
225 | ],
226 | title=title,
227 | description=description,
```