Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -158,6 +158,7 @@ validation_pipeline._init_tiled_vae(encoder_tile_size=1024,
|
|
| 158 |
weight_dtype = torch.float16
|
| 159 |
device = "cuda"
|
| 160 |
|
|
|
|
| 161 |
# Move text_encode and vae to gpu and cast to weight_dtype
|
| 162 |
text_encoder.to(device, dtype=weight_dtype)
|
| 163 |
vae.to(device, dtype=weight_dtype)
|
|
@@ -173,17 +174,27 @@ tag_model.eval()
|
|
| 173 |
tag_model.to(device, dtype=weight_dtype)
|
| 174 |
|
| 175 |
def preprocess_image(input_image: Image.Image) -> Image.Image:
|
| 176 |
-
img = input_image.copy()
|
| 177 |
-
img.thumbnail((1024, 1024), Image.Resampling.BILINEAR)
|
| 178 |
-
return img
|
| 179 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
|
| 181 |
@spaces.GPU()
|
| 182 |
def preprocess_n_magnify(input_image: Image.Image):
|
| 183 |
-
|
| 184 |
-
|
|
|
|
|
|
|
| 185 |
|
| 186 |
-
return preprocessed_img, (preprocessed_img, magnified_img)
|
| 187 |
|
| 188 |
def get_duration(
|
| 189 |
input_image: Image.Image,
|
|
@@ -201,12 +212,7 @@ def get_duration(
|
|
| 201 |
progress,
|
| 202 |
):
|
| 203 |
|
| 204 |
-
|
| 205 |
-
return 90
|
| 206 |
-
elif steps > 4 or duration_seconds > 2:
|
| 207 |
-
return 75
|
| 208 |
-
else:
|
| 209 |
-
return 60
|
| 210 |
|
| 211 |
|
| 212 |
@spaces.GPU(duration=get_duration)
|
|
@@ -227,7 +233,6 @@ def magnify(
|
|
| 227 |
):
|
| 228 |
|
| 229 |
|
| 230 |
-
|
| 231 |
process_size = 512
|
| 232 |
resize_preproc = transforms.Compose([
|
| 233 |
transforms.Resize(process_size, interpolation=transforms.InterpolationMode.BILINEAR),
|
|
@@ -319,7 +324,7 @@ with gr.Blocks(css=css, theme=theme) as demo:
|
|
| 319 |
with gr.Column():
|
| 320 |
input_image = gr.Image(type="pil", height=256)
|
| 321 |
run_button = gr.Button("π Magnify 4x", variant="primary")
|
| 322 |
-
|
| 323 |
with gr.Accordion("Options", visible=False):
|
| 324 |
user_prompt = gr.Textbox(label="User Prompt", value="")
|
| 325 |
positive_prompt = gr.Textbox(label="Positive Prompt", value="clean, high-resolution, 8k, best quality, masterpiece")
|
|
@@ -356,14 +361,14 @@ with gr.Blocks(css=css, theme=theme) as demo:
|
|
| 356 |
inputs=[
|
| 357 |
input_image,
|
| 358 |
],
|
| 359 |
-
outputs=[
|
| 360 |
fn=preprocess_n_magnify,
|
| 361 |
cache_examples=True,
|
| 362 |
)
|
| 363 |
inputs = [
|
| 364 |
input_image,
|
| 365 |
]
|
| 366 |
-
run_button.click(fn=magnify, inputs=
|
| 367 |
-
input_image.upload(fn=preprocess_image,inputs=input_image, outputs=
|
| 368 |
|
| 369 |
demo.launch(share=True)
|
|
|
|
| 158 |
weight_dtype = torch.float16
|
| 159 |
device = "cuda"
|
| 160 |
|
| 161 |
+
|
| 162 |
# Move text_encode and vae to gpu and cast to weight_dtype
|
| 163 |
text_encoder.to(device, dtype=weight_dtype)
|
| 164 |
vae.to(device, dtype=weight_dtype)
|
|
|
|
| 174 |
tag_model.to(device, dtype=weight_dtype)
|
| 175 |
|
| 176 |
def preprocess_image(input_image: "Image.Image") -> int:
    """Map the input image's resolution to an estimated GPU duration.

    NOTE(review): despite its name, this function no longer returns a
    preprocessed image — it returns a time budget in seconds, consumed by
    ``preprocess_n_magnify`` and shown in the ``duration_time`` textbox
    via the ``input_image.upload`` handler.

    Args:
        input_image: the uploaded image; only its ``.size`` is read.

    Returns:
        Estimated duration in seconds: 120 if either side exceeds 1024px,
        90 if either exceeds 512px, 75 if either exceeds 384px, else 60.
    """
    ori_width, ori_height = input_image.size

    # BUG FIX: the original chain tested `> 384` first, then
    # `elif > 512`, `elif > 1024`. Since any side over 512 is also over
    # 384, the 90s and 120s branches were unreachable and every image
    # larger than 384px was budgeted only 75s. Check the largest
    # threshold first so each tier is actually reachable.
    if ori_width > 1024 or ori_height > 1024:
        return 120
    if ori_width > 512 or ori_height > 512:
        return 90
    if ori_width > 384 or ori_height > 384:
        return 75
    return 60
|
| 190 |
|
| 191 |
@spaces.GPU()
def preprocess_n_magnify(input_image: Image.Image):
    """Estimate the GPU time budget for *input_image*, then magnify it.

    Returns:
        A 2-tuple of the estimated duration (bound to the
        ``duration_time`` textbox in the examples wiring) and an
        ``(original, magnified)`` image pair for the result gallery.
    """
    # preprocess_image returns the estimated GPU duration in seconds,
    # derived from the input resolution.
    gpu_seconds = preprocess_image(input_image)
    magnified = magnify(input_image, gpu_seconds)
    return gpu_seconds, (input_image, magnified)
|
| 197 |
|
|
|
|
| 198 |
|
| 199 |
def get_duration(
|
| 200 |
input_image: Image.Image,
|
|
|
|
| 212 |
progress,
|
| 213 |
):
|
| 214 |
|
| 215 |
+
return duration_seconds
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
|
| 217 |
|
| 218 |
@spaces.GPU(duration=get_duration)
|
|
|
|
| 233 |
):
|
| 234 |
|
| 235 |
|
|
|
|
| 236 |
process_size = 512
|
| 237 |
resize_preproc = transforms.Compose([
|
| 238 |
transforms.Resize(process_size, interpolation=transforms.InterpolationMode.BILINEAR),
|
|
|
|
| 324 |
with gr.Column():
|
| 325 |
input_image = gr.Image(type="pil", height=256)
|
| 326 |
run_button = gr.Button("π Magnify 4x", variant="primary")
|
| 327 |
+
duration_time = gr.Text(label="duration time", value=60)
|
| 328 |
with gr.Accordion("Options", visible=False):
|
| 329 |
user_prompt = gr.Textbox(label="User Prompt", value="")
|
| 330 |
positive_prompt = gr.Textbox(label="Positive Prompt", value="clean, high-resolution, 8k, best quality, masterpiece")
|
|
|
|
| 361 |
inputs=[
|
| 362 |
input_image,
|
| 363 |
],
|
| 364 |
+
outputs=[duration_time, result_gallery],
|
| 365 |
fn=preprocess_n_magnify,
|
| 366 |
cache_examples=True,
|
| 367 |
)
|
| 368 |
inputs = [
|
| 369 |
input_image,
|
| 370 |
]
|
| 371 |
+
run_button.click(fn=magnify, inputs=[input_image,duration_time], outputs=[result_gallery])
|
| 372 |
+
input_image.upload(fn=preprocess_image,inputs=input_image, outputs=duration_time)
|
| 373 |
|
| 374 |
demo.launch(share=True)
|