Spaces:
Runtime error
Runtime error
lzyhha
committed on
Commit
·
1c029a6
1
Parent(s):
4828266
test
Browse files
- app.py +12 -10
- visualcloze.py +1 -0
app.py
CHANGED
|
@@ -3,6 +3,7 @@ import spaces
|
|
| 3 |
from visualcloze import VisualClozeModel
|
| 4 |
import gradio as gr
|
| 5 |
import examples
|
|
|
|
| 6 |
from functools import partial
|
| 7 |
from data.prefix_instruction import get_layout_instruction
|
| 8 |
from huggingface_hub import snapshot_download
|
|
@@ -27,7 +28,7 @@ GUIDANCE = """
|
|
| 27 |
|
| 28 |
## 🔥 Task Examples:
|
| 29 |
Click the task button in the right bottom to acquire **examples** of various tasks.
|
| 30 |
-
Make sure all images and prompts are loaded before clicking the generate button
|
| 31 |
|
| 32 |
|
| 33 |
## 💻 Runtime on the Zero GPU:
|
|
@@ -338,7 +339,7 @@ def create_demo(model):
|
|
| 338 |
upsampling_steps=upsampling_steps, upsampling_noise=upsampling_noise
|
| 339 |
)
|
| 340 |
except Exception as e:
|
| 341 |
-
raise gr.Error('Process error. Possible that the task examples have not finished loading yet. Error: ' + e)
|
| 342 |
|
| 343 |
output = gr.update(
|
| 344 |
elem_id='output_gallery',
|
|
@@ -502,14 +503,15 @@ def generate(
|
|
| 502 |
prompts,
|
| 503 |
seed, cfg, steps,
|
| 504 |
upsampling_steps, upsampling_noise):
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
|
|
|
| 513 |
|
| 514 |
|
| 515 |
def parse_args():
|
|
|
|
| 3 |
from visualcloze import VisualClozeModel
|
| 4 |
import gradio as gr
|
| 5 |
import examples
|
| 6 |
+
import torch
|
| 7 |
from functools import partial
|
| 8 |
from data.prefix_instruction import get_layout_instruction
|
| 9 |
from huggingface_hub import snapshot_download
|
|
|
|
| 28 |
|
| 29 |
## 🔥 Task Examples:
|
| 30 |
Click the task button in the right bottom to acquire **examples** of various tasks.
|
| 31 |
+
**Make sure all images and prompts are loaded before clicking the generate button.**
|
| 32 |
|
| 33 |
|
| 34 |
## 💻 Runtime on the Zero GPU:
|
|
|
|
| 339 |
upsampling_steps=upsampling_steps, upsampling_noise=upsampling_noise
|
| 340 |
)
|
| 341 |
except Exception as e:
|
| 342 |
+
raise gr.Error('Process error. Possible that the task examples have not finished loading yet. Error: ' + str(e))
|
| 343 |
|
| 344 |
output = gr.update(
|
| 345 |
elem_id='output_gallery',
|
|
|
|
| 503 |
prompts,
|
| 504 |
seed, cfg, steps,
|
| 505 |
upsampling_steps, upsampling_noise):
|
| 506 |
+
with torch.no_grad():
|
| 507 |
+
return model.process_images(
|
| 508 |
+
images=images,
|
| 509 |
+
prompts=prompts,
|
| 510 |
+
seed=seed,
|
| 511 |
+
cfg=cfg,
|
| 512 |
+
steps=steps,
|
| 513 |
+
upsampling_steps=upsampling_steps,
|
| 514 |
+
upsampling_noise=upsampling_noise)
|
| 515 |
|
| 516 |
|
| 517 |
def parse_args():
|
visualcloze.py
CHANGED
|
@@ -450,6 +450,7 @@ class VisualClozeModel:
|
|
| 450 |
row_end = grid_h * grid_w
|
| 451 |
for i in range(row_start, row_end):
|
| 452 |
# when the image is masked, then output it
|
|
|
|
| 453 |
if mask_position[i - row_start] and is_upsampling:
|
| 454 |
cropped = output_images[-1].crop(((i - row_start) * ret_w // self.grid_w, 0, ((i - row_start) + 1) * ret_w // self.grid_w, ret_h))
|
| 455 |
upsampled = self.upsampling(
|
|
|
|
| 450 |
row_end = grid_h * grid_w
|
| 451 |
for i in range(row_start, row_end):
|
| 452 |
# when the image is masked, then output it
|
| 453 |
+
print(mask_position, i, row_start, is_upsampling, grid_h, grid_w)
|
| 454 |
if mask_position[i - row_start] and is_upsampling:
|
| 455 |
cropped = output_images[-1].crop(((i - row_start) * ret_w // self.grid_w, 0, ((i - row_start) + 1) * ret_w // self.grid_w, ret_h))
|
| 456 |
upsampled = self.upsampling(
|