Spaces:
Runtime error
Runtime error
Joseph Catrambone
committed on
Commit
·
61dc3f4
1
Parent(s):
e14c450
Perform the annotation after the image is resized to avoid losing line size.
Browse files
app.py
CHANGED
|
@@ -73,16 +73,14 @@ def process(input_image: Image.Image, prompt, a_prompt, n_prompt, max_faces: int
|
|
| 73 |
raise gr.Error("Please provide an image")
|
| 74 |
try:
|
| 75 |
if image_file_live_opt == 'file':
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
visualization = Image.fromarray(empty)
|
| 80 |
-
visualization = pad_image(visualization).resize((512, 512))
|
| 81 |
elif image_file_live_opt == 'webcam':
|
| 82 |
base64_img = live_conditioning['image']
|
| 83 |
image_data = base64.b64decode(base64_img.split(',')[1])
|
| 84 |
-
visualization = Image.open(BytesIO(image_data)).convert(
|
| 85 |
-
'RGB').resize((512, 512))
|
| 86 |
if seed == -1:
|
| 87 |
seed = random.randint(0, 2147483647)
|
| 88 |
generator = torch.Generator(device).manual_seed(seed)
|
|
|
|
| 73 |
raise gr.Error("Please provide an image")
|
| 74 |
try:
|
| 75 |
if image_file_live_opt == 'file':
|
| 76 |
+
# Resize before annotation so that we can keep our line-widths consistent with the training data.
|
| 77 |
+
input_image = pad_image(input_image.convert('RGB')).resize((512, 512))
|
| 78 |
+
empty = generate_annotation(np.array(input_image), max_faces, min_confidence)
|
| 79 |
+
visualization = Image.fromarray(empty)
|
|
|
|
| 80 |
elif image_file_live_opt == 'webcam':
|
| 81 |
base64_img = live_conditioning['image']
|
| 82 |
image_data = base64.b64decode(base64_img.split(',')[1])
|
| 83 |
+
visualization = Image.open(BytesIO(image_data)).convert('RGB').resize((512, 512))
|
|
|
|
| 84 |
if seed == -1:
|
| 85 |
seed = random.randint(0, 2147483647)
|
| 86 |
generator = torch.Generator(device).manual_seed(seed)
|