Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -15,8 +15,9 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
| 15 |
|
| 16 |
@spaces.GPU
|
| 17 |
def udop_box_inference(image, text_prompt, box_coordinates):
|
| 18 |
-
|
| 19 |
-
|
|
|
|
| 20 |
extracted_image = extract_box(image, box_coordinates)
|
| 21 |
extracted_image.save("cropped_image.png")
|
| 22 |
|
|
@@ -50,6 +51,11 @@ def normalize_bbox(bbox, width, height):
|
|
| 50 |
|
| 51 |
|
| 52 |
def extract_box(image, coordinates):
    """Crop *image* to the axis-aligned box given by *coordinates*.

    Parameters
    ----------
    image : PIL.Image.Image
        Source image to crop.
    coordinates : sequence of 4 numbers
        (left, upper, right, lower) pixel coordinates of the box.

    Returns
    -------
    PIL.Image.Image
        The cropped region as a new image.
    """
    left, upper, right, lower = coordinates
    return image.crop((left, upper, right, lower))
|
|
@@ -61,9 +67,10 @@ def infer_box(prompts, text_prompts):
|
|
| 61 |
image = prompts["image"]
|
| 62 |
if image is None:
|
| 63 |
gr.Error("Please upload an image and draw a box before submitting")
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
|
|
|
| 67 |
return udop_box_inference(image, text_prompts, points)
|
| 68 |
|
| 69 |
|
|
@@ -80,7 +87,16 @@ with gr.Blocks(title="UDOP") as demo:
|
|
| 80 |
with gr.Column():
|
| 81 |
output = gr.Textbox(label="UDOP Output")
|
| 82 |
|
| 83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
btn.click(infer_box, inputs=[im,text_prompt], outputs=[output])
|
| 85 |
|
| 86 |
demo.launch(debug=True)
|
|
|
|
| 15 |
|
| 16 |
@spaces.GPU
|
| 17 |
def udop_box_inference(image, text_prompt, box_coordinates):
|
| 18 |
+
if box_coordinates != []:
|
| 19 |
+
box_coordinates = [box_coordinates[0], box_coordinates[1], box_coordinates[3], box_coordinates[4]]
|
| 20 |
+
|
| 21 |
extracted_image = extract_box(image, box_coordinates)
|
| 22 |
extracted_image.save("cropped_image.png")
|
| 23 |
|
|
|
|
| 51 |
|
| 52 |
|
| 53 |
def extract_box(image, coordinates):
    """Return the region of *image* delimited by *coordinates*, or the
    unmodified image when no box was drawn.

    Parameters
    ----------
    image : PIL.Image.Image or str
        The image, or a filesystem path to an image file.
    coordinates : sequence
        (x1, y1, x2, y2) pixel coordinates of the box; an empty
        sequence means "no box drawn".

    Returns
    -------
    PIL.Image.Image
        The cropped region, or the full image when *coordinates* is empty.
    """
    # Accept a path as well as an already-loaded image.
    # isinstance() is the idiomatic type check (was: type(image) == str).
    if isinstance(image, str):
        image = Image.open(image)
    # Truthiness covers [] as well as () and None, all of which would
    # otherwise fail the 4-way unpack below.
    if not coordinates:
        return image
    x, y, x2, y2 = coordinates
    return image.crop((x, y, x2, y2))
|
|
|
|
def infer_box(prompts, text_prompts):
    """Gradio callback: run UDOP inference on the sketched image region.

    Parameters
    ----------
    prompts : dict
        Value of the image-sketch component; carries an "image" key and,
        when the user drew a box, a "points" key with box coordinates.
    text_prompts : str
        The task/text prompt typed by the user.

    Returns
    -------
    The result of ``udop_box_inference`` on the (possibly cropped) image.

    Raises
    ------
    gr.Error
        When no image has been uploaded.
    """
    image = prompts["image"]
    if image is None:
        # gr.Error must be *raised* for Gradio to surface it to the user;
        # the original merely constructed it, which was a silent no-op.
        raise gr.Error("Please upload an image and draw a box before submitting")
    # A missing or empty "points" entry means the user drew no box;
    # catch only the lookup failures instead of a bare except.
    try:
        points = prompts["points"][0]
    except (KeyError, IndexError, TypeError):
        points = []
    return udop_box_inference(image, text_prompts, points)
|
| 75 |
|
| 76 |
|
|
|
|
| 87 |
with gr.Column():
|
| 88 |
output = gr.Textbox(label="UDOP Output")
|
| 89 |
|
| 90 |
+
with gr.Row():
|
| 91 |
+
gr.Examples(
|
| 92 |
+
examples = [[PromptValue(image = "/content/dummy_pdf.png",
|
| 93 |
+
points = [[87.0, 908.0, 2.0, 456.0, 972.0, 3.0]]), "Question answering. What is the objective?"],
|
| 94 |
+
[PromptValue(image = "/content/docvqa_example (3).png",
|
| 95 |
+
points = [[]]), "Question answering. How much is the total?"]],
|
| 96 |
+
inputs=[im, text_prompt],
|
| 97 |
+
outputs=output,
|
| 98 |
+
fn=infer_box,
|
| 99 |
+
)
|
| 100 |
btn.click(infer_box, inputs=[im,text_prompt], outputs=[output])
|
| 101 |
|
| 102 |
demo.launch(debug=True)
|