Update app.py
app.py CHANGED
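The only functional change is on line 20: the previously truncated (and syntactically invalid) `yolo_model = YOLO('` call is completed so the icon detector loads from `best.pt` and is pinned to CPU at import time. Every other line is unchanged.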
@@ -1,107 +1,107 @@
 from typing import Optional
 import spaces
 
 import gradio as gr
 import numpy as np
 import torch
 from PIL import Image
 import io
 
 
 import base64, os
 from utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
 import torch
 from PIL import Image
 
 # yolo_model = get_yolo_model(model_path='weights/icon_detect/best.pt')
 # caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence")
 
 from ultralytics import YOLO
-yolo_model = YOLO('
+yolo_model = YOLO('best.pt').to('cpu')
 from transformers import AutoProcessor, AutoModelForCausalLM
 processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained("weights/icon_caption_florence", torch_dtype=torch.float16, trust_remote_code=True).to('cuda')
 caption_model_processor = {'processor': processor, 'model': model}
 print('finish loading model!!!')
 
 
 MARKDOWN = """
 # OmniParser for Pure Vision Based General GUI Agent 🔥
 <div>
 <a href="https://arxiv.org/pdf/2408.00203">
 <img src="https://img.shields.io/badge/arXiv-2408.00203-b31b1b.svg" alt="Arxiv" style="display:inline-block;">
 </a>
 </div>
 
 OmniParser is a screen parsing tool to convert general GUI screen to structured elements.
 
 📢 [[Project Page](https://microsoft.github.io/OmniParser/)] [[Blog Post](https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/)] [[Models](https://huggingface.co/microsoft/OmniParser)]
 """
 
 # DEVICE = torch.device('cuda')
 
 @spaces.GPU
 @torch.inference_mode()
 # @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
 # @spaces.GPU(duration=65)
 def process(
     image_input,
     box_threshold,
     iou_threshold
 ) -> Optional[Image.Image]:
 
     image_save_path = 'imgs/saved_image_demo.png'
     image_input.save(image_save_path)
     # import pdb; pdb.set_trace()
     image = Image.open(image_save_path)
     box_overlay_ratio = image.size[0] / 3200
     draw_bbox_config = {
         'text_scale': 0.8 * box_overlay_ratio,
         'text_thickness': max(int(2 * box_overlay_ratio), 1),
         'text_padding': max(int(3 * box_overlay_ratio), 1),
         'thickness': max(int(3 * box_overlay_ratio), 1),
     }
 
     ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_save_path, display_img = False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=True)
     text, ocr_bbox = ocr_bbox_rslt
     # print('prompt:', prompt)
     dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_save_path, yolo_model, BOX_TRESHOLD = box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text,iou_threshold=iou_threshold)
     image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
     print('finish processing')
     parsed_content_list = '\n'.join(parsed_content_list)
     return image, str(parsed_content_list), str(label_coordinates)
 
 
 
 with gr.Blocks() as demo:
     gr.Markdown(MARKDOWN)
     with gr.Row():
         with gr.Column():
             image_input_component = gr.Image(
                 type='pil', label='Upload image')
             # set the threshold for removing the bounding boxes with low confidence, default is 0.05
             box_threshold_component = gr.Slider(
                 label='Box Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.05)
             # set the threshold for removing the bounding boxes with large overlap, default is 0.1
             iou_threshold_component = gr.Slider(
                 label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1)
             submit_button_component = gr.Button(
                 value='Submit', variant='primary')
         with gr.Column():
             image_output_component = gr.Image(type='pil', label='Image Output')
             text_output_component = gr.Textbox(label='Parsed screen elements', placeholder='Text Output')
             coordinates_output_component = gr.Textbox(label='Coordinates', placeholder='Coordinates Output')
 
     submit_button_component.click(
         fn=process,
         inputs=[
             image_input_component,
             box_threshold_component,
             iou_threshold_component
         ],
         outputs=[image_output_component, text_output_component, coordinates_output_component]
     )
 
 # demo.launch(debug=False, show_error=True, share=True)
 # demo.launch(share=True, server_port=7861, server_name='0.0.0.0')
 demo.queue().launch(share=False)
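A note on the new load path: `YOLO('best.pt')` expects the checkpoint in the Space's working directory, whereas the commented-out loader pointed at `weights/icon_detect/best.pt`. If `best.pt` is not checked into the Space, one way to materialize it at startup is a `huggingface_hub` download. This is a minimal sketch; the `repo_id` and `filename` below are assumptions based on the Models link in the app header, not something this commit configures.

```python
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

# Hedged sketch: fetch the icon-detector checkpoint before constructing YOLO.
# repo_id/filename are assumptions (see the Models link in MARKDOWN); adjust
# to wherever best.pt is actually published.
ckpt_path = hf_hub_download(repo_id='microsoft/OmniParser',
                            filename='icon_detect/best.pt')
yolo_model = YOLO(ckpt_path).to('cpu')
```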
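Pinning the detector to CPU at import time fits the ZeroGPU pattern, where CUDA is generally only usable inside functions decorated with `@spaces.GPU`; the Florence-2 captioner's `.to('cuda')` at import relies on the `spaces` package tolerating that case. If the detector should also run on the GPU per request, a minimal sketch (assuming Ultralytics' standard `to`/`predict` API; not part of this commit) looks like this:

```python
import spaces
import torch

@spaces.GPU
@torch.inference_mode()
def detect_icons(image_path: str, box_threshold: float = 0.05):
    # Hedged sketch, not part of this commit: move the CPU-resident
    # module-level detector onto the GPU for the duration of one request.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    yolo_model.to(device)
    results = yolo_model.predict(image_path, conf=box_threshold)
    boxes = results[0].boxes.xyxy.cpu().numpy()  # (N, 4) pixel xyxy boxes
    yolo_model.to('cpu')  # restore the import-time placement
    return boxes
```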
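To verify the fix end to end without clicking through the UI, `process` can be called directly with a PIL image; note its annotation says `Optional[Image.Image]`, but it actually returns the annotated image plus two strings. A minimal usage sketch, assuming a local `screenshot.png` (a hypothetical test file), that the `imgs/` directory exists, and that `spaces.GPU` degrades to a no-op outside a Space:

```python
from PIL import Image

img = Image.open('screenshot.png')  # hypothetical test screenshot
annotated, elements, coords = process(
    img,
    box_threshold=0.05,  # UI default: drop low-confidence boxes
    iou_threshold=0.1,   # UI default: drop heavily overlapping boxes
)
annotated.save('annotated.png')
print(elements)  # one captioned element per line
```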