import io
from random import choice

import gradio as gr
import matplotlib.pyplot as plt
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Load both YOLOS checkpoints up front so the UI can switch between them.
image_processor_tiny = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny")
model_tiny = AutoModelForObjectDetection.from_pretrained("hustvl/yolos-tiny")
image_processor_small = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model_small = AutoModelForObjectDetection.from_pretrained("hustvl/yolos-small")
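# Note (editor's annotation): both checkpoints are YOLOS models fine-tuned on
# COCO object detection; "tiny" trades accuracy for speed, "small" is heavier
# but stronger. They share the same COCO label set, so id2label is
# interchangeable between them.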
COLORS = ["#ff7f7f", "#ff7fbf", "#ff7fff", "#bf7fff",
          "#7f7fff", "#7fbfff", "#7fffff", "#7fffbf",
          "#7fff7f", "#bfff7f", "#ffff7f", "#ffbf7f"]

fdic = {
    "family": "Impact",
    "style": "italic",
    "size": 15,
    "color": "yellow",
    "weight": "bold",
}
def get_figure(in_pil_img, in_results, in_id2label):
    plt.figure(figsize=(16, 10))
    plt.imshow(in_pil_img)
    ax = plt.gca()

    for score, label, box in zip(in_results["scores"], in_results["labels"], in_results["boxes"]):
        selected_color = choice(COLORS)
        # Boxes arrive as [xmin, ymin, xmax, ymax]; convert to x/y/width/height.
        x, y, w, h = int(box[0]), int(box[1]), int(box[2] - box[0]), int(box[3] - box[1])
        ax.add_patch(plt.Rectangle((x, y), w, h, fill=False, color=selected_color, linewidth=3))
        ax.text(x, y, f"{in_id2label[label.item()]}: {round(score.item() * 100, 1)}%", fontdict=fdic)

    plt.axis("off")
    return plt.gcf()
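# The in_results argument is one element of the list returned by
# post_process_object_detection below: a dict with "scores", "labels", and
# "boxes" tensors, boxes in absolute [xmin, ymin, xmax, ymax] pixel coordinates.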
# Map the radio-button choice to the matching (processor, model) pair.
MODELS = {
    "yolos-tiny": (image_processor_tiny, model_tiny),
    "yolos-small": (image_processor_small, model_small),
}

def infer(in_model, in_threshold, in_pil_img):
    image_processor, model = MODELS[in_model]

    inputs = image_processor(images=in_pil_img, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert outputs (bounding boxes and class logits) to COCO API format.
    # target_sizes expects (height, width); PIL's .size is (width, height).
    target_sizes = torch.tensor([in_pil_img.size[::-1]])
    results = image_processor.post_process_object_detection(
        outputs, threshold=in_threshold, target_sizes=target_sizes
    )[0]

    # Render the annotated figure into an in-memory PNG and hand back a PIL image.
    figure = get_figure(in_pil_img, results, model.config.id2label)
    buf = io.BytesIO()
    figure.savefig(buf, bbox_inches="tight")
    plt.close(figure)
    buf.seek(0)
    output_pil_img = Image.open(buf)
    return output_pil_img
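# Illustrative sanity check, not part of the original Space; "sample.jpg" is a
# hypothetical local file:
#
#   img = Image.open("sample.jpg").convert("RGB")
#   infer("yolos-tiny", 0.9, img).save("annotated.jpg")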
with gr.Blocks(
    css=".gradio-container {background:lightyellow;color:red;}",
    title="Test",
) as demo:
    gr.HTML('<div style="font-size:12pt; text-align:center; color:yellow;">Object Detection Demo</div>')
    # Offer the two YOLOS checkpoints loaded above.
    model = gr.Radio(["yolos-tiny", "yolos-small"], value="yolos-tiny", label="model")
    with gr.Row():
        input_image = gr.Image(label="", type="pil")
        output_image = gr.Image(type="pil")
    threshold = gr.Slider(0, 1.0, value=0.9, label="threshold")
    send_btn = gr.Button("Predict")
    send_btn.click(fn=infer, inputs=[model, threshold, input_image], outputs=[output_image])

demo.launch(debug=True)

### EOF ###