| import gradio as gr |
| import torch |
| import json |
| |
| from ultralytics import YOLO |
|
|
| |
| |
| |
| |
|
|
| |
# Inference configuration shared by the Gradio handler below.
image_size = 640        # input resolution handed to the detector
conf_threshold = 0.25   # minimum confidence for a detection to be kept
iou_threshold = 0.15    # IoU threshold used for non-max suppression

# Load the pretrained YOLO11 checkpoint once at import time so every
# request reuses the same model instance.
model = YOLO("yolo11x.pt")
|
|
def yolov5_inference(image=None):
    """Run YOLO object detection on an image and return detections as JSON.

    Args:
        image: Input image (a PIL image supplied by the Gradio ``Image``
            component); may be ``None`` when no image was provided.

    Returns:
        A JSON string of the form ``{"tensorflow": [...]}`` where each entry
        describes one detected box with keys ``classe`` (class id), ``nome``
        (class name), ``score`` (confidence), and the box geometry ``x``,
        ``y`` (top-left corner), ``w``, ``h`` (width and height in pixels).
    """
    # Pass thresholds and image size to the predict call: on the modern
    # ``ultralytics.YOLO`` API, assigning to ``model.conf`` / ``model.iou``
    # has no effect, and ``image_size`` was previously never used at all.
    results = model(
        [image],
        imgsz=image_size,
        conf=conf_threshold,
        iou=iou_threshold,
    )

    detections = {"tensorflow": []}

    for result in results:
        for box in result.boxes:
            # ``xyxy`` gives the two opposite corners of the box.
            x1, y1, x2, y2 = box.xyxy[0].cpu().numpy().astype(int)

            score_arr = box.conf[0].cpu().numpy()
            class_id = int(box.cls[0].cpu().numpy())

            detections["tensorflow"].append({
                "classe": class_id,
                "nome": result.names[class_id],
                "score": score_arr.item() if score_arr.size > 0 else 0.0,
                "x": int(x1),
                "y": int(y1),
                # Bug fix: the original stored the raw x2/y2 corner under
                # "w"/"h"; emit the actual width/height the keys promise.
                "w": int(x2 - x1),
                "h": int(y2 - y1),
            })

    return json.dumps(detections)
| |
|
|
# Gradio UI wiring. The ``gr.inputs`` / ``gr.outputs`` namespaces were
# removed in Gradio 3+; use the top-level components instead.
inputs = [
    gr.Image(type="pil", label="Input Image"),
]

# The handler returns a JSON string, so the output is a text component
# (the original built an unused Image output and passed ``["text"]``).
outputs = gr.Textbox(label="Detections (JSON)")

title = "YOLO11"
# Fixed description: the handler runs a YOLO11 checkpoint, not a pip
# re-implementation of YOLOv5 as the original text claimed.
description = (
    "YOLO11 is a family of object detection models pretrained on the "
    "COCO dataset."
)

# Example images; kept out of the Interface call because the files are not
# guaranteed to exist next to this script — TODO confirm before wiring in.
examples = [['zidane.jpg'], ['image3.jpg']]

demo_app = gr.Interface(
    fn=yolov5_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
)
# ``launch(enable_queue=...)`` was removed in newer Gradio; enable the
# request queue explicitly instead.
demo_app.queue()
demo_app.launch()
| |
|
|