Spaces:
Build error
Build error
Upload 8 files
Browse files- Others_30_png.rf.acb127f595ac89326853c386021c900c.jpg +0 -0
- Roof_Elements_Y5.pt +3 -0
- app.py +68 -0
- image_0.jpg +0 -0
- image_1.jpg +0 -0
- image_2.jpg +0 -0
- image_2e.jpg +0 -0
- requirements.txt +5 -0
Others_30_png.rf.acb127f595ac89326853c386021c900c.jpg
ADDED
|
Roof_Elements_Y5.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8f26cc2ea112dd530ecdef90b982750e3257e3b5bff498e2881929e2f0a039bf
|
| 3 |
+
size 42274665
|
app.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from gradio.outputs import Label
|
| 3 |
+
import cv2
|
| 4 |
+
import requests
|
| 5 |
+
import os
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from ultralytics import YOLO
|
| 9 |
+
import yolov5
|
| 10 |
+
|
| 11 |
+
# Function for inference
|
| 12 |
+
def yolov5_inference(
    image: gr.inputs.Image = None,
    model_path: gr.inputs.Dropdown = None,
    image_size: gr.inputs.Slider = 640,
    conf_threshold: gr.inputs.Slider = 0.25,
    iou_threshold: gr.inputs.Slider = 0.45,
):
    """Run YOLOv5 object detection on a single image.

    Args:
        image: Input image (PIL image, as produced by the gradio Image input).
        model_path: Path to the ``.pt`` YOLOv5 weights file to load.
        image_size: Inference resolution passed to the model.
        conf_threshold: Minimum confidence for a detection to be kept.
        iou_threshold: IoU threshold used for non-max suppression.

    Returns:
        A tuple ``(rendered, crops)`` where ``rendered`` is the input image
        with predicted boxes drawn on it, and ``crops`` is a list of
        per-detection crop arrays (channel order reversed for display —
        presumably BGR -> RGB; confirm against yolov5's crop output).
    """
    # Load the YOLOv5 model on CPU (no GPU assumed in this deployment).
    model = yolov5.load(model_path, device="cpu")

    # Apply the user-selected thresholds before running inference.
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Inference on the single input image.
    results = model([image], size=image_size)

    # Crop out each individual detection (without writing files), and
    # reverse the last axis of each crop so the gallery shows correct colors.
    crops = results.crop(save=False)
    img_crops = [crop["im"][..., ::-1] for crop in crops]

    return results.render()[0], img_crops
|
| 35 |
+
|
| 36 |
+
# ---------------------------------------------------------------------------
# Gradio UI wiring: inputs, outputs, examples, and app launch.
# ---------------------------------------------------------------------------

# Input widgets: source image, weights selector, and the three tuning sliders.
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Dropdown(["Roof_Elements_Y5.pt"], label="Model", default='Roof_Elements_Y5.pt'),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

# Output widgets: annotated image plus a gallery of per-detection crops.
outputs = gr.outputs.Image(type="filepath", label="Output Image")
outputs_crops = gr.Gallery(label="Object crop")

title = "Roof element identification"

# Example rows share everything except the image file:
# "Image", "Model", "Image Size", "Confidence Threshold", "IOU Threshold".
examples = [
    [name, 'Roof_Elements_Y5.pt', 640, 0.35, 0.45]
    for name in ('image_0.jpg', 'image_1.jpg', 'image_2.jpg')
]

# Assemble and launch the interface.
demo_app = gr.Interface(
    fn=yolov5_inference,
    inputs=inputs,
    outputs=[outputs, outputs_crops],
    title=title,
    examples=examples,
    cache_examples=True,
    live=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True, width=50, height=50)
|
image_0.jpg
ADDED
|
image_1.jpg
ADDED
|
image_2.jpg
ADDED
|
image_2e.jpg
ADDED
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==3.4.0
|
| 2 |
+
opencv-python
|
| 3 |
+
numpy<1.24
|
| 4 |
+
ultralytics
|
| 5 |
+
yolov5
|