Spaces:
Runtime error
Runtime error
Upload 5 files
Browse files- README.md +0 -13
- app.py +82 -0
- labels.txt +8 -0
- model.onnx +3 -0
- requirements.txt +5 -0
README.md
CHANGED
|
@@ -1,13 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Irisbuddy
|
| 3 |
-
emoji: 🐢
|
| 4 |
-
colorFrom: indigo
|
| 5 |
-
colorTo: gray
|
| 6 |
-
sdk: gradio
|
| 7 |
-
sdk_version: 5.45.0
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
-
license: apache-2.0
|
| 11 |
-
---
|
| 12 |
-
|
| 13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app.py
import gradio as gr
import onnxruntime as ort
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import cv2
import io

# Class names, one per line; list index == class id produced by the model.
with open("labels.txt", "r") as f:
    LABELS = [line.strip() for line in f]

# CPU-only ONNX Runtime session for the bundled detector weights.
sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
|
| 15 |
+
|
| 16 |
+
# Helper: simple preprocess - adapt to your model's needs
|
| 17 |
+
def preprocess(pil_img, input_size=(640, 640)):
    """Resize *pil_img* and convert it to a (1, C, H, W) float32 tensor in [0, 1].

    NOTE(review): normalization/layout assume the exported model expects
    0-1 channels-first input — adapt if the graph differs.
    """
    rgb = pil_img.convert("RGB").resize(input_size)
    arr = np.asarray(rgb, dtype=np.float32) / 255.0  # HWC, normalized
    # HWC -> CHW, then prepend the batch axis the ONNX graph expects.
    chw = arr.transpose(2, 0, 1)
    return chw[None, ...]
|
| 24 |
+
|
| 25 |
+
# Helper: very basic NMS and postprocess - adapt as necessary
|
| 26 |
+
def postprocess(outputs, orig_w, orig_h, conf_threshold=0.3, iou_threshold=0.45):
    """Turn raw model output rows into detection dicts.

    Each row of ``outputs[0]`` is read as (x1, y1, x2, y2, score, class);
    rows below *conf_threshold* are dropped and normalized coordinates are
    scaled back to the original image size.

    NOTE(review): assumes the model already applied NMS — *iou_threshold*
    is accepted but currently unused; confirm against the exported graph.
    """
    detections = []
    for x1, y1, x2, y2, score, cls in outputs[0]:
        if score < conf_threshold:
            continue  # too weak to report
        detections.append({
            "box": [x1 * orig_w, y1 * orig_h, x2 * orig_w, y2 * orig_h],
            "score": float(score),
            "class": int(cls),
        })
    return detections
|
| 43 |
+
|
| 44 |
+
def draw_boxes(pil_img, boxes):
    """Return an RGB copy of *pil_img* with each detection drawn as a red
    rectangle plus a "<label> <score>" caption above its top-left corner."""
    canvas = pil_img.convert("RGB")
    painter = ImageDraw.Draw(canvas)
    for det in boxes:
        x1, y1, x2, y2 = det["box"]
        cls_id = det["class"]
        # Fall back to the numeric id when it is outside the label table.
        if 0 <= cls_id < len(LABELS):
            label = LABELS[cls_id]
        else:
            label = str(cls_id)
        painter.rectangle([x1, y1, x2, y2], outline="red", width=3)
        painter.text((x1, y1 - 10), f"{label} {det['score']:.2f}", fill="red")
    return canvas
|
| 53 |
+
|
| 54 |
+
def predict(image):
    """Run the ONNX detector on one input image.

    Args:
        image: HxWxC numpy array (Gradio "numpy" input) or a file-like
            object with a ``read()`` method; ``None`` when no input given.

    Returns:
        Tuple of (annotated PIL image, newline-separated "label: score"
        text), or ``(None, "No image")`` when *image* is ``None``.
    """
    if image is None:
        return None, "No image"
    # Gradio hands us a numpy array; accept a file-like object as a fallback.
    if isinstance(image, np.ndarray):
        pil = Image.fromarray(image.astype('uint8'))
    else:
        pil = Image.open(io.BytesIO(image.read()))
    orig_w, orig_h = pil.size
    input_tensor = preprocess(pil)  # adapt input_size if the model differs

    # Run the ONNX session on its single input.
    input_name = sess.get_inputs()[0].name
    outputs = sess.run(None, {input_name: input_tensor})
    # Postprocess according to the model's output structure.
    boxes = postprocess(outputs, orig_w, orig_h)
    out_img = draw_boxes(pil, boxes)
    if boxes:
        # Guard the label lookup the same way draw_boxes does, so an
        # out-of-range class id degrades to its numeric form instead of
        # raising IndexError here while drawing succeeded.
        lines = []
        for b in boxes:
            cls_id = b["class"]
            label = LABELS[cls_id] if 0 <= cls_id < len(LABELS) else str(cls_id)
            lines.append(f"{label}: {b['score']:.2f}")
        txt = "\n".join(lines)
    else:
        txt = "No detections"
    return out_img, txt
|
| 69 |
+
|
| 70 |
+
# Gradio UI
|
| 71 |
+
# Gradio UI
title = "ONNX Demo"
desc = "Upload an image or use webcam. Adapt preprocessing/postprocessing per your model."

# NOTE: the previous gr.Image(source="upload", tool="editor", ...) call is
# Gradio 3.x API. Under the Gradio 5.x SDK pinned by this Space, gr.Image has
# no `source`/`tool` parameters, so construction raised TypeError at startup
# (the Space's "Runtime error"). Gradio 4+/5 uses `sources=[...]`; the editing
# tool moved to the separate gr.ImageEditor component.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(sources=["upload", "webcam"], type="numpy"),
    outputs=[gr.Image(type="pil"), gr.Textbox()],
    title=title,
    description=desc,
    examples=None,
)

if __name__ == "__main__":
    iface.launch()
|
labels.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
car
|
| 2 |
+
curb
|
| 3 |
+
motorcycle
|
| 4 |
+
pothole
|
| 5 |
+
pedestrian
|
| 6 |
+
step
|
| 7 |
+
post
|
| 8 |
+
obstacle
|
model.onnx
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4daa0df1cbd53d9addf692c553ddb885ffe1e4b68d78de58c7eb423439c8e5e2
|
| 3 |
+
size 12938955
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=5.0
|
| 2 |
+
onnxruntime
|
| 3 |
+
numpy
|
| 4 |
+
pillow
|
| 5 |
+
opencv-python-headless
|