Spaces: Build error
Commit · 57c9538
Parent(s): 6182c37
Fixing the error with the last instance of onnx
Files changed:
- app.py +97 -83
- requirements.txt +2 -2
app.py
CHANGED
@@ -1,95 +1,109 @@
 import streamlit as st
-import cv2
 import numpy as np
-import onnxruntime as ort
-from huggingface_hub import hf_hub_download
 from PIL import Image

-# Configurable Variables
-
-MODEL_PATH = hf_hub_download(repo_id="DhominickJ/DengueTect", filename="mosquito_location.onnx")
-CONF_THRESHOLD = 0.5 # Confidence threshold for detection
-IOU_THRESHOLD = 0.4 # IoU threshold for NMS
-CLASS_NAMES = ["dengue-regions", "wet_surface"] # Define class names
-IMAGE_SIZE = 640 # YOLO model expects 640x640 images
-
-# Load ONNX model
-session = ort.InferenceSession(MODEL_PATH)
-input_name = session.get_inputs()[0].name
-output_name = session.get_outputs()[0].name
-
-# Function to preprocess image
-def preprocess(image):
-    img = np.array(image)
-    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # Convert RGB to BGR for OpenCV
-    img_resized = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
-    img_transposed = img_resized.transpose(2, 0, 1) # Change to channel-first
-    img_normalized = img_transposed / 255.0 # Normalize to [0,1]
-    img_input = np.expand_dims(img_normalized.astype(np.float32), axis=0)
-    return img, img_input

-#
-
-def postprocess(outputs, original_shape):
-    if len(outputs.shape) != 2 or outputs.shape[1] < 6:
-        return [], [], [] # Return empty lists if the output format is incorrect
-
-    boxes = outputs[:, :4]
-    scores = outputs[:, 4]
-    class_ids = outputs[:, 5].astype(int)
-
-    # Apply confidence threshold
-    mask = scores >= CONF_THRESHOLD
-    boxes, scores, class_ids = boxes[mask], scores[mask], class_ids[mask]
-
-    # Ensure valid class IDs
-    valid_mask = (class_ids >= 0) & (class_ids < len(CLASS_NAMES))
-    boxes, scores, class_ids = boxes[valid_mask], scores[valid_mask], class_ids[valid_mask]
-
-    # Rescale boxes to original image size
-    h, w, _ = original_shape
-    scale_w, scale_h = w / IMAGE_SIZE, h / IMAGE_SIZE
-    boxes[:, [0, 2]] *= scale_w
-    boxes[:, [1, 3]] *= scale_h
-
-    return boxes.tolist(), scores.tolist(), class_ids.tolist()

-#
-
-
-
-
-
-        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

-

-#
-
-
-    outputs = session.run([output_name], {input_name: input_tensor})[0]
-
-    # Debugging: Show raw outputs
-    st.write("Raw Predictions Shape:", outputs.shape)
-    st.write("Raw Predictions Sample:", outputs.flatten()[:10])
-
-    boxes, scores, class_ids = postprocess(outputs, original_img.shape)
-
-    # Show confidence score range
-    if scores:
-        st.write("Min Confidence:", np.min(scores), "Max Confidence:", np.max(scores))
-    else:
-        st.write("No objects detected.")

-

-#
-

-
-

-
-
-
-
 import streamlit as st
 import numpy as np
+import cv2
 from PIL import Image
+from ultralytics import YOLO
+from huggingface_hub import hf_hub_download


+# Define static class names
+CLASS_NAMES = {0: 'dengue-regions', 1: 'wet_surface'}
+CONFIDENCE_VALUE = 0.3

+# Load YOLOv8 (.pt) model
+@st.cache_resource
+def load_pt_model():
+    # Update the path to your .pt model file
+    model_path = hf_hub_download(repo_id="DhominickJ/DengueTect", filename="best.pt")
+    return YOLO(model_path)

+# Draw bounding boxes on the image and return the image with detections drawn
+def draw_boxes_pt(image, results):
+    image_cv = np.array(image)
+    image_cv = cv2.cvtColor(image_cv, cv2.COLOR_RGB2BGR)
+    for result in results:
+        if result.boxes is not None:
+            # Get bounding boxes, confidences, and class IDs
+            boxes = result.boxes.xyxy.cpu().numpy() # (n,4)
+            confidences = result.boxes.conf.cpu().numpy() # (n,)
+            classes = result.boxes.cls.cpu().numpy() # (n,)
+            for box, conf, cls in zip(boxes, confidences, classes):
+                x1, y1, x2, y2 = box.astype(int)
+                label = f"{CLASS_NAMES.get(int(cls), 'Unknown')}: {conf:.2f}"
+                # Set color based on class
+                color = (0, 0, 255) if CLASS_NAMES[int(cls)] == 'dengue-regions' else (255, 0, 0)
+                cv2.rectangle(image_cv, (x1, y1), (x2, y2), color, 3)
+                cv2.putText(image_cv, label, (x1, max(y1-10, 0)),
+                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, color, 3)
+    # Convert back to RGB
+    image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
+    return Image.fromarray(image_cv)

+# Streamlit App
+st.title("🦟 DengueTect: Object Detection with YOLOv8 (.pt)")
+st.write("Upload an image or capture from your camera to detect mosquito breeding grounds!")

+# Choose input method
+option = st.radio("Choose Input Method:", ("Upload Image", "Use Camera"))

+# Load model once
+model = load_pt_model()

+if option == "Upload Image":
+    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
+    if uploaded_file is not None:
+        image = Image.open(uploaded_file).convert("RGB")
+        st.image(image, caption="Uploaded Image", use_column_width=True)
+
+        # Run YOLOv8 prediction; the predict() method accepts a NumPy array
+        results = model.predict(source=np.array(image), conf=CONFIDENCE_VALUE, imgsz=640)
+
+        # Collect detection details (for logging purposes)
+        detections = []
+        for result in results:
+            if result.boxes is not None:
+                boxes = result.boxes.xyxy.cpu().numpy()
+                confidences = result.boxes.conf.cpu().numpy()
+                classes = result.boxes.cls.cpu().numpy()
+                for box, conf, cls in zip(boxes, confidences, classes):
+                    x1, y1, x2, y2 = box.astype(int)
+                    detections.append((int(cls), x1, y1, x2, y2, conf))
+
+        if detections:
+            st.success(f"✅ Detected {len(detections)} objects!")
+            for det in detections:
+                st.text(f"Detected: {CLASS_NAMES.get(det[0], 'Unknown')}, "
+                        f"BBox: ({det[1]}, {det[2]}), ({det[3]}, {det[4]}), "
+                        f"Confidence: {det[5]:.2f}")
+            result_image = draw_boxes_pt(image, results)
+            st.image(result_image, caption="Detection Results", use_column_width=True)
+        else:
+            st.warning("⚠ No objects detected. Try adjusting confidence threshold.")

+elif option == "Use Camera":
+    camera_image = st.camera_input("Capture an image")
+    if camera_image is not None:
+        image = Image.open(camera_image).convert("RGB")
+        st.image(image, caption="Captured Image", use_column_width=True)
+
+        results = model.predict(source=np.array(image), conf=CONFIDENCE_VALUE, imgsz=640)
+        detections = []
+        for result in results:
+            if result.boxes is not None:
+                boxes = result.boxes.xyxy.cpu().numpy()
+                confidences = result.boxes.conf.cpu().numpy()
+                classes = result.boxes.cls.cpu().numpy()
+                for box, conf, cls in zip(boxes, confidences, classes):
+                    x1, y1, x2, y2 = box.astype(int)
+                    detections.append((int(cls), x1, y1, x2, y2, conf))
+
+        if detections:
+            st.success(f"✅ Detected {len(detections)} objects!")
+            for det in detections:
+                st.text(f"Detected: {CLASS_NAMES.get(det[0], 'Unknown')}, "
+                        f"BBox: ({det[1]}, {det[2]}), ({det[3]}, {det[4]}), "
+                        f"Confidence: {det[5]:.2f}")
+            result_image = draw_boxes_pt(image, results)
+            st.image(result_image, caption="Detection Results", use_column_width=True)
+        else:
+            st.warning("⚠ No objects detected. Try adjusting confidence threshold.")
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 streamlit
 numpy
 opencv-python
-
-

 streamlit
 numpy
 opencv-python
+Pillow
+ultralytics
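
For a quick sanity check of the new inference path outside Streamlit, a minimal sketch distilled from the added code is below. It assumes network access to the Hugging Face Hub, the same repo and weights file used above (DhominickJ/DengueTect, best.pt), and a hypothetical local test image at "test.jpg"; it exercises only the model download, prediction, and box extraction, not the UI.

# Minimal check of the .pt inference path used by the updated app.py.
# Assumes Hub access and a local image at "test.jpg" (hypothetical path).
import numpy as np
from PIL import Image
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

CLASS_NAMES = {0: 'dengue-regions', 1: 'wet_surface'}

# Download the YOLOv8 weights and load the model
model_path = hf_hub_download(repo_id="DhominickJ/DengueTect", filename="best.pt")
model = YOLO(model_path)

# Run prediction on a NumPy array, mirroring the app's predict() call
image = Image.open("test.jpg").convert("RGB")
results = model.predict(source=np.array(image), conf=0.3, imgsz=640)

# Print one line per detection: class, confidence, and box corners
for result in results:
    if result.boxes is None:
        continue
    for box, conf, cls in zip(result.boxes.xyxy.cpu().numpy(),
                              result.boxes.conf.cpu().numpy(),
                              result.boxes.cls.cpu().numpy()):
        x1, y1, x2, y2 = box.astype(int)
        print(f"{CLASS_NAMES.get(int(cls), 'Unknown')} ({conf:.2f}): ({x1}, {y1}) -> ({x2}, {y2})")

One dependency note: the updated app.py still imports huggingface_hub, which is not listed in the new requirements.txt; it may already be present in the Space's base image, but a local run needs it installed alongside the packages listed above.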