# Streamlit object-detection app (scraped from a Hugging Face Space;
# the page's "Spaces: Runtime error" status banner was part of the scrape,
# not of the program).
import numpy as np
import cv2  # OpenCV for loading the YOLO model
import streamlit as st
from PIL import Image
from collections import Counter

# ---- Page setup -----------------------------------------------------------
st.set_page_config(page_title="Object Detection App", page_icon="🖼️", layout="centered")
st.title("Object Detection with OpenCV and YOLO 🖼️")

# Intro copy shown above the uploader.
st.markdown("""
### Detect Objects in Your Images
Upload an image to automatically detect common objects and get a count of each detected object.
The application will display the image with bounding boxes around the objects.
""")

# ---- Step 1: image upload -------------------------------------------------
st.markdown("### Step 1: Upload an Image for Object Detection")
uploaded_file = st.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])
# Function to detect objects using OpenCV's YOLO
from functools import lru_cache


@lru_cache(maxsize=1)
def _load_yolo_net():
    """Load and cache the YOLOv3 network so the weights file is read only once."""
    model_config = 'yolov3.cfg'       # Path to your downloaded .cfg file
    model_weights = 'yolov3.weights'  # Path to your downloaded .weights file
    return cv2.dnn.readNet(model_weights, model_config)


def detect_objects_with_yolo(image, conf_threshold=0.5, nms_threshold=0.4):
    """Run YOLOv3 object detection on a BGR image.

    Args:
        image: HxWx3 BGR image (numpy array, as produced by cv2.cvtColor below).
        conf_threshold: minimum class confidence for a detection to be kept.
        nms_threshold: IoU threshold used by non-maximum suppression to drop
            duplicate, overlapping boxes for the same object.

    Returns:
        Tuple ``(boxes, labels, confidences)`` of parallel lists: pixel
        ``[x, y, w, h]`` boxes (top-left corner), integer class ids, and
        float confidences.
    """
    # Reloading the weights from disk on every call is very slow; use the
    # cached network instead.
    net = _load_yolo_net()

    # Resolve the names of the unconnected output layers (YOLO's detection heads).
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]

    # Scale pixels by 1/255 (0.00392), resize to the 416x416 network input,
    # and swap R/B because the net expects RGB while cv2 images are BGR.
    blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    height, width = image.shape[0], image.shape[1]
    boxes = []
    labels = []
    confidences = []
    for out in outs:
        for detection in out:
            scores = detection[5:]  # per-class scores start at index 5
            class_id = int(np.argmax(scores))
            confidence = scores[class_id]
            if confidence > conf_threshold:
                # YOLO emits center/size normalized to [0, 1]; convert to
                # top-left pixel coordinates.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                labels.append(class_id)

    # Non-maximum suppression: the multiple YOLO output scales often report
    # the same object several times; keep only the highest-confidence box of
    # each overlapping cluster.
    if boxes:
        keep = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
        keep = np.array(keep).flatten()
        boxes = [boxes[i] for i in keep]
        labels = [labels[i] for i in keep]
        confidences = [confidences[i] for i in keep]
    return boxes, labels, confidences
# Display the uploaded image and detect objects
if uploaded_file is not None:
    # PIL decodes to RGB; OpenCV's drawing helpers expect BGR channel order.
    rgb_pixels = np.array(Image.open(uploaded_file))
    image = cv2.cvtColor(rgb_pixels, cv2.COLOR_RGB2BGR)

    # Run YOLO on the uploaded picture.
    boxes, labels, confidences = detect_objects_with_yolo(image)

    # Annotate: a green rectangle plus "<class-id> <confidence>" caption per hit.
    for (x, y, w, h), label, confidence in zip(boxes, labels, confidences):
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(image, f"{label} {confidence:.2f}", (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # Step 2: show the annotated image (still BGR, so tell Streamlit).
    st.markdown("### Step 2: Detected Objects in the Image")
    st.image(image, channels="BGR")

    # Step 3: per-class detection counts.
    st.markdown("### Step 3: Detected Objects Count")
    label_counts = Counter(labels)
    for obj, count in label_counts.items():
        st.markdown(f"**Object ID {obj}**: {count}")

st.markdown("""
---
### Tips:
- You can upload different images to see how the object detection model works.
- Supported formats: PNG, JPG, JPEG.
- The app uses OpenCV with YOLO for detecting common objects.
""")

# Display a footer
st.markdown("""
---
Made with ❤️ by [SenasuDemir](https://github.com/SenasuDemir).
""")