Spaces:
Sleeping
Sleeping
Initial commit with Traffic Sign Detector app
Browse filesAmp-Thread-ID: https://ampcode.com/threads/T-22e83726-d77c-475f-b185-6d55f37c5979
Co-authored-by: Amp <amp@ampcode.com>
- Dockerfile +19 -0
- app.py +43 -0
- config.yaml +53 -0
- model.py +41 -0
- requirements.txt +5 -0
Dockerfile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.9-slim

WORKDIR /app

# Copy requirements first so the pip layer is cached independently of app code.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Pre-download the YOLO model BEFORE copying the app code: app-code edits no
# longer invalidate this (slow, large) layer.
# NOTE(review): 'VietCat/GTSRB-Model' looks like a Hugging Face Hub repo id;
# confirm the installed ultralytics version resolves hub ids in YOLO(), since
# the constructor classically expects a local path or a known model name.
RUN python -c "from ultralytics import YOLO; YOLO('VietCat/GTSRB-Model')"

# Copy application files (dest '.' is the /app workdir).
COPY app.py model.py config.yaml .

# Gradio's default serving port; app.py launches on 7860 explicitly.
EXPOSE 7860

# Run the application.
CMD ["python", "app.py"]
|
app.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from model import TrafficSignDetector
|
| 5 |
+
|
| 6 |
+
# Construct the detector once at import time from config.yaml, so the model
# is loaded before the first request arrives.
detector = TrafficSignDetector('config.yaml')
|
| 8 |
+
|
| 9 |
+
def detect_traffic_signs(image):
    """
    Run the traffic-sign detector on an uploaded image.

    :param image: PIL Image (Gradio supplies type="pil") or numpy RGB array
    :return: numpy RGB array with detected signs drawn on it
    """
    if hasattr(image, 'convert'):
        # .convert('RGB') also flattens RGBA / palette / grayscale uploads,
        # which would otherwise break the 3-channel cvtColor below.
        image = np.array(image.convert('RGB'))
    elif image.ndim == 2:
        # Grayscale numpy input: replicate to 3 channels so RGB2BGR succeeds.
        image = np.stack([image] * 3, axis=-1)

    # OpenCV drawing (and the detector's configured colors) use BGR order.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # detector.detect draws boxes and labels onto the array and returns it.
    result_image = detector.detect(image)

    # Convert back to RGB for display in Gradio.
    result_image = cv2.cvtColor(result_image, cv2.COLOR_BGR2RGB)

    return result_image
|
| 29 |
+
|
| 30 |
+
# Create the Gradio interface: image in, annotated image out, wired to
# detect_traffic_signs via a button click.
with gr.Blocks(title="Traffic Sign Detector") as demo:
    gr.Markdown("# Traffic Sign Detector")
    gr.Markdown("Upload an image to detect traffic signs using YOLOv8.")

    with gr.Row():
        # type="pil" hands detect_traffic_signs a PIL Image.
        input_image = gr.Image(label="Upload Image", type="pil")
        output_image = gr.Image(label="Detected Signs")

    detect_btn = gr.Button("Detect Traffic Signs")
    detect_btn.click(fn=detect_traffic_signs, inputs=input_image, outputs=output_image)

if __name__ == "__main__":
    # Bind 0.0.0.0:7860 so the app is reachable from outside the Docker
    # container (matches the EXPOSE in the Dockerfile).
    demo.launch(server_name="0.0.0.0", server_port=7860)
|
config.yaml
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
  path: 'VietCat/GTSRB-Model'  # Path to the YOLO model on Hugging Face Hub
  confidence_threshold: 0.5    # Minimum confidence for detections

inference:
  # Colors are YAML flow sequences (BGR order). The previous parenthesized
  # form "(128, 0, 128)" was parsed by YAML as a plain STRING, which is not
  # a valid OpenCV color.
  box_color: [128, 0, 128]     # Purple color for bounding boxes (BGR format)
  text_color: [255, 255, 255]  # White color for labels
  thickness: 2                 # Thickness of bounding box lines

# GTSRB class names, indexed by the model's class id.
classes:
  - 'speed_limit_20'
  - 'speed_limit_30'
  - 'speed_limit_50'
  - 'speed_limit_60'
  - 'speed_limit_70'
  - 'speed_limit_80'
  - 'restriction_ends_80'
  - 'speed_limit_100'
  - 'speed_limit_120'
  - 'no_overtaking'
  - 'no_overtaking_trucks'
  - 'priority_at_next_intersection'
  - 'priority_road'
  - 'give_way'
  - 'stop'
  - 'no_traffic_both_ways'
  - 'no_trucks'
  - 'no_entry'
  - 'general_caution'
  - 'dangerous_curve_left'
  - 'dangerous_curve_right'
  - 'double_curve'
  - 'bumpy_road'
  - 'slippery_road'
  - 'road_narrows'
  - 'construction'
  - 'traffic_signal'
  - 'pedestrians'
  - 'children_crossing'
  - 'bicycles_crossing'
  - 'snow'
  - 'wild_animals'
  - 'restriction_ends'
  - 'go_right'
  - 'go_left'
  - 'go_straight'
  - 'go_right_or_straight'
  - 'go_left_or_straight'
  - 'keep_right'
  - 'keep_left'
  - 'roundabout'
  - 'restriction_ends_overtaking'
  - 'restriction_ends_overtaking_trucks'
|
model.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from ultralytics import YOLO
|
| 4 |
+
import yaml
|
| 5 |
+
|
| 6 |
+
class TrafficSignDetector:
    """YOLO-based traffic sign detector configured from a YAML file."""

    def __init__(self, config_path):
        """
        Load the YOLO model and drawing settings.

        :param config_path: path to a YAML file with 'model', 'inference'
                            and 'classes' sections (see config.yaml)
        """
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)

        self.model = YOLO(config['model']['path'])
        self.conf_threshold = config['model']['confidence_threshold']
        # Colors may arrive as YAML lists or as legacy "(b, g, r)" strings
        # (YAML parses parenthesized tuples as plain strings); normalize to
        # int tuples, which OpenCV drawing functions accept as a Scalar.
        self.box_color = self._parse_color(config['inference']['box_color'])
        self.text_color = self._parse_color(config['inference']['text_color'])
        self.thickness = config['inference']['thickness']
        self.classes = config['classes']

    @staticmethod
    def _parse_color(value):
        """Normalize a color spec (sequence or "(b, g, r)" string) to a tuple of ints."""
        if isinstance(value, str):
            parts = value.strip().strip('()').split(',')
            return tuple(int(p.strip()) for p in parts)
        return tuple(int(c) for c in value)

    def detect(self, image):
        """
        Perform inference and draw bounding boxes on the image IN PLACE.

        :param image: numpy array of the image (BGR channel order)
        :return: the same array with boxes and labels drawn on it
        """
        results = self.model(image, conf=self.conf_threshold)

        for result in results:
            for box in result.boxes:
                # Bounding box coordinates, confidence, and class id.
                x1, y1, x2, y2 = box.xyxy[0].cpu().numpy().astype(int)
                conf = box.conf[0].cpu().numpy()
                cls = int(box.cls[0].cpu().numpy())

                cv2.rectangle(image, (x1, y1), (x2, y2), self.box_color, self.thickness)

                # Fall back to the raw class id if the config lists fewer
                # names than the model predicts classes.
                name = self.classes[cls] if 0 <= cls < len(self.classes) else str(cls)
                label = f"{name}: {conf:.2f}"
                cv2.putText(image, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.text_color, 2)

        return image
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ultralytics==8.2.0
gradio==4.36.1
opencv-python==4.9.0.80
pyyaml==6.0.1
numpy==1.26.4
|