# Gradio UI with OpenCV lane detection implementation
# (File-viewer metadata preserved from the scrape: author copilot-swe-agent[bot],
#  commit 2b342d6, raw / history / blame links, 5.8 kB — commented out because
#  these lines are not Python and would break the module at import time.)
import cv2
import numpy as np
import gradio as gr
import tempfile
import os
def region_of_interest(img, vertices):
    """
    Keep only the pixels of *img* that fall inside the polygon *vertices*.

    Parameters
    ----------
    img : np.ndarray
        Single-channel image (here: a Canny edge map).
    vertices : np.ndarray
        Polygon vertex array in the shape ``cv2.fillPoly`` expects
        (an array of polygons, each an array of (x, y) points).

    Returns
    -------
    np.ndarray
        A copy of *img* with everything outside the polygon zeroed out.
    """
    # Build a filled-polygon mask (255 inside the ROI, 0 elsewhere) and
    # intersect it with the input image.
    roi_mask = np.zeros_like(img)
    cv2.fillPoly(roi_mask, vertices, 255)
    return cv2.bitwise_and(img, roi_mask)
def draw_lines(img, lines, color=(0, 255, 0), thickness=3):
    """
    Overlay averaged left/right lane lines on *img*.

    Parameters
    ----------
    img : np.ndarray
        BGR frame to annotate. Not modified in place; a blended copy is
        returned.
    lines : np.ndarray | None
        Output of ``cv2.HoughLinesP``: an array of ``[[x1, y1, x2, y2]]``
        segments, or ``None`` when no segments were found.
    color : tuple[int, int, int]
        BGR line colour. A tuple, not a list: the original mutable-list
        default would be a single object shared across all calls.
    thickness : int
        Line thickness in pixels.

    Returns
    -------
    np.ndarray
        The input image blended with the drawn lane lines (or *img*
        unchanged when *lines* is ``None``).
    """
    if lines is None:
        return img

    line_img = np.zeros_like(img)

    # Split segments into left and right lanes by slope sign; near-horizontal
    # segments (|slope| <= 0.5) are treated as noise and discarded.
    left_lines = []
    right_lines = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        if x2 == x1:
            # Vertical segment: slope undefined, skip it.
            continue
        slope = (y2 - y1) / (x2 - x1)
        if slope < -0.5:   # left lane (negative slope in image coordinates)
            left_lines.append(line[0])
        elif slope > 0.5:  # right lane (positive slope)
            right_lines.append(line[0])

    def average_lines(segments, img_shape):
        """Fit x = m*y + b through all endpoints; return [x1, y1, x2, y2] or None."""
        if not segments:
            return None
        x_coords = []
        y_coords = []
        for x1, y1, x2, y2 in segments:
            x_coords.extend([x1, x2])
            y_coords.extend([y1, y2])
        # Fit x as a function of y: lane lines are near-vertical, so this
        # orientation keeps the least-squares fit well-conditioned.
        poly = np.polyfit(y_coords, x_coords, 1)
        # Extend the averaged line from the bottom of the frame (y = height)
        # up to 60% of the frame height (matching the ROI top edge).
        y1 = img_shape[0]
        y2 = int(img_shape[0] * 0.6)
        x1 = int(poly[0] * y1 + poly[1])
        x2 = int(poly[0] * y2 + poly[1])
        return [x1, y1, x2, y2]

    # Draw whichever averaged lanes exist onto the overlay, then blend.
    for lane in (average_lines(left_lines, img.shape),
                 average_lines(right_lines, img.shape)):
        if lane is not None:
            cv2.line(line_img, (lane[0], lane[1]), (lane[2], lane[3]),
                     color, thickness)

    return cv2.addWeighted(img, 1.0, line_img, 1.0, 0)
def process_frame(frame):
    """
    Detect and draw lane lines on a single BGR frame.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> trapezoidal
    region-of-interest mask -> probabilistic Hough transform -> averaged
    lane lines blended onto a copy of the input frame.

    Parameters
    ----------
    frame : np.ndarray
        BGR input frame.

    Returns
    -------
    np.ndarray
        A copy of *frame* with the detected lanes drawn on it.
    """
    height, width = frame.shape[:2]

    # Edge map: grayscale, denoise with a 5x5 Gaussian kernel, then Canny
    # with a 50/150 hysteresis threshold pair.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, 50, 150)

    # Trapezoid covering the road ahead: 10%..90% of the width at the
    # bottom edge, narrowing to the central 45%..55% band at 60% height.
    roi_vertices = np.array([[
        (int(width * 0.1), height),
        (int(width * 0.45), int(height * 0.6)),
        (int(width * 0.55), int(height * 0.6)),
        (int(width * 0.9), height),
    ]], dtype=np.int32)
    masked_edges = region_of_interest(edges, roi_vertices)

    # Probabilistic Hough transform on the masked edge map.
    detected = cv2.HoughLinesP(
        masked_edges,
        rho=2,
        theta=np.pi / 180,
        threshold=50,
        minLineLength=40,
        maxLineGap=100,
    )

    return draw_lines(frame.copy(), detected)
def process_video(video_path):
    """
    Run lane detection on an uploaded video and return the path to a
    side-by-side comparison video (original | processed).

    Parameters
    ----------
    video_path : str | None
        Path to the input video file, or ``None`` when nothing was uploaded
        (Gradio passes ``None`` for an empty Video input).

    Returns
    -------
    str | None
        Path to a temporary ``.mp4`` result file, or ``None`` when the input
        is missing or cannot be opened.
    """
    if video_path is None:
        return None

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return None

    try:
        # CAP_PROP_FPS can legitimately come back 0 (e.g. broken container
        # metadata); fall back to 30 so VideoWriter never gets an invalid fps.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Persistent temp file: Gradio needs the path to survive this call.
        temp_output = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
        output_path = temp_output.name
        temp_output.close()

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # Side-by-side layout (original | processed) doubles the width.
        out = cv2.VideoWriter(output_path, fourcc, fps, (width * 2, height))
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                processed_frame = process_frame(frame)
                combined = np.hstack((frame, processed_frame))
                out.write(combined)
        finally:
            # Release the writer even if a frame fails mid-stream, so the
            # container is finalized and the handle is not leaked.
            out.release()
    finally:
        cap.release()

    return output_path
# Create Gradio interface
# ---------------------------------------------------------------------------
# Gradio UI: upload a video, run lane detection, show a side-by-side result.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Lane Detection Demo") as demo:
    # The original heading contained the mojibake "๐Ÿš—" — the UTF-8 bytes of
    # U+1F697 (car emoji) decoded with the wrong codec; restored here.
    gr.Markdown("# 🚗 OpenCV Lane Detection Demo")
    gr.Markdown("Upload a video to detect lane lines. The result will show the original video on the left and the lane-detected video on the right.")

    with gr.Row():
        with gr.Column():
            video_input = gr.Video(label="Upload Video")
            process_btn = gr.Button("Process Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Result (Original | Lane Detection)")

    # Wire the button to the processing function.
    process_btn.click(
        fn=process_video,
        inputs=video_input,
        outputs=video_output
    )

    gr.Markdown("""
    ### How it works:
    1. Upload a video file containing road scenes
    2. Click "Process Video" button
    3. The system will:
       - Convert frames to grayscale
       - Apply Gaussian blur to reduce noise
       - Use Canny edge detection to find edges
       - Apply region of interest (ROI) mask to focus on the road
       - Use Hough transform to detect lane lines
       - Draw detected lanes on the original video
    4. View the side-by-side comparison result
    """)

if __name__ == "__main__":
    demo.launch()