# app.py — Garbage Detection Gradio app (Roboflow model)
# (removed Hugging Face page residue that was pasted above the code;
#  it was not valid Python and would break the module at import time)
import gradio as gr
from roboflow import Roboflow
import cv2
import numpy as np
import tempfile
import os
# ----------------------------
# Roboflow setup
# ----------------------------
# SECURITY NOTE(review): an API key was hard-coded in this file (and is now
# committed to history — it should be rotated). Prefer the ROBOFLOW_API_KEY
# environment variable; fall back to the old literal so existing deployments
# keep working.
API_KEY = os.environ.get("ROBOFLOW_API_KEY", "DIAhXQf6AUsyM1PRfdFa")
PROJECT_NAME = "garbage-detection-pbcjq"
VERSION_NUMBER = 7

# Module-level model handle shared by both prediction functions below.
rf = Roboflow(api_key=API_KEY)
project = rf.workspace().project(PROJECT_NAME)
model = project.version(VERSION_NUMBER).model
# ----------------------------
# Image prediction function
# ----------------------------
def predict_image(image):
    """
    Run the Roboflow model on one image and draw its detections.

    Parameters
    ----------
    image : PIL.Image.Image or numpy.ndarray
        Input image in RGB channel order (as supplied by gr.Image(type="pil")).

    Returns
    -------
    numpy.ndarray
        Copy of the input as an RGB array with bounding boxes and
        "<class> <confidence>" labels drawn on it.
    """
    frame = np.array(image)

    # The hosted model wants a file path; write a temp JPEG (RGB -> BGR for
    # cv2.imwrite). Close the handle first so cv2 can open it on Windows too,
    # and unlink in `finally` so the file is not leaked if predict() raises.
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
    tmp.close()
    try:
        cv2.imwrite(tmp.name, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        result = model.predict(tmp.name).json()
    finally:
        os.unlink(tmp.name)

    img = frame.copy()
    for pred in result.get("predictions", []):
        # BUG FIX: Roboflow reports (x, y) as the *center* of the box, not
        # the top-left corner. The old code did x2 = x + w, shifting every
        # box down-right by half its size. Convert center -> corners.
        cx, cy = pred["x"], pred["y"]
        w, h = pred["width"], pred["height"]
        x1, y1 = int(cx - w / 2), int(cy - h / 2)
        x2, y2 = int(cx + w / 2), int(cy + h / 2)
        label = f"{pred['class']} {pred['confidence']:.2f}"
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # Label just above the box's top edge.
        cv2.putText(img, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    return img
# ----------------------------
# Video prediction function
# ----------------------------
def predict_video(video_file):
    """
    Run the Roboflow model on every frame of a video and draw detections.

    Parameters
    ----------
    video_file : str
        Path to the input video file (as supplied by gr.Video()).

    Returns
    -------
    str
        Path to a temporary .mp4 with bounding boxes and labels drawn on
        each frame. Note: one model call per frame — slow for long clips.
    """
    temp_output = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    cap = cv2.VideoCapture(video_file)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    # Some containers report 0 fps, which would produce an unplayable file;
    # fall back to a sane default.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Hosted model wants a file path; write the frame to a temp JPEG
            # and always unlink it, even if predict() raises.
            tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
            tmp.close()
            try:
                cv2.imwrite(tmp.name, frame)
                result = model.predict(tmp.name).json()
            finally:
                os.unlink(tmp.name)

            for pred in result.get("predictions", []):
                # BUG FIX: Roboflow's (x, y) is the box *center*, not the
                # top-left corner — convert to corner coordinates.
                cx, cy = pred["x"], pred["y"]
                w, h = pred["width"], pred["height"]
                x1, y1 = int(cx - w / 2), int(cy - h / 2)
                x2, y2 = int(cx + w / 2), int(cy + h / 2)
                label = f"{pred['class']} {pred['confidence']:.2f}"
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(frame, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            out.write(frame)
    finally:
        # Release capture/writer even on error so the output file is flushed
        # and OS handles are not leaked.
        cap.release()
        out.release()
    return temp_output
# ----------------------------
# Gradio Interface
# ----------------------------
# ----------------------------
# Gradio Interface
# ----------------------------
with gr.Blocks() as demo:
    # BUG FIX: the heading emoji was mojibake ("๐Ÿ—‘" — a UTF-8 emoji decoded
    # through the wrong codec); restore the intended wastebasket emoji.
    gr.Markdown("## 🗑️ Garbage Detection App (Image & Video)")
    gr.Markdown("Upload an image or video to detect objects using Roboflow.")

    with gr.Tabs():
        # Image tab: single-shot prediction on an uploaded picture.
        with gr.Tab("Image"):
            image_input = gr.Image(type="pil")
            image_output = gr.Image()
            image_button = gr.Button("Predict Image")
            image_button.click(predict_image, inputs=image_input, outputs=image_output)
        # Video tab: frame-by-frame prediction on an uploaded clip.
        with gr.Tab("Video"):
            video_input = gr.Video()
            video_output = gr.Video()
            video_button = gr.Button("Predict Video")
            video_button.click(predict_video, inputs=video_input, outputs=video_output)

demo.launch()