# Hugging Face Space by mlbench123 — app.py (commit 54e36a3, verified)
import cv2
from ultralytics import YOLO
import gradio as gr
import tempfile
import os
# Load the trained YOLO weights once at import time so every request reuses
# the same model instance instead of reloading it per video.
# NOTE(review): "best.pt" is resolved relative to the working directory —
# confirm it ships alongside this script in the deployment.
model = YOLO("best.pt")
def process_video(input_video):
    """Run YOLO detection on every frame of *input_video* and return the
    path of an annotated .mp4 copy.

    Detections below ``CONF_THRESHOLD`` are skipped; surviving boxes are
    drawn with a light-blue rectangle and a ``Cleaner <conf>`` label.

    Args:
        input_video: Filesystem path to the uploaded video.

    Returns:
        Path (str) to a temporary .mp4 file containing the annotated video.

    Raises:
        ValueError: If the input video cannot be opened.
    """
    CONF_THRESHOLD = 0.4  # minimum confidence for a box to be drawn

    cap = cv2.VideoCapture(input_video)
    if not cap.isOpened():
        # Fail loudly instead of silently producing an empty output file.
        raise ValueError(f"Could not open video: {input_video}")

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # Some containers report 0 fps; fall back to a sane default so the
    # writer does not emit a broken file.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Write to a named temp file (kept on disk) so Gradio can serve it back.
    tmp_out = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    output_path = tmp_out.name
    tmp_out.close()

    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            results = model(frame)[0]
            for box in results.boxes:
                conf = box.conf[0]
                if conf < CONF_THRESHOLD:
                    continue
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 200, 0), 2)
                label = f"Cleaner {conf:.2f}"
                cv2.putText(frame, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 200, 0), 2)
            out.write(frame)
    finally:
        # Release handles even if inference raises mid-video.
        cap.release()
        out.release()
    return output_path
# --- Gradio UI ------------------------------------------------------------
# Left column: upload + trigger button; right column: annotated result.
with gr.Blocks(css=".gradio-container {color: #cce0ff;}") as demo:
    gr.Markdown("## Detecting hands with cleaning cloth")
    with gr.Row():
        with gr.Column():
            inp = gr.Video(label="Upload Video")
            btn = gr.Button("Process Video", variant="primary")
        with gr.Column():
            out = gr.Video(label="Output Video")
    # Run detection on click and display the annotated video.
    btn.click(fn=process_video, inputs=inp, outputs=out)

if __name__ == "__main__":
    demo.launch()