# app.py — gun-threat detection Gradio demo
# (originally published as a Hugging Face Space by NeuralNoble, commit 6e0273b)
import gdown
import os
import gradio as gr
import cv2
from ultralytics import YOLO
import math
import cvzone
import numpy as np
import torch
# Trained YOLO weights ("gun.pt") are hosted on Google Drive under this file ID.
file_id = "1Kcb132P4GjSHWZQ-sw5V-CB5vNr_5m3F"
model_path = "gun.pt"

# Fetch the weights once; subsequent runs reuse the local copy.
if not os.path.exists(model_path):
    drive_url = f"https://drive.google.com/uc?id={file_id}"
    gdown.download(drive_url, model_path, quiet=False)

# Load the detector from the downloaded weights.
model = YOLO(model_path)

# Index-to-name mapping for the model's two output classes.
classnames = ['gun', 'person']
def draw_transparent_overlay(frame, x1, y1, x2, y2, color=(0, 0, 255), alpha=0.5):
    """Blend a filled rectangle of `color` onto `frame` in place.

    The rectangle spans the corners (x1, y1)-(x2, y2) and is mixed into the
    image with weight `alpha`, leaving the original pixels visible underneath.
    Mutates `frame` directly; returns None.
    """
    shaded = frame.copy()
    # thickness=-1 fills the rectangle on the scratch copy.
    cv2.rectangle(shaded, (x1, y1), (x2, y2), color, -1)
    # Write the weighted mix back into `frame` (last arg is the destination).
    cv2.addWeighted(shaded, alpha, frame, 1 - alpha, 0, frame)
def is_in_extended_area(px1, py1, px2, py2, gx1, gy1, gx2, gy2, extension=50):
    """Return True if the gun box overlaps the person box grown by `extension`.

    The person rectangle (px1, py1)-(px2, py2) is expanded by `extension`
    pixels on every side, and a standard axis-aligned overlap test is run
    against the gun rectangle (gx1, gy1)-(gx2, gy2). Touching edges do not
    count as overlap (strict inequalities).
    """
    left = px1 - extension
    top = py1 - extension
    right = px2 + extension
    bottom = py2 + extension
    overlaps_x = gx1 < right and gx2 > left
    overlaps_y = gy1 < bottom and gy2 > top
    return overlaps_x and overlaps_y
def process_image(image):
    """Detect guns and people in an RGB image and highlight threatened people.

    The image is converted to BGR, resized to the model's 640x640 input,
    and run through the YOLO detector. People near a confidently-detected
    gun (conf > 0.4, within a 50 px extended area) get a red transparent
    overlay and a 'Threat' label.

    Args:
        image: RGB image (PIL image or array-like convertible via np.array).

    Returns:
        The annotated 640x640 frame as an RGB numpy array.
    """
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame = cv2.resize(frame, (640, 640))
    results = model(frame, stream=True)
    persons = []
    threats = []
    for r in results:
        for box in r.boxes:
            x1, y1, x2, y2 = (int(v) for v in box.xyxy[0])
            # Round confidence up to 2 decimal places.
            conf = math.ceil(box.conf[0] * 100) / 100
            cls = int(box.cls[0])
            if classnames[cls] == 'person':
                persons.append((x1, y1, x2, y2))
            elif classnames[cls] == 'gun' and conf > 0.4:
                # Only confident gun detections count as threats.
                threats.append((x1, y1, x2, y2))
    for (px1, py1, px2, py2) in persons:
        for (gx1, gy1, gx2, gy2) in threats:
            if is_in_extended_area(px1, py1, px2, py2, gx1, gy1, gx2, gy2):
                draw_transparent_overlay(frame, px1, py1, px2, py2)
                cvzone.putTextRect(frame, 'Threat', (px1, py1 - 10), scale=2, thickness=3)
                # FIX: mark each person at most once. Previously, multiple
                # nearby guns re-drew the alpha overlay over the same person,
                # compounding opacity and stacking duplicate labels.
                break
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Build the Gradio UI: a single image-in / image-out endpoint backed by
# process_image, then serve it with a public share link.
interface = gr.Interface(
    fn=process_image,
    inputs="image",
    outputs="image",
)
interface.launch(share=True)