KPrashanth committed on
Commit
76d8244
·
verified ·
1 Parent(s): e0dfadf

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +180 -0
  2. beep-warning.mp3 +0 -0
  3. requirements.txt +8 -0
  4. yolov8n.pt +3 -0
app.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import cv2
3
+ import numpy as np
4
+ import gradio as gr
5
+ from ultralytics import YOLO
6
+
7
+ # ----------------------------
8
+ # Config
9
+ # ----------------------------
10
+ MODEL_NAME = os.getenv("YOLO_MODEL", "yolov8n.pt")
11
+ CLASS_OF_INTEREST = "person"
12
+
13
+ # Danger zone: top-left, bottom-right (x, y)
14
+ DANGER_ZONE = ((100, 100), (400, 400))
15
+
16
+ # Inference config
17
+ CONF_THRES = 0.35
18
+ IMG_SIZE = 640
19
+
20
+ # ----------------------------
21
+ # Load model once (global)
22
+ # ----------------------------
23
+ model = YOLO(MODEL_NAME)
24
+
25
+ # Build class-name -> id mapping once (YOLOv8 COCO)
26
+ # For yolov8n.pt, names is dict {id: name}
27
+ NAMES = model.names
28
+ PERSON_CLASS_ID = None
29
+ for k, v in NAMES.items():
30
+ if v == CLASS_OF_INTEREST:
31
+ PERSON_CLASS_ID = int(k)
32
+ break
33
+
34
+ if PERSON_CLASS_ID is None:
35
+ raise RuntimeError("Could not find 'person' class in model.names")
36
+
37
+
38
+ # ----------------------------
39
+ # Helpers
40
+ # ----------------------------
41
+ def overlaps_zone(box_xyxy, zone):
42
+ """True if box overlaps danger zone (partial overlap)."""
43
+ x1, y1, x2, y2 = box_xyxy
44
+ (zx1, zy1), (zx2, zy2) = zone
45
+ overlap_x = (x1 < zx2) and (x2 > zx1)
46
+ overlap_y = (y1 < zy2) and (y2 > zy1)
47
+ return overlap_x and overlap_y
48
+
49
+
50
+ def make_beep(sr=22050, freq=880, duration=0.25):
51
+ """Return a short beep waveform for browser playback."""
52
+ t = np.linspace(0, duration, int(sr * duration), endpoint=False)
53
+ wave = 0.2 * np.sin(2 * np.pi * freq * t) # low volume
54
+ return (sr, wave.astype(np.float32))
55
+
56
+
57
+ BEEP_AUDIO = make_beep()
58
+
59
+
60
+ # ----------------------------
61
+ # Frame processor
62
+ # ----------------------------
63
+ def process_frame(frame, zone_x1, zone_y1, zone_x2, zone_y2, conf_thres):
64
+ """
65
+ frame: numpy array RGB from gradio
66
+ returns:
67
+ - annotated RGB frame
68
+ - grayscale RGB frame
69
+ - infrared frame (RGB)
70
+ - beep audio tuple or None
71
+ - status text
72
+ """
73
+ if frame is None:
74
+ return None, None, None, None, "No frame"
75
+
76
+ # Gradio gives RGB; OpenCV prefers BGR for drawing
77
+ rgb = frame
78
+ bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
79
+
80
+ zone = ((int(zone_x1), int(zone_y1)), (int(zone_x2), int(zone_y2)))
81
+
82
+ # Derived feeds
83
+ gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
84
+ gray_bgr = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
85
+ infrared = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
86
+
87
+ # Draw danger zone
88
+ for img in (bgr, gray_bgr, infrared):
89
+ cv2.rectangle(img, zone[0], zone[1], (0, 0, 255), 2)
90
+
91
+ # YOLO inference (stream=False for single image)
92
+ # verbose=False keeps logs clean
93
+ results = model.predict(
94
+ source=bgr,
95
+ imgsz=IMG_SIZE,
96
+ conf=float(conf_thres),
97
+ verbose=False
98
+ )
99
+
100
+ alert = False
101
+ det_count = 0
102
+
103
+ r = results[0]
104
+ if r.boxes is not None and len(r.boxes) > 0:
105
+ boxes = r.boxes.xyxy.cpu().numpy().astype(int)
106
+ cls_ids = r.boxes.cls.cpu().numpy().astype(int)
107
+ confs = r.boxes.conf.cpu().numpy()
108
+
109
+ for (x1, y1, x2, y2), cid, c in zip(boxes, cls_ids, confs):
110
+ if cid != PERSON_CLASS_ID:
111
+ continue
112
+
113
+ det_count += 1
114
+ label = f"person: {c:.2f}"
115
+
116
+ # draw bbox
117
+ for img in (bgr, gray_bgr, infrared):
118
+ cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
119
+ cv2.putText(img, label, (x1, max(15, y1 - 8)),
120
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
121
+
122
+ if overlaps_zone((x1, y1, x2, y2), zone):
123
+ alert = True
124
+
125
+ if alert:
126
+ for img in (bgr, gray_bgr, infrared):
127
+ cv2.putText(img, "ALERT: Person in danger zone", (20, 45),
128
+ cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
129
+ status = f"🚨 ALERT! persons detected: {det_count}"
130
+ beep = BEEP_AUDIO
131
+ else:
132
+ status = f"✅ OK (persons detected: {det_count})"
133
+ beep = None
134
+
135
+ # Convert back to RGB for gradio display
136
+ out_rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
137
+ out_gray = cv2.cvtColor(gray_bgr, cv2.COLOR_BGR2RGB)
138
+ out_infra = cv2.cvtColor(infrared, cv2.COLOR_BGR2RGB)
139
+
140
+ return out_rgb, out_gray, out_infra, beep, status
141
+
142
+
143
+ # ----------------------------
144
+ # Gradio UI
145
+ # ----------------------------
146
+ with gr.Blocks(title="YOLOv8 Danger Zone Demo") as demo:
147
+ gr.Markdown(
148
+ """
149
+ # YOLOv8 Danger Zone Detection (Demo)
150
+ - Uses browser webcam input (works on Hugging Face Spaces)
151
+ - Detects **person** and triggers **alert** if they overlap the danger zone
152
+ """
153
+ )
154
+
155
+ with gr.Row():
156
+ cam = gr.Image(source="webcam", streaming=True, type="numpy", label="Webcam (Input)")
157
+ with gr.Column():
158
+ zone_x1 = gr.Slider(0, 1280, value=DANGER_ZONE[0][0], step=1, label="Zone x1")
159
+ zone_y1 = gr.Slider(0, 720, value=DANGER_ZONE[0][1], step=1, label="Zone y1")
160
+ zone_x2 = gr.Slider(0, 1280, value=DANGER_ZONE[1][0], step=1, label="Zone x2")
161
+ zone_y2 = gr.Slider(0, 720, value=DANGER_ZONE[1][1], step=1, label="Zone y2")
162
+ conf = gr.Slider(0.05, 0.90, value=CONF_THRES, step=0.01, label="Confidence Threshold")
163
+
164
+ with gr.Row():
165
+ out1 = gr.Image(type="numpy", label="Color (Annotated)")
166
+ out2 = gr.Image(type="numpy", label="Grayscale (Annotated)")
167
+ out3 = gr.Image(type="numpy", label="Infrared (Annotated)")
168
+
169
+ with gr.Row():
170
+ alert_audio = gr.Audio(label="Alert Beep (plays when triggered)", autoplay=True)
171
+ status = gr.Textbox(label="Status", interactive=False)
172
+
173
+ cam.stream(
174
+ fn=process_frame,
175
+ inputs=[cam, zone_x1, zone_y1, zone_x2, zone_y2, conf],
176
+ outputs=[out1, out2, out3, alert_audio, status],
177
+ show_progress=False
178
+ )
179
+
180
+ demo.queue(concurrency_count=1).launch()
beep-warning.mp3 ADDED
Binary file (56 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ ultralytics
2
+ torch
3
+ torchvision
4
+ opencv-python
5
+ numpy
6
+ pandas
7
+ matplotlib
8
+ gradio>=3.0,<4.0  # app.py uses Gradio 3.x-only APIs: gr.Image(source=...), queue(concurrency_count=...)
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f59b3d833e2ff32e194b5bb8e08d211dc7c5bdf144b90d2c8412c47ccfc83b36
3
+ size 6549796