Spaces:
Sleeping
Sleeping
Update detect_people.py
Browse files- detect_people.py +100 -9
detect_people.py
CHANGED
|
@@ -1,13 +1,104 @@
|
|
|
|
|
| 1 |
import cv2
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
|
|
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
import urllib.request
|
| 6 |
|
| 7 |
+
# YOLOv4-tiny (fast, decent accuracy, ~23MB weights)
# Config and class names come from the darknet repos; weights from the
# AlexeyAB release page.  All three are fetched on demand by _ensure_models().
YOLO_CFG_URL = "https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4-tiny.cfg"
YOLO_WEIGHTS_URL = "https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights"
YOLO_NAMES_URL = "https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names"

# Local cache directory for the downloaded model files; overridable via the
# MODEL_DIR environment variable (defaults to ./models).
MODEL_DIR = Path(os.getenv("MODEL_DIR", "models"))
CFG_PATH = MODEL_DIR / "yolov4-tiny.cfg"
WEIGHTS_PATH = MODEL_DIR / "yolov4-tiny.weights"
NAMES_PATH = MODEL_DIR / "coco.names"
| 16 |
|
| 17 |
+
def _ensure_models():
    """Download the YOLOv4-tiny assets on first use and return the class labels.

    Ensures MODEL_DIR exists, fetches any of the cfg/weights/names files that
    are not already cached on disk, then reads coco.names.

    Returns:
        list[str]: one class label per line of the names file.
    """
    MODEL_DIR.mkdir(parents=True, exist_ok=True)

    # Fetch each missing asset; files already on disk are never re-downloaded.
    for url, dest in (
        (YOLO_CFG_URL, CFG_PATH),
        (YOLO_WEIGHTS_URL, WEIGHTS_PATH),
        (YOLO_NAMES_URL, NAMES_PATH),
    ):
        if not dest.exists():
            urllib.request.urlretrieve(url, dest)

    with open(NAMES_PATH, "r") as f:
        return [line.strip() for line in f]
| 29 |
+
|
| 30 |
+
# Lazily-initialized module-level singletons, populated once by _load_net()
# and reused across every subsequent detection call.
_net = None
_output_layers = None
_classes = None
| 33 |
+
|
| 34 |
+
def _load_net():
    """Build (once) and return the cached YOLO network, output layers, and labels.

    On the first call this downloads the model files if needed, constructs the
    darknet network on the CPU OpenCV backend, and resolves the names of the
    unconnected output layers.  Later calls return the cached triple.

    Returns:
        tuple: (cv2.dnn_Net, list[str] output layer names, list[str] class labels)
    """
    global _net, _output_layers, _classes
    if _net is None:
        _classes = _ensure_models()
        net = cv2.dnn.readNetFromDarknet(str(CFG_PATH), str(WEIGHTS_PATH))
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        # getUnconnectedOutLayers() is 1-based, hence the `- 1` when indexing.
        all_names = net.getLayerNames()
        _output_layers = [all_names[idx - 1] for idx in net.getUnconnectedOutLayers().flatten()]
        _net = net
    return _net, _output_layers, _classes
| 45 |
+
|
| 46 |
+
def detect_people_yolo(frame_bgr, conf_thresh=0.55, nms_thresh=0.45, draw=True):
    """
    Returns:
        people_indices: list of indices of 'person' boxes after NMS
        boxes: list[(x,y,w,h)]
        confidences: list[float]
        annotated: frame with boxes (BGR)
    """
    net, out_layers, classes = _load_net()

    frame_h, frame_w = frame_bgr.shape[:2]
    # 416x416 is the canonical YOLOv4-tiny input; swapRB converts BGR -> RGB.
    blob = cv2.dnn.blobFromImage(
        frame_bgr, scalefactor=1 / 255.0, size=(416, 416), swapRB=True, crop=False
    )
    net.setInput(blob)
    outputs = net.forward(out_layers)

    boxes = []
    confidences = []
    class_ids = []

    # Collect every detection above the confidence threshold, converting the
    # normalized center/size encoding into pixel-space top-left boxes.
    for layer_out in outputs:
        for det in layer_out:
            class_scores = det[5:]
            cls = int(np.argmax(class_scores))
            score = float(class_scores[cls])
            if score >= conf_thresh:
                cx = int(det[0] * frame_w)
                cy = int(det[1] * frame_h)
                bw = int(det[2] * frame_w)
                bh = int(det[3] * frame_h)
                boxes.append([int(cx - bw / 2), int(cy - bh / 2), bw, bh])
                confidences.append(score)
                class_ids.append(cls)

    # Non-maximum suppression across all collected boxes.
    keep = cv2.dnn.NMSBoxes(boxes, confidences, conf_thresh, nms_thresh)

    annotated = frame_bgr.copy()
    people_indices = []
    if len(keep) > 0:
        for i in keep.flatten():
            cls = class_ids[i]
            # Keep only boxes whose class label is 'person'.
            if cls < len(classes) and classes[cls] == "person":
                people_indices.append(i)
                if draw:
                    x, y, bw, bh = boxes[i]
                    cv2.rectangle(annotated, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
                    cv2.putText(annotated, f"person {confidences[i]:.2f}", (x, y - 6),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # Big on-screen counter for debugging
    if draw:
        # Drawn twice (thick dark stroke under a thin light one) so the count
        # stays legible on any background.
        for color, thickness in (((0, 0, 0), 4), ((255, 255, 255), 2)):
            cv2.putText(annotated, f"People: {len(people_indices)}", (12, 36),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.1, color, thickness)

    return people_indices, boxes, confidences, annotated