# detect/dust_check_new.py
# (Hugging Face upload header residue, kept for provenance: uploaded by
#  lukehuang6666, "Upload 8 files", revision eeb0fd6 verified.)
import time
import json
import math
import threading
import concurrent.futures
import subprocess
import os
from dataclasses import dataclass, field
from typing import Dict, Optional, Tuple, List
os.environ.setdefault("ULTRALYTICS_SETTINGS", os.path.join(os.path.dirname(__file__), "ultralytics_settings.yaml"))
os.environ.setdefault("ULTRALYTICS_OFFLINE", "1")
import cv2
import numpy as np
import requests
from ultralytics import YOLO
import config as cfg
BYTETRACK_PATH = os.path.join(os.path.dirname(__file__), "bytetrack.yaml")
def _patch_ultralytics_offline() -> None:
try:
from ultralytics.utils import checks, downloads
checks.check_version = lambda *a, **k: None
downloads.safe_download = lambda *a, **k: None
downloads.get_github_assets = lambda *a, **k: ("", [])
except Exception:
pass
_patch_ultralytics_offline()
from pyav_media import PyAVFrameReader, PyAVPusher, PyAVClipBuffer
CLIP_RECORDER = None  # rolling clip buffer; initialized in main()
CLIP_EXPORT_POOL = concurrent.futures.ThreadPoolExecutor(max_workers=1)
# -------------------------
# Basic configuration (all values overridable via config.py attributes)
# -------------------------
RTSP_URL = cfg.DUST_VIDEO_PATH
RTMP_OUTPUT_URL = cfg.DUST_RTMP_OUTPUT_URL
REPORT_API_URL = cfg.DUST_REPORT_API_URL
REPORT_API_TIMEOUT = getattr(cfg, "REPORT_API_TIMEOUT", 1.5)
VIDEO_WIDTH = getattr(cfg, "VIDEO_WIDTH", 960)
VIDEO_HEIGHT = getattr(cfg, "VIDEO_HEIGHT", 540)
FPS = getattr(cfg, "FPS", 15)
PUSH_VIDEO = getattr(cfg, "PUSH_VIDEO", True)
CAMERA_ID = getattr(cfg, "DUST_CAMERA_ID", 2)
DUST_STAY_SEC = getattr(cfg, "DUST_STAY_SEC", 4.0)  # total stationary seconds required for OK
MOVE_THRESH_PX = getattr(cfg, "DUST_STATIONARY_MOVE_THRESH_PX", 25.0)
# Stationary confirmation: accumulation only starts after displacement stays <= threshold
# for this many consecutive seconds (prevents timing from starting the instant someone
# enters the zone, and slow walkers from being mis-counted as stationary).
DUST_STATIONARY_CONFIRM_SEC = float(getattr(cfg, "DUST_STATIONARY_CONFIRM_SEC", 0.8))
MIN_EFFECTIVE_STATIONARY_SEC = float(getattr(cfg, "DUST_MIN_EFFECTIVE_STATIONARY_SEC", 1.0))
SKIP_INFER_N = getattr(cfg, "DUST_SKIP_INFER_N", 3)  # run inference on every Nth frame
MIN_FRAMES_IN_ENTRY = getattr(cfg, "DUST_MIN_FRAMES_IN_ENTRY", 3)
LOST_TIMEOUT_SEC = getattr(cfg, "DUST_LOST_TIMEOUT_SEC", 15.0)
STITCH_TTL_SEC = getattr(cfg, "DUST_STITCH_TTL_SEC", 2.0)  # how long a lost track stays stitchable
STITCH_DIST_PX = getattr(cfg, "DUST_STITCH_DIST_PX", 80.0)  # max distance for ID-switch stitching
DETECT_MODEL = getattr(cfg, "DETECT_MODEL", "person.engine")
# Audio alerts
SOUND_ENABLE = getattr(cfg, "DUST_SOUND_ENABLE", True)
SOUND_MODE = getattr(cfg, "DUST_SOUND_MODE", "tts")  # "tts" | "file" | "ps" | anything else -> beep
SOUND_POLICY = getattr(cfg, "DUST_SOUND_POLICY", "per_event")  # "per_event" | "merge"
SOUND_MAX_QUEUE = int(getattr(cfg, "DUST_SOUND_MAX_QUEUE", 20))
SOUND_MIN_INTERVAL_SEC = getattr(cfg, "DUST_SOUND_MIN_INTERVAL_SEC", 2.0)
SOUND_TEXT_NO_DUST = getattr(cfg, "DUST_SOUND_TEXT_NO_DUST", "未沾尘")
SOUND_TEXT_BAD_DUST = getattr(cfg, "DUST_SOUND_TEXT_BAD_DUST", "沾尘不规范")
SOUND_FILE_NO_DUST = str(getattr(cfg, "DUST_SOUND_FILE_NO_DUST", "voice/未沾尘.mp3"))
SOUND_FILE_BAD_DUST = str(getattr(cfg, "DUST_SOUND_FILE_BAD_DUST", "voice/沾尘不规范.mp3"))
SOUND_FILE_VOLUME = int(getattr(cfg, "DUST_SOUND_FILE_VOLUME", 100))
TTS_VOLUME = float(getattr(cfg, "DUST_TTS_VOLUME", 1.0))
TTS_RATE = int(getattr(cfg, "DUST_TTS_RATE", 180))
TTS_VOICE_HINT = str(getattr(cfg, "DUST_TTS_VOICE_HINT", "zh,Chinese,CN,中文"))
PS_TTS_VOLUME = int(getattr(cfg, "DUST_PS_TTS_VOLUME", 100))
PS_TTS_RATE = int(getattr(cfg, "DUST_PS_TTS_RATE", 0))
SOUND_TEST_ON_START = bool(getattr(cfg, "DUST_SOUND_TEST_ON_START", False))
SOUND_TEST_TEXT = str(getattr(cfg, "DUST_SOUND_TEST_TEXT", "沾尘检测已启动"))
DEBUG_MODE = False
def point_in_poly(pt: Tuple[float, float], poly: np.ndarray) -> bool:
    """Return True if pt lies inside or on the boundary of poly (Nx2 int32 vertices)."""
    signed_side = cv2.pointPolygonTest(poly, pt, False)
    return signed_side >= 0
def draw_poly(img, poly: np.ndarray, color, label: str):
    """Outline poly on img and render label just above its first vertex."""
    cv2.polylines(img, [poly], True, color, 2)
    anchor_x = int(poly[0][0])
    anchor_y = int(poly[0][1])
    label_org = (anchor_x, max(0, anchor_y - 10))
    cv2.putText(img, label, label_org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
def _sec(v) -> float:
try:
return float(v or 0.0)
except Exception:
return 0.0
def send_report(track_id: int,
                entry_source: str,
                result: str,
                dust_time_sec: float,
                reason: str,
                exited_at_iso: str,
                active_count: int,
                clip_url: Optional[str] = None,
                clip_start_at: Optional[str] = None,
                clip_end_at: Optional[str] = None):
    """POST one dust-check verdict to REPORT_API_URL.

    result: "OK" / "NG" (exit-zone verdicts) or anything else (reported as UNKNOWN).
    Optional clip_* fields are only included when truthy. Never raises: any
    HTTP/formatting failure is caught and logged.
    """
    if result == "OK":
        dust_ok = True
    elif result == "NG":
        dust_ok = False
    else:
        dust_ok = None
    payload = {
        "track_id": str(track_id),
        "camera_id": CAMERA_ID,
        "entry_source": entry_source,
        "dust_ok": dust_ok,
        "dust_time_sec": 0.0 if dust_time_sec is None else float(dust_time_sec),
        # OK/NG: reported at the exit zone; UNKNOWN: track lost / occluded, etc.
        "status": "EXITED" if result in ("OK", "NG") else "UNKNOWN",
        "reason": reason,
        "dust_finished_at": exited_at_iso,
        "exited_at": exited_at_iso,
        "active_count": int(active_count),
    }
    for key, value in (("clip_url", clip_url),
                       ("clip_start_at", clip_start_at),
                       ("clip_end_at", clip_end_at)):
        if value:
            payload[key] = value
    try:
        r = requests.post(REPORT_API_URL, json=payload, timeout=REPORT_API_TIMEOUT)
        if r.ok:
            print(f"✅ [DUST] 上报成功: ID {track_id} (结果: {result}, 时间: {dust_time_sec:.1f}s)")
        else:
            print(f"⚠️ [DUST] 上报失败: http={r.status_code} body={r.text[:200]}")
    except Exception as e:
        print(f"⚠️ [DUST] 上报异常: {e}")
def schedule_dust_clip_export_and_report(
    tid: int,
    entry_source: str,
    result: str,
    dust_time: float,
    reason: str,
    exited_iso: str,
    active_count: int,
    now_ts: float,
    category: str,
):
    """
    Export the dust-check replay clip and send the report asynchronously, so the
    main loop is never stalled by a synchronous export (which would stutter the
    outgoing stream). The report is always sent, with or without a clip.
    """
    def _task():
        clip_url = None
        clip_start_at = None
        clip_end_at = None
        try:
            global CLIP_RECORDER
            recorder = CLIP_RECORDER
            if recorder is not None:
                pre_sec = int(getattr(cfg, "CLIP_PRE_SEC", 10))
                base_dir = getattr(cfg, "CLIP_BASE_DIR", os.path.join(os.path.dirname(__file__), "clips"))
                stamp = time.localtime(now_ts)
                day = time.strftime("%Y%m%d", stamp)
                cat = str(category or "unknown")
                out_dir = os.path.join(str(base_dir), "dust", day, cat)
                os.makedirs(out_dir, exist_ok=True)
                fname = f"dust_{cat}_cam{CAMERA_ID}_{time.strftime('%H%M%S', stamp)}_id{tid}.mp4"
                out_path = os.path.join(out_dir, fname)
                meta = recorder.export_last_seconds(pre_sec, out_path, end_ts=now_ts)
                if meta is not None and os.path.exists(out_path):
                    clip_url = f"/clips/gengyishi2/dust/{day}/{cat}/{fname}"
                    clip_start_at = meta.clip_start_iso
                    clip_end_at = meta.clip_end_iso
                    print(f"[CLIP] exported: {out_path}")
                else:
                    print(f"[CLIP] export skipped: meta=None or file missing. result={result} reason={reason}")
        except Exception as e:
            print(f"[CLIP] export failed: {e}")
        send_report(
            tid, entry_source, result, dust_time, reason, exited_iso, active_count,
            clip_url=clip_url, clip_start_at=clip_start_at, clip_end_at=clip_end_at
        )
    try:
        CLIP_EXPORT_POOL.submit(_task)
    except Exception:
        # Pool unavailable (e.g. interpreter shutting down): fall back to synchronous export.
        _task()
class SoundWorker:
    """Background audio announcer so playback never blocks the main loop.

    NG events are queued via play_ng(); a daemon thread drains the queue and
    announces them via one of (per SOUND_MODE): pyttsx3 TTS ("tts"), a sound
    file ("file"), PowerShell System.Speech ("ps"), or a winsound beep
    (fallback for any failure / unknown mode).

    Fix vs. previous revision: `_last_play_at` used to be stamped *before*
    checking whether the queue held anything, so empty wakeups kept pushing the
    next real playback past SOUND_MIN_INTERVAL_SEC; the loop also busy-spun at
    ~20 Hz during the interval window even with an empty queue. The loop now
    checks the queue first and only stamps `_last_play_at` when something is
    actually dequeued for playback.
    """

    def __init__(self):
        self._q = []                      # pending NG types: "NO_DUST" / "BAD_DUST"
        self._lock = threading.Lock()     # guards self._q
        self._evt = threading.Event()     # wakes the worker when items arrive
        self._stop = threading.Event()
        self._last_play_at = 0.0          # wall-clock time of the last playback
        self._tts_engine = None
        self._tts_inited = False
        self._tts_ok = False
        self._file_no_dust = self._resolve_path(SOUND_FILE_NO_DUST)
        self._file_bad_dust = self._resolve_path(SOUND_FILE_BAD_DUST)
        self._wav_no_dust = None
        self._wav_bad_dust = None
        if SOUND_MODE == "file":
            # Pre-convert mp3 -> wav once so playback can use winsound (most reliable).
            self._wav_no_dust = self._ensure_wav(self._file_no_dust)
            self._wav_bad_dust = self._ensure_wav(self._file_bad_dust)
        self._th = threading.Thread(target=self._loop, daemon=True)
        self._th.start()

    def _resolve_path(self, p: str) -> str:
        """Resolve p to an absolute path; relative paths are taken relative to this script's directory."""
        if not p:
            return ""
        if os.path.isabs(p):
            return p
        base = os.path.dirname(os.path.abspath(__file__))
        return os.path.normpath(os.path.join(base, p))

    def _ensure_wav(self, src_path: str) -> str:
        """
        Prefer generating a wav and playing it with winsound (most stable path).
        src may be mp3 or wav; a wav is returned as-is. On conversion failure
        the original file path is returned (played later via ffplay as fallback).
        """
        if not src_path:
            return ""
        if not os.path.exists(src_path):
            print(f"[DUST][SOUND] file not found: {src_path}")
            return ""
        ext = os.path.splitext(src_path)[1].lower()
        if ext == ".wav":
            return src_path
        wav_path = os.path.splitext(src_path)[0] + ".wav"
        if os.path.exists(wav_path):
            return wav_path
        # One-off ffmpeg conversion (audio wav only; no new mp4 is produced).
        try:
            subprocess.run(
                ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", src_path, wav_path],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                check=False,
            )
            if os.path.exists(wav_path):
                print(f"[DUST][SOUND] wav generated: {wav_path}")
                return wav_path
        except Exception as e:
            print(f"[DUST][SOUND] ffmpeg convert failed: {e}")
        # Conversion failed: hand back the original (ffplay fallback will handle it).
        return src_path

    def _play_file(self, ng_type: str) -> bool:
        """Play the recorded prompt (winsound for wav; ffplay fallback for mp3/others).

        Returns True if a playback attempt was made, False if no file is configured.
        """
        try:
            path = self._wav_no_dust if ng_type == "NO_DUST" else self._wav_bad_dust
            if not path:
                return False
            ext = os.path.splitext(path)[1].lower()
            if ext == ".wav":
                import winsound
                print(f"[DUST][SOUND] play wav: {path}")
                winsound.PlaySound(path, winsound.SND_FILENAME)
                return True
            # Non-wav: fall back to ffplay (handles mp3).
            vol = max(0, min(100, int(SOUND_FILE_VOLUME)))
            print(f"[DUST][SOUND] play via ffplay: {path} vol={vol}")
            subprocess.run(
                ["ffplay", "-nodisp", "-autoexit", "-loglevel", "error", "-volume", str(vol), path],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                check=False,
            )
            return True
        except Exception as e:
            print(f"[DUST][SOUND] play file failed: {e}")
            return False

    def _init_tts(self):
        """Lazily initialize a pyttsx3 engine (None on failure). Idempotent."""
        if self._tts_engine is not None:
            return self._tts_engine
        try:
            import pyttsx3  # optional
            # On Windows prefer the sapi5 driver (more stable).
            try:
                eng = pyttsx3.init(driverName="sapi5")
            except Exception:
                eng = pyttsx3.init()
            # volume:
            # - Windows SAPI / pyttsx3 expects 0.0~1.0, but 0~100 is more intuitive
            #   for users; values > 1 are treated as 0~100 and mapped down to 0~1.
            vol01 = TTS_VOLUME
            if vol01 > 1.0:
                vol01 = max(0.0, min(100.0, vol01)) / 100.0
            vol01 = max(0.0, min(1.0, vol01))
            try:
                eng.setProperty("volume", vol01)
            except Exception:
                pass
            try:
                eng.setProperty("rate", TTS_RATE)
            except Exception:
                pass
            # Pick a Chinese voice if one matches TTS_VOICE_HINT.
            try:
                hints = [h.strip().lower() for h in TTS_VOICE_HINT.split(",") if h.strip()]
                voices = eng.getProperty("voices") or []
                chosen = None
                for v in voices:
                    name = (getattr(v, "name", "") or "").lower()
                    vid = (getattr(v, "id", "") or "").lower()
                    if any(h in name or h in vid for h in hints):
                        chosen = v
                        break
                if chosen is not None:
                    eng.setProperty("voice", chosen.id)
                    print(f"[DUST][TTS] voice selected: name={getattr(chosen, 'name', '')} id={getattr(chosen, 'id', '')}")
                else:
                    print(f"[DUST][TTS] no matching zh voice, use default. voices={len(voices)}")
            except Exception as e:
                print(f"[DUST][TTS] voice select failed: {e}")
            self._tts_engine = eng
            self._tts_inited = True
            self._tts_ok = True
            print(f"[DUST][TTS] init ok, mode={SOUND_MODE}, volume01={vol01:.2f}, rate={TTS_RATE}")
            return eng
        except Exception as e:
            self._tts_inited = True
            self._tts_ok = False
            print(f"[DUST][TTS] init failed, fallback to beep. err={e}")
            self._tts_engine = None
            return None

    def play_ng(self, ng_type: str):
        """Queue an NG announcement ("NO_DUST" / "BAD_DUST"); no-op when sound is disabled."""
        if not SOUND_ENABLE:
            return
        with self._lock:
            self._q.append(ng_type)
            # Bound the queue: drop the oldest entries so announcements don't
            # lag minutes behind the events they describe.
            if len(self._q) > SOUND_MAX_QUEUE:
                self._q = self._q[-SOUND_MAX_QUEUE:]
        self._evt.set()

    def _beep(self, ng_type: str):
        """Last-resort audible signal; distinct rhythms so the two NG types are tellable apart."""
        try:
            import winsound
            if ng_type == "NO_DUST":
                winsound.Beep(1200, 180)
                winsound.Beep(1200, 180)
            else:
                winsound.Beep(800, 350)
        except Exception:
            pass

    def _ps_speak(self, text: str) -> bool:
        """Speak via Windows PowerShell's built-in System.Speech (no pyttsx3 dependency)."""
        try:
            # PowerShell -EncodedCommand requires UTF-16LE base64.
            safe_text = (text or "").replace("'", "''")
            vol = max(0, min(100, int(PS_TTS_VOLUME)))
            rate = max(-10, min(10, int(PS_TTS_RATE)))
            script = (
                "Add-Type -AssemblyName System.Speech;"
                "$s=New-Object System.Speech.Synthesis.SpeechSynthesizer;"
                f"$s.Volume={vol};"
                f"$s.Rate={rate};"
                f"$s.Speak('{safe_text}');"
            )
            b = script.encode("utf-16le")
            import base64
            enc = base64.b64encode(b).decode("ascii")
            subprocess.run(
                ["powershell", "-NoProfile", "-NonInteractive", "-EncodedCommand", enc],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                check=False,
            )
            print(f"[DUST][PS_TTS] speak: {text}")
            return True
        except Exception as e:
            print(f"[DUST][PS_TTS] speak failed: {e}")
            return False

    def _tts(self, text: str):
        """Speak text via pyttsx3; returns False (caller beeps) on any failure."""
        eng = self._init_tts()
        if eng is None:
            return False
        try:
            eng.say(text)
            print(f"[DUST][TTS] speak: {text}")
            eng.runAndWait()
            return True
        except Exception as e:
            print(f"[DUST][TTS] speak failed, fallback to beep. err={e}")
            return False

    def _loop(self):
        """Worker thread: drain the queue, honoring SOUND_MIN_INTERVAL_SEC between playbacks."""
        while not self._stop.is_set():
            self._evt.wait(0.5)
            self._evt.clear()
            # Nothing queued: go back to sleep. (Previously the interval check ran
            # first and stamped _last_play_at even for empty wakeups, starving playback.)
            with self._lock:
                if not self._q:
                    continue
            now = time.time()
            if now - self._last_play_at < SOUND_MIN_INTERVAL_SEC:
                # Too soon since the last playback: re-arm and retry shortly.
                self._evt.set()
                time.sleep(0.05)
                continue
            # Dequeue according to policy.
            with self._lock:
                if not self._q:
                    continue
                if SOUND_POLICY == "merge":
                    items = self._q[:]
                    self._q.clear()
                else:
                    items = [self._q.pop(0)]
            # Stamp only when we actually play, so empty wakeups cannot delay real playback.
            self._last_play_at = now
            # Build the announcement text.
            if SOUND_POLICY == "merge":
                cnt_no = sum(1 for x in items if x == "NO_DUST")
                cnt_bad = len(items) - cnt_no
                parts = []
                if cnt_no > 0:
                    parts.append(f"{SOUND_TEXT_NO_DUST}{'' if cnt_no == 1 else f' {cnt_no}次'}")
                if cnt_bad > 0:
                    parts.append(f"{SOUND_TEXT_BAD_DUST}{'' if cnt_bad == 1 else f' {cnt_bad}次'}")
                text = ",".join(parts)
                beep_type = "NO_DUST" if cnt_no > 0 else "BAD_DUST"
            else:
                ng_type = items[0]
                text = SOUND_TEXT_NO_DUST if ng_type == "NO_DUST" else SOUND_TEXT_BAD_DUST
                beep_type = ng_type
            # Dispatch per SOUND_MODE; any failure degrades to a beep.
            if SOUND_MODE == "tts":
                ok = self._tts(text)
                if not ok:
                    self._beep(beep_type)
            elif SOUND_MODE == "file":
                ok = self._play_file(beep_type)
                if not ok:
                    self._beep(beep_type)
            elif SOUND_MODE == "ps":
                ok = self._ps_speak(text)
                if not ok:
                    self._beep(beep_type)
            else:
                self._beep(beep_type)

    def close(self):
        """Stop the worker thread (waits up to 1s for it to exit)."""
        self._stop.set()
        self._evt.set()
        try:
            if self._th.is_alive():
                self._th.join(timeout=1.0)
        except Exception:
            pass
@dataclass
class TrackState:
    """Per-person tracking state for the dust-check business flow."""
    first_seen: float
    last_seen: float
    last_pos: Tuple[float, float]
    last_box_bottom_y: float = 0.0
    entry_ok: bool = False
    frames_in_entry: int = 0
    in_scope: bool = False  # only tracks confirmed through the entry zone enter the business flow
    dust_entered: bool = False
    dust_stationary_start: Optional[float] = None
    # "Relatively stationary" anchor: reference position captured when the stationary window opens.
    # Previously frame-to-frame displacement was used, but a slow walker (or a distant target)
    # moves very little per frame, so passers-by accumulated stationary time by mistake.
    dust_stationary_ref_pos: Optional[Tuple[float, float]] = None
    dust_ok: bool = False
    dust_ok_time: float = 0.0
    dust_best_stationary_sec: float = 0.0  # longest continuous stationary stretch (reported even on NG)
    # Total stationary time: accumulated frame-by-frame once stillness is confirmed
    # (keeps accumulating even after the OK threshold has been reached).
    dust_total_stationary_sec: float = 0.0
    dust_last_count_time: Optional[float] = None
    dust_stationary_confirmed: bool = False
    last_in_exit: bool = False
    last_in_dust: bool = False
    finished: bool = False
    reported: bool = False
    reason: str = ""
def main():
    """Run the dust-check pipeline.

    Reads RTSP frames, tracks people with YOLO + ByteTrack, confirms they passed
    the ENTRY zone, accumulates "relatively stationary" time inside the DUST
    zone, reports OK/NG at the EXIT zone (UNKNOWN on bottom-edge track loss),
    plays audio alerts for NG, and restreams annotated frames over RTMP.
    """
    # Device selection: avoid crashing with device=0 when a GPU exists but torch is the CPU build.
    device_arg = "cpu"
    try:
        import torch
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            device_arg = "0"
    except Exception:
        device_arg = "cpu"
    detect_model = YOLO(DETECT_MODEL)
    reader = PyAVFrameReader(RTSP_URL, use_cuda=True, buffer_size=2, reconnect_interval=1.0).start()
    retry_n = int(getattr(cfg, "STARTUP_FIRST_FRAME_RETRY", 5))
    timeout_s = float(getattr(cfg, "STARTUP_FIRST_FRAME_TIMEOUT_SEC", 2.0))
    retry_interval_s = float(getattr(cfg, "STARTUP_FIRST_FRAME_RETRY_INTERVAL_SEC", 1.0))
    first_frame = None
    for i in range(max(1, retry_n)):
        first_frame = reader.get_latest(timeout=max(0.2, timeout_s))
        if first_frame is not None:
            break
        print(f"[RTSP] 首帧读取失败,重试 {i + 1}/{max(1, retry_n)} ...")
        time.sleep(max(0.1, retry_interval_s))
    if first_frame is None:
        reader.stop()
        raise RuntimeError(f"无法读取 RTSP 第一帧(已重试{max(1, retry_n)}次): {RTSP_URL}")
    last_good_frame = None  # numpy ndarray (BGR)
    pusher = PyAVPusher(RTMP_OUTPUT_URL, VIDEO_WIDTH, VIDEO_HEIGHT, FPS, encoder="h264_nvenc") if PUSH_VIDEO else None
    # -------------------------
    # NG replay: PyAV ring buffer (stores the annotated frames); on NG the
    # preceding ~10s are exported as a clip.
    # -------------------------
    global CLIP_RECORDER
    try:
        clip_enable = bool(getattr(cfg, "CLIP_ENABLE", True))
        if clip_enable:
            max_sec = int(getattr(cfg, "CLIP_WRAP", 30)) * int(getattr(cfg, "CLIP_SEGMENT_SEC", 1))
            CLIP_RECORDER = PyAVClipBuffer(
                fps=int(FPS if FPS else 15),
                max_seconds=max(10, max_sec),
                width=int(VIDEO_WIDTH),
                height=int(VIDEO_HEIGHT),
                encoder="h264_nvenc",
            )
            print("[CLIP] dust pyav rolling buffer enabled")
        else:
            print("[CLIP] disabled by CLIP_ENABLE=0")
            CLIP_RECORDER = None
    except Exception as e:
        CLIP_RECORDER = None
        print(f"[CLIP] init failed: {e}")
    # Zones: keep the raw configured points first; scale them to the streaming
    # resolution once the first frame's resolution is known.
    entry_poly_orig = np.array(cfg.DUST_ENTRY_AREA, dtype=np.float32)
    entry_poly2_orig = None
    if hasattr(cfg, "DUST_ENTRY_AREA2") and getattr(cfg, "DUST_ENTRY_AREA2"):
        entry_poly2_orig = np.array(cfg.DUST_ENTRY_AREA2, dtype=np.float32)
    dust_poly_orig = np.array(cfg.DUST_AREA, dtype=np.float32)
    exit_poly_orig = np.array(cfg.DUST_EXIT_AREA, dtype=np.float32)
    exclude_polys_orig = []
    if hasattr(cfg, "DUST_EXCLUDE_POLYGONS") and getattr(cfg, "DUST_EXCLUDE_POLYGONS"):
        exclude_polys_orig = [np.array(p, dtype=np.float32) for p in cfg.DUST_EXCLUDE_POLYGONS]
    entry_poly = None
    entry_poly2 = None
    dust_poly = None
    exit_poly = None
    exclude_polys = None
    tracks: Dict[int, TrackState] = {}
    # ID-switch stitching: keep "just vanished" tracks so a new ID can inherit the old state.
    lost_pool: Dict[int, Tuple[TrackState, float]] = {}  # tid -> (state, lost_at)
    sound = SoundWorker()
    if SOUND_ENABLE and SOUND_TEST_ON_START:
        # Startup self-test: confirm this machine can actually produce sound.
        try:
            if SOUND_MODE == "file":
                sound._play_file("BAD_DUST")
            elif SOUND_MODE == "ps":
                sound._ps_speak(SOUND_TEST_TEXT)
            elif SOUND_MODE == "tts":
                sound._tts(SOUND_TEST_TEXT)
        except Exception:
            pass
    push_interval = 1.0 / (FPS if FPS else 15)
    next_push_at = 0.0
    MAX_CATCHUP = 2
    frame_count = 0
    last_status_time = time.time()
    last_detect_res = None
    print("开始沾尘检测(按ESC退出)...")
    while True:
        t_loop0 = time.time()
        frame = reader.get_latest(timeout=0.2)
        has_new_frame = frame is not None
        if not has_new_frame:
            # RTSP hiccup: keep displaying/pushing the previous frame so the
            # frontend player doesn't stall and buffer.
            if last_good_frame is not None:
                frame = last_good_frame
            else:
                time.sleep(0.03)
                continue
        now = time.time()
        frame_count += 1
        # Resize to the streaming resolution (inference / overlay / push all share one frame).
        orig_h, orig_w = frame.shape[:2]
        if frame.shape[1] != VIDEO_WIDTH or frame.shape[0] != VIDEO_HEIGHT:
            frame = cv2.resize(frame, (VIDEO_WIDTH, VIDEO_HEIGHT))
        if has_new_frame:
            last_good_frame = frame
        # First frame: scale the zone points from the source resolution to the streaming resolution.
        if entry_poly is None:
            sx = VIDEO_WIDTH / float(orig_w)
            sy = VIDEO_HEIGHT / float(orig_h)
            entry_poly = (entry_poly_orig * np.array([sx, sy], dtype=np.float32)).astype(np.int32)
            if entry_poly2_orig is not None:
                entry_poly2 = (entry_poly2_orig * np.array([sx, sy], dtype=np.float32)).astype(np.int32)
            dust_poly = (dust_poly_orig * np.array([sx, sy], dtype=np.float32)).astype(np.int32)
            exit_poly = (exit_poly_orig * np.array([sx, sy], dtype=np.float32)).astype(np.int32)
            if exclude_polys_orig:
                exclude_polys = [
                    (p * np.array([sx, sy], dtype=np.float32)).astype(np.int32) for p in exclude_polys_orig
                ]
            print(f"[DUST] 区域坐标缩放完成: sx={sx:.3f}, sy={sy:.3f}")
        # Inference throttling: run inference on only 1 of every N frames, and only on NEW frames.
        # Important: business logic (entry/stationary/exit/timing) advances only on inference
        # frames; skipped frames are used for drawing only, to avoid false UNKNOWN/OK verdicts.
        t_infer0 = time.time()
        should_infer = False
        if SKIP_INFER_N and SKIP_INFER_N > 1:
            should_infer = (frame_count % SKIP_INFER_N == 0)
        elif has_new_frame:
            should_infer = True
        if not has_new_frame:
            should_infer = False
        if should_infer:
            try:
                detect_results = detect_model.track(
                    source=frame,
                    persist=True,
                    classes=[0],
                    tracker=BYTETRACK_PATH,
                    conf=0.3,
                    iou=0.5,
                    verbose=False,
                    device=device_arg,
                )
                detect_res = detect_results[0] if detect_results and len(detect_results) > 0 else None
                last_detect_res = detect_res
            except Exception as e:
                print(f"[DUST] 推理失败: {e}")
                detect_res = last_detect_res
        else:
            detect_res = last_detect_res
        infer_ms = (time.time() - t_infer0) * 1000.0
        det_for_draw = detect_res
        det_for_logic = detect_res if should_infer else None
        # Draw the zones.
        draw_poly(frame, entry_poly, (255, 255, 0), "ENTRY")
        draw_poly(frame, dust_poly, (0, 255, 255), "DUST")
        draw_poly(frame, exit_poly, (255, 0, 255), "EXIT")
        active_ids = set()
        # Draw boxes first (on every frame, inference frame or not).
        if det_for_draw is not None and hasattr(det_for_draw, "boxes") and det_for_draw.boxes is not None:
            boxes = det_for_draw.boxes
            if getattr(boxes, "id", None) is not None:
                ids = boxes.id.int().cpu().tolist()
                xyxy = boxes.xyxy.cpu().numpy()
                for tid, b in zip(ids, xyxy):
                    x1, y1, x2, y2 = b.tolist()
                    cx = (x1 + x2) / 2.0
                    cy = (y1 + y2) / 2.0
                    pos = (cx, cy)
                    st = tracks.get(tid)
                    prev_pos = st.last_pos if st is not None else pos
                    # Box/text only reflect current state; business state is NOT advanced here.
                    if st is None:
                        color = (255, 255, 255)
                        tag = ""
                    else:
                        color = (0, 255, 0) if st.dust_ok else (0, 165, 255) if st.dust_entered else (255, 255, 255)
                        # Like hand-washing's WASHING+timer: DUST also shows the accumulated
                        # stationary time (dust_total_stationary_sec).
                        in_dust_now = False
                        try:
                            in_dust_now = bool(dust_poly is not None and point_in_poly(pos, dust_poly))
                        except Exception:
                            in_dust_now = False
                        total_sec = _sec(getattr(st, "dust_total_stationary_sec", 0.0))
                        if in_dust_now:
                            tag = f"DUSTING {total_sec:.1f}s"
                        else:
                            tag = "OK_READY" if st.dust_ok else ("DUST" if st.dust_entered else ("ENTRY" if st.entry_ok else ""))
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
                    cv2.putText(frame, f"ID {tid} {tag}", (int(x1), max(0, int(y1) - 8)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
        # Business logic runs only on inference frames (stale boxes on skipped
        # frames would corrupt the stationary/entry judgments).
        if det_for_logic is not None and hasattr(det_for_logic, "boxes") and det_for_logic.boxes is not None:
            boxes = det_for_logic.boxes
            if getattr(boxes, "id", None) is not None:
                ids = boxes.id.int().cpu().tolist()
                xyxy = boxes.xyxy.cpu().numpy()
                # Purge expired entries from the stitching pool.
                for lid, (_, lost_at) in list(lost_pool.items()):
                    if now - lost_at > STITCH_TTL_SEC:
                        lost_pool.pop(lid, None)
                for tid, b in zip(ids, xyxy):
                    x1, y1, x2, y2 = b.tolist()
                    cx = (x1 + x2) / 2.0
                    cy = (y1 + y2) / 2.0
                    pos = (cx, cy)
                    # Compute zone membership first (needed for the rule
                    # "people who appear in the exit zone are not tracked/reported").
                    in_entry = point_in_poly(pos, entry_poly)
                    if entry_poly2 is not None:
                        in_entry = bool(in_entry or point_in_poly(pos, entry_poly2))
                    in_dust = point_in_poly(pos, dust_poly)
                    in_exit = point_in_poly(pos, exit_poly)
                    in_exclude = False
                    if exclude_polys:
                        for ep in exclude_polys:
                            if point_in_poly(pos, ep):
                                in_exclude = True
                                break
                    st = tracks.get(tid)
                    if st is None:
                        if in_exclude:
                            continue
                        # Rule: a person first seen in the exit zone is neither tracked nor reported.
                        if in_exit:
                            continue
                        # Try stitching (retroactive):
                        # Key fix: don't rely on lost_pool alone — when a target gets a new ID
                        # after being occluded in place for a fraction of a second, the old
                        # target may not yet have met the pool-entry threshold (0.4s).
                        # So candidates come from BOTH:
                        # - lost_pool (the explicit lost pool)
                        # - tracks that "just vanished" (< STITCH_TTL_SEC ago)
                        # preferring the closest. Candidates must be in_scope or have already
                        # entered DUST (to avoid stitching random passers-by into the flow).
                        best_id = None
                        best_dist = None
                        best_src = ""
                        def eligible_for_stitch(s: TrackState) -> bool:
                            # A candidate must be an unfinished, unreported in-flow track not last seen in EXIT.
                            if s is None:
                                return False
                            if s.finished or s.reported:
                                return False
                            if getattr(s, "last_in_exit", False):
                                return False
                            return bool(s.in_scope or getattr(s, "dust_entered", False))
                        # 1) Candidates from lost_pool.
                        for lid, (lst, lost_at) in lost_pool.items():
                            if not eligible_for_stitch(lst):
                                continue
                            d = math.hypot(pos[0] - lst.last_pos[0], pos[1] - lst.last_pos[1])
                            if d <= STITCH_DIST_PX and (best_dist is None or d < best_dist):
                                best_dist = d
                                best_id = lid
                                best_src = "pool"
                        # 2) Candidates from tracks that just vanished (independent of pool-entry timing).
                        for lid2, lst2 in tracks.items():
                            if lid2 == tid:
                                continue
                            if not eligible_for_stitch(lst2):
                                continue
                            if lid2 in active_ids:
                                continue
                            # Must have vanished recently; never stitch long-gone targets.
                            if (now - float(lst2.last_seen)) > float(STITCH_TTL_SEC):
                                continue
                            d = math.hypot(pos[0] - lst2.last_pos[0], pos[1] - lst2.last_pos[1])
                            if d <= STITCH_DIST_PX and (best_dist is None or d < best_dist):
                                best_dist = d
                                best_id = lid2
                                best_src = "direct"
                        if best_id is not None:
                            # "pool" source: pop from the pool; "direct" source may not be in the pool.
                            if best_src == "pool":
                                st, _ = lost_pool.pop(best_id)
                            else:
                                st = tracks.get(best_id)
                            prev_pos = st.last_pos
                            st.last_seen = now
                            tracks[tid] = st
                            tracks.pop(best_id, None)
                            # Also drop any leftover entry for the same ID in the lost pool.
                            lost_pool.pop(best_id, None)
                            print(f"[DUST][STITCH] ID 跳变修正: {best_id} -> {tid} (距离: {best_dist:.1f}px, src={best_src})")
                        else:
                            st = TrackState(first_seen=now, last_seen=now, last_pos=pos, last_box_bottom_y=float(y2))
                            tracks[tid] = st
                            prev_pos = pos
                    else:
                        prev_pos = st.last_pos
                        st.last_seen = now
                    if st.finished:
                        st.last_pos = pos
                        continue
                    if in_exclude:
                        st.finished = True
                        st.reported = True
                        st.reason = "exclude_ignore"
                        st.last_pos = pos
                        continue
                    st.last_in_exit = bool(in_exit)
                    st.last_in_dust = bool(in_dust)
                    st.last_box_bottom_y = float(y2)
                    # Entry confirmation (as in hand-washing: require several consecutive
                    # hits to reduce UNKNOWNs caused by spurious touches).
                    if in_entry:
                        st.frames_in_entry += 1
                    else:
                        st.frames_in_entry = max(0, st.frames_in_entry - 1)
                    if (not st.in_scope) and st.frames_in_entry >= MIN_FRAMES_IN_ENTRY:
                        st.entry_ok = True
                        st.in_scope = True
                        # Only after entering scope are NG/OK/UNKNOWN reports allowed.
                        # print(f"[DUST][ENTRY] ID {tid} 入口确认")
                    if in_dust:
                        st.dust_entered = True
                        # Relative-stationary detection (improved):
                        # judge stillness by drift from an anchor position, so a slow walker
                        # (tiny per-frame displacement) is not mistaken for stationary.
                        if st.dust_stationary_start is None or st.dust_stationary_ref_pos is None:
                            st.dust_stationary_start = now
                            st.dust_stationary_ref_pos = pos
                            st.dust_last_count_time = None
                            st.dust_stationary_confirmed = False
                        else:
                            ref = st.dust_stationary_ref_pos
                            dist_anchor = math.hypot(pos[0] - ref[0], pos[1] - ref[1])
                            if dist_anchor > MOVE_THRESH_PX:
                                # Update best before resetting the window on movement.
                                if st.dust_stationary_start is not None:
                                    cur_stationary = max(0.0, now - st.dust_stationary_start)
                                    if cur_stationary > st.dust_best_stationary_sec:
                                        st.dust_best_stationary_sec = cur_stationary
                                st.dust_stationary_start = now
                                st.dust_stationary_ref_pos = pos
                                st.dust_last_count_time = None
                                st.dust_stationary_confirmed = False
                        # Track the longest continuous stationary stretch (reported even on NG).
                        if st.dust_stationary_start is not None:
                            cur_stationary = max(0.0, now - st.dust_stationary_start)
                            if cur_stationary > st.dust_best_stationary_sec:
                                st.dust_best_stationary_sec = cur_stationary
                            # Total accumulation: only starts once stillness has been held
                            # for DUST_STATIONARY_CONFIRM_SEC.
                            if not st.dust_stationary_confirmed:
                                if cur_stationary >= DUST_STATIONARY_CONFIRM_SEC:
                                    st.dust_stationary_confirmed = True
                                    # Credit the pre-confirmation stationary time too (don't truncate it).
                                    st.dust_total_stationary_sec += cur_stationary
                                    st.dust_last_count_time = now
                            else:
                                if st.dust_last_count_time is None:
                                    st.dust_last_count_time = now
                                dt = now - st.dust_last_count_time
                                if dt > 0:
                                    st.dust_total_stationary_sec += dt
                                    st.dust_last_count_time = now
                            # OK criterion (per latest requirement): compare the accumulated
                            # total stationary time against the threshold.
                            if (not st.dust_ok) and st.dust_total_stationary_sec >= DUST_STAY_SEC:
                                st.dust_ok = True
                                st.dust_ok_time = float(st.dust_total_stationary_sec)
                    else:
                        # Left the dust zone: close the current stationary window
                        # (total accumulation / OK state are kept; final report uses the total).
                        st.dust_stationary_start = None
                        st.dust_stationary_ref_pos = None
                        st.dust_last_count_time = None
                        st.dust_stationary_confirmed = False
                    # Exit-zone verdict.
                    if in_exit and not st.reported:
                        exited_iso = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(now))
                        active_count = len([t for t in tracks.values() if not t.finished and (now - t.last_seen) < 2.0])
                        # Rules:
                        # - Appeared in the exit zone without coming through the entry
                        #   (in_scope=False): not tracked/reported (drawn only).
                        # - Came through the entry: report immediately upon entering the exit
                        #   zone — compliant=OK, non-compliant=NG; ignored afterwards.
                        # - If the entry was occasionally missed, having entered DUST also
                        #   qualifies for an exit report.
                        if not (st.in_scope or getattr(st, "dust_entered", False)):
                            st.finished = True
                            st.reported = True
                            st.reason = "exit_no_scope_ignore"
                        else:
                            entry_source = "entry1"
                            # Final verdict uses the accumulated total, in case dust_ok
                            # wasn't set in time on some frame.
                            total = float(getattr(st, "dust_total_stationary_sec", 0.0) or 0.0)
                            # NOTE(review): `best` is computed but unused in the verdict below.
                            best = float(getattr(st, "dust_best_stationary_sec", 0.0) or 0.0)
                            ok_by_total = total >= DUST_STAY_SEC
                            if ok_by_total:
                                result = "OK"
                                # reason = f"沾尘合格(累计静止{total:.1f}s>={DUST_STAY_SEC:.0f}s)"
                                reason = ""
                                dust_time = max(DUST_STAY_SEC, total)
                            else:
                                result = "NG"
                                # Two NG categories: 1) never dusted (never entered the dust
                                # zone)  2) improper dusting (entered but not stationary long enough).
                                if not st.dust_entered:
                                    # Requirement: NG with zero duration is reported as "未沾尘".
                                    reason = "未沾尘"
                                    sound.play_ng("NO_DUST")
                                    dust_time = 0.0
                                else:
                                    # Tiny accumulated stillness means "just passing through":
                                    # treat as never dusted.
                                    if total < MIN_EFFECTIVE_STATIONARY_SEC:
                                        reason = "未沾尘"
                                        sound.play_ng("NO_DUST")
                                        dust_time = 0.0
                                    else:
                                        # Requirement: report the total stationary time.
                                        reason = f"沾尘不规范(沾尘{total:.1f}s<{DUST_STAY_SEC:.0f}s)"
                                        dust_time = float(total)
                            st.finished = True
                            st.reported = True
                            st.reason = reason
                            if result == "NG":
                                cat = "no" if (reason or "") == "未沾尘" else "bad"
                                schedule_dust_clip_export_and_report(
                                    tid, entry_source, result, dust_time, reason, exited_iso, active_count, now, cat
                                )
                            else:
                                send_report(
                                    tid, entry_source, result, dust_time, reason, exited_iso, active_count,
                                    clip_url=None, clip_start_at=None, clip_end_at=None
                                )
                    # Only genuinely tracked targets count as active (exit-zone passers-by excluded).
                    active_ids.add(tid)
                    # Box/text overlay.
                    color = (0, 255, 0) if st.dust_ok else (0, 165, 255) if st.dust_entered else (255, 255, 255)
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
                    in_dust_now = bool(in_dust)
                    total_sec = _sec(getattr(st, "dust_total_stationary_sec", 0.0))
                    if in_dust_now:
                        tag = f"DUSTING {total_sec:.1f}s"
                    else:
                        tag = "OK_READY" if st.dust_ok else ("DUST" if st.dust_entered else ("ENTRY" if st.entry_ok else ""))
                    cv2.putText(frame, f"ID {tid} {tag}", (int(x1), max(0, int(y1) - 8)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
                    # Update last_pos last (the displacement math above must use prev_pos).
                    st.last_pos = pos
        # Loss handling: only on inference frames (should_infer), so stale boxes /
        # frozen frames can't trigger false UNKNOWNs.
        # Rules (per latest requirement):
        # - Only tracked targets that vanish near the bottom edge of the frame and
        #   stay lost past the timeout are reported UNKNOWN ("底部消失").
        # - Vanishing off the left/right edges is ignored.
        # - After an OK/NG has been reported (esp. vanishing in EXIT): no UNKNOWN
        #   (st.finished/st.reported already guard this).
        if det_for_logic is not None:
            bottom_edge_y = float(VIDEO_HEIGHT) * 0.9  # bottom 10% of the frame (stale original comment said 6%)
            # First move "just vanished" targets into the stitching pool, giving
            # ID switches / brief occlusions a recovery window (stitch-back within ~2s).
            for tid, st in list(tracks.items()):
                if st.finished:
                    continue
                if tid not in active_ids:
                    # Candidates: in_scope or already entered DUST.
                    if (st.in_scope or getattr(st, "dust_entered", False)) and (now - st.last_seen) >= 0.2:
                        if tid not in lost_pool:
                            lost_pool[tid] = (st, now)
            for tid, st in list(tracks.items()):
                if st.finished or st.reported:
                    continue
                # Vanished in the EXIT zone: no UNKNOWN.
                if getattr(st, "last_in_exit", False):
                    continue
                if (now - st.last_seen) <= LOST_TIMEOUT_SEC:
                    continue
                # Timed out without recovery: only a bottom-edge disappearance is UNKNOWN.
                reason = None
                last_bottom = float(getattr(st, "last_box_bottom_y", 0.0) or 0.0)
                last_center_y = float(st.last_pos[1]) if st.last_pos is not None else 0.0
                if (st.in_scope or getattr(st, "dust_entered", False)) and max(last_center_y, last_bottom) >= bottom_edge_y:
                    reason = "底部消失"
                # All other cases are not UNKNOWN (e.g. someone outside the flow vanishing,
                # or vanishing outside the DUST zone).
                if not reason:
                    continue
                st.finished = True
                st.reported = True
                st.reason = reason
                # Once UNKNOWN has been reported, stitching back is no longer allowed.
                lost_pool.pop(tid, None)
                exited_iso = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(now))
                active_count = len([t for t in tracks.values() if not t.finished and (now - t.last_seen) < 2.0])
                entry_source = "entry1"
                total = float(getattr(st, "dust_total_stationary_sec", 0.0) or 0.0)
                schedule_dust_clip_export_and_report(
                    tid, entry_source, "UNKNOWN", total, reason, exited_iso, active_count, now, "unknown"
                )
        # Status printout (debug mode only).
        if DEBUG_MODE and (now - last_status_time > 10):
            active_count = len([t for t in tracks.values() if not t.finished and (now - t.last_seen) < 2.0])
            done = len([t for t in tracks.values() if t.finished])
            print(f"[DUST][STATUS] active={active_count} done={done} tracks={len(tracks)}")
            last_status_time = now
        # Local on-box rendering (optional): current requirement is "stream only,
        # no local window", so this defaults to off.
        show_ms = 0.0
        if bool(getattr(cfg, "DUST_SHOW_WINDOW", False)):
            t_show0 = time.time()
            cv2.imshow("Dust Check", frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break
            show_ms = (time.time() - t_show0) * 1000.0
        # Replay buffer: store the annotated frame (exactly what is displayed this frame).
        if CLIP_RECORDER is not None:
            try:
                CLIP_RECORDER.push(np.ascontiguousarray(frame, dtype=np.uint8), ts=now)
            except Exception:
                pass
        # Push (fixed cadence, on the main thread).
        t_push0 = time.time()
        if PUSH_VIDEO:
            now2 = time.time()
            if next_push_at == 0.0:
                next_push_at = now2
            sent = 0
            while now2 >= next_push_at and sent < MAX_CATCHUP:
                try:
                    if pusher is not None:
                        pusher.write(frame)
                except Exception:
                    pass
                next_push_at += push_interval
                sent += 1
            # Fell too far behind the schedule: resynchronize instead of bursting.
            if now2 - next_push_at > 2.0 * push_interval:
                next_push_at = now2 + push_interval
        push_ms = (time.time() - t_push0) * 1000.0
        # Timing printout every 30 frames (debug mode only).
        if DEBUG_MODE and (frame_count % 30 == 0):
            total_ms = (time.time() - t_loop0) * 1000.0
            print(f"[DUST][TIME] frame={frame_count} infer={infer_ms:.1f}ms show={show_ms:.1f}ms push={push_ms:.1f}ms total={total_ms:.1f}ms")
    # Cleanup (reached via the ESC break when the local window is enabled).
    try:
        reader.stop()
    except Exception:
        pass
    try:
        if bool(getattr(cfg, "DUST_SHOW_WINDOW", False)):
            cv2.destroyAllWindows()
    except Exception:
        pass
    try:
        sound.close()
    except Exception:
        pass
    if CLIP_RECORDER is not None:
        CLIP_RECORDER = None
    try:
        CLIP_EXPORT_POOL.shutdown(wait=False, cancel_futures=False)
    except Exception:
        pass
    if pusher is not None:
        try:
            pusher.close()
        except Exception:
            pass
# Standard script guard (restored: a previous revision used `if True:` which made
# `main()` run — and block — on import; run it only when executed as a script).
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n[INFO] Ctrl+C 退出")
    except Exception as e:
        import traceback
        print(f"[ERROR] {e}")
        traceback.print_exc()