Spaces:
Sleeping
Sleeping
Commit ·
88f1cd0
0
Parent(s):
Initial commit for ChickSense Space
Browse files- .gitattributes +5 -0
- Dockerfile +22 -0
- config.py +67 -0
- main.py +540 -0
- models/Chicken_CNN_Disease_Detection_Model.pth +3 -0
- models/yolov8n.pt +3 -0
- requirements.txt +20 -0
- static/assets/logo-.jpeg +3 -0
- static/css/style.css +186 -0
- static/demo/3_usa.mp4 +3 -0
- static/demo/5_usa.mp4 +3 -0
- static/demo/6_usa.mp4 +3 -0
- static/demo/7_usa.mp4 +3 -0
- static/demo/audio.m4a +3 -0
- static/index.html +308 -0
- static/js/app.js +682 -0
- utils/__init__.py +0 -0
- utils/audio/__init__.py +0 -0
- utils/audio/audio_ingest.py +118 -0
- utils/audio/vocalization_prediction.py +112 -0
- utils/metrics_store.py +83 -0
- utils/notifications.py +273 -0
- utils/video/__init__.py +0 -0
- utils/video/frame_reader.py +105 -0
- utils/video/stream_processor.py +250 -0
- utils/video/tracker.py +56 -0
.gitattributes
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.m4a filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
static/assets/logo-.jpeg filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10
|
| 2 |
+
|
| 3 |
+
# The two following lines are required for Hugging Face Spaces Dev Mode
|
| 4 |
+
RUN useradd -m -u 1000 user
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
# Install dependencies
|
| 8 |
+
COPY --chown=user ./requirements.txt requirements.txt
|
| 9 |
+
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
| 10 |
+
|
| 11 |
+
# Copy application code, models, and static assets
|
| 12 |
+
COPY --chown=user . /app
|
| 13 |
+
|
| 14 |
+
# Switch to non-root user
|
| 15 |
+
USER user
|
| 16 |
+
|
| 17 |
+
# Set environment variables for Hugging Face
|
| 18 |
+
ENV HOME=/home/user \
|
| 19 |
+
PATH=/home/user/.local/bin:$PATH
|
| 20 |
+
|
| 21 |
+
# Start FastAPI app
|
| 22 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
config.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os

# Main application config: model file locations and audio-analysis cadence.
APP_CONFIG = {
    "YOLO_MODEL_PATH" : "./models/yolov8n.pt",
    "VOCAL_MODEL_PATH" : "./models/Chicken_CNN_Disease_Detection_Model.pth",

    "AUDIO_ANALYSIS_DURATION_S" : 30,  # seconds of audio captured per analysis run
    "AUDIO_ANALYSIS_INTERVAL_S" : 60   # seconds between analysis runs
}

# Video processing / streaming tuning knobs.
TUNING = {
    # How video is processed
    "YOLO_IMG_SIZE" : 512,
    "DETECTION_INTERVAL_FRAMES" : 24,
    "FRAME_READER_BUFFER_SIZE" : 5,
    "FRAME_READER_FPS" : 15,

    # How streaming is processed
    "WEBSOCKET_TARGET_FPS" : 8,
    "WEBSOCKET_JPEG_QUALITY" : 40, # JPEG quality (0-100)
    "WEBSOCKET_DISPLAY_MAX_WIDTH" : 640
}

# Inactivity detection config (EMA-smoothed normalized speed thresholds).
INACTIVITY_CFG = {
    "EMA_ALPHA" : 0.2,
    "ENTER_THRESH_NORM_SPEED" : 0.02,
    "EXIT_THRESH_NORM_SPEED" : 0.05,
    "MIN_DURATION_S" : 60, # (7200 = 2 hours) seconds an object must stay still before it is flagged inactive
    "MAX_UNSEEN_GAP_S" : 1.5
}

# Density clustering (DBSCAN-style) config.
DENSITY_DBSCAN_CFG = {
    "EPS_PX" : 60.0, # Max pixels between two objects to be considered neighbors
    "MIN_NEIGHBORS" : 4 # Min objects required to form a dense cluster
}

# Database config (SQLite metrics store).
DATA_STORAGE = {
    "SQLITE_DB_PATH" : "data/metrics.sqlite",
    "DB_RETENTION_DAYS" : 90,
    "DB_WRITE_BUFFER_SIZE" : 500,
    "DB_WRITE_INTERVAL_S" : 1.0
}

# Overlay colors — BGR tuples (OpenCV channel order).
VISUALS = {
    "COLOR_DETECTED" : (0, 255, 0), # Green
    "COLOR_DENSE" : (0, 165, 255), # Orange
    "COLOR_INACTIVE": (0, 0, 255), # Red
}

# Notifier config; secrets come from environment variables so they are
# never committed to the repo.
NOTIFIER = {
    "ENABLE_TELEGRAM_NOTIFICATIONS" : os.environ.get("ENABLE_TELEGRAM_NOTIFICATIONS", "n").lower().startswith("y"),
    "SENSOR_DATA_JSON_PATH" : "data/sensor_data.json",
    "TELEGRAM_BOT_TOKEN": os.environ.get("TELEGRAM_BOT_TOKEN", ""),
    "TELEGRAM_CHAT_ID": os.environ.get("TELEGRAM_CHAT_ID", ""),

    "INACTIVE_PERCENTAGE_THRESHOLD": 0.15, # Fraction of inactive objects that counts as unhealthy
    "UNHEALTHY_HISTORY_LENGTH": 5, # Number of recent data points kept in the history
    "UNHEALTHY_ALERT_THRESHOLD": 5, # Consecutive "unhealthy" statuses within the history required to alert
    "DENSITY_COUNT_THRESHOLD": 5 # Number of unique dense clusters before an alert is sent
}
|
main.py
ADDED
|
@@ -0,0 +1,540 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import time
|
| 4 |
+
import asyncio
|
| 5 |
+
import threading
|
| 6 |
+
from collections import deque
|
| 7 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 8 |
+
from typing import Dict, Optional
|
| 9 |
+
from datetime import datetime, timezone, timedelta
|
| 10 |
+
import csv
|
| 11 |
+
import io
|
| 12 |
+
|
| 13 |
+
import cv2
|
| 14 |
+
import torch
|
| 15 |
+
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
|
| 16 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 17 |
+
from fastapi.responses import FileResponse, HTMLResponse, StreamingResponse
|
| 18 |
+
from fastapi.staticfiles import StaticFiles
|
| 19 |
+
from pydantic import BaseModel
|
| 20 |
+
from ultralytics import YOLO
|
| 21 |
+
|
| 22 |
+
from config import APP_CONFIG, TUNING, VISUALS, NOTIFIER
|
| 23 |
+
from utils.audio.audio_ingest import background_audio_task
|
| 24 |
+
from utils.video.stream_processor import StreamRegistry
|
| 25 |
+
from utils.audio.vocalization_prediction import load_model
|
| 26 |
+
from utils.metrics_store import MetricsStore
|
| 27 |
+
import utils.notifications as notifications
|
| 28 |
+
|
| 29 |
+
# -- FastAPI ---
|
| 30 |
+
app = FastAPI()
# NOTE(review): CORS is wide open (any origin, with credentials). Fine for a
# public demo Space; tighten allow_origins before any production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins = ["*"],
    allow_credentials = True,
    allow_methods = ["*"],
    allow_headers = ["*"]
)

# Frontend assets (JS/CSS/demo media) are served from ./static.
app.mount("/static", StaticFiles(directory = "static"), name = "static")
INDEX_FILE_PATH = os.path.join("static", "index.html")
| 41 |
+
|
| 42 |
+
@app.get("/", response_class = FileResponse)
async def read_index():
    """Serve the single-page app entry point, or a 404 page if it is missing."""
    if os.path.exists(INDEX_FILE_PATH):
        return FileResponse(INDEX_FILE_PATH)
    return HTMLResponse("index.html missing", status_code = 404)
|
| 47 |
+
|
| 48 |
+
# -- Global state --
# Most recent vocalization-model output; mutated in place by the background
# audio task so readers always see the latest values.
latest_audio_result = {
    "prediction": None,
    "probabilities": None
}

audio_result_pending = False      # True while a triggered analysis has not yet been consumed
analysis_in_progress = False      # read by the audio endpoints as a duplicate-trigger guard
_last_appended_audio_sig = None   # signature of the last prediction appended to history

last_audio_trigger_time = 0.0
stream_registry: Optional[StreamRegistry] = None   # created on startup
executor = ThreadPoolExecutor(max_workers=4)       # runs blocking audio jobs off the event loop

# Bookmark into the audio source so successive runs analyze successive windows.
audio_seek_state = {"url": None, "seek_seconds": 0}
current_audio_url: Optional[str] = None
audio_loop_task: Optional[asyncio.Task] = None

# - Notifications JSON builder -
# Lock serializes writes to sensor_data.json across websocket handlers.
_sensor_json_lock = threading.Lock()
sensor_state: Dict[str, dict] = {}        # per-camera (and "MIC") latest records
inactive_since: Dict[str, float] = {}     # camera_id -> timestamp first seen inactive

# Rolling history of the last 5 vocalization predictions.
global_vocal_history: deque[str] = deque(maxlen=5)
metrics_store = MetricsStore()
|
| 73 |
+
|
| 74 |
+
# -- Models --
|
| 75 |
+
class AudioTriggerRequest(BaseModel):
    """Request body for POST /trigger_audio_analysis."""
    # URL of the audio source to analyze. The trailing underscore is part of
    # the public JSON field name — keep it for client compatibility.
    audio_url_: str
|
| 77 |
+
|
| 78 |
+
# -- Helpers method --
|
| 79 |
+
|
| 80 |
+
def _draw_overlays(frame, tracks, display_settings: Dict[str, bool]):
    """Draw per-track bounding boxes and ID labels on `frame` per the display toggles.

    Color priority: inactive (red) > dense (orange) > plain detection (green);
    a track is not drawn at all when no enabled toggle applies to it.
    Mutates `frame` in place and also returns it for convenience.

    Args:
        frame: BGR image (OpenCV ndarray) to draw on.
        tracks: iterable of dicts with "box" (x1,y1,x2,y2), "id", and optional
            "inactive"/"dense" flags.
        display_settings: toggles "show_detected"/"show_density"/"show_inactive".
    """
    for t in tracks:
        x1, y1, x2, y2 = map(int, t["box"])
        color = None
        # .get(..., False) so a missing toggle key disables that overlay
        # instead of raising KeyError mid-stream.
        if t.get("inactive") and display_settings.get("show_inactive", False):
            color = VISUALS["COLOR_INACTIVE"]
        elif t.get("dense") and display_settings.get("show_density", False):
            color = VISUALS["COLOR_DENSE"]
        elif display_settings.get("show_detected", False):
            color = VISUALS["COLOR_DETECTED"]

        if color is not None:
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            # Label sits just above the box; clamp to y=0 so it stays on-screen.
            cv2.putText(frame, f"ID {t['id']}", (x1, max(0, y1 - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

    return frame
|
| 97 |
+
|
| 98 |
+
def _encode_frame_jpeg(frame, jpeg_q: int) -> Optional[bytes]:
    """Encode `frame` as JPEG at quality `jpeg_q`; return None if encoding fails."""
    success, encoded = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_q])
    if not success:
        return None
    return encoded.tobytes()
|
| 101 |
+
|
| 102 |
+
def _scale_frame(frame, max_w: int):
|
| 103 |
+
h, w = frame.shape[:2]
|
| 104 |
+
if w <= max_w:
|
| 105 |
+
return frame
|
| 106 |
+
scale = max_w / float(w)
|
| 107 |
+
return cv2.resize(frame, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_AREA)
|
| 108 |
+
|
| 109 |
+
def _update_sensor_json(camera_id: int, detected_count: int, inactive_count: int, dense_clusters: int, latest_audio: Dict):
    """Build or rewrite this camera's record (plus the shared MIC record) in sensor_data.json.

    Called once per stats tick from the websocket loop. Updates module-level
    state (inactive_since, global_vocal_history, sensor_state) and then writes
    the whole sensor_state atomically to NOTIFIER["SENSOR_DATA_JSON_PATH"].
    """
    now = time.time()
    cam_key = str(camera_id)

    # Track when this camera first reported any inactive object; cleared as
    # soon as a tick arrives with no inactive objects.
    if inactive_count > 0:
        if cam_key not in inactive_since:
            inactive_since[cam_key] = now
    else:
        inactive_since.pop(cam_key, None)

    raw_pred = (latest_audio or {}).get("prediction")
    global audio_result_pending, _last_appended_audio_sig

    pred = str(raw_pred).strip() if raw_pred else None
    sig = None
    probs = {}
    if latest_audio and pred:
        try:
            # Signature = (prediction, rounded probabilities) so history only
            # grows when the result actually changes between ticks.
            probs = dict(latest_audio.get("probabilities") or {})
            rounded_probs = {k: round(float(v), 4) for k, v in probs.items()}
            sig = (pred, tuple(sorted(rounded_probs.items())))
        except (ValueError, TypeError, OverflowError):
            # Probabilities not numeric — fall back to prediction-only signature.
            sig = (pred, None)
    else:
        sig = (pred, None) if pred else None

    # Append only if a fresh analysis is pending or the prediction changed.
    if pred and (audio_result_pending or sig != _last_appended_audio_sig):
        global_vocal_history.append(pred)
        _last_appended_audio_sig = sig
        audio_result_pending = False

    # Microphone record is global (single audio source), keyed as "MIC".
    mic_entry = {
        "camera_id": "MIC",
        "vocalization_history": list(global_vocal_history) # Full history
    }

    if pred:
        rounded_probs = {k: round(float(v), 4) for k, v in probs.items()}
        mic_entry["latest_prediction"] = pred
        mic_entry["latest_probabilities"] = rounded_probs

    sensor_state["MIC"] = mic_entry

    # Per-camera counts for the notifier to evaluate.
    sensor_state[cam_key] = {
        "camera_id": cam_key,
        "detected_count": int(detected_count),
        "inactive_count": int(inactive_count),
        "density_count": int(dense_clusters or 0)
    }

    # Atomic write: dump to a temp file, then os.replace so readers never see
    # a half-written JSON file.
    json_path = NOTIFIER["SENSOR_DATA_JSON_PATH"]
    tmp_path = json_path + ".tmp"
    os.makedirs(os.path.dirname(json_path) or ".", exist_ok=True)

    with _sensor_json_lock:
        try:
            with open(tmp_path, "w", encoding="utf-8") as f:
                json.dump(list(sensor_state.values()), f, indent=2, ensure_ascii=False)
            os.replace(tmp_path, json_path)
        except Exception as e:
            # Best-effort: log and clean up the temp file; do not crash the stream.
            print(f"[Error] Failed to write sensor_data.json: {e}")
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
|
| 177 |
+
|
| 178 |
+
def clear_sensor_data_json():
    """Reset sensor_data.json to an empty list at application startup."""
    json_path = NOTIFIER["SENSOR_DATA_JSON_PATH"]
    parent_dir = os.path.dirname(json_path) or "."
    os.makedirs(parent_dir, exist_ok=True)
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump([], f, indent=2)
|
| 184 |
+
|
| 185 |
+
async def periodic_audio_analyzer():
    """Centralized background loop that periodically analyzes the current audio source.

    Waits for a client to supply an audio URL (via the websocket handshake),
    then every AUDIO_ANALYSIS_INTERVAL_S submits a blocking analysis job to the
    thread pool, advancing a per-URL seek bookmark so successive runs cover
    successive windows of the source.
    """
    global last_audio_trigger_time, latest_audio_result, audio_result_pending, audio_seek_state

    # Grace period so model loading in startup_event can finish first.
    await asyncio.sleep(15)

    print("[Audio Loop] Starting audio analysis.")

    while True:
        if not current_audio_url:
            # No client has provided a source yet; back off a full interval.
            print("[Audio Loop] Waiting for an audio URL to be provided by a client.")
            await asyncio.sleep(APP_CONFIG["AUDIO_ANALYSIS_INTERVAL_S"])
            continue

        # Source changed — restart the seek bookmark from the beginning.
        if current_audio_url != audio_seek_state["url"]:
            print(f"[Audio Loop] New audio source detected. Resetting seek time for {current_audio_url}")
            audio_seek_state["url"] = current_audio_url
            audio_seek_state["seek_seconds"] = 0

        now = time.time()
        if now - last_audio_trigger_time >= APP_CONFIG["AUDIO_ANALYSIS_INTERVAL_S"]:

            # Get the current bookmark/seek time
            seek_time = audio_seek_state["seek_seconds"]
            duration = APP_CONFIG["AUDIO_ANALYSIS_DURATION_S"]

            print(f"[Audio Loop] Triggering analysis for: {current_audio_url} (starting at {seek_time}s)")
            last_audio_trigger_time = now

            # Clear the shared result dict in place; the worker repopulates it.
            latest_audio_result["prediction"] = None
            latest_audio_result["probabilities"] = None
            audio_result_pending = True

            # Fire-and-forget: the blocking download/inference runs in the
            # thread pool so the event loop is never blocked.
            executor.submit(
                background_audio_task,
                current_audio_url,
                duration,
                vocal_model,
                vocal_device,
                latest_audio_result,
                seek_seconds=seek_time # Pass the seek time to the task
            )

            # Move the bookmark forward for the next run
            audio_seek_state["seek_seconds"] += duration

        await asyncio.sleep(1)
|
| 232 |
+
|
| 233 |
+
# -- Websocket --
|
| 234 |
+
@app.websocket("/ws/video_feed")
async def websocket_endpoint(websocket: WebSocket):
    """Push annotated JPEG frames (binary) plus periodic stats (JSON text) over one WS.

    Protocol: the client sends an initial JSON handshake (video_url, optional
    audio_url, camera_id, fps/quality/width overrides, display toggles); the
    server then streams frames, accepting toggle/audio-url control messages
    in between sends.
    """
    await websocket.accept()
    video_url = None
    audio_url = None
    sp = None

    global current_audio_url

    try:
        # Initial handshake with config; client has 10s to send it.
        init_msg = await asyncio.wait_for(websocket.receive_text(), timeout = 10.0)
        init_data = json.loads(init_msg)

        # Per-connection overrides fall back to TUNING defaults.
        target_fps = int(init_data.get("target_fps", TUNING["WEBSOCKET_TARGET_FPS"]))
        jpeg_q = int(init_data.get("jpeg_quality", TUNING["WEBSOCKET_JPEG_QUALITY"]))
        max_w = int(init_data.get("display_max_width", TUNING["WEBSOCKET_DISPLAY_MAX_WIDTH"]))

        min_dt = 1.0 / max(1, target_fps)  # minimum seconds between frame sends
        last_send = 0.0

        video_url = init_data.get("video_url")
        # Audio defaults to the video source when not given separately.
        audio_url_from_client = init_data.get("audio_url", video_url)
        if audio_url_from_client:
            # Hand the source to the global periodic_audio_analyzer loop.
            current_audio_url = audio_url_from_client

        camera_id = int(init_data.get("camera_id", 0))
        last_stats_sent = 0.0

        if not video_url:
            await websocket.send_text(json.dumps({"type": "status", "message": "Error: URL type unkown"}))
            raise WebSocketDisconnect(code=1008, reason="URL not provided")

        display_settings = {
            "show_detected": bool(init_data.get("show_detected", False)),
            "show_density": bool(init_data.get("show_density", False)),
            "show_inactive": bool(init_data.get("show_inactive", False))
        }

        print(f"[WebSocket {websocket.client}] Stream start : {video_url}, Audio start: {audio_url}")

        # Get or create the shared stream processor for this URL (ref-counted
        # by the registry so multiple viewers reuse one decode/detect pipeline).
        sp = stream_registry.get(
            video_url,
            model = yolo_model,
            device = _device,
            half = (_device == "cuda")
        )

        while True:

            # Poll briefly for control messages (display toggles / audio url)
            # without stalling the frame loop.
            try:
                msg_str = await asyncio.wait_for(websocket.receive_text(), timeout=0.3)
                msg = json.loads(msg_str)
                if msg.get("type") == "display_settings_update":
                    for key in ["show_detected", "show_density", "show_inactive"]:
                        if key in msg:
                            display_settings[key] = bool(msg[key])
                    await websocket.send_text(json.dumps({"type": "status", "message": "Display settings updated"}))
                elif msg.get("type") == "update_audio_url" and msg.get("audio_url"):
                    audio_url = msg["audio_url"]
                    await websocket.send_text(json.dumps({"type": "status", "message": f"Audio start updated: {audio_url}"}))
            except asyncio.TimeoutError:
                pass  # no control message this cycle — normal case

            # Fetch latest processed frame and tracks
            payload = sp.get_latest()
            if payload is None:
                # Processor has not produced a frame yet; spin gently.
                await asyncio.sleep(0.01)
                continue

            frame = payload["frame"]
            tracks = payload["tracks"]

            _draw_overlays(frame, tracks, display_settings)

            # Scale and encode
            frame = _scale_frame(frame, max_w)
            jpg_bytes = _encode_frame_jpeg(frame, jpeg_q)
            if jpg_bytes is not None:
                now = time.time()
                if now - last_send >= min_dt: # Cap send rate
                    await websocket.send_bytes(jpg_bytes)
                    last_send = now

            # Stats are sent (and persisted) at most once per second.
            stats = payload.get("stats", {}) or {}
            now = time.time()
            if now - last_stats_sent > 1.0:
                detected = int(stats.get("detected", 0))
                inactive = int(stats.get("inactive", 0))
                dense_clusters = int(stats.get("dense_clusters", 0))

                await websocket.send_text(
                    json.dumps(
                        {
                            "type": "stats",
                            "detected": detected,
                            "inactive": inactive,
                            "dense_areas": dense_clusters
                        }
                    )
                )

                # Write to DB
                metrics_store.write(
                    ts=now,
                    camera_id=camera_id,
                    detected=detected,
                    dense_areas=dense_clusters,
                    inactive=inactive
                )
                last_stats_sent = now

                # Update sensor JSON with full counts; failures here must not
                # kill the stream.
                try:
                    _update_sensor_json(
                        camera_id=camera_id,
                        detected_count=detected,
                        inactive_count=inactive,
                        dense_clusters=dense_clusters,
                        latest_audio=latest_audio_result
                    )
                except Exception as e:
                    print(f"[Notif] sensor_data.json failed to update on camera {camera_id}: {e}")

            # Yield to the event loop; sleep a fraction of the frame period.
            await asyncio.sleep(max(0.005, min_dt * 0.25))

    except WebSocketDisconnect as e:
        print(f"WebSocket client {websocket.client} disconnected: (Code: {e.code}, Reason: {e.reason})")
    except asyncio.TimeoutError:
        # Handshake never arrived within 10s.
        print(f"WebSocket {websocket.client} timed out waiting for initial message.")
    except Exception as e:
        print(f"[Error] WebSocket failed for {websocket.client}: {e}")
    finally:
        # Drop our reference so the registry can shut the pipeline down when
        # the last viewer leaves.
        if video_url and stream_registry:
            stream_registry.release(video_url)
        try:
            await websocket.close()
        except Exception:
            pass  # socket may already be closed by the peer
        print(f"[WebSocket] Cleaned up for {websocket.client}.")
|
| 378 |
+
|
| 379 |
+
# Audio Endpoints
|
| 380 |
+
@app.post("/trigger_audio_analysis", status_code=202)
async def trigger_audio_analysis(request: AudioTriggerRequest):
    """Manual, fire-and-forget trigger for a short audio analysis window.

    Clears the shared result dict, marks a result as pending, and submits the
    blocking analysis job to the thread pool; returns 202 immediately.
    Poll /get_latest_audio_result for the outcome.
    """
    # NOTE(review): analysis_in_progress is read here but never assigned in
    # this module — presumably background_audio_task sets it; confirm,
    # otherwise this duplicate-trigger guard is a no-op.
    global audio_result_pending, analysis_in_progress
    if analysis_in_progress:
        return {"message": "Analysis already running; skipping duplicate trigger."}

    print(f"[API] Audio analysis triggered: {request.audio_url_}")
    # Reset in place so all readers of the shared dict see the pending state.
    latest_audio_result["prediction"] = None
    latest_audio_result["probabilities"] = None
    audio_result_pending = True

    executor.submit(
        background_audio_task,
        request.audio_url_,
        APP_CONFIG["AUDIO_ANALYSIS_DURATION_S"],
        vocal_model,
        vocal_device,
        latest_audio_result,
    )
    return {"message": "Audio analysis started"}
|
| 401 |
+
|
| 402 |
+
@app.get("/get_latest_audio_result")
async def get_latest_audio_result_endpoint():
    """Report the most recent audio analysis result, or its pending status.

    Status values: "analyzing" while a run is in flight, "completed" once a
    prediction exists, "no_data" otherwise.
    """
    if analysis_in_progress:
        return {"prediction": None, "probabilities": None, "status": "analyzing"}
    if latest_audio_result["prediction"] is not None:
        return {**latest_audio_result, "status": "completed"}
    return {"prediction": None, "probabilities": None, "status": "no_data"}
|
| 422 |
+
|
| 423 |
+
# Startup
|
| 424 |
+
@app.on_event("startup")
async def startup_event():
    """Application startup: load models, create the stream registry, start background tasks.

    Model-loading failures are logged but do not abort startup, so the HTTP
    endpoints still come up (streams depending on the missing model will fail
    at use time).
    """
    clear_sensor_data_json()

    global yolo_model, vocal_model, vocal_device, _device, stream_registry, audio_loop_task
    print("Trying to load model")

    _device = "cuda" if torch.cuda.is_available() else "cpu"

    # Load YOLO model
    try:
        yolo_model = YOLO(APP_CONFIG["YOLO_MODEL_PATH"])
        yolo_model.fuse()
        yolo_model.to(_device)
        try:
            # Half precision only helps on CUDA; harmless to skip on failure.
            if _device == "cuda" and hasattr(yolo_model, "model"):
                yolo_model.model.half() # FP16 on CUDA
                print("[Startup] yolo model run on FP16 CUDA")
        except Exception:
            pass
        print(f"YOLO model loaded on {_device}")
    except Exception as e:
        print(f"[Error] Failed to load YOLO model: {e}")

    # Load Vocal model
    try:
        vocal_device = torch.device(_device)
        vocal_model = load_model(APP_CONFIG["VOCAL_MODEL_PATH"]).to(vocal_device).eval()
        print(f"Vocal model loaded on {vocal_device}.")
    except Exception as e:
        print("[Error] Failed to load Vocal model", e)

    stream_registry = StreamRegistry()
    print("Models has been load succesfully")

    # Start single centralized audio analysis loop
    audio_loop_task = asyncio.create_task(periodic_audio_analyzer())

    # Run Notifications (Telegram) in a daemon thread, if enabled via env var.
    try:
        if NOTIFIER["ENABLE_TELEGRAM_NOTIFICATIONS"]:
            threading.Thread(target=notifications.main, daemon=True).start()
            print("[Notifications] started")
        else:
            print("[Notifications] Disabled (ENABLE_TELEGRAM_NOTIFICATIONS=False)")
    except Exception as e:
        print(f"[Notifications] Failed to start: {e}")
|
| 472 |
+
|
| 473 |
+
# CSV exporter helper and endpoints
|
| 474 |
+
def _parse_date_yyyy_mm_dd(s: str) -> int:
|
| 475 |
+
"""Parse 'YYYY-MM-DD' as midnight UTC (returns seconds)."""
|
| 476 |
+
try:
|
| 477 |
+
dt = datetime.strptime(s, "%Y-%m-%d").replace(tzinfo=timezone.utc)
|
| 478 |
+
return int(dt.timestamp())
|
| 479 |
+
except Exception:
|
| 480 |
+
raise HTTPException(status_code=400, detail="Dates must be in YYYY-MM-DD format")
|
| 481 |
+
|
| 482 |
+
def _fallback_offset_minutes() -> int:
|
| 483 |
+
off = datetime.now().astimezone().utcoffset() or timedelta(0)
|
| 484 |
+
return int(off.total_seconds() // 60)
|
| 485 |
+
|
| 486 |
+
@app.get("/metrics/available")
async def metrics_available(tz_offset_minutes: int | None = None):
    """Return the first and last day ('YYYY-MM-DD', in the client's local zone) with stored metrics.

    `tz_offset_minutes` is the client's UTC offset; when omitted, the server's
    own offset is used as a fallback.
    """
    min_ts, max_ts = metrics_store.get_bounds()
    if min_ts is None or max_ts is None:
        # Empty store — nothing to export yet.
        return {"min": None, "max": None}
    minutes = tz_offset_minutes if tz_offset_minutes is not None else _fallback_offset_minutes()
    offset = timedelta(minutes=int(minutes))
    # Shift UTC timestamps into the client's local zone before taking the date.
    min_day = (datetime.fromtimestamp(min_ts, tz=timezone.utc) + offset).date().isoformat()
    max_day = (datetime.fromtimestamp(max_ts, tz=timezone.utc) + offset).date().isoformat()
    return {"min": min_day, "max": max_day}
|
| 496 |
+
|
| 497 |
+
@app.get("/metrics/export")
async def export_metrics(
    start: str,
    end: str,
    camera_id: int | None = None,
    tz_offset_minutes: int = 7 * 60,
    raw: bool = True # ensure retained (we want to use raw data)
):
    """Stream raw metrics as CSV: datetime_local,camera_id,detected,dense_areas,inactive.

    `start`/`end` are inclusive local dates (YYYY-MM-DD); rows are fetched for
    the matching UTC range, then re-filtered by local date after applying the
    client's tz offset so day boundaries match the client's calendar.
    """
    start_ts_utc = _parse_date_yyyy_mm_dd(start)
    end_ts_utc = _parse_date_yyyy_mm_dd(end) + 86399 # 86399 (seconds) -> 24-hours - 1 sec

    rows = metrics_store.fetch_range(start_ts_utc, end_ts_utc, camera_id=camera_id)

    minutes = tz_offset_minutes if tz_offset_minutes is not None else _fallback_offset_minutes()
    offset_seconds = int(minutes) * 60

    start_local = datetime.strptime(start, "%Y-%m-%d").date()
    end_local = datetime.strptime(end, "%Y-%m-%d").date()

    def csv_iter_raw():
        # Reusable StringIO buffer: write one CSV row, yield it, reset —
        # keeps memory flat no matter how many rows are exported.
        sio = io.StringIO()
        w = csv.writer(sio)
        w.writerow(["datetime_local", "camera_id", "detected", "dense_areas", "inactive"])
        yield sio.getvalue(); sio.seek(0); sio.truncate(0)

        for ts, cam, det, dense, ina in rows:
            # Offset is baked into the timestamp; tz=utc is only used to avoid
            # a second, server-local shift from fromtimestamp.
            local_dt = datetime.fromtimestamp(ts + offset_seconds, tz=timezone.utc)
            d = local_dt.date()
            if start_local <= d <= end_local:
                w.writerow([local_dt.strftime("%Y-%m-%d %H:%M:%S"), cam, int(det), int(dense), int(ina)])
                yield sio.getvalue(); sio.seek(0); sio.truncate(0)

    fname = f"metrics_raw_{start}_to_{end}{('_cam' + str(camera_id)) if camera_id else ''}.csv"
    return StreamingResponse(
        csv_iter_raw(),
        media_type="text/csv",
        headers={"Content-Disposition": f'attachment; filename="{fname}"'},
    )
|
| 536 |
+
|
| 537 |
+
if __name__ == "__main__":
    # Local development entry point. In the Docker image, uvicorn is launched
    # directly via CMD (host 0.0.0.0, port 7860) and this branch never runs.
    import uvicorn
    uvicorn.run("main:app", reload=False)
|
| 540 |
+
|
models/Chicken_CNN_Disease_Detection_Model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:976749066e43bd72a2ffff18199f9d3deb6e9c066bcc1d6b222fb8d85d5c49cc
|
| 3 |
+
size 26077850
|
models/yolov8n.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d83cb483af507be3d9a195ed764422b06c1f59b23adbbb6c8307caa5208b333f
|
| 3 |
+
size 6234410
|
requirements.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch==2.6.0
|
| 2 |
+
torchvision==0.21.0
|
| 3 |
+
torchaudio==2.6.0
|
| 4 |
+
|
| 5 |
+
ultralytics==8.3.163
|
| 6 |
+
opencv-python==4.11.0.86
|
| 7 |
+
numpy==2.0.2
|
| 8 |
+
scikit-learn==1.7.1
|
| 9 |
+
|
| 10 |
+
boxmot==15.0.2
|
| 11 |
+
|
| 12 |
+
librosa==0.11.0
|
| 13 |
+
soundfile==0.13.1
|
| 14 |
+
yt-dlp==2025.7.21
|
| 15 |
+
|
| 16 |
+
fastapi==0.116.1
|
| 17 |
+
uvicorn[standard]==0.35.0
|
| 18 |
+
pydantic==2.11.7
|
| 19 |
+
|
| 20 |
+
python-telegram-bot==22.3
|
static/assets/logo-.jpeg
ADDED
|
Git LFS Details
|
static/css/style.css
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* ==========================================================================
   ChickSense dashboard stylesheet.
   Design tokens (colors, spacing, radii, shadows) are declared once on
   :root and reused by every component rule below.
   ========================================================================== */
:root {
    /* Surface and text palette (slate/blue, Tailwind-compatible hex values). */
    --bg-main: #f1f5f9;
    --bg-card: #ffffff;
    --text-primary: #1e293b;
    --text-secondary: #64748b;
    --border-color: #e2e8f0;
    --primary: #3b82f6;
    --primary-hover: #2563eb;

    /* Spacing scale. */
    --spacing-xs: 0.25rem;
    --spacing-sm: 0.5rem;
    --spacing-md: 0.75rem;
    --spacing-lg: 1rem;
    --spacing-xl: 1.25rem;

    /* Corner radii. */
    --radius-sm: 0.375rem;
    --radius-md: 0.5rem;
    --radius-lg: 0.75rem;

    /* Elevation shadows, smallest to largest. */
    --shadow-sm: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
    --shadow-md: 0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06);
    --shadow-lg: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
}

/* Base page typography and background. */
body {
    font-family: 'Inter', sans-serif;
    background-color: var(--bg-main);
    color: var(--text-primary);
    line-height: 1.5;
}

/* Generic card container used by every dashboard panel. */
.card {
    background-color: var(--bg-card);
    border: 1px solid var(--border-color);
    border-radius: var(--radius-lg);
    box-shadow: var(--shadow-sm);
}

.card-header {
    padding: var(--spacing-md) var(--spacing-lg);
    border-bottom: 1px solid var(--border-color);
}

/* Buttons ---------------------------------------------------------------- */
.btn {
    display: inline-flex;
    align-items: center;
    justify-content: center;
    gap: var(--spacing-sm);
    border-radius: var(--radius-md);
    padding: 0.6rem var(--spacing-lg);
    font-weight: 600;
    transition: all 0.2s ease-in-out;
    cursor: pointer;
    border: 1px solid transparent;
}

.btn-primary {
    background-color: var(--primary);
    color: #ffffff;
    border-color: var(--primary);
}

.btn-primary:hover {
    background-color: var(--primary-hover);
    border-color: var(--primary-hover);
}

/* Overlay toggle buttons (Deteksi / Kepadatan / Inaktivitas).
   Each button carries an outline icon and a solid icon; the `.active`
   state swaps which icon is visible. */
.control-button {
    transition: all 0.2s ease-in-out;
    background-color: #f8fafc;
    border: 1px solid var(--border-color);
    color: var(--text-secondary);
}

.control-button.active {
    background-color: var(--primary);
    color: #ffffff;
    border-color: var(--primary);
    box-shadow: 0 2px 5px rgba(59, 130, 246, 0.3);
}

.control-button .icon-solid {
    display: none;
}

.control-button.active .icon-outline {
    display: none;
}

.control-button.active .icon-solid {
    display: inline-block;
}

/* Camera label overlaid on each video tile. */
.cam-badge {
    position: absolute;
    left: 12px;
    bottom: 12px;
    background-color: rgba(15, 23, 42, 0.75);
    color: #ffffff;
    font-weight: 700;
    font-size: 0.75rem;
    padding: var(--spacing-xs) var(--spacing-sm);
    border-radius: var(--radius-sm);
    letter-spacing: 0.05em;
    z-index: 10;
    pointer-events: none;
}

/* Modals (settings and CSV-export dialogs).
   Hidden by default; JS adds `.show` to display them. */
.modal {
    position: fixed;
    inset: 0;
    display: none;
    z-index: 50;
}

.modal.show {
    display: block;
}

.modal-backdrop {
    position: absolute;
    inset: 0;
    background-color: rgba(15, 23, 42, 0.5);
    backdrop-filter: blur(2px);
}

.modal-container {
    position: absolute;
    inset: 0;
    display: flex;
    align-items: center;
    justify-content: center;
    padding: var(--spacing-lg);
}

.modal-close-btn {
    color: var(--text-secondary);
    background: transparent;
    border: none;
    font-size: 1.25rem;
    line-height: 1;
    cursor: pointer;
    padding: var(--spacing-xs);
    border-radius: var(--radius-sm);
}

.modal-close-btn:hover {
    color: var(--text-primary);
    background-color: #f1f5f9;
}

.modal-input {
    width: 100%;
    padding: var(--spacing-sm) var(--spacing-md);
    border: 1px solid var(--border-color);
    border-radius: var(--radius-sm);
    font-size: 0.875rem;
    background-color: #f8fafc;
    transition: all 0.2s ease-in-out;
}

.modal-input:focus {
    outline: 2px solid var(--primary);
    outline-offset: 2px;
    border-color: var(--primary);
    background-color: #ffffff;
}

/* Accessibility: effectively disable animation for users who request it. */
@media (prefers-reduced-motion: reduce) {
    * {
        animation-duration: 0.01ms !important;
        animation-iteration-count: 1 !important;
        transition-duration: 0.01ms !important;
    }
}

/* Tighten spacing on small screens. */
@media (max-width: 768px) {
    :root {
        --spacing-lg: 1rem;
        --spacing-xl: 1rem;
    }

    .card-header {
        padding: var(--spacing-sm) var(--spacing-md);
    }
}
static/demo/3_usa.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ff725a879421bf8498e8c76da02d0dde9632883b810a85f396fa3558d958ade2
|
| 3 |
+
size 13738686
|
static/demo/5_usa.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:89f6c7ea774b9fcac0d05a0220686d7c6c17db76f772809daab53725c1cce004
|
| 3 |
+
size 14045255
|
static/demo/6_usa.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6acaab10818e29036a5bc5809b23dee1d23cc8035992e32c7d3126a44b7df44b
|
| 3 |
+
size 13040025
|
static/demo/7_usa.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c4392b38c43a37586b46fceb48fa25b8f9b4ae7ccf2fd9586807a6bb1c381452
|
| 3 |
+
size 13456819
|
static/demo/audio.m4a
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5bf2def5961ddf5e3cf5dc7d1975b2528cda3497ebab4bbf364bbbe594818569
|
| 3 |
+
size 9032152
|
static/index.html
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>ChickSense</title>
|
| 7 |
+
<meta name="description" content=" Sistem Pemantauan Otomatis Kesehatan dan Kesejahteraan Ayam" />
|
| 8 |
+
|
| 9 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
| 10 |
+
<link rel="stylesheet" href="https://rsms.me/inter/inter.css" />
|
| 11 |
+
<link rel="stylesheet" href="/static/css/style.css">
|
| 12 |
+
</head>
|
| 13 |
+
<body class="p-4 lg:p-5">
|
| 14 |
+
<div class="max-w-screen-2xl mx-auto">
|
| 15 |
+
<header class="flex flex-col md:flex-row justify-between items-start md:items-center mb-4 gap-3">
|
| 16 |
+
<div class="flex items-center gap-3">
|
| 17 |
+
|
| 18 |
+
<!-- Logo -->
|
| 19 |
+
<div class="w-12 h-12 bg-amber-100 rounded-lg flex items-center justify-center border border-amber-200" aria-hidden="true">
|
| 20 |
+
<img src="/static/assets/logo-.jpeg" alt="ChickSense Logo" class="rounded-lg">
|
| 21 |
+
</div>
|
| 22 |
+
|
| 23 |
+
<!-- Judul dan Waktu -->
|
| 24 |
+
<div>
|
| 25 |
+
<h1 class="text-xl md:text-2xl font-bold text-slate-800">ChickSense Dashboard</h1>
|
| 26 |
+
<p id="datetime" class="text-xs md:text-sm text-slate-500" aria-live="polite">Tunggu memuat Waktu...</p>
|
| 27 |
+
</div>
|
| 28 |
+
</div>
|
| 29 |
+
</header>
|
| 30 |
+
|
| 31 |
+
<main class="grid grid-cols-1 lg:grid-cols-12 gap-4">
|
| 32 |
+
<section class="lg:col-span-9 flex flex-col gap-4" aria-label="Video streams and controls">
|
| 33 |
+
<!-- Grid Kamera 2x2 -->
|
| 34 |
+
<div class="grid grid-cols-1 md:grid-cols-2 gap-4" role="region">
|
| 35 |
+
<!-- Kamera 1 -->
|
| 36 |
+
<div class="card relative">
|
| 37 |
+
<div class="p-2">
|
| 38 |
+
<div class="aspect-video bg-slate-800 rounded-md">
|
| 39 |
+
<canvas id="video-canvas-0" class="w-full h-full rounded-md"></canvas>
|
| 40 |
+
</div>
|
| 41 |
+
<div class="cam-badge">CAM 1</div>
|
| 42 |
+
</div>
|
| 43 |
+
</div>
|
| 44 |
+
<!-- Kamera 2 -->
|
| 45 |
+
<div class="card relative">
|
| 46 |
+
<div class="p-2">
|
| 47 |
+
<div class="aspect-video bg-slate-800 rounded-md">
|
| 48 |
+
<canvas id="video-canvas-1" class="w-full h-full rounded-md"></canvas>
|
| 49 |
+
</div>
|
| 50 |
+
<div class="cam-badge">CAM 2</div>
|
| 51 |
+
</div>
|
| 52 |
+
</div>
|
| 53 |
+
<!-- Kamera 3 -->
|
| 54 |
+
<div class="card relative">
|
| 55 |
+
<div class="p-2">
|
| 56 |
+
<div class="aspect-video bg-slate-800 rounded-md">
|
| 57 |
+
<canvas id="video-canvas-2" class="w-full h-full rounded-md"></canvas>
|
| 58 |
+
</div>
|
| 59 |
+
<div class="cam-badge">CAM 3</div>
|
| 60 |
+
</div>
|
| 61 |
+
</div>
|
| 62 |
+
<!-- Kamera 4 -->
|
| 63 |
+
<div class="card relative">
|
| 64 |
+
<div class="p-2">
|
| 65 |
+
<div class="aspect-video bg-slate-800 rounded-md">
|
| 66 |
+
<canvas id="video-canvas-3" class="w-full h-full rounded-md"></canvas>
|
| 67 |
+
</div>
|
| 68 |
+
<div class="cam-badge">CAM 4</div>
|
| 69 |
+
</div>
|
| 70 |
+
</div>
|
| 71 |
+
</div>
|
| 72 |
+
|
| 73 |
+
<!-- Grid Kontrol Tampilan dan Log Sistem -->
|
| 74 |
+
<div class="grid grid-cols-1 md:grid-cols-2 gap-4">
|
| 75 |
+
<!-- Kontrol Tampilan -->
|
| 76 |
+
<div class="card" role="region">
|
| 77 |
+
<div class="card-header flex items-center justify-between">
|
| 78 |
+
<div class="flex items-center gap-2">
|
| 79 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5 text-slate-600" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 80 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M6 13.5V3.75m0 9.75a1.5 1.5 0 010 3m0-3a1.5 1.5 0 000 3m0 3.75V16.5m12-3V3.75m0 9.75a1.5 1.5 0 010 3m0-3a1.5 1.5 0 000 3m0 3.75V16.5m-6-9V3.75m0 3.75a1.5 1.5 0 010 3m0-3a1.5 1.5 0 000 3m0 9.75V10.5" />
|
| 81 |
+
</svg>
|
| 82 |
+
<h3 class="font-semibold text-slate-700 text-sm">Kontrol Tampilan</h3>
|
| 83 |
+
</div>
|
| 84 |
+
|
| 85 |
+
<!-- Button Setting Kamera -->
|
| 86 |
+
<button id="settings-button" title="Double-click to change camera URLs" class="p-1 text-slate-500 hover:text-blue-600 hover:bg-slate-300 rounded-md transition-colors">
|
| 87 |
+
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="#000000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
| 88 |
+
<circle cx="12" cy="12" r="3"></circle>
|
| 89 |
+
<path d="M19.4 15a1.65 1.65 0 0 0 .33 1.82l.06.06a2 2 0 0 1 0 2.83 2 2 0 0 1-2.83 0l-.06-.06a1.65 1.65 0 0 0-1.82-.33 1.65 1.65 0 0 0-1 1.51V21a2 2 0 0 1-2 2 2 2 0 0 1-2-2v-.09A1.65 1.65 0 0 0 9 19.4a1.65 1.65 0 0 0-1.82.33l-.06.06a2 2 0 0 1-2.83 0 2 2 0 0 1 0-2.83l.06-.06a1.65 1.65 0 0 0 .33-1.82 1.65 1.65 0 0 0-1.51-1H3a2 2 0 0 1-2-2 2 2 0 0 1 2-2h.09A1.65 1.65 0 0 0 4.6 9a1.65 1.65 0 0 0-.33-1.82l-.06-.06a2 2 0 0 1 0-2.83 2 2 0 0 1 2.83 0l.06.06a1.65 1.65 0 0 0 1.82.33H9a1.65 1.65 0 0 0 1-1.51V3a2 2 0 0 1 2-2 2 2 0 0 1 2 2v.09a1.65 1.65 0 0 0 1 1.51 1.65 1.65 0 0 0 1.82-.33l.06-.06a2 2 0 0 1 2.83 0 2 2 0 0 1 0 2.83l-.06.06a1.65 1.65 0 0 0-.33 1.82V9a1.65 1.65 0 0 0 1.51 1H21a2 2 0 0 1 2 2 2 2 0 0 1-2 2h-.09a1.65 1.65 0 0 0-1.51 1z"></path>
|
| 90 |
+
</svg>
|
| 91 |
+
</button>
|
| 92 |
+
</div>
|
| 93 |
+
|
| 94 |
+
<!-- Button Toggle -->
|
| 95 |
+
<div id="toggle-controls" class="p-3 grid grid-cols-3 gap-3">
|
| 96 |
+
<button id="toggle-detected" data-control="show_detected" class="control-button flex items-center justify-center gap-2 p-2.5 rounded-lg text-xs md:text-sm font-medium" aria-pressed="true">
|
| 97 |
+
<svg class="w-6 h-6 icon-outline" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 98 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M21 21l-5.197-5.197m0 0A7.5 7.5 0 105.196 5.196a7.5 7.5 0 0010.607 10.607z"/>
|
| 99 |
+
</svg>
|
| 100 |
+
<svg class="w-6 h-6 icon-solid" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
|
| 101 |
+
<path fill-rule="evenodd" d="M10.5 3.75a6.75 6.75 0 100 13.5 6.75 6.75 0 000-13.5zM2.25 10.5a8.25 8.25 0 1114.59 5.28l4.69 4.69a.75.75 0 11-1.06 1.06l-4.69-4.69A8.25 8.25 0 012.25 10.5z" clip-rule="evenodd"/>
|
| 102 |
+
</svg>
|
| 103 |
+
<span>Deteksi</span>
|
| 104 |
+
</button>
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
<button id="toggle-density" data-control="show_density" class="control-button flex items-center justify-center gap-2 p-2.5 rounded-lg text-xs md:text-sm font-medium" aria-pressed="false">
|
| 109 |
+
<svg class="w-6 h-6 icon-outline" xmlns="http://www.w3.org/2000/svg"
|
| 110 |
+
viewBox="0 0 512 512" fill="none" stroke="currentColor" stroke-width="24">
|
| 111 |
+
<circle cx="256" cy="256" r="240"/> <circle cx="220" cy="120" r="64"/> <circle cx="356" cy="92" r="34"/> <circle cx="420" cy="160" r="26"/> <circle cx="120" cy="100" r="28"/> <circle cx="104" cy="196" r="44"/> <circle cx="208" cy="240" r="28"/>
|
| 112 |
+
<circle cx="320" cy="236" r="44"/> <circle cx="420" cy="236" r="52"/> <circle cx="180" cy="372" r="88"/> <circle cx="300" cy="372" r="40"/> <circle cx="408" cy="372" r="52"/>
|
| 113 |
+
<circle cx="272" cy="460" r="28"/> <circle cx="88" cy="288" r="20"/>
|
| 114 |
+
</svg>
|
| 115 |
+
<svg class="w-6 h-6 icon-solid" xmlns="http://www.w3.org/2000/svg"
|
| 116 |
+
viewBox="0 0 512 512" fill="none" stroke="currentColor" stroke-width="24">
|
| 117 |
+
<circle cx="256" cy="256" r="240"/> <circle cx="220" cy="120" r="64"/> <circle cx="356" cy="92" r="34"/> <circle cx="420" cy="160" r="26"/> <circle cx="120" cy="100" r="28"/> <circle cx="104" cy="196" r="44"/> <circle cx="208" cy="240" r="28"/>
|
| 118 |
+
<circle cx="320" cy="236" r="44"/> <circle cx="420" cy="236" r="52"/> <circle cx="180" cy="372" r="88"/> <circle cx="300" cy="372" r="40"/> <circle cx="408" cy="372" r="52"/>
|
| 119 |
+
<circle cx="272" cy="460" r="28"/> <circle cx="88" cy="288" r="20"/>
|
| 120 |
+
</svg>
|
| 121 |
+
|
| 122 |
+
<span>Kepadatan</span>
|
| 123 |
+
</button>
|
| 124 |
+
|
| 125 |
+
<button id="toggle-inactive" data-control="show_inactive" class="control-button flex items-center justify-center gap-2 p-2.5 rounded-lg text-xs md:text-sm font-medium" aria-pressed="false">
|
| 126 |
+
<svg class="w-6 h-6 icon-outline" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 127 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M12 6v6h4.5m4.5 0a9 9 0 11-18 0 9 9 0 0118 0z"/>
|
| 128 |
+
</svg>
|
| 129 |
+
<svg class="w-6 h-6 icon-solid" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
|
| 130 |
+
<path fill-rule="evenodd" d="M12 2.25c-5.385 0-9.75 4.365-9.75 9.75s4.365 9.75 9.75 9.75 9.75-4.365 9.75-9.75S17.385 2.25 12 2.25zM12.75 6a.75.75 0 00-1.5 0v6c0 .414.336.75.75.75h4.5a.75.75 0 000-1.5h-3.75V6z" clip-rule="evenodd"/>
|
| 131 |
+
</svg>
|
| 132 |
+
<span>Inaktivitas</span>
|
| 133 |
+
</button>
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
|
| 137 |
+
<!-- Sistem Log Chart -->
|
| 138 |
+
<div class="card" role="region">
|
| 139 |
+
<div class="card-header flex items-center gap-2">
|
| 140 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5 text-slate-600" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 141 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M9.879 7.519c1.171-1.025 3.071-1.025 4.242 0 1.172 1.025 1.172 2.687 0 3.712-.203.179-.43.326-.67.442-.745.361-1.45.999-1.45 1.827v.75M21 12a9 9 0 11-18 0 9 9 0 0118 0zm-9 5.25h.008v.008H12v-.008z" />
|
| 142 |
+
</svg>
|
| 143 |
+
<h3 class="font-semibold text-slate-700 text-sm">Log Sistem</h3>
|
| 144 |
+
</div>
|
| 145 |
+
<div id="system-log" class="p-4 text-xs text-slate-500 space-y-2 h-28 overflow-y-auto" role="log" aria-live="polite"></div>
|
| 146 |
+
</div>
|
| 147 |
+
</div>
|
| 148 |
+
</section>
|
| 149 |
+
|
| 150 |
+
<aside class="lg:col-span-3 flex flex-col gap-4" aria-label="Analysis and export panels">
|
| 151 |
+
<!-- Card Ringkasan Analisis -->
|
| 152 |
+
<div class="card" role="region">
|
| 153 |
+
<div class="card-header flex items-center gap-2">
|
| 154 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5 text-blue-600" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 155 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M3.75 12h16.5m-16.5 3.75h16.5M3.75 19.5h16.5M5.625 4.5h12.75a1.875 1.875 0 010 3.75H5.625a1.875 1.875 0 010-3.75z" />
|
| 156 |
+
</svg>
|
| 157 |
+
<h3 class="font-semibold text-slate-700 text-sm">Ringkasan Analisis</h3>
|
| 158 |
+
</div>
|
| 159 |
+
<div class="p-4 space-y-3 text-sm">
|
| 160 |
+
<!-- Jumlah Terdeteksi -->
|
| 161 |
+
<div class="flex items-center gap-3 p-2.5 bg-slate-50 rounded-lg">
|
| 162 |
+
<div class="w-9 h-9 bg-blue-100 text-blue-600 rounded-md flex items-center justify-center flex-shrink-0">
|
| 163 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor">
|
| 164 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M21 21l-5.197-5.197m0 0A7.5 7.5 0 105.196 5.196a7.5 7.5 0 0010.607 10.607z" />
|
| 165 |
+
</svg>
|
| 166 |
+
</div>
|
| 167 |
+
<div>
|
| 168 |
+
<p class="text-slate-500 text-xs font-medium">Jumlah Terdeteksi</p>
|
| 169 |
+
<p id="detected-count" class="text-lg font-bold text-slate-800">0</p>
|
| 170 |
+
</div>
|
| 171 |
+
</div>
|
| 172 |
+
<!-- Kluster Kepadatan -->
|
| 173 |
+
<div class="flex items-center gap-3 p-2.5 bg-slate-50 rounded-lg">
|
| 174 |
+
<div class="w-9 h-9 bg-amber-100 text-amber-600 rounded-md flex items-center justify-center flex-shrink-0">
|
| 175 |
+
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" fill="none" stroke="currentColor" stroke-width="24">
|
| 176 |
+
<circle cx="256" cy="256" r="240"/> <circle cx="220" cy="120" r="64"/> <circle cx="356" cy="92" r="34"/> <circle cx="420" cy="160" r="26"/> <circle cx="120" cy="100" r="28"/> <circle cx="104" cy="196" r="44"/>
|
| 177 |
+
<circle cx="208" cy="240" r="28"/> <circle cx="320" cy="236" r="44"/> <circle cx="420" cy="236" r="52"/> <circle cx="180" cy="372" r="88"/> <circle cx="300" cy="372" r="40"/> <circle cx="408" cy="372" r="52"/>
|
| 178 |
+
<circle cx="272" cy="460" r="28"/> <circle cx="88" cy="288" r="20"/>
|
| 179 |
+
</svg>
|
| 180 |
+
</div>
|
| 181 |
+
<div>
|
| 182 |
+
<p class="text-slate-500 text-xs font-medium">Kluster Kepadatan</p>
|
| 183 |
+
<p id="density-count" class="text-lg font-bold text-slate-800">0</p>
|
| 184 |
+
</div>
|
| 185 |
+
</div>
|
| 186 |
+
<!-- Ayam Tidak Aktif -->
|
| 187 |
+
<div class="flex items-center gap-3 p-2.5 bg-slate-50 rounded-lg">
|
| 188 |
+
<div class="w-9 h-9 bg-red-100 text-red-600 rounded-md flex items-center justify-center flex-shrink-0">
|
| 189 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor">
|
| 190 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M12 6v6h4.5m4.5 0a9 9 0 11-18 0 9 9 0 0118 0z" />
|
| 191 |
+
</svg>
|
| 192 |
+
</div>
|
| 193 |
+
<div>
|
| 194 |
+
<p class="text-slate-500 text-xs font-medium">Ayam Tidak Aktif</p>
|
| 195 |
+
<p id="inactive-count" class="text-lg font-bold text-slate-800">0</p>
|
| 196 |
+
</div>
|
| 197 |
+
</div>
|
| 198 |
+
</div>
|
| 199 |
+
</div>
|
| 200 |
+
<!-- Card Analisis Vokalisasi -->
|
| 201 |
+
<div class="card" role="region">
|
| 202 |
+
<div class="card-header flex items-center gap-2">
|
| 203 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5 text-green-600" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 204 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M19.114 5.636a9 9 0 010 12.728M16.463 8.288a5.25 5.25 0 010 7.424M6.75 8.25l4.72-4.72a.75.75 0 011.28.53v15.88a.75.75 0 01-1.28.53l-4.72-4.72H4.51c-.88 0-1.704-.507-1.938-1.354A9.01 9.01 0 012.25 12c0-.83.112-1.633.322-2.396C2.806 8.756 3.63 8.25 4.51 8.25H6.75z" />
|
| 205 |
+
</svg>
|
| 206 |
+
<h3 class="font-semibold text-slate-700 text-sm">Analisis Vokalisasi</h3>
|
| 207 |
+
</div>
|
| 208 |
+
|
| 209 |
+
<div id="vocalization-content" class="p-4">
|
| 210 |
+
<!-- Populated by app.js -->
|
| 211 |
+
</div>
|
| 212 |
+
</div>
|
| 213 |
+
|
| 214 |
+
<!-- Card Ekspor Data Metrik -->
|
| 215 |
+
<div class="card" role="region">
|
| 216 |
+
<div class="card-header flex items-center gap-2">
|
| 217 |
+
<svg xmlns="http://www.w3.org/2000/svg" class="w-5 h-5 text-slate-600" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
| 218 |
+
<path stroke-linecap="round" stroke-linejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3" />
|
| 219 |
+
</svg>
|
| 220 |
+
<h3 class="font-semibold text-slate-700 text-sm">Ekspor Data Metrik (CSV)</h3>
|
| 221 |
+
</div>
|
| 222 |
+
<div class="p-4">
|
| 223 |
+
<button id="open-export-modal-btn" class="btn btn-primary w-full text-sm">
|
| 224 |
+
Ekspor CSV
|
| 225 |
+
</button>
|
| 226 |
+
<p class="text-[11px] text-slate-500 mt-2 text-center">Ekspor data dengan double-klik tombol diatas.</p>
|
| 227 |
+
</div>
|
| 228 |
+
</div>
|
| 229 |
+
</aside>
|
| 230 |
+
</main>
|
| 231 |
+
</div>
|
| 232 |
+
|
| 233 |
+
<!-- Modal Section -->
|
| 234 |
+
<div id="settings-modal" class="modal z-50" role="dialog" aria-labelledby="settings-modal-title" aria-hidden="true">
|
| 235 |
+
<div class="modal-backdrop"></div>
|
| 236 |
+
<div class="modal-container">
|
| 237 |
+
<div class="card w-full max-w-lg">
|
| 238 |
+
<div class="card-header flex items-center justify-between">
|
| 239 |
+
<h3 id="settings-modal-title" class="font-semibold text-slate-700 text-sm">Pengaturan URL</h3>
|
| 240 |
+
<button class="modal-close-btn" aria-label="Close">✕</button>
|
| 241 |
+
</div>
|
| 242 |
+
<div class="p-4 space-y-4 text-sm">
|
| 243 |
+
<div>
|
| 244 |
+
<label for="cam1-url" class="block text-xs font-medium text-slate-600 mb-1">URL Kamera 1</label>
|
| 245 |
+
<input type="text" id="cam1-url" class="modal-input" placeholder="Masukkan URL untuk Kamera 1">
|
| 246 |
+
</div>
|
| 247 |
+
<div>
|
| 248 |
+
<label for="cam2-url" class="block text-xs font-medium text-slate-600 mb-1">URL Kamera 2</label>
|
| 249 |
+
<input type="text" id="cam2-url" class="modal-input" placeholder="Masukkan URL untuk Kamera 2">
|
| 250 |
+
</div>
|
| 251 |
+
<div>
|
| 252 |
+
<label for="cam3-url" class="block text-xs font-medium text-slate-600 mb-1">URL Kamera 3</label>
|
| 253 |
+
<input type="text" id="cam3-url" class="modal-input" placeholder="Masukkan URL untuk Kamera 3">
|
| 254 |
+
</div>
|
| 255 |
+
<div>
|
| 256 |
+
<label for="cam4-url" class="block text-xs font-medium text-slate-600 mb-1">URL Kamera 4</label>
|
| 257 |
+
<input type="text" id="cam4-url" class="modal-input" placeholder="Masukkan URL untuk Kamera 4">
|
| 258 |
+
</div>
|
| 259 |
+
<hr class="border-slate-200">
|
| 260 |
+
<div>
|
| 261 |
+
<label for="audio-url" class="block text-xs font-medium text-slate-600 mb-1">URL Audio</label>
|
| 262 |
+
<input type="text" id="audio-url" class="modal-input" placeholder="Masukkan URL untuk Audio Source">
|
| 263 |
+
</div>
|
| 264 |
+
<div class="flex justify-end gap-2 pt-2">
|
| 265 |
+
<button id="stop-all-streams-btn" class="btn bg-red-600 hover:bg-red-700 text-white text-sm">Stop Semua Stream</button>
|
| 266 |
+
<button id="save-settings-btn" class="btn btn-primary text-sm">Simpan & Mulai Ulang</button>
|
| 267 |
+
</div>
|
| 268 |
+
</div>
|
| 269 |
+
</div>
|
| 270 |
+
</div>
|
| 271 |
+
</div>
|
| 272 |
+
|
| 273 |
+
<div id="export-modal" class="modal z-50" role="dialog" aria-labelledby="export-modal-title" aria-hidden="true">
|
| 274 |
+
<div class="modal-backdrop"></div>
|
| 275 |
+
<div class="modal-container">
|
| 276 |
+
<div class="card w-full max-w-md">
|
| 277 |
+
<div class="card-header flex items-center justify-between">
|
| 278 |
+
<h3 id="export-modal-title" class="font-semibold text-slate-700 text-sm">Ekspor Data Metrik</h3>
|
| 279 |
+
<button class="modal-close-btn" aria-label="Close">✕</button>
|
| 280 |
+
</div>
|
| 281 |
+
<div class="p-4 space-y-3 text-sm">
|
| 282 |
+
<div>
|
| 283 |
+
<label for="start-date" class="block text-xs font-medium text-slate-600 mb-1">Tanggal Mulai</label>
|
| 284 |
+
<input type="date" id="start-date" class="modal-input">
|
| 285 |
+
</div>
|
| 286 |
+
<div>
|
| 287 |
+
<label for="end-date" class="block text-xs font-medium text-slate-600 mb-1">Tanggal Akhir</label>
|
| 288 |
+
<input type="date" id="end-date" class="modal-input">
|
| 289 |
+
</div>
|
| 290 |
+
<div>
|
| 291 |
+
<label for="camera-select" class="block text-xs font-medium text-slate-600 mb-1">Pilih Kamera</label>
|
| 292 |
+
<select id="camera-select" class="modal-input bg-white">
|
| 293 |
+
<option value="">Semua Kamera</option>
|
| 294 |
+
<option value="1">Kamera 1</option>
|
| 295 |
+
<option value="2">Kamera 2</option>
|
| 296 |
+
<option value="3">Kamera 3</option>
|
| 297 |
+
<option value="4">Kamera 4</option>
|
| 298 |
+
</select>
|
| 299 |
+
</div>
|
| 300 |
+
<button id="download-csv-btn" class="btn btn-primary w-full">Unduh CSV</button>
|
| 301 |
+
</div>
|
| 302 |
+
</div>
|
| 303 |
+
</div>
|
| 304 |
+
</div>
|
| 305 |
+
<script src="/static/js/app.js"></script>
|
| 306 |
+
|
| 307 |
+
</body>
|
| 308 |
+
</html>
|
static/js/app.js
ADDED
|
@@ -0,0 +1,682 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// --- Static configuration ---
// NOTE: the WebSocket scheme must match the page's security context. A page
// served over HTTPS (e.g. a Hugging Face Space) may only open wss:// sockets,
// so the scheme is derived from window.location.protocol instead of being
// hard-coded to ws://, which the browser would block with a mixed-content error.
const CONFIG = {
    API_URL: "",
    WEBSOCKET_URL: (window.location.protocol === "https:" ? "wss://" : "ws://")
        + window.location.host + "/ws/video_feed",
    TRIGGER_AUDIO_URL: "/trigger_audio_analysis",
    GET_AUDIO_RESULT_URL: "/get_latest_audio_result",
    POLLING_INTERVAL_MS: 30000, // audio-result poll period (ms)
    // Bundled demo clips used until the operator configures real camera URLs.
    DEFAULT_VIDEO_URLS: [
        "/static/demo/3_usa.mp4",
        "/static/demo/5_usa.mp4",
        "/static/demo/7_usa.mp4",
        "/static/demo/6_usa.mp4",
    ],
    DEFAULT_AUDIO_URL: "/static/demo/audio.m4a",
};
|
| 15 |
+
|
| 16 |
+
// --- Global State ---
|
| 17 |
+
// --- Global State ---
// Mutable runtime state shared across the UI. The overlay flags mirror the
// backend display settings; per-camera stats feed the daily-analysis totals.
const state = {
    // Overlay toggles sent to the backend with every stream.
    show_detected: true,
    show_density: false,
    show_inactive: false,

    isStreaming: false,
    cameraWebSockets: [null, null, null, null], // one socket per camera slot
    audioPollInterval: null,                    // setInterval handle for audio polling

    cameraUrls: [...CONFIG.DEFAULT_VIDEO_URLS],
    audioUrl: CONFIG.DEFAULT_AUDIO_URL,

    // Latest per-camera stats pushed by the backend over the WebSocket.
    cameraStats: Array(4).fill().map(() => ({
        detected: 0,
        inactive: 0,
        dense_areas: 0
    })),

    lastVocalization: null,
    isAnalyzingAudio: false,
    lastAnalyzedAudioUrl: null,
    lastAnalysisTimestamp: null,
    // Fix: declare the dedup key used by fetchLatestAudioResult() so it is an
    // explicit part of the state shape instead of being attached ad hoc.
    lastAudioResultKey: null,

    lastInactiveAlert: {},      // cameraId -> bool, alert hysteresis per camera
    INACTIVE_THRESHOLD: 0.15 //percentage
};
|
| 43 |
+
|
| 44 |
+
// --- DOM Elements ---
|
| 45 |
+
// --- DOM Elements ---
// Tiny query helper; returns null when the selector matches nothing.
const $ = (selector) => document.querySelector(selector);

// One-time cache of every DOM node the script touches. Each entry may be null
// when the corresponding element is absent from the page, so all users of
// this cache null-check before writing.
const DOMElements = { // Cache
    datetime: $('#datetime'),

    // Four camera canvases; 2D contexts are filled in right below.
    canvases: Array.from({ length: 4 }, (_, i) => $(`#video-canvas-${i}`)),
    contexts: [],

    toggleControls: $('#toggle-controls'),
    settingsButton: $('#settings-button'),

    // Daily-analysis counters in the header.
    detectedCount: $('#detected-count'),
    densityCount: $('#density-count'),
    inactiveCount: $('#inactive-count'),

    vocalizationContent: $('#vocalization-content'),

    systemLog: $('#system-log'),

    // Settings modal and its inputs (note: input ids are 1-based, cam1..cam4).
    settingsModal: $('#settings-modal'),
    saveSettingsBtn: $('#save-settings-btn'),
    stopAllStreamsBtn: $('#stop-all-streams-btn'),
    camUrlInputs: Array.from({ length: 4 }, (_, i) => $(`#cam${i+1}-url`)),
    audioUrlInput: $('#audio-url'),

    // CSV export modal and its controls.
    exportModal: $('#export-modal'),
    openExportModalBtn: $('#open-export-modal-btn'),
    startDateInput: $('#start-date'),
    endDateInput: $('#end-date'),
    cameraSelect: $('#camera-select'),
    downloadCsvBtn: $('#download-csv-btn'),
};

// Init canvas contexts (null for any canvas missing from the page).
DOMElements.contexts = DOMElements.canvases.map(canvas =>
    canvas ? canvas.getContext('2d') : null
);
|
| 82 |
+
|
| 83 |
+
// --- Utils Functions ---
|
| 84 |
+
// Render the current date/time into the header, formatted for the id-ID locale.
function updateDateTime() {
    const el = DOMElements.datetime;
    if (!el) return;

    const now = new Date();
    el.textContent = now.toLocaleString('id-ID', { dateStyle: 'full', timeStyle: 'short' });
}
|
| 92 |
+
|
| 93 |
+
// Prepend a timestamped entry to the system log panel, keeping at most 30 rows.
// `type` selects the text colour: 'info' (default), 'warning' or 'danger'.
// `message` may contain HTML (callers use <strong> for emphasis).
function addLog(message, type = 'info') {
    const log = DOMElements.systemLog;
    if (!log) return;

    const classes = {
        info: 'text-slate-500',
        warning: 'text-amber-600',
        danger: 'text-red-600 font-semibold'
    };

    const entry = document.createElement('p');
    entry.className = classes[type] || classes.info;

    const timestamp = new Date().toLocaleTimeString('id-ID', { hour12: false });
    entry.innerHTML = `<span class="font-semibold text-slate-400 mr-2">${timestamp}</span> ${message}`;

    log.prepend(entry);

    // Drop the oldest entry once the cap is exceeded.
    if (log.children.length > 30) {
        log.removeChild(log.lastChild);
    }
}
|
| 113 |
+
|
| 114 |
+
// Sync the overlay toggle buttons with `state`: an active button gets its
// control-specific colour, an inactive one falls back to the stylesheet defaults.
function updateToggleButtons() {
    const COLORS = {
        show_detected: '#22c55e', // Green
        show_density: '#f97316', // Orange
        show_inactive: '#ef4444' // Red
    };

    for (const btn of DOMElements.toggleControls.querySelectorAll('button')) {
        const key = btn.dataset.control;
        const active = state[key];

        btn.classList.toggle('active', active);
        btn.setAttribute('aria-pressed', active);

        if (active) {
            const tint = COLORS[key];
            btn.style.backgroundColor = tint;
            btn.style.borderColor = tint;
            btn.style.color = '#ffffff';
        } else {
            // Clearing the inline styles lets the CSS classes take over again.
            btn.style.backgroundColor = '';
            btn.style.borderColor = '';
            btn.style.color = '';
        }
    }
}
|
| 141 |
+
|
| 142 |
+
// Aggregate the per-camera stats into farm-wide totals and write them into
// the daily-analysis counters. Missing fields count as zero.
function updateDailyAnalysisUI() {
    let detected = 0;
    let inactive = 0;
    let denseAreas = 0;

    for (const stats of state.cameraStats) {
        detected += stats.detected || 0;
        inactive += stats.inactive || 0;
        denseAreas += stats.dense_areas || 0;
    }

    if (DOMElements.detectedCount) DOMElements.detectedCount.textContent = detected;
    if (DOMElements.inactiveCount) DOMElements.inactiveCount.textContent = inactive;
    if (DOMElements.densityCount) DOMElements.densityCount.textContent = denseAreas;
}
|
| 153 |
+
|
| 154 |
+
// Render the vocalization-analysis panel from a backend audio result.
// Handles four placeholder states (no data, analyzing, empty, error) before
// rendering the probability bars, and fires a one-shot "Unhealthy" log alert
// (reset only by a subsequent "Healthy" result) via state.lastVocalization.
function displayAudioResults(data) {
    if (!DOMElements.vocalizationContent) return;

    if (!data) {
        DOMElements.vocalizationContent.innerHTML = `<p class="text-sm text-slate-500">No response from server.</p>`;
        return;
    }

    if (data.status === "analyzing") {
        DOMElements.vocalizationContent.innerHTML = `<p class="text-sm text-amber-600">Analisis vokalisasi sedang berlangsung...</p>`;
        return;
    }

    if (data.status === "no_data" || data.prediction === null) {
        DOMElements.vocalizationContent.innerHTML = `<p class="text-sm text-slate-500">Menunggu analisis vokalisasi berlangsung...</p>`;
        return;
    }

    if (data.prediction === "Error") {
        DOMElements.vocalizationContent.innerHTML = `<p class="text-sm text-red-600">Error during audio analysis.</p>`;
        return;
    }

    const probabilities = data.probabilities || {};
    // Backend labels -> localized display text and Tailwind colour stem.
    const statusMap = {
        'Healthy': { text: 'Sehat', color: 'green' },
        'Unhealthy': { text: 'Tidak Sehat', color: 'red' },
        'Noise': { text: 'Bising', color: 'amber' },
    };

    // Unknown labels are shown verbatim in neutral slate.
    const dominantStatus = statusMap[data.prediction] || {
        text: data.prediction,
        color: 'slate'
    };

    // Alert once per unhealthy episode; repeated "Unhealthy" results stay silent
    // until a "Healthy" result resets the latch.
    if(
        (data.prediction === "Unhealthy" || dominantStatus.text === "Tidak Sehat") &&
        state.lastVocalization !== "Unhealthy"
    ){
        addLog("Status Vokal: <strong>Tidak Sehat Terdeteksi</strong>", "danger");
        state.lastVocalization = "Unhealthy";
    }
    else if (data.prediction === "Healthy"){
        state.lastVocalization = "Healthy";
    }

    // One labelled progress bar per class probability.
    let barsHtml = '';
    for (const [key, value] of Object.entries(probabilities)) {
        const status = statusMap[key] || { text: key, color: 'slate' };
        const percentage = (value * 100).toFixed(1);

        barsHtml += `
            <div>
                <div class="flex justify-between text-xs mb-1">
                    <span class="font-medium text-${status.color}-700">${status.text}</span>
                    <span>${percentage}%</span>
                </div>
                <div class="w-full bg-slate-200 rounded-full h-2">
                    <div class="bg-${status.color}-500 h-2 rounded-full" style="width:${percentage}%"></div>
                </div>
            </div>
        `;
    }

    DOMElements.vocalizationContent.innerHTML = `
        <div class="flex items-center justify-between mb-3">
            <span class="text-slate-500 text-xs">Status Dominan:</span>
            <span class="font-bold text-base text-${dominantStatus.color}-600">${dominantStatus.text}</span>
        </div>
        <div class="space-y-2">${barsHtml}</div>
    `;
}
|
| 228 |
+
|
| 229 |
+
// --- Modal function ---
|
| 230 |
+
// Show a modal dialog and lock page scrolling behind it.
function openModal(modalEl) {
    modalEl.setAttribute('aria-hidden', 'false');
    modalEl.classList.add('show');
    document.body.classList.add('overflow-hidden');
}

// Hide a modal dialog and restore page scrolling.
function closeModal(modalEl) {
    modalEl.setAttribute('aria-hidden', 'true');
    modalEl.classList.remove('show');
    document.body.classList.remove('overflow-hidden');
}
|
| 241 |
+
|
| 242 |
+
// Copy the current camera/audio source URLs from state into the settings form.
function populateSettingsModal() {
    DOMElements.camUrlInputs.forEach((input, idx) => {
        if (!input) return;
        input.value = state.cameraUrls[idx] || '';
    });

    const audioInput = DOMElements.audioUrlInput;
    if (audioInput) audioInput.value = state.audioUrl || '';
}
|
| 251 |
+
|
| 252 |
+
// Make Local Storage for url when change
|
| 253 |
+
// Restore camera/audio source URLs from localStorage (if previously saved).
// Falls back to the bundled demo URLs when the saved value is missing or
// malformed, so a stale or hand-edited localStorage entry can never leave
// state.cameraUrls in a broken shape.
function loadSettingsFromStorage() {
    const savedCameraUrls = localStorage.getItem('chickSenseCameraUrls');
    const savedAudioUrl = localStorage.getItem('chickSenseAudioUrl');

    if (savedCameraUrls) {
        try {
            const parsed = JSON.parse(savedCameraUrls);
            // Fix: validate the parsed value — it must be an array with one
            // entry per camera slot, otherwise downstream indexing breaks.
            if (!Array.isArray(parsed) || parsed.length !== state.cameraUrls.length) {
                throw new Error("Saved camera URLs have an unexpected shape");
            }
            state.cameraUrls = parsed;
        } catch (e) {
            console.error("Failed to parse saved camera URLs, using defaults.", e);
            state.cameraUrls = [...CONFIG.DEFAULT_VIDEO_URLS];
        }
    }
    if (savedAudioUrl) {
        state.audioUrl = savedAudioUrl;
    }
    addLog('Settings dimuat dari penyimpanan browser', 'info');
}
|
| 270 |
+
|
| 271 |
+
// --- Websocket Streaming functions ---
|
| 272 |
+
// Open a WebSocket for one camera slot and wire up its full lifecycle.
// On open it sends a 'start_stream' command carrying the source URLs and the
// current overlay flags; afterwards the server pushes binary video frames
// (rendered by handleBlobMessage) interleaved with JSON stats/status text
// messages (handled by handleTextMessage).
function connectWebSocket(cameraIndex) {
    const videoUrl = state.cameraUrls[cameraIndex];
    if (!videoUrl) {
        console.warn(`No URL for Camera ${cameraIndex + 1}, skipping connection.`);
        return;
    }

    // Replace any existing connection for this slot before reconnecting.
    if (state.cameraWebSockets[cameraIndex]) {
        state.cameraWebSockets[cameraIndex].close();
    }

    try {
        const ws = new WebSocket(CONFIG.WEBSOCKET_URL);
        ws.binaryType = "blob"; // frames arrive as Blob, not ArrayBuffer
        state.cameraWebSockets[cameraIndex] = ws;

        ws.onopen = () => {
            addLog(`Menghubungkan ke Kamera ${cameraIndex + 1}`, 'info');

            // Handshake: tell the backend which source to stream and which
            // overlays to draw. camera_id is 1-based on the backend side.
            ws.send(JSON.stringify({
                type: 'start_stream',
                video_url: videoUrl,
                audio_url: state.audioUrl,
                show_detected: state.show_detected,
                show_density: state.show_density,
                show_inactive: state.show_inactive,
                camera_id: cameraIndex + 1
            }));

            state.isStreaming = true;
        };

        ws.onmessage = (event) => {
            if (event.data instanceof Blob) {
                handleBlobMessage(event.data, cameraIndex);
            } else if (typeof event.data === 'string') {
                handleTextMessage(event.data, cameraIndex);
            }
        };

        ws.onclose = () => {
            addLog(`Kamera ${cameraIndex + 1} terputus.`, 'warning');

            // Blank the canvas and zero this camera's contribution to totals.
            const ctx = DOMElements.contexts[cameraIndex];
            if (ctx) ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);

            state.cameraWebSockets[cameraIndex] = null;
            state.cameraStats[cameraIndex] = { detected: 0, inactive: 0, dense_areas: 0 };
            updateDailyAnalysisUI();

            // Still streaming only if at least one other socket remains open.
            state.isStreaming = state.cameraWebSockets.some(ws => ws !== null);
            if (!state.isStreaming) {
                addLog("Semua streams terputus.", 'danger');
            }
        };

        ws.onerror = (error) => {
            // onclose fires after onerror, so cleanup happens there.
            console.error(`WebSocket Error for Camera ${cameraIndex + 1}:`, error);
            addLog(`Koneksi gagal untuk Kamera ${cameraIndex + 1}.`, 'danger');
        };
    } catch (error) {
        // new WebSocket() itself can throw on a malformed URL.
        console.error(`Failed to create WebSocket for Camera ${cameraIndex + 1}:`, error);
        addLog(`Gagal terhubung ke Kamera ${cameraIndex + 1}.`, 'danger');
    }
}
|
| 337 |
+
|
| 338 |
+
// Draw a binary video frame received over the WebSocket onto the camera's
// canvas, always releasing the temporary object URL (success or failure).
function handleBlobMessage(blob, cameraIndex) {
    const objectUrl = URL.createObjectURL(blob);
    const frame = new Image();

    frame.onload = () => {
        const ctx = DOMElements.contexts[cameraIndex];
        if (ctx) {
            // Resize the canvas to the frame so nothing is cropped or scaled.
            ctx.canvas.width = frame.width;
            ctx.canvas.height = frame.height;
            ctx.drawImage(frame, 0, 0);
        }
        URL.revokeObjectURL(objectUrl);
    };

    frame.onerror = () => {
        console.error(`Failed to load image for Camera ${cameraIndex + 1}`);
        URL.revokeObjectURL(objectUrl);
    };

    frame.src = objectUrl;
}
|
| 359 |
+
|
| 360 |
+
// Handle a JSON text message from a camera WebSocket.
// 'stats' messages update this camera's counters and drive a per-camera
// inactivity alert with hysteresis (alert once when the inactive ratio
// crosses INACTIVE_THRESHOLD, re-arm once it drops back below).
// 'status' messages are echoed to the system log, except the noisy
// "Display settings updated" ack which is suppressed.
function handleTextMessage(data, cameraIndex) {
    try {
        const parsedData = JSON.parse(data);

        if (parsedData.type === 'stats') {
            // Coerce to numbers defensively; missing fields count as zero.
            const stats = {
                detected: Number(parsedData.detected || 0),
                inactive: Number(parsedData.inactive || 0),
                dense_areas: Number(parsedData.dense_areas || 0),
            };

            state.cameraStats[cameraIndex] = stats;
            updateDailyAnalysisUI();

            // Ratio is only meaningful when at least one bird was detected.
            if (stats.detected > 0) {
                const inactiveRatio = stats.inactive / stats.detected;
                const percent = Math.round(inactiveRatio * 100);
                const cameraId = cameraIndex + 1;

                const isAboveThreshold = inactiveRatio > state.INACTIVE_THRESHOLD;
                const hasAlerted = state.lastInactiveAlert[cameraId];

                if (isAboveThreshold && !hasAlerted){
                    const message = `Camera ${cameraId}: <strong> Persentase ayam tidak aktif cukup tinggi (${percent}%) </strong>`;

                    addLog(message, "danger");
                    state.lastInactiveAlert[cameraId] = true;
                }
                else if (!isAboveThreshold && hasAlerted){
                    // Re-arm the alert once the ratio recovers.
                    state.lastInactiveAlert[cameraId] = false;
                }
            }

        }else if (parsedData.type === 'status') {
            const msg = parsedData.message;
            if (msg === "Display settings updated") {
                return;
            }
            addLog(`[Global] ${msg}`, 'info');
        }
    } catch (e) {
        // Non-JSON text frames are logged and ignored.
        console.error("Error parsing WebSocket JSON message:", e, data);
    }
}
|
| 404 |
+
|
| 405 |
+
// Open all four camera connections (staggered 100 ms apart so the backend is
// not hit simultaneously) and begin polling for audio analysis results.
function startAllStreams() {
    addLog("Mencoba memulai semua stream yang terkonfigurasi...", 'info');

    [0, 1, 2, 3].forEach(slot => {
        setTimeout(() => connectWebSocket(slot), slot * 100);
    });

    if (!state.audioPollInterval) {
        fetchLatestAudioResult(); // immediate first poll
        state.audioPollInterval = setInterval(fetchLatestAudioResult, CONFIG.POLLING_INTERVAL_MS);
    }
}
|
| 420 |
+
|
| 421 |
+
// Close every open camera socket, stop the audio polling loop and reset the
// vocalization panel to its placeholder state.
function stopAllStreams() {
    addLog("Menghentikan semua stream...", 'warning');

    state.cameraWebSockets.forEach((ws, slot) => {
        if (!ws) return;
        ws.close();
        state.cameraWebSockets[slot] = null;
    });

    if (state.audioPollInterval) {
        clearInterval(state.audioPollInterval);
        state.audioPollInterval = null;
    }

    displayAudioResults(null);
    state.isStreaming = false;
}
|
| 439 |
+
|
| 440 |
+
// Poll the backend for the latest vocalization result and render it.
// A 404 means "no result yet" and is silently ignored; any other non-OK
// status is treated as an error. Identical consecutive results (compared via
// a JSON string key) are deduplicated so the log is not spammed every poll.
async function fetchLatestAudioResult() {
    try {
        const response = await fetch(`${CONFIG.API_URL}${CONFIG.GET_AUDIO_RESULT_URL}`);

        if (!response.ok) {
            if(response.status === 404){
                return; // nothing analyzed yet — not an error
            }throw new Error(`Server status ${response.status}`)
        }

        const audioData = await response.json();

        // Cheap structural fingerprint for change detection.
        const resultKey = JSON.stringify(audioData);
        // Capture the flag before the dedup early-return can clear context.
        const wasAnalyzing = state.isAnalyzingAudio;

        if (state.lastAudioResultKey && state.lastAudioResultKey === resultKey){
            return; // same result as last poll — nothing to update
        }

        state.lastAudioResultKey = resultKey;

        if (wasAnalyzing){
            addLog("Hasil vokalisasi diperbarui", "info");
            state.isAnalyzingAudio = false;
            state.lastAnalysisTimestamp = new Date();
        }else{
            addLog("Hasil vokalisasi tersedia", "info");
        }


        displayAudioResults(audioData);
    } catch (error) {
        console.error("Error fetching audio result:", error);

        // Only surface the failure if the user was waiting on an analysis.
        if (state.isAnalyzingAudio){
            addLog("Analisis audio gagal", "danger");
            state.isAnalyzingAudio = false;
        }
    }
}
|
| 480 |
+
|
| 481 |
+
// Export metrics as CSV for the selected date range (and optional camera).
// Fetches /metrics/export, then triggers a browser download via a temporary
// object URL attached to an invisible <a> element.
async function downloadCSV() {
    const start = DOMElements.startDateInput.value;
    const end = DOMElements.endDateInput.value;
    const cameraId = DOMElements.cameraSelect.value;

    if (!start || !end) {
        addLog("Silakan pilih tanggal mulai dan tanggal selesai untuk ekspor.", 'warning');
        return;
    }

    try {
        // Build the export URL with properly encoded query parameters.
        const url = new URL(`${CONFIG.API_URL}/metrics/export`);
        url.searchParams.set('start', start);
        url.searchParams.set('end', end);
        if (cameraId) url.searchParams.set('camera_id', cameraId); // empty = all cameras

        addLog(`Exporting CSV for ${start} to ${end}...`, 'info');

        const response = await fetch(url.toString());
        if (!response.ok) {
            throw new Error(`Export failed with status ${response.status}`);
        }

        const blob = await response.blob();
        const downloadUrl = URL.createObjectURL(blob);

        // Programmatic click on a temporary anchor starts the download.
        const a = document.createElement('a');
        a.href = downloadUrl;
        a.download = `metrics_${start}_to_${end}${cameraId ? `_cam${cameraId}` : ''}.csv`;
        document.body.appendChild(a);
        a.click();

        // Clean up
        // (deferred so the click has time to be processed by the browser)
        setTimeout(() => {
            document.body.removeChild(a);
            URL.revokeObjectURL(downloadUrl);
        }, 100);

        addLog("CSV download started.", 'info');
        closeModal(DOMElements.exportModal);
    } catch (error) {
        console.error("CSV Download Error:", error);
        addLog(error.message, 'danger');
    }
}
|
| 526 |
+
|
| 527 |
+
// -- Event Handlers --
|
| 528 |
+
// Click handler (delegated on the toggle-controls container) that flips one
// overlay flag, refreshes the buttons, logs the change, and broadcasts the
// new display settings to every open camera WebSocket.
function handleToggleControlClick(e) {
    // Delegation: resolve the actual button from wherever the click landed.
    const button = e.target.closest('button');
    if (!button) return;

    const control = button.dataset.control;
    if (!control) return;

    state[control] = !state[control];
    updateToggleButtons();

    // Human-readable names for the log entry.
    const labelMap = {
        show_detected: 'Deteksi',
        show_density: 'Kepadatan',
        show_inactive: 'Inaktivitas'
    };

    const displayName = labelMap[control] || control;
    addLog(`Tampilan diperbarui: ${displayName} ${state[control] ? 'dinyalakan' : 'dimatikan'}`, 'info');

    // Push the full flag set (not just the changed one) to the backend.
    const payload = JSON.stringify({
        type: 'display_settings_update',
        show_detected: state.show_detected,
        show_density: state.show_density,
        show_inactive: state.show_inactive
    });

    state.cameraWebSockets.forEach(ws => {
        if (ws && ws.readyState === WebSocket.OPEN) {
            ws.send(payload);
        }
    });
}
|
| 560 |
+
|
| 561 |
+
// Single click on the gear icon only shows a usage hint in the log.
function handleSettingsButtonClick() {
    addLog('Tip: Double-click pada ikon gear untuk membuka setting kamera.', 'info');
}

// Double-click fills the settings form with current values and opens it.
function handleSettingsButtonDoubleClick() {
    populateSettingsModal();
    openModal(DOMElements.settingsModal);
}
|
| 569 |
+
|
| 570 |
+
// Save the settings form into state and localStorage, then restart all
// streams so the new source URLs take effect.
function handleSaveSettingsClick() {
    // Read the four camera URL inputs (missing inputs become '').
    state.cameraUrls = DOMElements.camUrlInputs.map(input =>
        input ? input.value.trim() : ''
    );
    state.audioUrl = DOMElements.audioUrlInput ?
        DOMElements.audioUrlInput.value.trim() : '';

    // localStorage can throw (private mode, quota) — treat as non-fatal.
    try {
        localStorage.setItem('chickSenseCameraUrls', JSON.stringify(state.cameraUrls));
        localStorage.setItem('chickSenseAudioUrl', state.audioUrl);
        addLog('Pengaturan disimpan ke penyimpanan peramban.', 'info');
    } catch (e) {
        console.error("Failed to save settings to local storage:", e);
        addLog('Tidak dapat menyimpan pengaturan.', 'danger');
    }

    addLog('Pengaturan disimpan. Memulai ulang aliran...', 'info');
    closeModal(DOMElements.settingsModal);

    // Brief delay gives the old sockets time to close before reconnecting.
    stopAllStreams();
    setTimeout(startAllStreams, 500);
}
|
| 592 |
+
|
| 593 |
+
// "Stop all streams" button inside the settings modal: halt everything,
// then dismiss the modal.
function handleStopStreamsClick() {
    stopAllStreams();
    closeModal(DOMElements.settingsModal);
}
|
| 597 |
+
|
| 598 |
+
// Single click on "Export CSV" only shows a usage hint in the log.
function handleExportButtonClick() {
    addLog('Tip: Double-click pada "Export CSV" untuk mengekspor metric ke CSV.', 'info');
}

// Double-click opens the export dialog.
function handleExportButtonDoubleClick() {
    openModal(DOMElements.exportModal);
}
|
| 605 |
+
|
| 606 |
+
// Close a modal when either its backdrop or its close button is clicked.
function handleModalCloseClick(e) {
    const modal = e.currentTarget;
    const hitBackdrop = e.target.classList.contains('modal-backdrop');
    const hitCloseBtn = Boolean(e.target.closest('.modal-close-btn'));

    if (hitBackdrop || hitCloseBtn) {
        closeModal(modal);
    }
}

// Escape dismisses every modal currently shown.
function handleEscapeKey(e) {
    if (e.key !== 'Escape') return;
    document.querySelectorAll('.modal.show').forEach(closeModal);
}
|
| 620 |
+
|
| 621 |
+
// -- Add Event Listener --
|
| 622 |
+
// Wire up every UI event handler. All lookups are null-checked so the script
// tolerates pages that omit some of the optional widgets.
function setupEventListeners() {
    // Overlay toggles use a single delegated click handler on the container.
    if (DOMElements.toggleControls) {
        DOMElements.toggleControls.addEventListener('click', handleToggleControlClick);
    }

    // Settings: click shows a hint, double-click opens the modal.
    if (DOMElements.settingsButton) {
        DOMElements.settingsButton.addEventListener('click', handleSettingsButtonClick);
        DOMElements.settingsButton.addEventListener('dblclick', handleSettingsButtonDoubleClick);
    }

    if (DOMElements.saveSettingsBtn) {
        DOMElements.saveSettingsBtn.addEventListener('click', handleSaveSettingsClick);
    }

    if (DOMElements.stopAllStreamsBtn) {
        DOMElements.stopAllStreamsBtn.addEventListener('click', handleStopStreamsClick);
    }

    // Export: click shows a hint, double-click opens the modal.
    if (DOMElements.openExportModalBtn) {
        DOMElements.openExportModalBtn.addEventListener('click', handleExportButtonClick);
        DOMElements.openExportModalBtn.addEventListener('dblclick', handleExportButtonDoubleClick);
    }

    if (DOMElements.downloadCsvBtn) {
        DOMElements.downloadCsvBtn.addEventListener('click', downloadCSV);
    }

    // Backdrop / close-button dismissal for every modal on the page.
    document.querySelectorAll('.modal').forEach(modal => {
        modal.addEventListener('click', handleModalCloseClick);
    });

    window.addEventListener('keydown', handleEscapeKey);
}
|
| 655 |
+
|
| 656 |
+
// -- Init --
|
| 657 |
+
// -- Init --
// One-time startup: paint the initial UI, restore saved settings, wire
// events, default the export range to today, start the clock ticker, and
// kick off all streams.
function initialize() {
    updateDateTime();
    updateToggleButtons();
    updateDailyAnalysisUI();
    displayAudioResults(null);      // placeholder until first audio result
    loadSettingsFromStorage();

    setupEventListeners();

    // Default export range: today (ISO yyyy-mm-dd).
    const today = new Date().toISOString().split('T')[0];
    if (DOMElements.startDateInput) DOMElements.startDateInput.value = today;
    if (DOMElements.endDateInput) DOMElements.endDateInput.value = today;

    // Clock refresh every 30 s (display has minute resolution).
    setInterval(updateDateTime, 30000);

    addLog('Sistem diinisialisasi. Selamat datang! IKGC', 'info');
    startAllStreams();
}

// Run initialize() once the DOM is ready, whether or not the script was
// loaded before DOMContentLoaded already fired.
if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', initialize);
} else {
    initialize();
}
|
| 681 |
+
|
| 682 |
+
|
utils/__init__.py
ADDED
|
File without changes
|
utils/audio/__init__.py
ADDED
|
File without changes
|
utils/audio/audio_ingest.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import random
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import subprocess
|
| 6 |
+
from io import BytesIO
|
| 7 |
+
from typing import Optional, Dict
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
import soundfile as sf
|
| 10 |
+
from .vocalization_prediction import vocalization_prediction
|
| 11 |
+
|
| 12 |
+
def _resolve_youtube_audio_url(url: str) -> str:
|
| 13 |
+
if "youtube.com" in url or "youtu.be" in url:
|
| 14 |
+
try:
|
| 15 |
+
meta = subprocess.run(
|
| 16 |
+
["yt-dlp", "--no-cache-dir", "-J", url],
|
| 17 |
+
capture_output=True, text=True, check=True, timeout=30
|
| 18 |
+
)
|
| 19 |
+
info = json.loads(meta.stdout)
|
| 20 |
+
is_live = bool(info.get("is_live"))
|
| 21 |
+
fmt = "best[protocol^=m3u8]/best" if is_live else "bestaudio/best"
|
| 22 |
+
out = subprocess.run(
|
| 23 |
+
["yt-dlp", "--no-cache-dir", "--get-url", "-f", fmt, url],
|
| 24 |
+
capture_output=True, text=True, check=True, timeout=30
|
| 25 |
+
)
|
| 26 |
+
return out.stdout.strip()
|
| 27 |
+
except Exception as e:
|
| 28 |
+
print(f"[FFmpeg Audio] yt-dlp resolve failed: {e}")
|
| 29 |
+
return url
|
| 30 |
+
|
| 31 |
+
def record_audio_ffmpeg(src_url: str, duration: int = 60, sample_rate: int = 22050, seek_seconds: int = 0) -> Optional[bytes]:
    """Capture a chunk of audio from a (possibly remote) source with ffmpeg.

    Returns the captured audio as complete WAV file bytes (mono, 16-bit PCM
    at ``sample_rate``), or None if ffmpeg fails or times out.

    Args:
        src_url: Source URL or path; YouTube links are resolved via yt-dlp.
        duration: Seconds of audio to capture.
        sample_rate: Output sample rate in Hz.
        seek_seconds: Offset into the source; ignored for YouTube sources
            (seeking into a resolved YouTube media URL is unreliable).
    """
    src = _resolve_youtube_audio_url(src_url)

    # --- This block is to make sure different kind of audio type can be extracted correctly ---
    cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error"]

    # Input seeking must precede -i; skipped for YouTube (see docstring).
    if seek_seconds > 0 and not ("youtube.com" in src_url or "youtu.be" in src_url):
        cmd += ["-ss", str(seek_seconds)]

    # HTTP(S) sources get reconnect/low-latency flags so transient network
    # hiccups don't abort the capture.
    if str(src).startswith(("http://", "https://")):
        cmd += [
            "-reconnect", "1",
            "-reconnect_streamed", "1",
            "-reconnect_on_network_error", "1",
            "-fflags", "+genpts",
            "-flags", "low_delay"
        ]

    cmd += [
        "-i", src,
        "-map", "a:0?",           # first audio stream, '?' = don't fail if absent
        "-t", str(duration),
        "-vn",                    # drop any video stream
        "-acodec", "pcm_s16le",
        "-ar", str(sample_rate),
        "-ac", "1",               # downmix to mono
        "-f", "wav", "pipe:1",    # write the WAV to stdout
        "-y"
    ]
    # --- ---


    try:
        print(f"[FFmpeg] extracting audio: {src} ({duration}s @ {sample_rate}Hz)")
        # Timeout covers the capture duration plus connection overhead.
        proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              check=True, timeout=duration + 15)
        return proc.stdout
    except subprocess.CalledProcessError as e:
        print(f"[FFmpeg] error (rc={e.returncode}): {e.stderr.decode(errors='ignore')}")
    except subprocess.TimeoutExpired:
        print("[FFmpeg] timed out reading audio.")
    return None
|
| 73 |
+
|
| 74 |
+
def background_audio_task(
    audio_url: str,
    duration: int,
    vocal_model,
    vocal_device,
    target_dict: Dict,
    seek_seconds: int = 0
):
    """Capture audio from ``audio_url`` and run vocalization classification.

    Intended to run in a background thread/task. Results are written into
    ``target_dict`` under the keys "prediction" and "probabilities"; on any
    failure "prediction" is set to "Error" and "probabilities" to None.

    NOTE(review): ``analysis_in_progress`` is created at this module's global
    scope by the assignments below (it is not defined elsewhere in this file)
    — confirm that whatever reads the in-progress flag imports it from this
    module rather than keeping its own copy.
    """
    global analysis_in_progress
    try:
        analysis_in_progress = True
        wav_bytes = record_audio_ffmpeg(audio_url, duration=duration, seek_seconds=seek_seconds)
        if not wav_bytes:
            raise RuntimeError("FFmpeg no data")

        # --- Below is to save analyzed audio into local ---
        # output_dir = os.path.join("static", "audio_captures")
        # os.makedirs(output_dir, exist_ok=True)
        # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # output_filename = f"capture_{timestamp}.wav"
        # output_filepath = os.path.join(output_dir, output_filename)
        # with open(output_filepath, "wb") as f:
        #     f.write(wav_bytes)
        # print(f"[Audio Task] Saved captured audio to {output_filepath}")

        # Decode the in-memory WAV into a float32 waveform.
        with BytesIO(wav_bytes) as bio:
            y, sr = sf.read(bio, dtype="float32")

        # Downmix multi-channel audio to mono by averaging channels.
        if hasattr(y, "ndim") and y.ndim > 1:
            y = np.mean(y, axis=1)
        sr = int(sr)

        pred, probs = vocalization_prediction(y, sr, vocal_model, vocal_device)

        target_dict["prediction"] = pred
        target_dict["probabilities"] = probs
        print("[Audio Task] Completed (ffmpeg path)")

    except Exception as e:
        # Any failure (capture, decode, inference) is reported via target_dict.
        print(f"[Audio Task] Failed: {e}")
        target_dict["prediction"] = "Error"
        target_dict["probabilities"] = None
    finally:
        analysis_in_progress = False
|
| 118 |
+
|
utils/audio/vocalization_prediction.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import librosa
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
|
| 7 |
+
class ModdifiedModel(nn.Module):
    """CNN classifier for chicken-vocalization log-mel spectrograms (3 classes).

    NOTE(review): the "Moddified" spelling is kept because callers import the
    class under this exact name; renaming would break them.

    The hard-coded Linear(25088, ...) implies a fixed input spectrogram size:
    with valid 3x3 convs and three 2x2 pools, 25088 = 128 channels * 14 * 14,
    which matches a (1, 128, ~130) input (128 mel bands, 1.5 s at 22050 Hz
    with hop 256). Other input sizes will fail in the classifier.
    """
    def __init__(self, num_classes=3):
        super(ModdifiedModel, self).__init__()
        # Three conv blocks: unpadded 3x3 conv -> BatchNorm -> ReLU -> 2x2 max-pool.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=3),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Flatten -> 256-unit hidden layer with dropout -> class logits.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(25088, 256),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(256, num_classes)
        )

    def forward(self, x):
        """Return raw (unsoftmaxed) class logits of shape (batch, num_classes)."""
        x = self.features(x)
        return self.classifier(x)
|
| 35 |
+
|
| 36 |
+
def audio_to_log_mel_spec(audio_data, sr=22050, wav_size=int(1.5 * 22050)):
    """Turn a raw waveform into a log-scaled mel spectrogram.

    The waveform is peak-normalized, then cropped or zero-padded to exactly
    ``wav_size`` samples so every clip yields a spectrogram of the same width.

    Args:
        audio_data: 1-D waveform (np.ndarray).
        sr: sample rate in Hz.
        wav_size: target clip length in samples (default: 1.5 s at 22050 Hz).

    Returns:
        np.ndarray (128, time): mel power spectrogram in dB, referenced to
        the clip maximum.
    """
    clip = librosa.util.normalize(audio_data)
    shortfall = wav_size - len(clip)
    if shortfall < 0:
        clip = clip[:wav_size]
    else:
        clip = np.pad(clip, (0, shortfall), "constant")

    spec = librosa.feature.melspectrogram(
        y=clip, sr=sr, n_fft=2048, hop_length=256, n_mels=128
    )
    return librosa.power_to_db(spec, ref=np.max)
|
| 59 |
+
|
| 60 |
+
def load_model(model_path):
    """Instantiate ModdifiedModel and load trained weights from ``model_path``.

    The checkpoint is mapped onto the CPU so loading works on CUDA-less
    machines; callers move the model to their device afterwards.
    NOTE(review): torch.load unpickles the file — only load trusted checkpoints.
    """
    net = ModdifiedModel()
    state = torch.load(model_path, map_location=torch.device('cpu'))
    net.load_state_dict(state)
    net.eval()  # inference mode: disables dropout, uses BN running stats
    return net
|
| 66 |
+
|
| 67 |
+
def vocalization_prediction(audio_data, sample_rate, model, device=None):
    """
    Predict vocalization class from long audio by averaging over 1.5s windows.

    Slides a 1.5 s window with 50% overlap across the clip, classifies each
    window, and averages the softmax probabilities over all windows.

    Args:
        audio_data: np.ndarray, long audio (e.g., 30 seconds)
        sample_rate: int, sample rate
        model: trained PyTorch model
        device: torch.device (auto-selects CUDA/CPU when None)
    Returns:
        predicted_label: str, one of 'Healthy' / 'Noise' / 'Unhealthy',
            or "Error" when no window could be processed
        probabilities: dict label -> float (plain Python floats)
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Fix: moving the model to the device was previously done inside the loop
    # on every window; it only needs to happen once.
    model = model.to(device)

    wav_size_samples = int(1.5 * sample_rate)
    step_size = wav_size_samples // 2  # 50% overlap
    class_labels = {0: 'Healthy', 1: 'Noise', 2: 'Unhealthy'}
    total_probs = np.zeros(len(class_labels))
    n_predictions = 0

    # NOTE(review): audio_to_log_mel_spec pads/crops to 1.5 s at 22050 Hz by
    # default; sample rates other than 22050 change the spectrogram width and
    # will break the model's fixed Linear layer — confirm inputs are 22050 Hz.
    for start in range(0, len(audio_data) - wav_size_samples + 1, step_size):
        chunk = audio_data[start:start + wav_size_samples]
        try:
            mel_spec_db = audio_to_log_mel_spec(chunk, sr=sample_rate)
            X_tensor = torch.tensor(mel_spec_db, dtype=torch.float32).unsqueeze(0).unsqueeze(0).to(device)

            with torch.no_grad():
                output = model(X_tensor)
                probs = F.softmax(output, dim=1)[0].cpu().numpy()
            total_probs += probs
            n_predictions += 1
        except Exception as e:
            # One bad window (e.g. unexpected shape) should not abort the
            # whole clip; skip it and keep averaging the rest.
            print(f"[Error] Failed on audio chunk: {e}")
            continue

    if n_predictions == 0:
        return "Error", {"Healthy": 0.0, "Noise": 0.0, "Unhealthy": 0.0}

    avg_probs = total_probs / n_predictions
    pred_class = int(np.argmax(avg_probs))
    predicted_label = class_labels[pred_class]
    # Fix: cast numpy scalars to plain float so the dict is JSON-serializable
    # downstream (sensor_data.json / API payloads).
    prob_dict = {class_labels[i]: float(avg_probs[i]) for i in range(len(class_labels))}

    return predicted_label, prob_dict
|
utils/metrics_store.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sqlite3
|
| 3 |
+
import threading
|
| 4 |
+
import queue
|
| 5 |
+
import time
|
| 6 |
+
from config import DATA_STORAGE
|
| 7 |
+
|
| 8 |
+
SCHEMA = """
|
| 9 |
+
CREATE TABLE IF NOT EXISTS metrics (
|
| 10 |
+
ts INTEGER NOT NULL, -- seconds (unix)
|
| 11 |
+
camera_id INTEGER NOT NULL,
|
| 12 |
+
detected INTEGER NOT NULL,
|
| 13 |
+
dense_areas INTEGER NOT NULL,
|
| 14 |
+
inactive INTEGER NOT NULL,
|
| 15 |
+
PRIMARY KEY (ts, camera_id)
|
| 16 |
+
);
|
| 17 |
+
CREATE INDEX IF NOT EXISTS idx_metrics_cam_ts ON metrics(camera_id, ts);
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
class MetricsStore:
    """Append-only time-series store for per-camera metrics, backed by SQLite.

    Writes are queued from the (hot) video loop and flushed in batches by a
    background daemon thread; reads query the connection directly.
    NOTE(review): the single connection is shared between the worker thread
    and reader threads (check_same_thread=False) without a lock — confirm the
    sqlite3 build serializes access before relying on concurrent read/write.
    """
    def __init__(self):
        # Storage location and retention policy come from config.DATA_STORAGE.
        path = DATA_STORAGE["SQLITE_DB_PATH"]
        retention_days = DATA_STORAGE["DB_RETENTION_DAYS"]
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
        self.conn = sqlite3.connect(path, check_same_thread=False)
        # WAL journal + NORMAL sync trade strict durability for write throughput.
        self.conn.execute("PRAGMA journal_mode=WAL;")
        self.conn.execute("PRAGMA synchronous=NORMAL;")
        self.conn.executescript(SCHEMA)
        self.retention_days = retention_days
        # Bounded queue decouples producers from disk I/O; see write().
        self.q = queue.Queue(maxsize=10000)
        self.t = threading.Thread(target=self._worker, daemon=True)
        self.t.start()

    def _worker(self):
        """Background loop: drain the queue, batch-flush rows, prune old data."""
        buf, last_flush, last_ret = [], time.time(), time.time()
        while True:
            try:
                item = self.q.get(timeout = 1.0)
                buf.append(item)
            except queue.Empty:
                pass  # timeout only gives us a periodic chance to flush/prune

            now = time.time()
            # Flush when the buffer is big enough OR the flush interval elapsed.
            if buf and (len(buf) >= DATA_STORAGE["DB_WRITE_BUFFER_SIZE"] or now - last_flush > DATA_STORAGE["DB_WRITE_INTERVAL_S"]):
                self.conn.executemany(
                    "INSERT OR REPLACE INTO metrics(ts,camera_id,detected,dense_areas,inactive) VALUES (?,?,?,?,?)",
                    buf
                )
                self.conn.commit()
                buf.clear()
                last_flush = now

            # keep retention on check every hour
            if now - last_ret > 3600:
                cutoff = int(time.time()) - self.retention_days * 86400
                self.conn.execute("DELETE FROM metrics WHERE ts < ?", (cutoff,))
                self.conn.commit()
                last_ret = now

    def write(self, ts, camera_id, detected, dense_areas, inactive):
        """Enqueue one metrics row without ever blocking the caller."""
        try:
            self.q.put_nowait((int(ts), int(camera_id), int(detected), int(dense_areas), int(inactive)))
        except queue.Full:
            pass # drop if overloaded (rare), avoids blocking video loop

    def fetch_range(self, start_ts, end_ts, camera_id=None):
        """ retrieve data from database with specific time window """
        # Rows are (ts, camera_id, detected, dense_areas, inactive), ordered
        # by time then camera; camera_id (optional) filters to one camera.
        cur = self.conn.cursor()
        sql = "SELECT ts, camera_id, detected, dense_areas, inactive FROM metrics WHERE ts BETWEEN ? AND ?"
        params = [int(start_ts), int(end_ts)]
        if camera_id is not None:
            sql += " AND camera_id = ?"
            params.append(int(camera_id))
        sql += " ORDER BY ts ASC, camera_id ASC"
        cur.execute(sql, params)
        return cur.fetchall()

    def get_bounds(self):
        """ help to find the earliest and latest timestamps on the table """
        # Returns (min_ts, max_ts); both are None on an empty table.
        cur = self.conn.cursor()
        cur.execute("SELECT MIN(ts), MAX(ts) FROM metrics")
        row = cur.fetchone() or (None, None)
        return row
|
utils/notifications.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import time
|
| 4 |
+
import logging
|
| 5 |
+
from telegram import Bot
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import Dict, List, Any, Optional
|
| 10 |
+
|
| 11 |
+
from config import NOTIFIER
|
| 12 |
+
|
| 13 |
+
ENABLE_TELEGRAM_NOTIFICATIONS = NOTIFIER["ENABLE_TELEGRAM_NOTIFICATIONS"]
|
| 14 |
+
TELEGRAM_BOT_TOKEN = NOTIFIER["TELEGRAM_BOT_TOKEN"]
|
| 15 |
+
TELEGRAM_CHAT_ID = NOTIFIER["TELEGRAM_CHAT_ID"]
|
| 16 |
+
|
| 17 |
+
INACTIVE_PERCENTAGE_THRESHOLD = NOTIFIER["INACTIVE_PERCENTAGE_THRESHOLD"]
|
| 18 |
+
UNHEALTHY_HISTORY_LENGTH = NOTIFIER["UNHEALTHY_HISTORY_LENGTH"]
|
| 19 |
+
UNHEALTHY_ALERT_THRESHOLD = NOTIFIER["UNHEALTHY_ALERT_THRESHOLD"]
|
| 20 |
+
DENSITY_COUNT_THRESHOLD = NOTIFIER["DENSITY_COUNT_THRESHOLD"]
|
| 21 |
+
|
| 22 |
+
# --- Helper ---
|
| 23 |
+
def _now_local() -> datetime:
|
| 24 |
+
return datetime.now().astimezone()
|
| 25 |
+
|
| 26 |
+
@dataclass
class AlertState:
    """ Tracks each camera alert state and cooldown """
    # "sent_*" latches prevent re-sending the same danger alert until the
    # triggering condition clears (reset in monitor_once) or the danger
    # cooldown expires.
    sent_inactive_danger: bool = False
    sent_unhealthy_danger: bool = False
    sent_density_danger: bool = False
    last_danger_alert_time: float = 0.0 # timestamp (time.time() of last danger alert)
    last_warning_alert_time: float = 0.0  # timestamp of last warning alert
    cooldown_period_danger: int = 1800 # 30 minutes
    cooldown_period_warning: int = 3600  # 60 minutes
|
| 36 |
+
|
| 37 |
+
class TelegramNotifier:
    """Thin synchronous wrapper around python-telegram-bot's async send API."""

    def __init__(self, bot_token: str, chat_id: str, *, logger: Optional[logging.Logger] = None):
        self.bot_token = bot_token
        self.chat_id = chat_id
        self._logger = logger if logger is not None else logging.getLogger(__name__)

    async def _send_async(self, text: str) -> bool:
        """Send one HTML-formatted message; returns True on success."""
        # A fresh Bot per message keeps this object stateless across event loops.
        try:
            await Bot(token=self.bot_token).send_message(
                chat_id=self.chat_id,
                text=text,
                parse_mode="HTML",
                disable_web_page_preview=True,
            )
        except Exception as e:
            self._logger.error("[Telegram] Gagal kirim pesan: %s", e)
            return False
        return True

    def send(self, text: str) -> bool:
        """ Send message """
        # NOTE(review): asyncio.run() raises if an event loop is already
        # running in this thread; that error is caught and reported as a
        # failed send rather than propagated.
        try:
            return asyncio.run(self._send_async(text))
        except Exception as e:
            self._logger.error("[Telegram] Gagal jalankan async: %s", e)
            return False
|
| 59 |
+
|
| 60 |
+
SENSOR_JSON_PATH = Path(NOTIFIER["SENSOR_DATA_JSON_PATH"])
|
| 61 |
+
alert_states: Dict[str, AlertState] = {}
|
| 62 |
+
|
| 63 |
+
def _load_sensor_data() -> List[Dict[str, Any]]:
    """ Load sensor_data.json """
    # Missing file, unreadable JSON, or a non-list payload all degrade to [].
    if not SENSOR_JSON_PATH.exists():
        return []
    try:
        raw = SENSOR_JSON_PATH.read_text(encoding="utf-8")
        payload = json.loads(raw)
    except Exception as e:
        logging.error("[Sensor JSON] Gagal baca: %s", e)
        return []
    return payload if isinstance(payload, list) else []
|
| 74 |
+
|
| 75 |
+
def _count_unhealthy(vocal_history: List[str]) -> int:
    """ Count 'Unhealthy' """
    # Only the most recent UNHEALTHY_HISTORY_LENGTH entries are considered.
    window = vocal_history[-UNHEALTHY_HISTORY_LENGTH:]
    hits = 0
    for status in window:
        if isinstance(status, str) and status.strip() == "Unhealthy":
            hits += 1
    return hits
|
| 79 |
+
|
| 80 |
+
def _get_prediction_with_prob(entry: Dict) -> tuple[str, float]:
|
| 81 |
+
pred = entry.get("prediction")
|
| 82 |
+
probs = entry.get("probabilities", {})
|
| 83 |
+
if pred and pred in probs:
|
| 84 |
+
return pred, float(probs[pred])
|
| 85 |
+
return pred or "Unknown", 0.0
|
| 86 |
+
|
| 87 |
+
def generate_alert_message(
    camera_id: str,
    inactive_ratio: float,
    audio_pred: str,
    audio_prob: float,
    density_count: int,
    unhealthy_count: int,
    timestamp: str
) -> str:
    """Compose the Telegram alert body (HTML) for one camera/mic snapshot.

    Returns an empty string when nothing crosses a warning or danger
    threshold; danger outranks warning in the header.
    """
    lines = []
    danger = False
    warning = False
    from_camera = camera_id != "MIC"  # MIC entries carry no video metrics

    # Inactivity (video-only signal)
    if from_camera:
        if inactive_ratio >= INACTIVE_PERCENTAGE_THRESHOLD:
            danger = True
            lines.append(f"Kamera {camera_id}: Inaktivitas ayam mencapai <b>{round(inactive_ratio * 100, 1)}%</b>.")
        elif inactive_ratio > 0:
            warning = True
            lines.append(f"Kamera {camera_id}: Terdeteksi beberapa ayam tidak aktif.")

    # Crowding (video-only signal)
    if from_camera:
        if density_count >= DENSITY_COUNT_THRESHOLD:
            danger = True
            lines.append(f"Kamera {camera_id}: Terdeteksi <b>{density_count}</b> area dengan kepadatan tinggi.")
        elif density_count > 1:
            warning = True
            lines.append(f"Kamera {camera_id}: Terdeteksi kepadatan ayam lokal.\n({density_count} cluster)")

    # Vocalization (audio signal)
    if audio_pred == "Unhealthy":
        audio_pcnt = round(audio_prob * 100, 1)
        if unhealthy_count >= UNHEALTHY_ALERT_THRESHOLD:
            danger = True
            lines.append(f"Audio {camera_id}: Tanda <b>Tidak Sehat</b> terdeteksi berulang (Prob: {audio_pcnt}%).")
        else:
            warning = True
            lines.append(f"Audio {camera_id}: Terdengar tanda awal <b>Tidak Sehat</b> (Prob: {audio_pcnt}%).")

    if not (danger or warning):
        return ""

    header = "🔴 <b>[BAHAYA]</b>" if danger else "🟠 <b>[PERINGATAN]</b>"
    body = [header, ""]
    body.extend(lines)
    body.append("")
    body.append("<b>Mohon segera periksa kondisi kandang!</b>")
    body.append(f"<i>Waktu: {timestamp}</i>")
    return "\n".join(body)
|
| 147 |
+
|
| 148 |
+
def monitor_once(notifier: Optional[TelegramNotifier]) -> None:
    """Run one alert-evaluation pass over sensor_data.json.

    For each entry: derive inactivity/density/vocalization signals, decide
    whether a danger or warning alert is due (respecting the per-camera
    latches and cooldowns in `alert_states`), send/print the alert, then
    reset any latch whose condition has cleared.
    """
    data = _load_sensor_data()
    if not data:
        return

    timestamp_str = _now_local().strftime("%Y-%m-%d %H:%M:%S")

    for item in data:
        camera_id = str(item.get("camera_id", "unknown"))
        detected_count = item.get("detected_count", 0)
        inactive_count = item.get("inactive_count", 0)
        density_count = item.get("density_count", 0)
        vocal_history = item.get("vocalization_history", [])
        latest_audio_pred = vocal_history[-1] if vocal_history else None

        audio_pred, audio_prob = _get_prediction_with_prob({
            "prediction": latest_audio_pred,
            "probabilities": item.get("latest_probabilities", {})
        })
        unhealthy_count = _count_unhealthy(vocal_history)

        # Fraction of detected birds flagged inactive; 0 when nothing detected.
        inactive_ratio = inactive_count / detected_count if detected_count > 0 else 0.

        # Lazily create per-camera alert state.
        if camera_id not in alert_states:
            alert_states[camera_id] = AlertState()
        state = alert_states[camera_id]

        # Send alert State
        now = time.time()
        should_send = False
        message_type = "info"
        is_mic = camera_id == "MIC"  # MIC entries carry audio-only signals

        # Type : Danger
        # A danger alert fires when its latch is clear (new incident) or the
        # danger cooldown has expired (reminder for an ongoing incident).
        danger_cooldown_expired = (now - state.last_danger_alert_time) >= state.cooldown_period_danger
        if not is_mic and inactive_ratio >= INACTIVE_PERCENTAGE_THRESHOLD:
            if not state.sent_inactive_danger or danger_cooldown_expired:
                should_send = True
                message_type = "danger"
                state.sent_inactive_danger = True
                state.last_danger_alert_time = now

        if unhealthy_count >= UNHEALTHY_ALERT_THRESHOLD:
            if not state.sent_unhealthy_danger or danger_cooldown_expired:
                should_send = True
                message_type = "danger"
                state.sent_unhealthy_danger = True
                state.last_danger_alert_time = now

        if not is_mic and density_count >= DENSITY_COUNT_THRESHOLD:
            if not state.sent_density_danger or danger_cooldown_expired:
                should_send = True
                message_type = "danger"
                state.sent_density_danger = True
                state.last_danger_alert_time = now

        # Type : Warning
        # Warnings are suppressed while the matching danger latch is set and
        # rate-limited by the (longer) warning cooldown.
        warning_cooldown_expired = (now - state.last_warning_alert_time) >= state.cooldown_period_warning
        if not should_send and not is_mic and inactive_count > 0 and not state.sent_inactive_danger:
            if warning_cooldown_expired:
                should_send = True
                message_type = "warning"
                state.last_warning_alert_time = now

        if not should_send and audio_pred == "Unhealthy" and not state.sent_unhealthy_danger:
            if warning_cooldown_expired:
                should_send = True
                message_type = "warning"
                state.last_warning_alert_time = now

        if not should_send and not is_mic and density_count > 1 and not state.sent_density_danger:
            if warning_cooldown_expired:
                should_send = True
                message_type = "warning"
                state.last_warning_alert_time = now

        # Send alert if TRUE
        if should_send:
            msg = generate_alert_message(
                camera_id=camera_id,
                inactive_ratio=inactive_ratio,
                audio_pred=audio_pred,
                audio_prob=audio_prob,
                density_count=density_count,
                unhealthy_count=unhealthy_count,
                timestamp=timestamp_str
            )

            if msg.strip() and notifier and ENABLE_TELEGRAM_NOTIFICATIONS:
                success = notifier.send(msg)
                logging.info("[Notif] %s alert sent for Camera %s: %s", message_type.upper(), camera_id, success)
            else:
                # Dry-run path: notifications disabled or message empty.
                print(f"[Notif][Dry-Run] {message_type.upper()}:\n{msg}\n")

        # State Reset
        # Clear latches once the underlying condition drops below threshold,
        # so the next occurrence triggers a fresh danger alert immediately.
        if inactive_ratio < INACTIVE_PERCENTAGE_THRESHOLD:
            state.sent_inactive_danger = False
        if unhealthy_count < UNHEALTHY_ALERT_THRESHOLD:
            state.sent_unhealthy_danger = False
        if density_count < DENSITY_COUNT_THRESHOLD:
            state.sent_density_danger = False
|
| 249 |
+
|
| 250 |
+
def main() -> None:
    """Entry point: poll sensor_data.json every 60 s and dispatch alerts."""
    logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')

    # Build the notifier only when Telegram is fully configured; otherwise
    # run in dry-run mode (alerts are printed, not sent).
    notifier: Optional[TelegramNotifier] = None
    telegram_ready = ENABLE_TELEGRAM_NOTIFICATIONS and TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID
    if telegram_ready:
        notifier = TelegramNotifier(TELEGRAM_BOT_TOKEN, TELEGRAM_CHAT_ID, logger=logging.getLogger("Telegram"))
        logging.info("[Notif] Telegram aktif.")
    else:
        logging.info("[Notif] Telegram dinonaktifkan. Mode dry-run.")

    logging.info("[Notif] Memantau sensor_data.json setiap 60 detik...")

    while True:
        try:
            monitor_once(notifier)
        except Exception as e:
            logging.error("[Notif] Error saat monitoring: %s", e)
        time.sleep(60)
|
| 269 |
+
|
| 270 |
+
if __name__ == "__main__":
|
| 271 |
+
main()
|
| 272 |
+
|
| 273 |
+
|
utils/video/__init__.py
ADDED
|
File without changes
|
utils/video/frame_reader.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import cv2
|
| 4 |
+
import time
|
| 5 |
+
import threading
|
| 6 |
+
import subprocess
|
| 7 |
+
from collections import deque
|
| 8 |
+
from config import TUNING
|
| 9 |
+
|
| 10 |
+
class FrameReader(threading.Thread):
    """Background thread that decodes a video source into a small frame buffer.

    Supports local files, generic stream URLs, and YouTube links (resolved to
    a direct stream URL via yt-dlp). Consumers call read() to get the most
    recent frame; older frames fall off the bounded deque automatically.
    """

    def __init__(self, video_url: str):
        super().__init__(daemon=True)
        self.video_url = video_url
        # Only the newest frames are kept; read() always returns the latest.
        self.buffer = deque(maxlen=TUNING["FRAME_READER_BUFFER_SIZE"])
        self.fps = TUNING["FRAME_READER_FPS"]
        self.running = threading.Event()
        self.cap = None
        self._is_file = self._looks_like_file(video_url)

    def _looks_like_file(self, url: str) -> bool:
        """True when the source is a local file (exists, or has no scheme)."""
        if os.path.exists(url):
            return True
        # No "scheme://" prefix -> treat as a (possibly missing) file path.
        return not re.match(r'^[a-zA-Z]+://', url or "")

    def _resolve_url(self, url: str):
        """Resolve a YouTube page URL to a direct stream URL via yt-dlp.

        Non-YouTube inputs are returned unchanged. Returns None when
        resolution fails (offline/private/geo-blocked video, yt-dlp error).
        """
        if "youtube.com" in url or "youtu.be" in url:
            try:
                print(f"[FrameReader] Resolving YouTube URL: {url}")

                result = subprocess.run(
                    ["yt-dlp", "--get-url", url],
                    capture_output=True, text=True, check=True, timeout=30
                )
                urls = result.stdout.strip().splitlines()
                if not urls:
                    print("[FrameReader] No URLs returned by yt-dlp")
                    return None

                stream_url = urls[0]
                print(f"[FrameReader] Resolved to stream: {stream_url}")
                return stream_url

            except subprocess.CalledProcessError as e:
                stderr = e.stderr.strip()
                print(f"[FrameReader] yt-dlp failed with error: {stderr}")
                if "unavailable" in stderr:
                    print("[FrameReader] Video may be offline, private, or geo-restricted.")
                return None
            except Exception as e:
                print(f"[FrameReader] Unexpected error resolving YouTube URL: {e}")
                return None

        return url

    def run(self):
        """ Read each frame of a video and store in buffer """
        resolved = self._resolve_url(self.video_url)
        if resolved is None:
            # Fix: previously a failed yt-dlp resolution passed None into
            # cv2.VideoCapture, killing this thread with an unhandled
            # TypeError instead of logging and exiting cleanly.
            print(f"[FrameReader] Could not resolve source: {self.video_url}")
            return
        self.cap = cv2.VideoCapture(resolved, cv2.CAP_FFMPEG)
        if not self.cap.isOpened():
            print(f"[FrameReader] Cannot open: {resolved}")
            return

        try:
            self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        except Exception:
            pass  # best-effort; not all backends support this property
        self.running.set()

        # Pace reads to the source FPS (fall back to configured fps, then 30).
        src_fps = self.cap.get(cv2.CAP_PROP_FPS)
        if not src_fps or src_fps <= 1e-3:
            src_fps = float(self.fps) if self.fps else 30.0
        frame_period = 1.0 / float(src_fps)
        next_ts = time.monotonic()

        # Read Frame Loop
        while self.running.is_set():
            ret, frame = self.cap.read()
            if not ret:
                if os.path.exists(self.video_url):
                    break  # local file: EOF, stop cleanly
                time.sleep(0.5)  # live stream hiccup: wait and retry
                continue

            if frame is not None:
                self.buffer.append(frame)

            # Sleep until the next frame slot; resync if we fell behind.
            next_ts += frame_period
            sleep_for = next_ts - time.monotonic()
            if sleep_for > 0:
                time.sleep(sleep_for)
            else:
                next_ts = time.monotonic()

        if self.cap:
            self.cap.release()

    def stop(self):
        """Signal the read loop to exit and wait for the thread to finish."""
        self.running.clear()
        self.join()

    def read(self):
        """Return a copy of the newest frame, or None if nothing buffered yet."""
        return self.buffer[-1].copy() if self.buffer else None
|
| 105 |
+
|
utils/video/stream_processor.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import time
|
| 3 |
+
from typing import Dict, List, Optional
|
| 4 |
+
import threading
|
| 5 |
+
import torch
|
| 6 |
+
from sklearn.cluster import DBSCAN
|
| 7 |
+
|
| 8 |
+
# -- Local Imports --
|
| 9 |
+
from config import APP_CONFIG, TUNING, INACTIVITY_CFG, DENSITY_DBSCAN_CFG
|
| 10 |
+
from .frame_reader import FrameReader
|
| 11 |
+
from .tracker import MOTTracker
|
| 12 |
+
|
| 13 |
+
class StreamProcessor:
|
| 14 |
+
    def __init__(self,
                 video_url: str,
                 model,
                 device: str = "cpu",
                 half: bool = False,
                 infer_lock=None):
        """Set up per-stream detection state; call start() to begin processing.

        Args:
            video_url: source handed to FrameReader (file, URL, YouTube link).
            model: detector exposing .predict() (YOLO-style interface).
            device: inference device string ("cpu" or "cuda").
            half: request FP16 inference; only honored when device is "cuda".
            infer_lock: optional lock shared across processors to serialize
                model inference — TODO confirm intended sharing with caller.
        """
        # -- Initialization --
        self.video_url = video_url
        self.model = model
        self.device = device
        self.detection_interval = max(1, int(TUNING["DETECTION_INTERVAL_FRAMES"]))
        self.imgsz = int(TUNING["YOLO_IMG_SIZE"])
        self.half = bool(half and device == "cuda")
        self._infer_lock = infer_lock

        self.frame_reader = FrameReader(video_url)
        self.tracker = MOTTracker(tracker_type="bytetrack", device=device)
        self._running = False
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._lock = threading.Lock()  # guards _latest_payload (see get_latest)

        self._latest_payload = None  # most recent processed result dict
        self._frame_idx = 0
        self._last_dets = None  # NOTE(review): not used in visible code — confirm
        self.id_state: Dict[int, Dict] = {}  # per-track inactivity state
|
| 40 |
+
|
| 41 |
+
def start(self):
|
| 42 |
+
if self._running:
|
| 43 |
+
return
|
| 44 |
+
self._running = True
|
| 45 |
+
self.frame_reader.start()
|
| 46 |
+
self._thread.start()
|
| 47 |
+
|
| 48 |
+
    def stop(self):
        """Stop processing: end the loop, stop the reader, join the worker."""
        self._running = False
        self.frame_reader.stop()
        self._thread.join()
|
| 52 |
+
|
| 53 |
+
def get_latest(self) -> Optional[Dict]:
|
| 54 |
+
with self._lock:
|
| 55 |
+
if self._latest_payload is None:
|
| 56 |
+
return None
|
| 57 |
+
return {
|
| 58 |
+
"frame" : self._latest_payload["frame"].copy(),
|
| 59 |
+
"tracks" : list(self._latest_payload["tracks"]),
|
| 60 |
+
"timestamp" : self._latest_payload["timestamp"],
|
| 61 |
+
"frame_idx" : self._latest_payload["frame_idx"],
|
| 62 |
+
"stats" : dict(self._latest_payload.get("stats", {}))
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _size_norm(self, box: List[float]) -> float:
|
| 68 |
+
x1, y1, x2, y2 = box
|
| 69 |
+
w = max(1.0, x2- x1)
|
| 70 |
+
h = max(1.0, y2 - y1)
|
| 71 |
+
return (w*w + h*h) ** 0.5
|
| 72 |
+
|
| 73 |
+
# -- Inactivity Logic --
|
| 74 |
+
    def _update_inactivity(self, tracks: List[Dict], now: float):
        """Annotate each track with an 'inactive' flag using speed hysteresis.

        Keeps a per-track EMA of normalized speed (px/s divided by the box
        diagonal, so roughly scale-invariant). A state flip — in either
        direction — requires the speed to stay past the enter/exit threshold
        for MIN_DURATION_S (dwell time). Mutates both `tracks` (adds
        t["inactive"]) and `self.id_state`.
        """
        current_ids = set(t["id"] for t in tracks)

        for t in tracks:
            tid = t["id"]
            cx, cy = self._center(t["box"])
            diag = self._size_norm(t["box"])
            st = self.id_state.get(tid)

            if st is None:
                # First sighting of this ID: initialize state, assume active.
                # please refer to "config.py" for each definition
                st = {"pos": (cx, cy), "t": now, "ema_v": 0.0, "inactive": False, "since": None, "last_seen": now}
                self.id_state[tid] = st
                t["inactive"] = False
                continue

            dt = max(1e-3, now - st["t"])
            dx = cx - st["pos"][0]
            dy = cy - st["pos"][1]

            # Speed normalized by the box diagonal.
            v_norm = ((dx*dx + dy * dy)**0.5 / dt) / max(1.0, diag)
            alpha = INACTIVITY_CFG["EMA_ALPHA"]
            ema_v = alpha * v_norm + (1.0 - alpha) * st["ema_v"]

            entry = INACTIVITY_CFG["ENTER_THRESH_NORM_SPEED"]
            exit_ = INACTIVITY_CFG["EXIT_THRESH_NORM_SPEED"]
            dwell = INACTIVITY_CFG["MIN_DURATION_S"]

            if st["inactive"]:
                # Currently inactive: require sustained movement to exit.
                if ema_v > exit_:
                    st["since"] = st.get("since") or now
                    if (now - st["since"]) >= dwell:
                        st["inactive"] = False
                        st['since'] = None
                else:
                    st["since"] = None
            else:
                # Currently active: require sustained stillness to enter.
                if ema_v < entry:
                    st["since"] = st.get("since") or now
                    if (now - st["since"]) >= dwell:
                        st["inactive"] = True
                        st["since"] = None
                else:
                    st["since"] = None

            st.update(pos = (cx, cy), t = now, ema_v = ema_v, last_seen = now)
            t["inactive"] = st["inactive"]

        # ensure old unseen ID removed
        stale = [
            tid for tid, st in list(self.id_state.items())
            if tid not in current_ids and (now - st.get("last_seen", now)) > INACTIVITY_CFG["MAX_UNSEEN_GAP_S"]
        ]
        for tid in stale:
            self.id_state.pop(tid, None)
|
| 129 |
+
|
| 130 |
+
def _center(self, box: List[float]) -> tuple[float, float]:
|
| 131 |
+
x1, y1, x2, y2 = box
|
| 132 |
+
return ((x1 + x2) * 0.5, (y1 + y2) * 0.5)
|
| 133 |
+
|
| 134 |
+
# -- Density Logic --
|
| 135 |
+
def _compute_density_dbscan(self, tracks: List[Dict]) -> set:
|
| 136 |
+
if not tracks:
|
| 137 |
+
return set(), 0
|
| 138 |
+
|
| 139 |
+
centers = np.array([self._center(t["box"]) for t in tracks], dtype=np.float32)
|
| 140 |
+
# please refer to "config.py" for each definition
|
| 141 |
+
min_samples = max(1, DENSITY_DBSCAN_CFG["MIN_NEIGHBORS"] + 1)
|
| 142 |
+
labels = DBSCAN(eps=DENSITY_DBSCAN_CFG["EPS_PX"], min_samples=min_samples).fit_predict(centers)
|
| 143 |
+
|
| 144 |
+
# below to count cluster that happend ( -1 is noise )
|
| 145 |
+
n_clusters = int(len(set(lbl for lbl in labels if lbl != -1)))
|
| 146 |
+
dense_ids = {t["id"] for t, lbl in zip(tracks, labels) if lbl != -1}
|
| 147 |
+
return dense_ids, n_clusters
|
| 148 |
+
|
| 149 |
+
# return {t["id"] for t, lbl in zip(tracks, labels) if lbl != -1}
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def _run(self):
|
| 153 |
+
""" Detect Object from The Frame and add its metadata (Inactivity / Density) """
|
| 154 |
+
while self._running:
|
| 155 |
+
frame = self.frame_reader.read()
|
| 156 |
+
if frame is None:
|
| 157 |
+
time.sleep(0.01)
|
| 158 |
+
continue
|
| 159 |
+
self._frame_idx += 1
|
| 160 |
+
|
| 161 |
+
# ensure frame to detect only on the interval
|
| 162 |
+
if self._frame_idx % self.detection_interval == 1:
|
| 163 |
+
try:
|
| 164 |
+
with torch.no_grad():
|
| 165 |
+
res = self.model.predict(
|
| 166 |
+
frame,
|
| 167 |
+
imgsz = self.imgsz,
|
| 168 |
+
device = self.device,
|
| 169 |
+
half = self.half,
|
| 170 |
+
verbose = False
|
| 171 |
+
)[0]
|
| 172 |
+
|
| 173 |
+
boxes = res.boxes
|
| 174 |
+
if boxes is not None and len(boxes) > 0:
|
| 175 |
+
dets = np.concatenate([
|
| 176 |
+
boxes.xyxy.cpu().numpy(),
|
| 177 |
+
boxes.conf.cpu().numpy()[:, None],
|
| 178 |
+
boxes.cls.cpu().numpy()[:, None]
|
| 179 |
+
], axis = 1).astype("float32")
|
| 180 |
+
else:
|
| 181 |
+
dets = np.empty((0, 6), dtype="float32")
|
| 182 |
+
self._last_dets = dets
|
| 183 |
+
except Exception as e:
|
| 184 |
+
print(f"[StreamProcessor] detection error: {e}")
|
| 185 |
+
self._last_dets = None
|
| 186 |
+
dets = self._last_dets # use last know tracking to ensure trackig is keept
|
| 187 |
+
|
| 188 |
+
# Update tracker
|
| 189 |
+
try:
|
| 190 |
+
tracks = self.tracker.update(dets, frame)
|
| 191 |
+
except Exception as e:
|
| 192 |
+
print(f"[StreamProcessor] tracking error: {e}")
|
| 193 |
+
tracks = []
|
| 194 |
+
|
| 195 |
+
# Update metadata
|
| 196 |
+
now = time.time()
|
| 197 |
+
self._update_inactivity(tracks, now)
|
| 198 |
+
dense_ids, n_clusters = self._compute_density_dbscan(tracks)
|
| 199 |
+
for t in tracks:
|
| 200 |
+
t["dense"] = (t["id"] in dense_ids)
|
| 201 |
+
|
| 202 |
+
stats = {
|
| 203 |
+
"detected": len(tracks),
|
| 204 |
+
"inactive": sum(1 for t in tracks if t.get("inactive")),
|
| 205 |
+
"dense_clusters": n_clusters
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
# Store latest result
|
| 209 |
+
with self._lock:
|
| 210 |
+
self._latest_payload = {
|
| 211 |
+
"frame" : frame,
|
| 212 |
+
"tracks" : tracks,
|
| 213 |
+
"timestamp" : now,
|
| 214 |
+
"frame_idx" : self._frame_idx,
|
| 215 |
+
"stats" : stats
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
class StreamRegistry:
    """Share one StreamProcessor per video URL across clients, with ref-counting."""

    def __init__(self):
        self._by_url: Dict[str, StreamProcessor] = {}
        self._ref_count: Dict[str, int] = {}
        self._lock = threading.Lock()

    def get(self, url: str, model, device="cpu", half=False) -> StreamProcessor:
        """Return the processor for *url*, creating and starting one on first use."""
        with self._lock:
            processor = self._by_url.get(url)
            if processor is None:
                print(f"[Registry] Creating new stream processor for: {url}")
                processor = StreamProcessor(url, model=model, device=device, half=half)
                processor.start()
                self._by_url[url] = processor
                self._ref_count[url] = 0
            self._ref_count[url] = self._ref_count[url] + 1
            print(f"[Registry] URL {url} ref count is now {self._ref_count[url]}")
            return processor

    def release(self, url: str):
        """Drop one reference to *url*; stop and evict its processor at zero."""
        with self._lock:
            if url not in self._by_url:
                return
            self._ref_count[url] -= 1
            print(f"[Registry] URL {url} ref count is now {self._ref_count[url]}")
            if self._ref_count[url] > 0:
                return
            print(f"[Registry] Stopping and removing processor for {url}")
            try:
                self._by_url[url].stop()
            finally:
                self._by_url.pop(url, None)
                self._ref_count.pop(url, None)
|
utils/video/tracker.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import numpy as np
|
| 3 |
+
import boxmot
|
| 4 |
+
from boxmot.tracker_zoo import create_tracker
|
| 5 |
+
|
| 6 |
+
def _find_tracker_config(tracker_type: str) -> str:
|
| 7 |
+
env_path = os.environ.get("BOXMOT_CFG", "")
|
| 8 |
+
if env_path and os.path.exists(env_path):
|
| 9 |
+
return env_path
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
bm_dir = os.path.dirname(boxmot.__file__)
|
| 13 |
+
candidates = [
|
| 14 |
+
os.path.join(bm_dir, "configs", "trackers", f"{tracker_type}.yaml"),
|
| 15 |
+
os.path.join(bm_dir, "trackers", "configs", f"{tracker_type}.yaml"),
|
| 16 |
+
os.path.join(bm_dir, "configs", f"{tracker_type}.yaml")
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
for p in candidates:
|
| 20 |
+
if os.path.exists(p):
|
| 21 |
+
return p
|
| 22 |
+
except Exception:
|
| 23 |
+
pass
|
| 24 |
+
return ""
|
| 25 |
+
|
| 26 |
+
class MOTTracker:
    """Thin wrapper around a boxmot multi-object tracker."""

    def __init__(self, tracker_type: str = "bytetrack", device: str = "cpu"):
        cfg_path = _find_tracker_config(tracker_type)
        if not cfg_path:
            raise RuntimeError(
                f"Could not locate config for '{tracker_type}'. "
                f"Set BOXMOT_CFG to the YAML path or ensure boxmot is installed correctly."
            )

        self.tracker = create_tracker(
            tracker_type = tracker_type,
            tracker_config = cfg_path,
            reid_weights = None,
            device = device,
            half = False
        )

    def update(self, dets_xyxy_conf_cls, frame):
        """Feed one frame of detections to the tracker and return live tracks.

        Args:
            dets_xyxy_conf_cls: (N, 6) array of [x1, y1, x2, y2, conf, cls],
                or None / empty when nothing was detected this frame.
            frame: the image the detections came from.

        Returns:
            List of dicts with keys "id", "box" (xyxy floats), "conf", "cls".
        """
        if dets_xyxy_conf_cls is None or len(dets_xyxy_conf_cls) == 0:
            dets_xyxy_conf_cls = np.empty((0, 6), dtype = "float32")

        raw = self.tracker.update(dets_xyxy_conf_cls.astype("float32"), frame)

        results = []
        if raw.shape[0] == 0:
            return results
        for row in raw:
            x1, y1, x2, y2, track_id, conf, cls_id = (float(v) for v in row[:7])
            results.append({"id": int(track_id), "box": [x1, y1, x2, y2], "conf": conf, "cls": int(cls_id)})
        return results
|
| 56 |
+
|