# ProjSentinel / sentinel_app_v13.py
# Uploaded by Paulownia — commit "docker deployment" (27b973a, verified).
# (The lines above were hosting-page artifacts; kept here as comments so the
# module remains valid Python.)
import os, math, time, datetime, threading, json, uuid
import requests as http_requests
from collections import deque
from flask import Flask, render_template, Response, jsonify, request
from werkzeug.utils import secure_filename
import cv2
import numpy as np
from ultralytics import YOLO
from sort import Sort
import google.generativeai as genai
from PIL import Image
import torch
import torchvision.transforms as T
from torchvision.models import resnet18, ResNet18_Weights
# Configure Gemini.
# SECURITY FIX: the API key was a hard-coded literal in source control;
# read it from the environment instead (empty default preserves the old
# "not configured" behavior).
GENAI_API_KEY = os.environ.get('GENAI_API_KEY', '')
genai.configure(api_key=GENAI_API_KEY)
model_gemini = genai.GenerativeModel('gemini-2.5-flash')

# Flask application and upload directory for user-provided video sources.
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
# Load models
# model_movement: general YOLOv8n (COCO); the movement module filters on
# class 0, which it treats as "person".
model_movement = YOLO('yolov8n.pt')
model_facemask = YOLO('face_mask.pt')   # custom face-mask detector
model_weapon = YOLO('All_weapon.pt')    # custom weapon detector
# Re-ID Embedding Extractor (ResNet18)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
reid_model = resnet18(weights=ResNet18_Weights.DEFAULT)
reid_model.fc = torch.nn.Identity() # Remove classification layer to get embeddings
reid_model.to(device)
reid_model.eval()
# Preprocessing for Re-ID crops (128x64 resize + ImageNet normalization).
reid_transform = T.Compose([
    T.ToPILImage(),
    T.Resize((128, 64)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
print(f"All Models Loaded (V13 - Journey Tracker). Device: {device}")
# ════════════════════════════════════════════════════════════════
# GLOBAL STATE
# ════════════════════════════════════════════════════════════════
# Multi-module system: set of active detection modules
active_modules = {'movement'} # Default: movement active
red_alert = False  # set by generate_frames when the smoothed score >= 80
# Per-feed video source management
# feed_id -> filepath (None means no source / webcam for feed 0)
feed_sources = {} # e.g. {0: '/path/to/clip1.mp4', 1: '/path/to/clip2.mp4'}
# Multi-camera feed management
camera_feeds = {}     # feed_id -> VideoCamera
feed_versions = {}    # feed_id -> int; bumped on restart so generators re-acquire
MAX_FEEDS = 4
# Multi-camera tracking
feed_trackers = {i: Sort() for i in range(MAX_FEEDS)}  # one SORT tracker per feed
person_history = {}   # track id -> [(x, y, timestamp), ...] recent positions
object_history = {}   # object id -> {'start_time', 'last_seen', 'last_pos', 'stationary'}
journey_log = deque(maxlen=500)  # cross-camera sighting records for the dashboard
class SubjectIdentityManager:
    """Cross-camera subject re-identification.

    Maps per-feed SORT track IDs to persistent global subject IDs by
    comparing appearance embeddings (ResNet18 features, cosine similarity
    on L2-normalised vectors).  Shared state is guarded by ``self.lock``
    because the per-feed generator threads call into this concurrently.
    """

    def __init__(self, threshold=0.7):
        # Minimum cosine similarity for two embeddings to count as the
        # same subject.
        self.threshold = threshold
        self.global_subjects = {}   # global_id -> {'embedding': tensor, 'first_seen': timestamp}
        self.local_to_global = {}   # (feed_id, local_id) -> {'global_id': int, 'last_seen': float}
        self.next_global_id = 100
        self.lock = threading.Lock()

    def get_embedding(self, face_img):
        """Return the L2-normalised ResNet18 embedding of a BGR crop."""
        img_t = reid_transform(face_img).unsqueeze(0).to(device)
        with torch.no_grad():
            emb = reid_model(img_t)
        # Normalise so that a dot product equals cosine similarity.
        return emb / emb.norm()

    def match_or_register(self, feed_id, local_id, frame, bbox):
        """Resolve a local track to a global subject ID.

        Returns the global ID, or None when the clamped bbox is empty.
        A cached mapping refreshed within the last 3 seconds is reused;
        otherwise the crop is re-embedded and matched against all known
        subjects, registering a new one below the similarity threshold.
        """
        x1, y1, x2, y2 = map(int, bbox)
        h, w = frame.shape[:2]
        # Clamp the box to the frame so slicing cannot go out of range.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)
        if x2 <= x1 or y2 <= y1:
            return None
        crop = frame[y1:y2, x1:x2]
        if crop.size == 0:
            return None
        key = (feed_id, local_id)
        now = time.time()
        with self.lock:
            # Use cached mapping if fresh (< 3 seconds), otherwise re-evaluate.
            if key in self.local_to_global:
                entry = self.local_to_global[key]
                if now - entry['last_seen'] < 3.0:
                    entry['last_seen'] = now
                    return entry['global_id']
            # Stale or unknown — compute a fresh embedding and match it.
            new_emb = self.get_embedding(crop)
            best_id = None
            best_sim = -1
            for gid, data in self.global_subjects.items():
                sim = torch.mm(new_emb, data['embedding'].t()).item()
                if sim > best_sim:
                    best_sim = sim
                    best_id = gid
            if best_sim > self.threshold:
                match_id = best_id
                # Exponential moving average so the stored appearance
                # adapts to pose/lighting changes over time.
                self.global_subjects[match_id]['embedding'] = (
                    0.9 * self.global_subjects[match_id]['embedding'] + 0.1 * new_emb
                )
            else:
                match_id = self.next_global_id
                self.next_global_id += 1
                self.global_subjects[match_id] = {
                    'embedding': new_emb,
                    'first_seen': now
                }
            self.local_to_global[key] = {'global_id': match_id, 'last_seen': now}
            return match_id

    def cleanup_feed(self, feed_id, active_local_ids):
        """Remove stale local-to-global mappings for tracks no longer active."""
        with self.lock:
            stale = [k for k in self.local_to_global if k[0] == feed_id and k[1] not in active_local_ids]
            for k in stale:
                del self.local_to_global[k]

    def get_global_id_cached(self, feed_id, local_id):
        """Get cached global_id without re-evaluating (for re-id check)."""
        key = (feed_id, local_id)
        # FIX: take the lock here too — this runs concurrently with
        # match_or_register/cleanup_feed on other feed threads, which
        # mutate local_to_global.
        with self.lock:
            entry = self.local_to_global.get(key)
        return entry['global_id'] if entry else None
# Shared Re-ID manager used by every feed's generator thread.
identity_manager = SubjectIdentityManager()
# EMA state for smooth_score(): last smoothed value and blend factor.
previous_score = 0.0
ALPHA = 0.2
# ════════════════════════════════════════════════════════════════
# OPERATOR AUDIT LOG
# ════════════════════════════════════════════════════════════════
audit_log = deque(maxlen=200)     # newest-first ring buffer of operator events
audit_lock = threading.Lock()     # guards audit_log across feed/request threads

def log_audit(action, details="", severity="INFO"):
    """Append an event to the operator audit log (newest first)."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = {
        'timestamp': stamp,
        'action': action,
        'details': details,
        'severity': severity,  # INFO, WARNING, CRITICAL
    }
    with audit_lock:
        audit_log.appendleft(entry)
    # Critical events are also echoed to the server console.
    if severity == "CRITICAL":
        print(f"[AUDIT-CRITICAL] {entry['timestamp']} | {action} | {details}")

log_audit("SYSTEM_BOOT", "Sentinel V14 initializing — Dispatch Integration active.", "INFO")
# ════════════════════════════════════════════════════════════════
# TELEGRAM DISPATCH ENGINE
# ════════════════════════════════════════════════════════════════
class TelegramDispatcher:
    """Sends alert messages via Telegram Bot API using simple HTTP requests.

    Network failures never raise out of these methods; they are reported
    through the returned dict and the audit log so a Telegram outage can't
    crash the detection pipeline.
    """

    def __init__(self, bot_token='', chat_id=''):
        self.bot_token = bot_token
        self.chat_id = chat_id
        self.base_url = ''
        if bot_token:
            self.base_url = f'https://api.telegram.org/bot{bot_token}'

    def configure(self, bot_token, chat_id=''):
        """Replace credentials at runtime and rebuild the API base URL."""
        self.bot_token = bot_token
        self.chat_id = chat_id
        self.base_url = f'https://api.telegram.org/bot{bot_token}'

    def is_configured(self):
        """True when both a bot token and a destination chat id are set."""
        return bool(self.bot_token and self.chat_id)

    def send_message(self, text, parse_mode='HTML'):
        """Send a text message to the configured chat.

        Returns the Telegram API JSON response, or an ``{'ok': False, ...}``
        dict when unconfigured or on network error.
        """
        if not self.is_configured():
            return {'ok': False, 'error': 'Telegram not configured (missing token or chat_id)'}
        try:
            resp = http_requests.post(
                f'{self.base_url}/sendMessage',
                json={
                    'chat_id': self.chat_id,
                    'text': text,
                    'parse_mode': parse_mode
                },
                timeout=10
            )
            result = resp.json()
            if result.get('ok'):
                log_audit("DISPATCH_SENT", f"Telegram message delivered to chat {self.chat_id}", "INFO")
            else:
                log_audit("DISPATCH_FAILED", f"Telegram API error: {result.get('description', 'Unknown')}", "WARNING")
            return result
        except Exception as e:
            log_audit("DISPATCH_ERROR", f"Network error: {str(e)}", "WARNING")
            return {'ok': False, 'error': str(e)}

    def test_connection(self):
        """Send a test message to verify the bot is working."""
        test_msg = (
            "🛡️ <b>SENTINEL DISPATCH — TEST</b>\n\n"
            "✅ Connection verified.\n"
            "This bot is now linked to Project SENTINEL.\n\n"
            f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
            "<i>Automated threat alerts will be delivered here.</i>"
        )
        return self.send_message(test_msg)

    def auto_detect_chat_id(self):
        """Try to detect chat_id from recent /start messages.

        Returns the detected chat id as a string, or None.
        """
        if not self.bot_token:
            return None
        try:
            resp = http_requests.get(f'{self.base_url}/getUpdates', timeout=10)
            data = resp.json()
            if data.get('ok') and data.get('result'):
                # Walk newest-first so the most recent chat wins.
                for update in reversed(data['result']):
                    msg = update.get('message', {})
                    chat = msg.get('chat', {})
                    if chat.get('id'):
                        self.chat_id = str(chat['id'])
                        log_audit("DISPATCH_CONFIG", f"Auto-detected chat ID: {self.chat_id}", "INFO")
                        return self.chat_id
        except Exception:
            # FIX: was a bare `except:` that also swallowed SystemExit /
            # KeyboardInterrupt; best-effort detection still returns None.
            pass
        return None

    def send_threat_alert(self, threat_score, details, active_modules):
        """Format and send a threat alert message."""
        modules_str = ', '.join([m.upper() for m in active_modules]) if active_modules else 'NONE'
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Build detail lines from each module's dict, skipping falsy values.
        detail_lines = []
        for module, data in details.items():
            if isinstance(data, dict):
                for k, v in data.items():
                    # Truthiness alone covers the original
                    # `v and v != 0 and v != False` check.
                    if v:
                        label = k.replace('_', ' ').title()
                        if isinstance(v, bool):
                            v = '⚠️ YES'
                        detail_lines.append(f" • {label}: {v}")
        details_text = '\n'.join(detail_lines) if detail_lines else ' No specific details'
        severity = '🔴 CRITICAL' if threat_score >= 80 else '🟠 ELEVATED' if threat_score >= 50 else '🟡 GUARDED'
        msg = (
            f"🚨 <b>SENTINEL THREAT ALERT</b>\n\n"
            f"<b>Threat Level:</b> {severity} ({threat_score}/100)\n"
            f"<b>Active Modules:</b> {modules_str}\n"
            f"<b>Time:</b> {timestamp}\n\n"
            f"<b>Detection Details:</b>\n{details_text}\n\n"
            f"🏛️ <i>Project SENTINEL — Automated Dispatch</i>"
        )
        return self.send_message(msg)
# Initialize dispatcher from the environment.
# SECURITY FIX: the bot token and chat id were committed in plain text —
# that token is compromised and must be revoked via @BotFather. Credentials
# now come from TELEGRAM_BOT_TOKEN / TELEGRAM_CHAT_ID env vars (empty
# defaults leave the dispatcher unconfigured, the same as no credentials).
telegram_dispatcher = TelegramDispatcher(
    bot_token=os.environ.get('TELEGRAM_BOT_TOKEN', ''),
    chat_id=os.environ.get('TELEGRAM_CHAT_ID', '')
)
# ════════════════════════════════════════════════════════════════
# DISPATCH STATE MANAGEMENT
# ════════════════════════════════════════════════════════════════
dispatch_log = deque(maxlen=100) # History of all dispatch events (newest first)
pending_approvals = {} # id -> dispatch event awaiting approval
dispatch_lock = threading.Lock()  # guards dispatch_log and pending_approvals
# Dispatch settings
dispatch_settings = {
    'auto_dispatch': False, # If True, send alerts immediately; if False, queue for approval
    'cooldown_seconds': 60, # Minimum seconds between auto-dispatches
    'enabled': True, # Master switch
    'last_dispatch_time': 0, # Timestamp of last sent alert
}
def create_dispatch_event(threat_score, details, active_modules):
    """Create a dispatch event and either auto-send or queue for approval.

    Flow: master-switch check -> configuration check -> cooldown check ->
    auto-dispatch or operator-approval queue.  Runs on a background thread
    spawned from the detection loop, so it must never raise.
    """
    # FIX: check the master switch first — the original built the event
    # (uuid, timestamp, copies of details) and then threw it away.
    if not dispatch_settings['enabled']:
        return
    event_id = str(uuid.uuid4())[:8]
    now = time.time()
    event = {
        'id': event_id,
        'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'threat_score': threat_score,
        'details': dict(details) if details else {},
        'active_modules': list(active_modules),
        'status': 'pending', # pending, sent, rejected, failed
        'created_at': now,
    }
    if not telegram_dispatcher.is_configured():
        event['status'] = 'failed'
        event['error'] = 'Telegram not configured'
        with dispatch_lock:
            dispatch_log.appendleft(event)
        log_audit("DISPATCH_SKIPPED", "Telegram not configured — alert not sent", "WARNING")
        return
    # Cooldown: suppress alerts fired too soon after the previous one.
    elapsed = now - dispatch_settings['last_dispatch_time']
    if elapsed < dispatch_settings['cooldown_seconds']:
        remaining = int(dispatch_settings['cooldown_seconds'] - elapsed)
        log_audit("DISPATCH_COOLDOWN", f"Alert suppressed — cooldown active ({remaining}s remaining)", "INFO")
        return
    if dispatch_settings['auto_dispatch']:
        # Send immediately.
        result = telegram_dispatcher.send_threat_alert(threat_score, details, active_modules)
        event['status'] = 'sent' if result.get('ok') else 'failed'
        if not result.get('ok'):
            event['error'] = result.get('error', result.get('description', 'Unknown'))
        # NOTE: the cooldown clock advances even on a failed send, so a
        # flapping network cannot retry faster than the cooldown.
        dispatch_settings['last_dispatch_time'] = now
        with dispatch_lock:
            dispatch_log.appendleft(event)
        log_audit("AUTO_DISPATCH", f"Threat alert auto-dispatched (score: {threat_score})", "CRITICAL")
    else:
        # Queue for human approval.
        with dispatch_lock:
            pending_approvals[event_id] = event
        log_audit("DISPATCH_QUEUED", f"Alert queued for operator approval (score: {threat_score})", "WARNING")
def approve_dispatch_event(event_id):
    """Operator approves a pending dispatch: send it and record the outcome."""
    with dispatch_lock:
        event = pending_approvals.pop(event_id, None)
    if event is None:
        return {'error': 'Event not found or already processed'}
    outcome = telegram_dispatcher.send_threat_alert(
        event['threat_score'], event['details'], event['active_modules']
    )
    sent_ok = bool(outcome.get('ok'))
    event['status'] = 'sent' if sent_ok else 'failed'
    if not sent_ok:
        event['error'] = outcome.get('error', outcome.get('description', 'Unknown'))
    event['approved_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    dispatch_settings['last_dispatch_time'] = time.time()
    with dispatch_lock:
        dispatch_log.appendleft(event)
    log_audit("DISPATCH_APPROVED", f"Operator approved alert {event_id} (score: {event['threat_score']})", "CRITICAL")
    return {'success': True, 'status': event['status']}
def reject_dispatch_event(event_id):
    """Operator rejects a pending dispatch without sending anything."""
    with dispatch_lock:
        event = pending_approvals.pop(event_id, None)
    if event is None:
        return {'error': 'Event not found or already processed'}
    event['status'] = 'rejected'
    event['rejected_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with dispatch_lock:
        dispatch_log.appendleft(event)
    log_audit("DISPATCH_REJECTED", f"Operator rejected alert {event_id}", "INFO")
    return {'success': True}
# ════════════════════════════════════════════════════════════════
# GIF HANDLER
# ════════════════════════════════════════════════════════════════
class GIFReader:
    """Handles GIF file reading and frame extraction.

    Frames are served sequentially and wrap around forever so a GIF
    behaves like a looping video source.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        self.gif = Image.open(filepath)
        # Count frames by seeking until PIL raises EOFError.
        self.frame_count = 0
        try:
            while True:
                self.gif.seek(self.frame_count)
                self.frame_count += 1
        except EOFError:
            pass
        self.current_frame_idx = 0
        # Get duration for each frame (GIF metadata stores milliseconds).
        try:
            self.gif.seek(0)
            self.frame_duration = self.gif.info.get('duration', 100) / 1000.0 # Convert to seconds
        except Exception:
            # FIX: was a bare `except:` — narrow it so SystemExit /
            # KeyboardInterrupt are not swallowed.
            self.frame_duration = 0.04 # Default ~25 FPS

    def get_next_frame(self):
        """Return (ok, BGR frame, duration); wraps to frame 0 at the end."""
        try:
            self.gif.seek(self.current_frame_idx)
            frame = self.gif.convert('RGB')
            frame_np = np.array(frame)
            # Convert RGB to BGR for OpenCV
            frame_bgr = cv2.cvtColor(frame_np, cv2.COLOR_RGB2BGR)
            self.current_frame_idx = (self.current_frame_idx + 1) % self.frame_count
            return True, frame_bgr, self.frame_duration
        except Exception as e:
            print(f"[GIF] Error reading frame: {e}")
            return False, None, 0

    def close(self):
        """Release the underlying PIL image handle."""
        if self.gif:
            self.gif.close()
# ════════════════════════════════════════════════════════════════
# THREADED VIDEO CAPTURE (OPTIMIZED FOR LARGE FILES)
# ════════════════════════════════════════════════════════════════
class VideoCamera:
    """Threaded frame grabber for a webcam, video file, or animated GIF.

    A daemon thread reads frames continuously — throttled to the source's
    native FPS for files/GIFs, which also loop forever to mimic a live
    CCTV feed — and consumers call ``get_frame`` for the latest frame.
    """

    def __init__(self, src=0, feed_id=0):
        self.feed_id = feed_id
        self.src = src
        self.source_is_file = isinstance(src, str)
        self.is_gif = False
        self.gif_reader = None
        self.stream = None
        self.lock = threading.Lock()
        self.frame_buffer = deque(maxlen=2) # Keep only last 2 frames to reduce memory
        # Check if source is a GIF
        if self.source_is_file and src.lower().endswith('.gif'):
            self.is_gif = True
            try:
                self.gif_reader = GIFReader(src)
                self.grabbed = True
                success, frame, duration = self.gif_reader.get_next_frame()
                self.frame = frame
                self.target_delay = duration
                self.stopped = False
                print(f"[V8] GIF loaded: {src} ({self.gif_reader.frame_count} frames)")
            except Exception as e:
                print(f"[WARNING] Could not open GIF: {src} - {e}")
                self.grabbed = False
                self.frame = None
                self.stopped = True
                return  # failed: never start the reader thread
        else:
            # Regular video file or camera
            self.stream = cv2.VideoCapture(src)
            if not self.stream.isOpened():
                print(f"[WARNING] Could not open video source: {src}")
                self.grabbed = False
                self.frame = None
                self.stopped = True
                return  # failed: never start the reader thread
            (self.grabbed, self.frame) = self.stream.read()
            self.stopped = False
            # For file sources: read at the file's native FPS to simulate live playback
            self.target_delay = 0
            if self.source_is_file:
                fps = self.stream.get(cv2.CAP_PROP_FPS)
                if fps and fps > 0:
                    self.target_delay = 1.0 / fps
                    print(f"[V8] Video loaded: {src} (FPS: {fps:.2f})")
                else:
                    self.target_delay = 1.0 / 25 # fallback 25fps
        self.t = threading.Thread(target=self.update, args=())
        self.t.daemon = True
        self.t.start()

    def update(self):
        """Background loop: keep self.frame fresh, looping file sources."""
        while True:
            if self.stopped:
                return
            frame_start = time.time()
            if self.is_gif:
                # Handle GIF frames
                grabbed, frame, delay = self.gif_reader.get_next_frame()
            else:
                # Handle video/camera frames
                (grabbed, frame) = self.stream.read()
                if not grabbed and self.source_is_file:
                    # End of video file — loop it seamlessly like CCTV
                    self.stream.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    (grabbed, frame) = self.stream.read()
            with self.lock:
                self.grabbed = grabbed
                if grabbed and frame is not None:
                    self.frame = frame
                    # Clear buffer and add only current frame to minimize memory
                    self.frame_buffer.clear()
                    self.frame_buffer.append(frame)
            # Throttle to native FPS
            if self.target_delay > 0:
                elapsed = time.time() - frame_start
                sleep_time = max(0, self.target_delay - elapsed)
                if sleep_time > 0:
                    time.sleep(sleep_time)

    def get_frame(self):
        """Return (grabbed, frame copy), or (False, None) when no frame exists."""
        # Handle case where thread wasn't started
        if self.stopped and self.frame is None:
            return False, None
        with self.lock:
            # BUG FIX: the original single-expression return bound the
            # ternary only to the second tuple element, so a None frame
            # produced (grabbed, (False, None)) — a nested tuple that
            # breaks `success, img = camera.get_frame()` downstream.
            if self.frame is None:
                return False, None
            return self.grabbed, self.frame.copy()

    def stop(self):
        """Stop the reader thread and release the underlying source."""
        self.stopped = True
        if hasattr(self, 't') and self.t.is_alive():
            self.t.join(timeout=2)
        if self.is_gif and self.gif_reader:
            self.gif_reader.close()
        elif self.stream:
            self.stream.release()

    def set_source(self, src):
        """Swap to a new source in place (stop, then re-initialize)."""
        self.stop()
        self.__init__(src, self.feed_id)
        # Note: __init__ starts the new thread if successful

    def is_opened(self):
        """True when the underlying GIF reader or capture is available."""
        if self.is_gif:
            return self.gif_reader is not None
        return self.stream is not None and self.stream.isOpened()

    def get(self, prop):
        """cv2.VideoCapture.get() passthrough; synthesizes FPS for GIFs."""
        if self.is_gif:
            if prop == cv2.CAP_PROP_FPS:
                return 1.0 / self.target_delay if self.target_delay > 0 else 25
            return 0
        return self.stream.get(prop) if self.stream else 0

    def set(self, prop, val):
        """cv2.VideoCapture.set() passthrough (no-op for GIFs)."""
        if not self.is_gif and self.stream:
            self.stream.set(prop, val)
def get_or_create_feed(feed_id=0, src=None):
    """Return the live VideoCamera for ``feed_id``, creating it on demand."""
    global camera_feeds
    existing = camera_feeds.get(feed_id)
    if existing is not None and existing.is_opened():
        return existing
    if src is None:
        # Resolve a source: per-feed upload first, webcam fallback for feed 0.
        if feed_id in feed_sources and os.path.exists(feed_sources[feed_id]):
            src = feed_sources[feed_id]
        elif feed_id == 0:
            src = 0  # Default webcam for feed 0 only
        else:
            return None  # No source for this feed
    cam = VideoCamera(src, feed_id)
    camera_feeds[feed_id] = cam
    log_audit("FEED_CREATED", f"Feed #{feed_id} initialized (source: {src})", "INFO")
    return cam
def restart_feed(feed_id=0):
    """Restart a specific feed and bump the version so generators re-acquire."""
    global camera_feeds, feed_versions
    old_cam = camera_feeds.pop(feed_id, None)
    if old_cam is not None:
        old_cam.stop()
    src = feed_sources.get(feed_id)
    if src and os.path.exists(src):
        camera_feeds[feed_id] = VideoCamera(src, feed_id)
    elif feed_id == 0:
        camera_feeds[feed_id] = VideoCamera(0, feed_id)
    # otherwise: no source — the feed stays empty until one is assigned
    feed_versions[feed_id] = feed_versions.get(feed_id, 0) + 1
    log_audit("FEED_RESTART", f"Feed #{feed_id} restarted (v{feed_versions[feed_id]}).", "INFO")
# ════════════════════════════════════════════════════════════════
# SCORING & DETECTION LOGIC
# ════════════════════════════════════════════════════════════════
def smooth_score(new_score):
    """Blend the raw score into an exponential moving average (module state)."""
    global previous_score
    blended = ALPHA * new_score + (1 - ALPHA) * previous_score
    previous_score = blended
    return int(blended)
def calculate_threat_score(mode, details):
    """Map a single module's detection counts onto a 0-100 threat score."""
    total = 0
    if mode == 'weapon':
        total = details.get('guns', 0) * 100 + details.get('knives', 0) * 80
    elif mode == 'facemask':
        total = details.get('with_mask', 0) * 60 + details.get('obscured_faces', 0) * 90
    elif mode == 'movement':
        total = details.get('long_stays', 0) * 40
    elif mode == 'public_safety':
        total = details.get('abandoned_objects', 0) * 70
        if details.get('crowd_panic', False):
            total += 85
    # Clamp so a single frame can never exceed the scale.
    return min(total, 100)
# ════════════════════════════════════════════════════════════════
# FRAME GENERATOR (per-feed) - OPTIMIZED
# ════════════════════════════════════════════════════════════════
def generate_frames(feed_id=0):
    """Per-feed MJPEG frame generator.

    Pulls frames from the feed's VideoCamera, runs every active detection
    module, draws overlays, computes the smoothed aggregate threat score,
    and yields multipart JPEG chunks (the score rides in an X-Score part
    header).  Runs forever; survives feed restarts by watching
    feed_versions.
    """
    global active_modules, person_history, object_history, previous_score, red_alert
    camera = get_or_create_feed(feed_id)
    my_version = feed_versions.get(feed_id, 0)
    while True:
        # Detect if the feed was restarted (e.g. user uploaded a video or switched source)
        current_version = feed_versions.get(feed_id, 0)
        if current_version != my_version:
            # Feed was swapped — re-acquire the new camera
            my_version = current_version
            camera = camera_feeds.get(feed_id)
        if camera is None:
            time.sleep(0.1)
            continue
        if not camera.is_opened():
            time.sleep(0.1)
            camera = get_or_create_feed(feed_id)
            my_version = feed_versions.get(feed_id, 0)
            continue
        success, img = camera.get_frame()
        if not success or img is None:
            # Generate a "NO SIGNAL" placeholder frame
            blank_frame = np.zeros((480, 640, 3), dtype=np.uint8)
            cv2.putText(blank_frame, "NO SIGNAL / CAMERA UNAVAILABLE", (100, 240),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
            cv2.putText(blank_frame, "Please use 'Upload Video' on Server", (120, 280),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 200), 1)
            # Encode and yield
            ret, buffer = cv2.imencode('.jpg', blank_frame)
            frame_bytes = buffer.tobytes()
            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n' b'X-Score: 0\r\n\r\n' + frame_bytes + b'\r\n')
            time.sleep(1.0) # Slow update for static error frame
            continue
        # Resize for performance
        h, w = img.shape[:2]
        new_w = 640
        new_h = int(h * (new_w / w))
        frame = cv2.resize(img, (new_w, new_h))
        frame = cv2.GaussianBlur(frame, (3, 3), 0)
        current_time = time.time()
        current_details = {}
        # ── MOVEMENT / LOITERING module ──
        if 'movement' in active_modules:
            results = model_movement(frame, stream=True, verbose=False)
            detections = np.empty((0, 5))
            for r in results:
                for box in r.boxes:
                    if int(box.cls[0]) == 0:
                        x1, y1, x2, y2 = map(int, box.xyxy[0])
                        conf = math.ceil(box.conf[0] * 100) / 100
                        detections = np.vstack((detections, [x1, y1, x2, y2, conf]))
            # Use feed-specific tracker
            tracks = feed_trackers[feed_id].update(detections)
            current_used_ids = []
            journey_active = 'suspect_journey' in active_modules
            for tr in tracks:
                x1, y1, x2, y2, local_id = tr
                is_reidentified = False
                display_id = f"L{int(local_id)}"
                # Only run Re-ID when suspect_journey module is active
                if journey_active:
                    global_id = identity_manager.match_or_register(feed_id, local_id, frame, (x1, y1, x2, y2))
                    if global_id is not None:
                        # Check if this subject exists on ANY other feed
                        for key, entry in identity_manager.local_to_global.items():
                            gid = entry['global_id'] if isinstance(entry, dict) else entry
                            if gid == global_id and key[0] != feed_id:
                                is_reidentified = True
                                break
                        display_id = global_id
                else:
                    display_id = int(local_id)
                midX = int((x1 + x2) / 2)
                headX, headY = midX, int(y1)
                # Track journey (only when journey module is active)
                if journey_active:
                    journey_log.append({
                        'global_id': display_id,
                        'feed_id': feed_id,
                        'timestamp': datetime.datetime.now().strftime("%H:%M:%S"),
                        'local_id': int(local_id)
                    })
                pid = display_id
                if pid not in person_history:
                    person_history[pid] = []
                person_history[pid].append((headX, headY, current_time))
                # Keep only the last 5 seconds of positions for the trail.
                person_history[pid] = [p for p in person_history[pid] if current_time - p[2] < 5.0]
                if len(person_history[pid]) > 1:
                    pts = np.array([(p[0], p[1]) for p in person_history[pid]], np.int32).reshape((-1, 1, 2))
                    cv2.polylines(frame, [pts], False, (0, 255, 0), 2)
                # Short-horizon velocity prediction arrow (~1s lookback).
                target_prev = current_time - 1.0
                prev_pt = None
                if len(person_history[pid]) > 2:
                    diffs = [abs(p[2] - target_prev) for p in person_history[pid]]
                    min_idx = int(np.argmin(diffs))
                    if diffs[min_idx] < 0.5:
                        prev_pt = person_history[pid][min_idx]
                if prev_pt:
                    dt = current_time - prev_pt[2]
                    if dt > 0.1:
                        vx = (headX - prev_pt[0]) / dt
                        vy = (headY - prev_pt[1]) / dt
                        predX = int(headX + vx * 4)
                        predY = int(headY + vy * 4)
                        cv2.arrowedLine(frame, (headX, headY), (predX, predY), (0, 255, 255), 2, tipLength=0.3)
                # ── Visual markers ──
                if is_reidentified:
                    # TRACKED subject: cyan reticle + thick border
                    cx, cy = int((x1 + x2) / 2), int((y1 + y2) / 2)
                    r = min(int(x2 - x1), int(y2 - y1)) // 3
                    cv2.circle(frame, (cx, cy), r, (255, 255, 0), 2) # Cyan circle
                    cv2.line(frame, (cx - r - 5, cy), (cx + r + 5, cy), (255, 255, 0), 1) # H crosshair
                    cv2.line(frame, (cx, cy - r - 5), (cx, cy + r + 5), (255, 255, 0), 1) # V crosshair
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 255, 0), 3)
                    cv2.putText(frame, f"TRACKED #{display_id}", (int(x1), int(y1) - 8), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
                else:
                    # Normal detection: magenta border
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 255), 2)
                    cv2.putText(frame, f"ID:{display_id}", (int(x1), int(y1) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
                current_used_ids.append(display_id)
            # Cleanup stale Re-ID mappings for tracks no longer in this frame
            if journey_active:
                active_local_ids = set(int(tr[4]) for tr in tracks)
                identity_manager.cleanup_feed(feed_id, active_local_ids)
            long_stays = sum(1 for hist in person_history.values() if len(hist) > 30)
            current_details['movement'] = {'total_people': len(person_history), 'current_people': len(current_used_ids), 'long_stays': long_stays}
        # ── FACE-MASK / OBSCURED-FACE module ──
        if 'facemask' in active_modules:
            results_mask = model_facemask(frame, stream=True, verbose=False)
            with_mask = 0
            without_mask = 0
            mask_boxes = []
            for r in results_mask:
                for box in r.boxes:
                    cls = int(box.cls[0])
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    mask_boxes.append((x1, y1, x2, y2))
                    if cls == 0:
                        with_mask += 1
                        color = (0, 0, 255)
                        label = "Mask (THREAT)"
                    else:
                        without_mask += 1
                        color = (0, 255, 0)
                        label = "No Mask"
                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            # People with no face detection at all count as "obscured".
            obscured_faces = 0
            people_res = model_movement(frame, stream=True, verbose=False, classes=[0])
            people_boxes = []
            for r in people_res:
                for box in r.boxes:
                    people_boxes.append(tuple(map(int, box.xyxy[0])))
            def overlap(person, face):
                # Face box must sit fully inside the upper portion of the person box.
                px1, py1, px2, py2 = person
                fx1, fy1, fx2, fy2 = face
                upper = py1 + (py2 - py1) // 1.5
                return fx1 >= px1 and fx2 <= px2 and fy1 >= py1 and fy2 <= upper
            for pbox in people_boxes:
                if not any(overlap(pbox, mbox) for mbox in mask_boxes):
                    obscured_faces += 1
                    cv2.rectangle(frame, (pbox[0], pbox[1]), (pbox[2], pbox[3]), (0, 0, 255), 3)
                    cv2.putText(frame, "Face Obscured (THREAT)", (pbox[0], pbox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            current_details['facemask'] = {'with_mask': with_mask, 'without_mask': without_mask, 'obscured_faces': obscured_faces}
        # ── WEAPON module ──
        if 'weapon' in active_modules:
            # Run the weapon model at 416px wide, then scale boxes back up.
            h_w, w_w = frame.shape[:2]
            weapon_w = 416
            weapon_h = int(h_w * (weapon_w / w_w))
            weapon_frame = cv2.resize(frame, (weapon_w, weapon_h))
            results = model_weapon(weapon_frame, stream=True, verbose=False, conf=0.5)
            guns = 0
            melee = 0
            scale_x = w_w / weapon_w
            scale_y = h_w / weapon_h
            firearm_keywords = ['gun', 'pistol', 'rifle', 'shotgun', 'sniper', 'machine', 'glock', 'ak47', 'm4', 'awp', 'famas', 'galil', 'mp5', 'p90']
            melee_keywords = ['knife', 'sword', 'dagger', 'axe', 'bat', 'stick', 'machete', 'blade']
            for r in results:
                for box in r.boxes:
                    cls = int(box.cls[0])
                    conf = float(box.conf[0])
                    x1, y1, x2, y2 = box.xyxy[0]
                    x1 = int(x1 * scale_x)
                    y1 = int(y1 * scale_y)
                    x2 = int(x2 * scale_x)
                    y2 = int(y2 * scale_y)
                    class_name = model_weapon.names[cls].lower()
                    is_firearm = any(k in class_name for k in firearm_keywords)
                    is_melee = any(k in class_name for k in melee_keywords)
                    if is_firearm:
                        guns += 1
                        color = (0, 0, 255)
                        label = f"{class_name.upper()} {conf:.2f}"
                    elif is_melee:
                        melee += 1
                        color = (0, 165, 255)
                        label = f"{class_name.upper()} {conf:.2f}"
                    else:
                        # Unknown weapon class: counted as a firearm (worst case).
                        guns += 1
                        color = (0, 0, 255)
                        label = f"{class_name.upper()} {conf:.2f}"
                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3)
                    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
                    # NOTE(review): full-frame green border drawn per weapon
                    # detection — the original indentation was ambiguous;
                    # confirm this placement is intended.
                    cv2.rectangle(frame, (0, 0), (new_w, new_h), (0, 255, 0), 5)
            current_details['weapon'] = {'guns': guns, 'knives': melee}
        # ── PUBLIC SAFETY module (crowd panic + abandoned objects) ──
        if 'public_safety' in active_modules:
            # Classes 24/26/28 are the bag-like COCO classes tracked as objects.
            results = model_movement(frame, stream=True, verbose=False, classes=[0, 24, 26, 28])
            detections = np.empty((0, 5))
            object_detections = []
            for r in results:
                for box in r.boxes:
                    cls = int(box.cls[0])
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    conf = float(box.conf[0])
                    if cls == 0:
                        detections = np.vstack((detections, [x1, y1, x2, y2, conf]))
                    else:
                        object_detections.append({'box': [x1, y1, x2, y2], 'cls': cls, 'conf': conf})
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 200, 0), 1)
            tracks = feed_trackers[feed_id].update(detections)
            velocities = []
            for tr in tracks:
                x1, y1, x2, y2, Id = tr
                midX, midY = int((x1 + x2) / 2), int((y1 + y2) / 2)
                if Id not in person_history:
                    person_history[Id] = []
                person_history[Id].append((midX, midY, current_time))
                person_history[Id] = [p for p in person_history[Id] if current_time - p[2] < 2.0]
                if len(person_history[Id]) > 2:
                    # Speed over the retained ~2s window, in pixels/second.
                    p_start = person_history[Id][0]
                    p_end = person_history[Id][-1]
                    dt = p_end[2] - p_start[2]
                    if dt > 0.5:
                        dist = math.sqrt((p_end[0] - p_start[0])**2 + (p_end[1] - p_start[1])**2)
                        speed = dist / dt
                        velocities.append(speed)
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 1)
            crowd_panic = False
            avg_speed = 0
            if len(velocities) > 3:
                avg_speed = sum(velocities) / len(velocities)
                if avg_speed > 150:
                    crowd_panic = True
                    cv2.putText(frame, f"CROWD ANOMALY: PANIC ({int(avg_speed)} px/s)", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            # Nearest-neighbour matching of static objects across frames.
            active_objects = []
            for obj in object_detections:
                box = obj['box']
                cx, cy = (box[0] + box[2]) // 2, (box[1] + box[3]) // 2
                matched_id = None
                for obj_id, history in object_history.items():
                    last_pos = history['last_pos']
                    dist = math.sqrt((cx - last_pos[0])**2 + (cy - last_pos[1])**2)
                    if dist < 50:
                        matched_id = obj_id
                        break
                if matched_id is None:
                    new_id = str(time.time())
                    object_history[new_id] = {'start_time': current_time, 'last_seen': current_time, 'last_pos': (cx, cy), 'stationary': True}
                    matched_id = new_id
                else:
                    object_history[matched_id]['last_seen'] = current_time
                    object_history[matched_id]['last_pos'] = (cx, cy)
                active_objects.append(matched_id)
            # Objects unseen for > 2s are dropped; seen for > 10s are "abandoned".
            abandoned_count = 0
            keys_to_remove = []
            for obj_id, history in object_history.items():
                if current_time - history['last_seen'] > 2.0:
                    keys_to_remove.append(obj_id)
                    continue
                duration = current_time - history['start_time']
                if duration > 10.0:
                    abandoned_count += 1
                    pos = history['last_pos']
                    cv2.circle(frame, pos, 30, (0, 0, 255), 2)
                    cv2.putText(frame, f"ABANDONED {int(duration)}s", (pos[0]-40, pos[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            for k in keys_to_remove:
                del object_history[k]
            current_details['public_safety'] = {'people_count': len(tracks), 'avg_crowd_speed': int(avg_speed), 'crowd_panic': crowd_panic, 'abandoned_objects': abandoned_count}
        # If no modules active, show idle state
        if not active_modules:
            cv2.putText(frame, "NO DETECTION ACTIVE", (new_w // 2 - 140, new_h // 2),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (100, 100, 100), 2)
            current_details = {'status': 'idle', 'active_modules': 0}
        # Store details globally
        global latest_details
        latest_details = current_details
        # Aggregated threat scoring from ALL active modules
        total_score = 0
        for module, details in current_details.items():
            if module == 'weapon':
                total_score += details.get('guns', 0) * 100
                total_score += details.get('knives', 0) * 80
            elif module == 'facemask':
                total_score += details.get('with_mask', 0) * 60
                total_score += details.get('obscured_faces', 0) * 90
            elif module == 'movement':
                total_score += details.get('long_stays', 0) * 40
            elif module == 'public_safety':
                total_score += details.get('abandoned_objects', 0) * 70
                if details.get('crowd_panic', False):
                    total_score += 85
        raw_score = min(total_score, 100)
        smooth = smooth_score(raw_score)
        # Red Alert escalation (edge-triggered: fires once on crossing 80)
        was_red = red_alert
        red_alert = smooth >= 80
        if red_alert and not was_red:
            active_str = ', '.join([m.upper() for m in active_modules]) if active_modules else 'NONE'
            log_audit("RED_ALERT_TRIGGERED", f"Threat score escalated to {smooth}. Active: {active_str}", "CRITICAL")
            # ── Dispatch Integration: trigger alert on RED_ALERT ──
            threading.Thread(
                target=create_dispatch_event,
                args=(smooth, dict(current_details), set(active_modules)),
                daemon=True
            ).start()
        elif not red_alert and was_red:
            log_audit("RED_ALERT_CLEARED", f"Threat score de-escalated to {smooth}.", "WARNING")
        # Feed label overlay
        cv2.putText(frame, f"FEED {feed_id} | LIVE", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
        ret, buffer = cv2.imencode('.jpg', frame)
        frame_bytes = buffer.tobytes()
        yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n' b'X-Score: ' + str(smooth).encode() + b'\r\n\r\n' + frame_bytes + b'\r\n')
# Global to store latest details for stats; written by generate_frames each frame.
latest_details = {}
# ════════════════════════════════════════════════════════════════
# ROUTES
# ════════════════════════════════════════════════════════════════
@app.route('/')
def index():
    """Serve the main dashboard page."""
    return render_template('sentinel_dashboard_v13.html')
@app.route('/journey_data')
def get_journey_data():
    """Return recent subject journey logs for the dashboard.

    Groups journey_log entries by global subject ID, keeps each subject's
    5 most recent sightings, and reports the latest feed/time seen.
    """
    # Bucket log entries by global subject ID, preserving arrival order.
    journeys = {}
    for record in list(journey_log):
        subject_id = record['global_id']
        journey = journeys.get(subject_id)
        if journey is None:
            journey = {
                'id': subject_id,
                'last_feed': record['feed_id'],
                'last_seen': record['timestamp'],
                'history': [],
            }
            journeys[subject_id] = journey
        journey['history'].append({'feed': record['feed_id'], 'time': record['timestamp']})
    # Trim to the 5 most recent sightings and refresh the "last seen" fields.
    for journey in journeys.values():
        recent = journey['history'][-5:]
        journey['history'] = recent
        journey['last_feed'] = recent[-1]['feed']
        journey['last_seen'] = recent[-1]['time']
    return jsonify(list(journeys.values()))
@app.route('/video_feed')
@app.route('/video_feed/<int:feed_id>')
def video_feed(feed_id=0):
    """Stream MJPEG frames for the requested camera feed (default: feed 0)."""
    mjpeg_mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(generate_frames(feed_id), mimetype=mjpeg_mimetype)
@app.route('/toggle_module', methods=['POST'])
def toggle_module():
    """Toggle a detection module on or off.

    Expects JSON {"module": <name>} where <name> is one of the valid
    modules. Returns the updated list of active modules, or 400 for an
    unknown module. Activating 'suspect_journey' also auto-enables
    'movement', since journey tracking depends on the movement detector.
    """
    global active_modules
    data = request.get_json()
    module = data.get('module')
    valid_modules = ['movement', 'facemask', 'weapon', 'public_safety', 'suspect_journey']
    if module not in valid_modules:
        return jsonify({'error': 'Invalid module'}), 400
    if module in active_modules:
        active_modules.remove(module)
        log_audit("MODULE_DEACTIVATED", f"{module.upper()} detection disabled", "INFO")
    else:
        active_modules.add(module)
        log_audit("MODULE_ACTIVATED", f"{module.upper()} detection enabled", "INFO")
        # BUG FIX: this dependency check previously ran on BOTH activation and
        # deactivation, so toggling suspect_journey OFF still (re-)enabled
        # movement and logged a spurious "auto-enabled" entry. Only auto-enable
        # movement when suspect_journey is being turned ON, and only log when
        # movement was not already active.
        if module == 'suspect_journey' and 'movement' not in active_modules:
            active_modules.add('movement')
            log_audit("MODULE_ACTIVATED", "MOVEMENT auto-enabled for journey tracking", "INFO")
    return jsonify({
        'success': True,
        'active_modules': list(active_modules)
    })
@app.route('/set_source', methods=['POST'])
def set_source():
    """Switch a feed's input source.

    Currently only supports reverting feed 0 back to the live camera;
    any other request is rejected with 400.
    """
    payload = request.get_json()
    source = payload.get('source')
    feed_id = payload.get('feed_id', 0)
    # Guard clause: the only supported transition is feed 0 -> live camera.
    if source != 'camera' or feed_id != 0:
        return jsonify({'error': 'Invalid source'}), 400
    feed_sources.pop(feed_id, None)
    restart_feed(feed_id)
    log_audit("SOURCE_CHANGE", f"Feed #{feed_id} set to live camera", "INFO")
    return jsonify({'success': True})
@app.route('/upload_video', methods=['POST'])
@app.route('/upload_video/<int:feed_id>', methods=['POST'])
def upload_video(feed_id=0):
    """Upload a video to a specific feed. Default: feed 0.

    Validates the multipart upload and feed_id, checks the extension
    against a whitelist, stores the file under a feed-specific name,
    registers it as that feed's source and restarts the feed worker.
    """
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400
    if feed_id < 0 or feed_id >= MAX_FEEDS:
        return jsonify({'error': f'Invalid feed_id. Must be 0-{MAX_FEEDS-1}'}), 400
    filename = secure_filename(file.filename)
    allowed_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm', '.gif'}
    file_ext = os.path.splitext(filename)[1].lower()
    if file_ext not in allowed_extensions:
        return jsonify({'error': f'Unsupported file format. Allowed: {", ".join(allowed_extensions)}'}), 400
    # BUG FIX: the sanitized filename was validated but never used in the
    # saved name — every upload was written to the same extension-less
    # placeholder path, so uploads collided and OpenCV could not identify
    # the container. Prefix with the feed id to avoid cross-feed collisions.
    save_name = f"feed{feed_id}_{filename}"
    filepath = os.path.abspath(os.path.join(app.config['UPLOAD_FOLDER'], save_name))
    file.save(filepath)
    # Register source for this feed and restart it so the worker picks it up.
    feed_sources[feed_id] = filepath
    restart_feed(feed_id)
    log_audit("FILE_UPLOAD", f"Feed #{feed_id}: {filename} uploaded", "INFO")
    return jsonify({
        'success': True,
        'message': f'Feed {feed_id}: {filename} loaded',
        'feed_id': feed_id,
        'feed_url': f'/video_feed/{feed_id}'
    })
@app.route('/stats')
def get_stats():
    """Snapshot of the current threat state for the dashboard poller."""
    stats = {
        'threat_score': int(previous_score),
        'details': latest_details,
        'red_alert': red_alert,
        'active_modules': list(active_modules),
    }
    return jsonify(stats)
@app.route('/audit_log')
def get_audit_log():
    """Return a thread-safe snapshot of the audit trail."""
    with audit_lock:
        snapshot = list(audit_log)
    return jsonify({'log': snapshot})
@app.route('/generate_report', methods=['POST'])
def generate_report():
    """Generate an AI incident report from the live threat state.

    Builds a prompt from the current score / modules / details and asks
    Gemini for a formatted report; falls back to a raw detail dump if the
    API call fails.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    log_audit("REPORT_GENERATED", f"Operator requested AI incident report at threat score {int(previous_score)}", "INFO")
    prompt = f"""
You are the AI Agent for Project SENTINEL, a national security surveillance system.
Generate a concise, professional security incident report based on the following real-time data.
TIMESTAMP: {timestamp}
THREAT SCORE: {int(previous_score)}/100
ACTIVE MODULES: {', '.join([m.upper() for m in active_modules])}
DETECTION DETAILS: {latest_details}
Format the report with these sections:
1. INCIDENT SUMMARY (1-2 sentences)
2. THREAT ANALYSIS (Bullet points of specific risks)
3. RECOMMENDED ACTION (Clear directive for security personnel)
Keep it brief and authoritative.
"""
    try:
        report_text = model_gemini.generate_content(prompt).text
    except Exception as exc:
        # Boundary handler: log the failure and return a degraded report
        # instead of a 500, so the dashboard keeps working offline.
        print(f"Gemini Error: {exc}")
        import traceback
        traceback.print_exc()
        report_text = f"ERROR: Could not generate AI report. System fallback.\n\nDetails: {latest_details}"
    return jsonify({'report': report_text})
@app.route('/reset_system', methods=['POST'])
def reset_system():
    """Clear all runtime state and restart every registered feed."""
    global active_modules, red_alert, previous_score, feed_sources
    global person_history, object_history, latest_details
    # Reset scoring / module state to the boot defaults.
    active_modules = {'movement'}
    red_alert = False
    previous_score = 0.0
    # Drop per-feed sources, tracking histories and the journey trail.
    for container in (feed_sources, person_history, object_history,
                      latest_details, journey_log):
        container.clear()
    # Reset the cross-camera identity manager to its initial state.
    identity_manager.global_subjects.clear()
    identity_manager.local_to_global.clear()
    identity_manager.next_global_id = 100
    # Bounce every live feed worker so it picks up the cleared state.
    for fid in list(camera_feeds.keys()):
        restart_feed(fid)
    log_audit("SYSTEM_RESET", "Operator initiated full system reset. All states cleared.", "WARNING")
    return jsonify({'success': True})
# ════════════════════════════════════════════════════════════════
# DISPATCH ROUTES
# ════════════════════════════════════════════════════════════════
@app.route('/dispatch_log')
def get_dispatch_log():
    """Return dispatch history, pending approvals, and dispatch settings."""
    with dispatch_lock:
        history = list(dispatch_log)
        awaiting = list(pending_approvals.values())
    settings_view = {
        'auto_dispatch': dispatch_settings['auto_dispatch'],
        'cooldown_seconds': dispatch_settings['cooldown_seconds'],
        'enabled': dispatch_settings['enabled'],
        'telegram_configured': telegram_dispatcher.is_configured(),
        'chat_id': telegram_dispatcher.chat_id or '',
    }
    return jsonify({'log': history, 'pending': awaiting, 'settings': settings_view})
@app.route('/dispatch_settings', methods=['GET', 'POST'])
def handle_dispatch_settings():
    """GET returns the current dispatch config; POST applies partial updates.

    POST accepts any subset of: auto_dispatch, cooldown_seconds, enabled,
    bot_token, chat_id. Unspecified keys are left unchanged.
    """
    if request.method == 'GET':
        # Only expose a truncated bot token to the UI.
        masked_token = telegram_dispatcher.bot_token[:10] + '...' if telegram_dispatcher.bot_token else ''
        return jsonify({
            'auto_dispatch': dispatch_settings['auto_dispatch'],
            'cooldown_seconds': dispatch_settings['cooldown_seconds'],
            'enabled': dispatch_settings['enabled'],
            'bot_token': masked_token,
            'chat_id': telegram_dispatcher.chat_id or '',
            'telegram_configured': telegram_dispatcher.is_configured(),
        })
    data = request.get_json()
    if 'auto_dispatch' in data:
        dispatch_settings['auto_dispatch'] = bool(data['auto_dispatch'])
        mode = 'AUTO' if dispatch_settings['auto_dispatch'] else 'MANUAL APPROVAL'
        log_audit("DISPATCH_MODE", f"Dispatch mode set to {mode}", "INFO")
    if 'cooldown_seconds' in data:
        # Clamp to a sane 10s - 10min window.
        dispatch_settings['cooldown_seconds'] = max(10, min(600, int(data['cooldown_seconds'])))
    if 'enabled' in data:
        dispatch_settings['enabled'] = bool(data['enabled'])
        log_audit("DISPATCH_TOGGLE", f"Dispatch {'enabled' if dispatch_settings['enabled'] else 'disabled'}", "INFO")
    if data.get('bot_token'):
        telegram_dispatcher.configure(data['bot_token'], data.get('chat_id', telegram_dispatcher.chat_id))
        log_audit("DISPATCH_CONFIG", "Telegram bot token updated", "INFO")
    if data.get('chat_id'):
        telegram_dispatcher.chat_id = str(data['chat_id'])
        log_audit("DISPATCH_CONFIG", f"Chat ID set to {telegram_dispatcher.chat_id}", "INFO")
    return jsonify({'success': True})
@app.route('/approve_dispatch/<event_id>', methods=['POST'])
def approve_dispatch(event_id):
    """Approve a pending dispatch event and forward its outcome to the UI."""
    return jsonify(approve_dispatch_event(event_id))
@app.route('/reject_dispatch/<event_id>', methods=['POST'])
def reject_dispatch(event_id):
    """Reject a pending dispatch event and report the result."""
    return jsonify(reject_dispatch_event(event_id))
@app.route('/test_dispatch', methods=['POST'])
def test_dispatch():
    """Send a test message to verify the Telegram connection.

    Attempts chat-ID auto-detection first if none is configured; returns
    400 when the dispatcher is still not fully configured.
    """
    if not telegram_dispatcher.chat_id:
        found = telegram_dispatcher.auto_detect_chat_id()
        if found:
            log_audit("DISPATCH_CONFIG", f"Auto-detected chat ID: {found}", "INFO")
    if not telegram_dispatcher.is_configured():
        error_body = {
            'ok': False,
            'error': 'Telegram not fully configured. Please set bot token and chat ID.'
        }
        return jsonify(error_body), 400
    return jsonify(telegram_dispatcher.test_connection())
@app.route('/dispatch_alert', methods=['POST'])
def manual_dispatch_alert():
    """Manually trigger a dispatch alert with the current threat data."""
    now = time.time()
    # Build the event record up front; status is updated after sending.
    event = {
        'id': str(uuid.uuid4())[:8],
        'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'threat_score': int(previous_score),
        'details': dict(latest_details) if latest_details else {},
        'active_modules': list(active_modules),
        'status': 'pending',
        'created_at': now,
    }
    if not telegram_dispatcher.is_configured():
        return jsonify({'ok': False, 'error': 'Telegram not configured'}), 400
    result = telegram_dispatcher.send_threat_alert(
        event['threat_score'], event['details'], event['active_modules']
    )
    event['status'] = 'sent' if result.get('ok') else 'failed'
    dispatch_settings['last_dispatch_time'] = now
    with dispatch_lock:
        dispatch_log.appendleft(event)
    log_audit("MANUAL_DISPATCH", f"Operator manually dispatched alert (score: {event['threat_score']})", "CRITICAL")
    return jsonify({'success': True, 'telegram_result': result})
# ════════════════════════════════════════════════════════════════
# ENTRY POINT
# ════════════════════════════════════════════════════════════════
if __name__ == '__main__':
    # Startup banner listing the V14 feature set, then launch the server.
    banner_lines = [
        "\n" + "═"*60,
        " PROJECT SENTINEL V14 — DISPATCH INTEGRATION",
        "═"*60,
        "\n ✨ V14 FEATURES:",
        " • Automated Telegram alert dispatch on RED_ALERT",
        " • Human-in-the-loop approval/rejection queue",
        " • Dispatch Center dashboard with alert history",
        " • Configurable auto-dispatch / manual mode",
        " • All V13 features (Journey Tracker, Multi-Camera, Re-ID)",
        "\n 👉 Open your browser: http://localhost:8080\n",
    ]
    for banner_line in banner_lines:
        print(banner_line)
    app.run(host='0.0.0.0', port=8080, debug=False, threaded=True)