Spaces:
Running
Running
Upload 24 files
Browse files- .gitattributes +14 -0
- demo_videos/Abuse.mp4 +3 -0
- demo_videos/Arrest.mp4 +3 -0
- demo_videos/Arson.mp4 +3 -0
- demo_videos/Assault.mp4 +3 -0
- demo_videos/Burglary.mp4 +3 -0
- demo_videos/Explosion.mp4 +3 -0
- demo_videos/Fighting.mp4 +3 -0
- demo_videos/Normal.mp4 +3 -0
- demo_videos/RoadAccidents.mp4 +3 -0
- demo_videos/Robbery.mp4 +3 -0
- demo_videos/Shooting.mp4 +3 -0
- demo_videos/Shoplifting.mp4 +3 -0
- demo_videos/Stealing.mp4 +3 -0
- demo_videos/Vandalism.mp4 +3 -0
- load_model.py +114 -0
- main.js +108 -0
- models/c3d.pickle +3 -0
- models/epoch_80000.pt +3 -0
- models/yolo_my_model.pt +3 -0
- outputs/anomaly_clip_img.zip +3 -0
- requirements.txt +19 -0
- style.css +112 -0
- utils.py +87 -0
- yolo_detection.py +123 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
demo_videos/Abuse.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
demo_videos/Arrest.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
demo_videos/Arson.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
demo_videos/Assault.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
demo_videos/Burglary.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
demo_videos/Explosion.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
demo_videos/Fighting.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
demo_videos/Normal.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
demo_videos/RoadAccidents.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
demo_videos/Robbery.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
demo_videos/Shooting.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
demo_videos/Shoplifting.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
demo_videos/Stealing.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
demo_videos/Vandalism.mp4 filter=lfs diff=lfs merge=lfs -text
|
demo_videos/Abuse.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:425744aa3472e424d52d7ce97bf6d0bdd445ad62ad1be110095d2027a31550cc
|
| 3 |
+
size 6250495
|
demo_videos/Arrest.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:faf0f08b1ee989545ad1de2edecdb56a24e65914194b8083f47d10481926c0e1
|
| 3 |
+
size 11929804
|
demo_videos/Arson.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:902f3138fa8b839abd08bcd3e434e84756742fdf0c60bcc0769cd7106b1ac3a2
|
| 3 |
+
size 12694369
|
demo_videos/Assault.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3b83cf948fef884ede2b86a2d3fe68de779b9c81301a5c653fbb329bfc243274
|
| 3 |
+
size 21066405
|
demo_videos/Burglary.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cd17094bfd2e5b73bcce767c434f14b715744eb3338fb80f1a213c1a337ce65d
|
| 3 |
+
size 9857751
|
demo_videos/Explosion.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b462f9241ab7521e98071b18e8956c5a921336140b4da68ddbf56a5684e87fb6
|
| 3 |
+
size 5162883
|
demo_videos/Fighting.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a135cc99b9b7d1f314375cc5e29b6a38aa1131544bf0d9ca133a95644668abf6
|
| 3 |
+
size 5519077
|
demo_videos/Normal.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e7a4881043c8e9deefe11c65ed8663a281c8366a5baa91f091d67b98eb638018
|
| 3 |
+
size 7205089
|
demo_videos/RoadAccidents.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e6ccd7bac80120cfeac9a5ef3e726da29864fb8cfd218ea0ed42d696ce553ab
|
| 3 |
+
size 14490312
|
demo_videos/Robbery.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ce7983bbb834708b8316c72cb916b9cab0105e2f283c7f8e636d38b36ddd6b48
|
| 3 |
+
size 26631485
|
demo_videos/Shooting.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b125ed267b82f514820cc568c7c820a0f04cd531500bd242003c8efd2f9bdcdf
|
| 3 |
+
size 2198741
|
demo_videos/Shoplifting.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:717d68d3671d3f7638f80cc7db2e682599fceee21f15385431c569a1480d42ab
|
| 3 |
+
size 22406639
|
demo_videos/Stealing.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:97ebf655ad4192fdfef01ec91c435f85d6e773257fe72a1458eacf5abdd2e04b
|
| 3 |
+
size 27565440
|
demo_videos/Vandalism.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:407508a2a3587caac3b3e4b165983f494692301e400ed4c4bbed504c47ba9e56
|
| 3 |
+
size 2851411
|
load_model.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module contains functions for loading models."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from os import path
|
| 5 |
+
from typing import Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from network.anomaly_detector_model import AnomalyDetector
|
| 10 |
+
from network.c3d import C3D
|
| 11 |
+
from network.MFNET import MFNET_3D
|
| 12 |
+
from network.resnet import generate_model
|
| 13 |
+
from network.TorchUtils import TorchModel
|
| 14 |
+
from utils.types import Device, FeatureExtractor
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def load_feature_extractor(
    features_method: str, feature_extractor_path: str, device: Device
) -> FeatureExtractor:
    """Load feature extractor from given path.

    Args:
        features_method (str): The feature extractor model type to use.
            Either c3d | mfnet | r3d101 | r3d152.
        feature_extractor_path (str): Path to the feature extractor model.
        device (Union[torch.device, str]): Device to use for the model.

    Raises:
        FileNotFoundError: The path to the model does not exist.
        NotImplementedError: The provided feature extractor method is not implemented.

    Returns:
        FeatureExtractor
    """
    if not path.exists(feature_extractor_path):
        # BUG FIX: the original concatenated raw strings (r"...\n"), so the
        # message contained literal backslash-n characters instead of newlines.
        raise FileNotFoundError(
            f"Couldn't find feature extractor {feature_extractor_path}.\n"
            "If you are using resnet, download it first from:\n"
            "r3d101: https://drive.google.com/file/d/1p80RJsghFIKBSLKgtRG94LE38OGY5h4y/view?usp=share_link\n"
            "r3d152: https://drive.google.com/file/d/1irIdC_v7wa-sBpTiBlsMlS7BYNdj4Gr7/view?usp=share_link"
        )
    logging.info(f"Loading feature extractor from {feature_extractor_path}")

    model: FeatureExtractor

    if features_method == "c3d":
        model = C3D(pretrained=feature_extractor_path)
    elif features_method == "mfnet":
        model = MFNET_3D()
        model.load_state(state_dict=feature_extractor_path)
    elif features_method in ("r3d101", "r3d152"):
        # The two resnet variants differ only in depth; share the loading logic.
        model = generate_model(model_depth=int(features_method[3:]))
        # map_location="cpu" lets the checkpoint load on CPU-only machines;
        # the model is moved to `device` below.
        param_dict = torch.load(feature_extractor_path, map_location="cpu")["state_dict"]
        # The classification head is not needed for feature extraction.
        param_dict.pop("fc.weight")
        param_dict.pop("fc.bias")
        model.load_state_dict(param_dict)
    else:
        raise NotImplementedError(
            f"Features extraction method {features_method} not implemented"
        )

    return model.to(device).eval()
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def load_anomaly_detector(ad_model_path: str, device: Device) -> AnomalyDetector:
    """Load anomaly detection model from given path.

    Args:
        ad_model_path (str): Path to the anomaly detection model.
        device (Device): Device to use for the model.

    Raises:
        FileNotFoundError: The path to the model does not exist.

    Returns:
        AnomalyDetector
    """
    # Fail fast with a clear message when the weights file is missing.
    if not path.exists(ad_model_path):
        raise FileNotFoundError(f"Couldn't find anomaly detector {ad_model_path}.")
    logging.info(f"Loading anomaly detector from {ad_model_path}")

    # Load, move to the target device, and switch to inference mode in one chain.
    return TorchModel.load_model(ad_model_path).to(device).eval()
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def load_models(
    feature_extractor_path: str,
    ad_model_path: str,
    features_method: str = "c3d",
    device: Device = "cuda",
) -> Tuple[AnomalyDetector, FeatureExtractor]:
    """Loads both feature extractor and anomaly detector from the given paths.

    Args:
        feature_extractor_path (str): Path of the features extractor weights to load.
        ad_model_path (str): Path of the anomaly detector weights to load.
        features_method (str, optional): Name of the model to use for features extraction.
            Defaults to "c3d".
        device (str, optional): Device to use for the models. Defaults to "cuda".

    Returns:
        Tuple[nn.Module, nn.Module]
    """
    extractor = load_feature_extractor(features_method, feature_extractor_path, device)
    detector = load_anomaly_detector(ad_model_path, device)
    # NOTE: returned in (detector, extractor) order, the reverse of the argument order.
    return detector, extractor
|
main.js
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
document.addEventListener('DOMContentLoaded', () => {
    const socket = io();

    // --- DOM references, looked up once ---
    const player = document.getElementById('videoPlayer');
    const yoloText = document.getElementById('yoloTextLabel');
    const yoloImage = document.getElementById('yoloImageFrame');
    const status = document.getElementById('statusLabel');
    const resetBtn = document.getElementById('resetButton');
    const uploadInput = document.getElementById('videoUpload');
    const uploadBtn = document.getElementById('uploadButton');
    const selector = document.getElementById('anomalySelector');

    let scoreChart = null;

    // (Re)create the anomaly-score line chart, destroying any previous instance.
    function initializeChart() {
        const ctx = document.getElementById('anomalyChart').getContext('2d');
        if (scoreChart) {
            scoreChart.destroy();
        }
        scoreChart = new Chart(ctx, {
            type: 'line',
            data: {
                labels: [],
                datasets: [{
                    label: 'Anomaly Score',
                    data: [],
                    borderColor: 'rgba(255, 99, 132, 1)',
                    backgroundColor: 'rgba(255, 99, 132, 0.2)',
                    borderWidth: 2,
                    tension: 0.4,
                    pointRadius: 0
                }]
            },
            options: {
                scales: {
                    y: { beginAtZero: true, max: 1.0, ticks: { color: '#e0e0e0' } },
                    x: { ticks: { color: '#e0e0e0' } }
                },
                plugins: { legend: { labels: { color: '#e0e0e0' } } }
            }
        });
    }

    // Return every widget to its initial, idle state.
    function resetUI() {
        player.pause();
        player.removeAttribute('src');
        player.load();
        initializeChart();
        yoloText.textContent = 'Waiting for anomaly...';
        yoloImage.src = '';
        status.textContent = 'System reset. Select a video to begin.';
        uploadInput.value = '';
        selector.selectedIndex = 0; // Reset dropdown to the default option
    }

    // --- WebSocket event wiring (dispatch table instead of repeated socket.on calls) ---
    const socketHandlers = {
        connect: () => {
            status.textContent = 'Connected. Please select a video to start processing.';
        },
        update_graph: (data) => {
            if (!scoreChart) return;
            scoreChart.data.labels.push(scoreChart.data.labels.length + 1);
            scoreChart.data.datasets[0].data.push(data.score);
            // Keep at most 100 points on screen.
            if (scoreChart.data.labels.length > 100) {
                scoreChart.data.labels.shift();
                scoreChart.data.datasets[0].data.shift();
            }
            scoreChart.update();
        },
        update_yolo_text: (data) => { yoloText.textContent = data.text; },
        update_yolo_image: (data) => { yoloImage.src = `data:image/jpeg;base64,${data.image_data}`; },
        update_status: (data) => { status.textContent = data.status; },
        processing_error: (data) => { status.textContent = `Error: ${data.error}`; },
        processing_finished: (data) => { status.textContent = data.message; },
        system_reset_confirm: () => { resetUI(); },
    };
    Object.entries(socketHandlers).forEach(([event, handler]) => socket.on(event, handler));

    // --- User interaction ---

    // Demo-video dropdown: start playback and ask the server to process it.
    selector.addEventListener('change', (event) => {
        const anomalyName = event.target.value;
        if (!anomalyName) return; // Do nothing if the default option is selected

        resetUI();
        status.textContent = `Requesting to process ${anomalyName}...`;

        player.src = `/video_stream/demo/${anomalyName}`;
        player.play();

        socket.emit('start_processing', { 'source': 'demo', 'filename': anomalyName });
    });

    resetBtn.addEventListener('click', () => { socket.emit('reset_system'); });

    // Upload flow: POST the file, then stream it back and start processing.
    uploadBtn.addEventListener('click', () => {
        const file = uploadInput.files[0];
        if (!file) {
            alert('Please select a video file first!');
            return;
        }

        resetUI();
        status.textContent = 'Uploading video...';

        const formData = new FormData();
        formData.append('video', file);

        fetch('/upload', { method: 'POST', body: formData })
            .then((response) => response.json())
            .then((data) => {
                if (data.success) {
                    status.textContent = `Upload successful. Starting analysis...`;
                    player.src = `/video_stream/upload/${data.filename}`;
                    player.play();
                    socket.emit('start_processing', { 'source': 'upload', 'filename': data.filename });
                } else {
                    status.textContent = `Error: ${data.error}`;
                    alert(`Upload failed: ${data.error}`);
                }
            })
            .catch((error) => {
                status.textContent = 'An error occurred during upload.';
                console.error('Upload error:', error);
            });
    });

    initializeChart();
});
|
models/c3d.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e082d1890be04df0600aebae68f8687f5f41ba7590d2556edaa9ca49513cadff
|
| 3 |
+
size 319966434
|
models/epoch_80000.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9cbffe0b8831ed2c5ac82be4b40f10699b1a27fba84226a40161c6a381832510
|
| 3 |
+
size 8460133
|
models/yolo_my_model.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ef4636cec13eb6e8f4f08aa10430acd25dabe394c0aadf97ad13e8f2c34074b6
|
| 3 |
+
size 19187290
|
outputs/anomaly_clip_img.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9ceacf2c8f175901ff562bf2bc46306010788298f5e4dab0a73bbccb14e30b26
|
| 3 |
+
size 4490667
|
requirements.txt
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
typing
|
| 2 |
+
matplotlib==3.7.2
|
| 3 |
+
torch
|
| 4 |
+
torchvision==0.15.2
|
| 5 |
+
pandas==2.0.3
|
| 6 |
+
tqdm==4.65.0
|
| 7 |
+
pyyaml==6.0.1
|
| 8 |
+
scipy==1.10.1
|
| 9 |
+
scikit-learn==1.3.0
|
| 10 |
+
av==11.0.0
|
| 11 |
+
opencv-python
|
| 12 |
+
pre-commit==3.4.0
|
| 13 |
+
PyQt5
|
| 14 |
+
tensorboard==2.10.0
|
| 15 |
+
ruff==0.11.2
|
| 16 |
+
flask
|
| 17 |
+
flask-socketio
|
| 18 |
+
eventlet
|
| 19 |
+
numpy
|
style.css
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Dark-theme dashboard layout: full-width flex container with a wide
   main-content column (video + chart + YOLO panels) and a narrow sidebar
   of controls. */
body {
    font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
    background-color: #121212;
    color: #e0e0e0;
    margin: 0;
    padding: 20px;
    display: flex;
    justify-content: center;
}

.container {
    display: flex;
    width: 100%;
    max-width: 1600px;
    gap: 20px;
}

/* CHANGED: Main content takes more space, sidebar takes less */
.main-content {
    flex: 4; /* Increased from 3 */
    display: flex;
    flex-direction: column;
}

.sidebar {
    flex: 1; /* Stays at 1, making it proportionally smaller */
    background-color: #1e1e1e;
    padding: 20px;
    border-radius: 8px;
    height: fit-content;
}

.header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 10px;
}

h1 { border-bottom: none; padding-bottom: 0; }
h2, h3 { color: #ffffff; border-bottom: 2px solid #333; padding-bottom: 10px; }

/* CHANGED: Grid ratio adjusted to make the graph wider */
.dashboard-grid {
    display: grid;
    grid-template-columns: 1.8fr 1.5fr; /* Video area vs Graph area */
    gap: 20px;
    align-items: flex-start;
}

.video-area {
    display: flex;
    flex-direction: column;
}

.video-wrapper {
    width: 100%;
    margin-bottom: 10px; /* Space between video and status label */
}

#videoPlayer {
    background-color: #000;
    border-radius: 8px;
}

/* CHANGED: Status label is now positioned under the video */
#statusLabel {
    margin-top: 0; /* Resets previous margin */
    font-style: italic;
    color: #f44336;
    text-align: center; /* Center the text under the video */
    min-height: 24px; /* Prevents layout shifts */
}

/* The chart and yolo containers are now styled independently */
.chart-container {
    background-color: #1e1e1e;
    padding: 20px;
    border-radius: 8px;
}

.yolo-container {
    background-color: #1e1e1e;
    padding: 20px;
    border-radius: 8px;
}

/* YOLO panel: detection class text and the base64-encoded detection frame. */
#yoloTextLabel { font-size: 1.2em; font-weight: bold; color: #4CAF50; min-height: 25px; }
#yoloImageFrame { width: 100%; height: auto; border-radius: 4px; background-color: #333; min-height: 150px; margin-top: 10px; }

/* Styles for controls in the sidebar */
.custom-select {
    width: 100%;
    padding: 12px 15px;
    background-color: #3a3a3a;
    color: #e0e0e0;
    border: 1px solid #bb86fc;
    border-radius: 4px;
    font-size: 1em;
    cursor: pointer;
}
.custom-select:hover { background-color: #4a4a4a; }

.separator { border: none; border-top: 1px solid #333; margin: 20px 0; }
.upload-section { display: flex; flex-direction: column; gap: 10px; }
#videoUpload { color: #e0e0e0; }
#videoUpload::file-selector-button { font-weight: bold; color: #bb86fc; background-color: #3a3a3a; padding: 8px 12px; border: 1px solid #bb86fc; border-radius: 4px; cursor: pointer; transition: background-color 0.2s; }
#videoUpload::file-selector-button:hover { background-color: #4a4a4a; }
/* Action buttons: teal "upload" and red "reset". */
#uploadButton { padding: 10px 20px; font-size: 16px; font-weight: bold; color: white; background-color: #03dac6; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; }
#uploadButton:hover { background-color: #018786; }
#resetButton { padding: 10px 20px; font-size: 16px; font-weight: bold; color: white; background-color: #f44336; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; }
#resetButton:hover { background-color: #d32f2f; }
|
utils.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module contains utilities for anomaly detection."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import sys
|
| 5 |
+
from typing import List, Optional, Union
|
| 6 |
+
|
| 7 |
+
from torchvision.transforms import transforms
|
| 8 |
+
|
| 9 |
+
from . import transforms_video
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def register_logger(log_file: Optional[str] = None, stdout: bool = True) -> None:
    """Register a logger.

    Args:
        log_file (str, optional): Path to the file where log should be written.
            If `None`, log wouldn't be written to any file. Defaults to None.
        stdout (bool, optional): If `True`, the log would be printed to stdout. Defaults to True.
    """
    root = logging.getLogger()
    # Drop any handlers left over from a previous configuration so that
    # basicConfig below starts from a clean slate.
    for handler in list(root.handlers):
        root.removeHandler(handler)

    handlers: List[Union[logging.FileHandler, logging.StreamHandler]] = []
    if stdout:
        handlers.append(logging.StreamHandler(stream=sys.stdout))
    if log_file is not None:
        handlers.append(logging.FileHandler(log_file))

    logging.basicConfig(
        format="%(asctime)s %(message)s",
        handlers=handlers,
        level=logging.INFO,
    )
    logging.root.setLevel(logging.INFO)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def build_transforms(mode: str = "c3d") -> transforms.Compose:
    """Build transforms to use for training an anomaly detection model.

    Args:
        mode (str, optional): Mode for which transforms should be constructed.
            Either c3d | i3d | mfnet | 3dResNet. Defaults to "c3d".

    Raises:
        NotImplementedError: The provided mode is not implemented.

    Returns:
        transforms.Compose
    """
    # Per-backbone normalization statistics (mean, std).
    normalization = {
        "c3d": ([124 / 255, 117 / 255, 104 / 255], [1 / (0.0167 * 255)] * 3),
        "i3d": ([0, 0, 0], [1, 1, 1]),
        "mfnet": ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        "3dResNet": ([0.4345, 0.4051, 0.3775], [0.2768, 0.2713, 0.2737]),
    }
    if mode not in normalization:
        raise NotImplementedError(f"Mode {mode} not implemented")
    mean, std = normalization[mode]

    if mode == "c3d":
        # c3d additionally resizes to 128x171 and center-crops to 112x112.
        steps = [
            transforms_video.ToTensorVideo(),
            transforms_video.ResizeVideo((128, 171)),
            transforms_video.CenterCropVideo(112),
            transforms_video.NormalizeVideo(mean=mean, std=std),
        ]
    else:
        steps = [
            transforms_video.ToTensorVideo(),
            transforms_video.NormalizeVideo(mean=mean, std=std),
        ]
    return transforms.Compose(steps)
|
yolo_detection.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import cv2
|
| 5 |
+
from ultralytics import YOLO
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
# --- Configuration ---
|
| 9 |
+
# Set the path for the YOLO model weights
|
| 10 |
+
# Set the path for the YOLO model weights.
# NOTE(review): hard-coded absolute Windows path — make this configurable
# (env var / CLI argument) before deploying anywhere else.
YOLO_MODEL_PATH = r"S:\ano_dec_pro\AnomalyDetectionCVPR2018-Pytorch\yolo_my_model.pt"


# --- Main Detection Function ---
def analyze_video_with_yolo(video_path: str, model_path: str = YOLO_MODEL_PATH, conf_threshold: float = 0.5, return_class=False):
    """
    Analyzes a video using a pre-trained YOLO model for object detection
    and prints the predicted anomaly class for frames with detections.

    :param video_path: Path to the input video file.
    :param model_path: Path to the trained YOLO model weights.
    :param conf_threshold: Minimum confidence score required for a detection.
    :param return_class: If True, return the name of the last detected class
        ("None" when nothing was detected); otherwise return None.
    """
    if not os.path.exists(video_path):
        print(f"[ERROR] Video file not found at: {video_path}")
        return

    if not os.path.exists(model_path):
        print(f"[ERROR] YOLO model not found at: {model_path}")
        return

    try:
        # Load the YOLO model (Assumes it's trained for your 14 anomaly classes)
        model = YOLO(model_path, task='detect')
        labels = model.names
        print(f"[INFO] YOLO Model loaded successfully with {len(labels)} classes.")
    except Exception as e:
        print(f"[FATAL] Failed to load YOLO model: {e}")
        return

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"[ERROR] Could not open video stream for: {video_path}")
        return

    frame_num = 0
    detections_found = 0

    print("-" * 50)
    print(f"Starting analysis of: {os.path.basename(video_path)}")
    print("-" * 50)

    detected_class = "None"
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Analyze every 10th frame to reduce redundant inference.
        if frame_num % 10 == 0:
            results = model(frame, conf=conf_threshold, verbose=False)
            if results and len(results[0].boxes) > 0:
                detections = results[0].boxes
                # Keep only the highest-confidence detection in the frame.
                best_detection = detections[detections.conf.argmax()]
                class_idx = int(best_detection.cls.item())
                detected_class = labels.get(class_idx, "Unknown Class")
                # BUG FIX: this counter was never incremented, so the summary
                # below always reported 0 detections.
                detections_found += 1
        frame_num += 1

    cap.release()
    cv2.destroyAllWindows()

    # BUG FIX: print the summary before the early return so it is also shown
    # when return_class=True (previously the function returned first, skipping
    # the summary entirely in that mode).
    print("-" * 50)
    print(f"Analysis complete. Total detections reported: {detections_found}")
    print("-" * 50)

    if return_class:
        return detected_class
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
# if __name__ == '__main__':
|
| 113 |
+
# # --- Example Usage (Run this command in your environment) ---
|
| 114 |
+
# # Change the video path to a file you want to test!
|
| 115 |
+
# example_video = r"S:\ano_dec_pro\dataset\Abuse\Abuse002_x264.mp4"
|
| 116 |
+
|
| 117 |
+
# # NOTE: In a shell/CLI environment (like VS Code or PowerShell), you typically
|
| 118 |
+
# # run this by executing the script and passing arguments:
|
| 119 |
+
# # python yolo_detection.py --video_path "S:\ano_dec_pro\dataset\Abuse\Abuse002_x264.mp4"
|
| 120 |
+
|
| 121 |
+
# # We call the function directly for simple execution in Python interpreter
|
| 122 |
+
# print("NOTE: Running the detection script. Please change 'example_video' to your actual file path.")
|
| 123 |
+
# analyze_video_with_yolo(example_video)
|