Haiss123 committed
Commit e3f2cc8 · verified · 1 Parent(s): 54d791b

Delete main.py

Files changed (1)
  1. main.py +0 -1734
main.py DELETED
@@ -1,1734 +0,0 @@
import cv2
import numpy as np
import torch
import os
import json
import warnings

warnings.filterwarnings('ignore')

# Import required libraries
try:
    from ultralytics import YOLO
    from transformers import pipeline
    from PIL import Image, ImageDraw, ImageFont
    import requests
    from datetime import datetime

    # MediaPipe import with fallback
    try:
        import mediapipe as mp

        MEDIAPIPE_AVAILABLE = True
        print("✅ MediaPipe imported successfully")
    except ImportError:
        MEDIAPIPE_AVAILABLE = False
        print("⚠️ MediaPipe not available - pose detection disabled")
    except Exception as e:
        MEDIAPIPE_AVAILABLE = False
        print(f"⚠️ MediaPipe import error: {e} - pose detection disabled")

except ImportError as e:
    print(f"Missing dependency: {e}")
    print("Please install: pip install ultralytics transformers pillow requests")
    print("For MediaPipe: pip install mediapipe==0.10.18")

class ContentModerator:
    def __init__(self, config=None):
        self.config = config or self.get_default_config()
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # CPU optimizations
        if self.device == 'cpu':
            print("💻 CPU mode detected - applying optimizations...")
            torch.set_num_threads(4)
            self.config['performance']['half_precision'] = False
            self.config['nsfw_detection']['pose_analysis'] = False

        # Initialize models
        self.weapon_model = None  # Primary weapon model
        self.weapon_model_custom = None  # Custom model for knife (dao) + gun (súng) + fight classes
        self.weapon_model_general = None  # General model for person detection + backup weapons
        self.nsfw_classifier = None
        self.pose_detector = None

        # Performance optimization
        self.detection_cache = {}
        self.cache_ttl = 2  # Cache results for 2 seconds

        # Results storage
        self.detection_history = []

        print(f"🚀 Content Moderator initialized on {self.device}")
        if self.device == 'cpu':
            print("⚡ CPU optimizations enabled")

        self.setup_models()

    def get_default_config(self):
        """Default configuration optimized for CPU/GPU with enhanced knife and fight detection"""
        # Auto-detect optimal settings
        is_cuda = torch.cuda.is_available()

        return {
            'weapon_detection': {
                'enabled': True,
                'confidence_threshold': 0.5,  # For guns
                'knife_confidence': 0.5,  # Separate threshold for knives
                'fight_confidence': 0.45,  # Lower threshold for fights (behavioral)
                'model_size': 'yolo12n',  # Informational; setup_weapon_detector() loads its own fixed weights
                'classes': ['gun', 'knife', 'fight'],
                'use_enhancement': True,  # Enable image enhancement for knives
                'multi_pass': True,  # Enable multi-pass detection
                'boost_knife_detection': True,  # Enable knife confidence boosting
                'fight_detection': True,  # Enable fight-specific detection
                'fight_analysis': True  # Enable advanced fight behavior analysis
            },
            'fight_detection': {
                'enabled': True,
                'confidence_threshold': 0.45,
                'pose_analysis': True,  # Analyze poses for fighting
                'motion_analysis': False,  # Motion-based fight detection (for video)
                'aggression_keywords': ['fight'],
                'threat_escalation': True,  # Escalate threat level for fights
                'multi_person_analysis': True  # Analyze interactions between people
            },
            'nsfw_detection': {
                'enabled': True,
                'confidence_threshold': 0.7,
                'skin_detection': True,
                'pose_analysis': False,
                'region_analysis': True
            },
            'performance': {
                'image_size': 416 if is_cuda else 320,
                'batch_size': 1,
                'half_precision': is_cuda,
                'use_flash_attention': False,
                'cpu_optimization': not is_cuda
            },
            'output': {
                'save_detections': True,
                'draw_boxes': True,
                'log_results': True
            }
        }

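    # Illustrative sketch (not part of the original file): __init__ takes the config
    # wholesale ("config or self.get_default_config()") with no deep merge, so a caller
    # who wants to change one knob should start from a full default dict, e.g.:
    #
    #   import copy
    #   cfg = copy.deepcopy(ContentModerator.get_default_config(None))  # self is unused here
    #   cfg['weapon_detection']['knife_confidence'] = 0.4  # hypothetical: more sensitive to knives
    #   moderator = ContentModerator(config=cfg)
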
    def setup_models(self):
        """Initialize all detection models"""
        try:
            # Clear GPU cache
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            # 1. Set up weapon detection (now includes fight detection)
            if self.config['weapon_detection']['enabled']:
                self.setup_weapon_detector()

            # 2. Set up NSFW detection
            if self.config['nsfw_detection']['enabled']:
                self.setup_nsfw_detector()

            print("✅ All models loaded successfully!")

        except Exception as e:
            print(f"❌ Error setting up models: {e}")

    def setup_weapon_detector(self):
        """Set up the dual model system: custom model for weapons + fights, general model for person detection"""
        try:
            print("🔫 Loading weapon and fight detection models...")

            # Model 1: custom YOLO11 for weapons (knife 'dao' + gun 'súng' + fight)
            custom_model_path = "models/best_ft4.pt"
            project_root = os.path.dirname(os.path.abspath(__file__))
            full_model_path = os.path.join(project_root, custom_model_path)

            if os.path.exists(full_model_path):
                print(f"✅ Loading custom weapon+fight model: {full_model_path}")
                self.weapon_model_custom = YOLO(full_model_path)
                print("🎯 Custom weapon+fight model (dao + súng + fight) loaded!")

                # Show custom model classes
                if hasattr(self.weapon_model_custom, 'names'):
                    classes = list(self.weapon_model_custom.names.values())
                    print(f"📊 Custom classes: {classes}")

                    # Check whether a fight class is available
                    if any('fight' in str(cls).lower() for cls in classes):
                        print("👊 Fight detection enabled in custom model")
                    else:
                        print("⚠️ Fight class not found in custom model")
            else:
                print("⚠️ Custom weapon+fight model not found")
                self.weapon_model_custom = None

            # Model 2: general YOLO11n for person detection and fight fallback
            print("👤 Loading general model for person detection...")
            self.weapon_model_general = YOLO('yolo11n.pt')
            print("✅ General YOLO11n loaded for person detection")

            # Set the primary weapon model
            self.weapon_model = self.weapon_model_custom if self.weapon_model_custom else self.weapon_model_general

            # Optimize models for performance
            if self.device == 'cuda' and self.config['performance']['half_precision']:
                try:
                    if self.weapon_model_custom:
                        self.weapon_model_custom.model.half()
                    self.weapon_model_general.model.half()
                    print("✅ Half precision enabled for both models")
                except Exception:
                    print("⚠️ Half precision not supported")

            print("🔥 Dual model system ready with fight detection!")

        except Exception as e:
            print(f"❌ Error loading weapon+fight models: {e}")
            self.weapon_model = None
            self.weapon_model_custom = None
            self.weapon_model_general = None

    def detect_weapons(self, image):
        """Enhanced dual model weapon and fight detection"""
        detections = []

        try:
            imgsz = self.config['performance']['image_size']
            use_half = self.config['performance']['half_precision'] and self.device == 'cuda'

            # Ensure a fight_detection config always exists (user-supplied configs may omit it);
            # this must run unconditionally, not only when enhancement is enabled, because the
            # fight-analysis step below reads these keys
            if 'fight_detection' not in self.config:
                self.config['fight_detection'] = {
                    'enabled': True,
                    'confidence_threshold': 0.40,
                    'pose_analysis': False,
                    'motion_analysis': False,
                    'aggression_keywords': ['fight'],
                    'threat_escalation': True,
                    'multi_person_analysis': False
                }

            # Prepare multiple versions of the image
            images_to_process = [(image, 1.0, "original")]

            if self.config['weapon_detection']['use_enhancement']:
                enhanced_image = self.enhance_knife_detection(image)
                images_to_process.append((enhanced_image, 1.15, "enhanced"))

            # Process each image version
            for img, weight_multiplier, img_type in images_to_process:
                if self.weapon_model_custom:
                    # Use different confidence thresholds for different detection types
                    knife_conf = self.config['weapon_detection']['knife_confidence']
                    gun_conf = self.config['weapon_detection']['confidence_threshold']
                    fight_conf = self.config['weapon_detection']['fight_confidence']

                    # Multi-pass detection with different thresholds
                    passes = [
                        (knife_conf, "knife_pass"),  # Knife-specific threshold
                        (gun_conf, "gun_pass"),      # Normal threshold for guns
                        (fight_conf, "fight_pass")   # Lower threshold for fights
                    ] if self.config['weapon_detection']['multi_pass'] else [
                        (min(knife_conf, fight_conf), "single_pass")]

                    for conf_threshold, pass_type in passes:
                        try:
                            results = self.weapon_model_custom(
                                img,
                                imgsz=imgsz,
                                conf=conf_threshold,
                                device=self.device,
                                half=use_half,
                                verbose=False,
                                augment=True  # Enable test-time augmentation
                            )

                            for result in results:
                                boxes = result.boxes
                                if boxes is not None:
                                    for box in boxes:
                                        class_id = int(box.cls[0])

                                        if hasattr(result, 'names') and class_id in result.names:
                                            class_name = result.names[class_id].lower()
                                        else:
                                            class_name = f"detection_{class_id}"

                                        x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                                        confidence = float(box.conf[0]) * weight_multiplier

                                        # Determine detection type and apply appropriate processing
                                        if self.is_fight_detection(class_name):
                                            # Fight detection processing
                                            confidence = self.boost_fight_confidence(
                                                img, [x1, y1, x2, y2], confidence, class_name
                                            )

                                            detection_type = 'fight'
                                            min_conf = fight_conf
                                            threat_level = self.assess_fight_threat(confidence, img, [x1, y1, x2, y2])

                                        else:
                                            # Weapon detection processing
                                            if self.config['weapon_detection']['boost_knife_detection']:
                                                if 'dao' in class_name or 'knife' in class_name or 'blade' in class_name:
                                                    confidence = self.boost_knife_confidence(
                                                        img, [x1, y1, x2, y2], confidence, class_name
                                                    )

                                            detection_type = 'weapon'
                                            weapon_type = self.classify_weapon_type(class_name)
                                            min_conf = knife_conf if weapon_type == 'blade' else gun_conf
                                            threat_level = self.assess_weapon_threat(weapon_type, confidence)

                                        if confidence >= min_conf:
                                            detection_data = {
                                                'type': detection_type,
                                                'class': class_name,
                                                'confidence': min(confidence, 0.99),
                                                'bbox': [int(x1), int(y1), int(x2), int(y2)],
                                                'threat_level': threat_level,
                                                'detection_method': f'custom_model_{img_type}_{pass_type}'
                                            }

                                            # Add type-specific fields
                                            if detection_type == 'weapon':
                                                detection_data['weapon_type'] = weapon_type
                                            elif detection_type == 'fight':
                                                detection_data['fight_type'] = self.classify_fight_type(class_name)
                                                detection_data['aggression_level'] = self.assess_aggression_level(
                                                    confidence)

                                            detections.append(detection_data)

                                            icon = "👊" if detection_type == 'fight' else "🎯"
                                            print(
                                                f"  {icon} Detected: {class_name} (conf: {confidence:.3f}, method: {img_type}_{pass_type})")

                        except Exception as e:
                            print(f"⚠️ Detection pass error ({pass_type}): {e}")

            # Fallback: general model for backup detection (only if no custom detections)
            if self.weapon_model_general and len(detections) == 0:
                detections.extend(self.fallback_detection(image, imgsz, use_half))

            # Remove duplicate detections
            detections = self.remove_duplicate_detections(detections)

            # Additional fight analysis if enabled
            if self.config['fight_detection']['enabled'] and self.config['fight_detection']['multi_person_analysis']:
                fight_detections = [d for d in detections if d['type'] == 'fight']
                if fight_detections:
                    enhanced_fights = self.analyze_fight_context(image, fight_detections)
                    # Replace original fight detections with enhanced ones
                    detections = [d for d in detections if d['type'] != 'fight'] + enhanced_fights

            return detections

        except Exception as e:
            print(f"❌ Weapon and fight detection error: {e}")
            return []

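    # Pipeline summary (descriptive comment, not in the original file): detect_weapons()
    # runs the custom model over up to two image versions (original, knife-enhanced) and
    # up to three confidence passes (knife/gun/fight), applies heuristic confidence
    # boosts for knives and fights, falls back to the general model only when nothing
    # was found, de-duplicates via NMS, and finally enriches fight detections with
    # multi-person context.
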
    def is_fight_detection(self, class_name):
        """Check whether a detection is fight-related"""
        fight_keywords = ['fight', 'fighting', 'combat', 'violence', 'aggression', 'brawl', 'scuffle']
        return any(keyword in class_name.lower() for keyword in fight_keywords)

    def classify_fight_type(self, class_name):
        """Classify the type of fight detected"""
        class_name = class_name.lower()

        if any(word in class_name for word in ['punch', 'boxing', 'fist']):
            return 'physical_combat'
        elif any(word in class_name for word in ['kick', 'martial', 'karate']):
            return 'martial_arts'
        elif any(word in class_name for word in ['wrestle', 'grapple']):
            return 'wrestling'
        elif any(word in class_name for word in ['group', 'mob', 'crowd']):
            return 'group_violence'
        else:
            return 'general_fight'

    def boost_fight_confidence(self, image, bbox, initial_confidence, class_name):
        """Boost confidence for fight detections based on contextual analysis"""
        try:
            x1, y1, x2, y2 = [int(coord) for coord in bbox]

            # Ensure the bbox is within image bounds
            x1 = max(0, x1)
            y1 = max(0, y1)
            x2 = min(image.shape[1], x2)
            y2 = min(image.shape[0], y2)

            roi = image[y1:y2, x1:x2]

            if roi.size == 0:
                return initial_confidence

            boost = 0

            # 1. Motion blur analysis (indicates rapid movement)
            gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
            blur_variance = cv2.Laplacian(gray, cv2.CV_64F).var()
            if blur_variance < 100:  # Low variance indicates blur/motion
                boost += 0.10

            # 2. Edge density (chaotic scenes have more edges)
            edges = cv2.Canny(gray, 50, 150)
            edge_density = np.count_nonzero(edges) / edges.size
            if edge_density > 0.15:
                boost += 0.08

            # 3. Color analysis (fights often have varied, chaotic colors)
            hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            color_variance = np.var(hsv[:, :, 1])  # Saturation variance
            if color_variance > 1000:
                boost += 0.05

            # 4. Texture analysis (complex textures indicate multiple overlapping objects)
            gray_f = np.float32(gray)
            texture_response = cv2.cornerHarris(gray_f, 2, 3, 0.04)
            texture_strength = np.mean(texture_response)
            if texture_strength > 0.01:
                boost += 0.07

            # 5. Aspect ratio analysis (fights often have irregular bounding boxes)
            height = y2 - y1
            width = x2 - x1
            if height > 0 and width > 0:
                aspect_ratio = max(width, height) / min(width, height)
                if 1.2 < aspect_ratio < 3.0:  # Moderate irregularity
                    boost += 0.05

            final_confidence = min(initial_confidence + boost, 0.95)

            if boost > 0:
                print(f"  👊 Fight boost applied: +{boost:.2f} (blur:{blur_variance:.0f}, edge:{edge_density:.2f})")

            return final_confidence

        except Exception as e:
            print(f"⚠️ Fight confidence boost error: {e}")
            return initial_confidence

    def assess_fight_threat(self, confidence, image, bbox):
        """Assess the threat level of a detected fight"""
        # Base threat scales with confidence
        if confidence >= 0.85:
            base_threat = 'critical'
        elif confidence >= 0.70:
            base_threat = 'high'
        elif confidence >= 0.50:
            base_threat = 'medium'
        else:
            base_threat = 'low'

        # Additional context-based escalation
        try:
            x1, y1, x2, y2 = bbox
            fight_area = (x2 - x1) * (y2 - y1)
            image_area = image.shape[0] * image.shape[1]
            area_ratio = fight_area / image_area

            # Large fights are more dangerous
            if area_ratio > 0.5:  # Fight covers >50% of the image
                if base_threat == 'medium':
                    base_threat = 'high'
                elif base_threat == 'high':
                    base_threat = 'critical'

        except Exception as e:
            print(f"⚠️ Fight threat assessment error: {e}")

        return base_threat

    def assess_aggression_level(self, confidence):
        """Assess aggression level based on confidence"""
        if confidence >= 0.80:
            return 'extreme'
        elif confidence >= 0.65:
            return 'high'
        elif confidence >= 0.45:
            return 'moderate'
        else:
            return 'low'

    def analyze_fight_context(self, image, fight_detections):
        """Enhanced analysis of fight context with multi-person detection"""
        enhanced_fights = []

        try:
            # Detect all persons in the image
            persons = self.detect_persons(image)

            for fight in fight_detections:
                enhanced_fight = fight.copy()

                # Count people involved in or near the fight
                fight_bbox = fight['bbox']
                people_in_fight = 0
                people_nearby = 0

                for person in persons:
                    person_bbox = person['bbox']

                    # Calculate overlap with the fight area
                    overlap = self.calculate_bbox_overlap(fight_bbox, person_bbox)

                    if overlap > 0.3:  # Person is directly involved
                        people_in_fight += 1
                    elif overlap > 0.1:  # Person is nearby
                        people_nearby += 1

                # Update fight information based on context
                enhanced_fight['people_involved'] = people_in_fight
                enhanced_fight['people_nearby'] = people_nearby
                enhanced_fight['total_people'] = people_in_fight + people_nearby

                # Escalate the threat based on the number of people
                if people_in_fight >= 3:
                    if enhanced_fight['threat_level'] == 'medium':
                        enhanced_fight['threat_level'] = 'high'
                    elif enhanced_fight['threat_level'] == 'high':
                        enhanced_fight['threat_level'] = 'critical'
                    enhanced_fight['fight_type'] = 'group_violence'

                # Add context flags
                enhanced_fight['context_flags'] = []
                if people_in_fight >= 3:
                    enhanced_fight['context_flags'].append('multi_person_fight')
                if people_nearby >= 2:
                    enhanced_fight['context_flags'].append('crowd_present')

                enhanced_fights.append(enhanced_fight)

                print(f"  👥 Fight context: {people_in_fight} involved, {people_nearby} nearby")

        except Exception as e:
            print(f"⚠️ Fight context analysis error: {e}")
            return fight_detections

        return enhanced_fights

    def calculate_bbox_overlap(self, bbox1, bbox2):
        """Calculate the overlap ratio between two bounding boxes, relative to bbox1's area"""
        x1_min, y1_min, x1_max, y1_max = bbox1
        x2_min, y2_min, x2_max, y2_max = bbox2

        # Calculate intersection
        intersect_xmin = max(x1_min, x2_min)
        intersect_ymin = max(y1_min, y2_min)
        intersect_xmax = min(x1_max, x2_max)
        intersect_ymax = min(y1_max, y2_max)

        if intersect_xmax < intersect_xmin or intersect_ymax < intersect_ymin:
            return 0.0

        intersect_area = (intersect_xmax - intersect_xmin) * (intersect_ymax - intersect_ymin)
        bbox1_area = (x1_max - x1_min) * (y1_max - y1_min)

        return intersect_area / bbox1_area if bbox1_area > 0 else 0

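    # Note (descriptive comment, not in the original file): unlike calculate_iou()
    # below, this ratio is asymmetric - it divides by the area of bbox1 only. In
    # analyze_fight_context() it is called as calculate_bbox_overlap(fight_bbox,
    # person_bbox), so a person standing inside a very large fight region can still
    # yield a small ratio. Example: bbox1=[0, 0, 100, 100], bbox2=[0, 0, 10, 10]
    # gives 100 / 10000 = 0.01 even though bbox2 lies entirely inside bbox1.
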
    def fallback_detection(self, image, imgsz, use_half):
        """Fallback detection using the general model"""
        detections = []

        try:
            general_results = self.weapon_model_general(
                image,
                imgsz=imgsz,
                conf=0.4,
                device=self.device,
                half=use_half,
                verbose=False
            )

            for result in general_results:
                boxes = result.boxes
                if boxes is not None:
                    for box in boxes:
                        class_id = int(box.cls[0])
                        class_name = result.names[class_id].lower()

                        # Filter for weapon-like objects
                        weapon_keywords = ['knife', 'scissors', 'fork']

                        if any(keyword in class_name for keyword in weapon_keywords):
                            x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                            confidence = float(box.conf[0])

                            detections.append({
                                'type': 'weapon',
                                'class': class_name,
                                'weapon_type': 'blade',
                                'confidence': confidence,
                                'bbox': [int(x1), int(y1), int(x2), int(y2)],
                                'threat_level': self.assess_weapon_threat('blade', confidence),
                                'detection_method': 'general_model_fallback'
                            })

        except Exception as e:
            print(f"⚠️ General detection error: {e}")

        return detections

    def enhance_knife_detection(self, image):
        """Enhance an image specifically for better knife (dao) detection"""
        try:
            # 1. Increase contrast and brightness for metallic objects
            enhanced = cv2.convertScaleAbs(image, alpha=1.4, beta=25)

            # 2. Apply a sharpening kernel to highlight edges
            kernel_sharpen = np.array([[-1, -1, -1],
                                       [-1, 9, -1],
                                       [-1, -1, -1]])
            sharpened = cv2.filter2D(enhanced, -1, kernel_sharpen)

            # 3. Apply CLAHE for better local contrast
            lab = cv2.cvtColor(sharpened, cv2.COLOR_BGR2LAB)
            l, a, b = cv2.split(lab)
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
            l = clahe.apply(l)
            enhanced_final = cv2.merge([l, a, b])
            enhanced_final = cv2.cvtColor(enhanced_final, cv2.COLOR_LAB2BGR)

            return enhanced_final
        except Exception as e:
            print(f"⚠️ Enhancement failed: {e}")
            return image

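    # Worked example (descriptive comment, not in the original file): the first step,
    # cv2.convertScaleAbs(image, alpha=1.4, beta=25), maps each channel value p to
    # clip(1.4 * p + 25, 0, 255), so mid-grey 128 becomes ~204 - metallic highlights
    # saturate quickly, which is what the knife-specific passes rely on. The CLAHE
    # step then re-balances local contrast on the L channel only, leaving color intact.
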
    def boost_knife_confidence(self, image, bbox, initial_confidence, class_name):
        """Boost confidence for knife (dao) detections based on geometric and visual features"""
        try:
            x1, y1, x2, y2 = [int(coord) for coord in bbox]

            # Ensure the bbox is within image bounds
            x1 = max(0, x1)
            y1 = max(0, y1)
            x2 = min(image.shape[1], x2)
            y2 = min(image.shape[0], y2)

            roi = image[y1:y2, x1:x2]

            if roi.size == 0:
                return initial_confidence

            boost = 0
            aspect_ratio = 0.0  # Defined up front so the log line below never hits an unbound name

            # 1. Check aspect ratio (knives are typically elongated)
            height = y2 - y1
            width = x2 - x1
            if height > 0 and width > 0:
                aspect_ratio = max(width, height) / min(width, height)
                if aspect_ratio > 2.5:  # Elongated shape
                    boost += 0.15
                elif aspect_ratio > 2.0:
                    boost += 0.10

            # 2. Check for metallic reflection (brightness)
            gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
            mean_brightness = np.mean(gray)
            std_brightness = np.std(gray)

            if mean_brightness > 140:  # Bright (metallic)
                boost += 0.10
            if std_brightness > 50:  # High contrast (blade edge)
                boost += 0.05

            # 3. Edge detection (knives have strong edges)
            edges = cv2.Canny(gray, 50, 150)
            edge_ratio = np.count_nonzero(edges) / edges.size
            if edge_ratio > 0.15:  # Strong edges
                boost += 0.10
            elif edge_ratio > 0.10:
                boost += 0.05

            # 4. Check for a blade-like gradient
            if height > width:  # Vertical orientation
                gradient = np.gradient(gray, axis=0)
            else:  # Horizontal orientation
                gradient = np.gradient(gray, axis=1)

            gradient_strength = np.mean(np.abs(gradient))
            if gradient_strength > 10:
                boost += 0.05

            # Apply the boost with a class-specific multiplier
            if 'dao' in class_name.lower() or 'knife' in class_name.lower():
                boost *= 1.2  # Extra boost for knife/dao classes

            final_confidence = min(initial_confidence + boost, 0.95)

            if boost > 0:
                print(
                    f"  🔪 Knife boost applied: +{boost:.2f} (AR:{aspect_ratio:.1f}, Bright:{mean_brightness:.0f}, Edge:{edge_ratio:.2f})")

            return final_confidence

        except Exception as e:
            print(f"⚠️ Confidence boost error: {e}")
            return initial_confidence

    def detect_persons(self, image):
        """Detect persons using the general model (needed for NSFW and fight analysis)"""
        persons = []

        if not self.weapon_model_general:
            return persons

        try:
            imgsz = self.config['performance']['image_size']
            use_half = self.config['performance']['half_precision'] and self.device == 'cuda'

            results = self.weapon_model_general(
                image,
                imgsz=imgsz,
                conf=0.3,
                device=self.device,
                half=use_half,
                verbose=False
            )

            for result in results:
                boxes = result.boxes
                if boxes is not None:
                    for box in boxes:
                        class_id = int(box.cls[0])
                        class_name = result.names[class_id].lower()

                        if class_name == 'person':
                            x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                            confidence = float(box.conf[0])

                            persons.append({
                                'class': 'person',
                                'confidence': confidence,
                                'bbox': [int(x1), int(y1), int(x2), int(y2)]
                            })

            return persons

        except Exception as e:
            print(f"❌ Person detection error: {e}")
            return []

    def classify_weapon_type(self, class_name):
        """Classify the weapon type from a class name"""
        class_name = class_name.lower()

        # Knife/blade keywords (expanded); 'dao' is the Vietnamese class name for knife
        knife_keywords = ['knife', 'dao', 'blade', 'dagger', 'sword', 'machete', 'katana', 'cutter']
        if any(keyword in class_name for keyword in knife_keywords):
            return 'blade'

        # Gun/firearm keywords; 'súng' is the Vietnamese class name for gun
        gun_keywords = ['gun', 'pistol', 'rifle', 'firearm', 'revolver', 'shotgun', 'súng']
        if any(keyword in class_name for keyword in gun_keywords):
            return 'firearm'

        # Other weapons
        other_keywords = ['axe', 'hammer', 'club', 'bat']
        if any(keyword in class_name for keyword in other_keywords):
            return 'blunt_weapon'

        # Check for numbered weapon classes
        if 'weapon' in class_name:
            try:
                weapon_id = int(class_name.split('_')[-1])
                if weapon_id in [0, 1]:  # Assuming 0, 1 are firearms
                    return 'firearm'
                elif weapon_id in [2, 3]:  # Assuming 2, 3 are blades
                    return 'blade'
                else:
                    return 'unknown_weapon'
            except ValueError:
                pass

        return 'unknown_weapon'

    def assess_weapon_threat(self, weapon_type, confidence):
        """Assess the threat level of a detected weapon"""
        threat_levels = {
            'firearm': 'critical',
            'blade': 'high',
            'blunt_weapon': 'medium',
            'unknown_weapon': 'medium'
        }

        base_threat = threat_levels.get(weapon_type, 'medium')

        # Adjust based on confidence
        if confidence >= 0.9:
            if base_threat == 'medium':
                return 'high'
            elif base_threat == 'high':
                return 'critical'
            else:
                return base_threat
        elif confidence >= 0.7:
            return base_threat
        elif confidence >= 0.5:
            if base_threat == 'critical':
                return 'high'
            elif base_threat == 'high':
                return 'medium'
            else:
                return base_threat
        else:
            if base_threat == 'critical':
                return 'medium'
            elif base_threat == 'high':
                return 'low'
            else:
                return 'low'

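    # Worked example (descriptive comment, not in the original file): a 'firearm'
    # starts at 'critical'; at confidence 0.55 it falls into the 0.5-0.7 band and is
    # downgraded one step to 'high', while below 0.5 it drops to 'medium'. A 'blade'
    # behaves the same way one rung lower: 'critical' at >= 0.9, 'high' at 0.7-0.9,
    # 'medium' at 0.5-0.7, 'low' below 0.5.
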
    def remove_duplicate_detections(self, detections, iou_threshold=0.4):
        """Remove duplicate detections using non-maximum suppression"""
        if len(detections) <= 1:
            return detections

        # Sort by confidence (highest first)
        detections = sorted(detections, key=lambda x: x['confidence'], reverse=True)

        keep = []
        for det1 in detections:
            should_keep = True
            for det2 in keep:
                # Suppress only same-type, overlapping detections
                if det1['type'] == det2['type']:
                    iou = self.calculate_iou(det1['bbox'], det2['bbox'])
                    if iou > iou_threshold:
                        should_keep = False
                        break

            if should_keep:
                keep.append(det1)

        return keep

    def calculate_iou(self, box1, box2):
        """Calculate the Intersection over Union of two bounding boxes"""
        x1_min, y1_min, x1_max, y1_max = box1
        x2_min, y2_min, x2_max, y2_max = box2

        # Calculate intersection
        intersect_xmin = max(x1_min, x2_min)
        intersect_ymin = max(y1_min, y2_min)
        intersect_xmax = min(x1_max, x2_max)
        intersect_ymax = min(y1_max, y2_max)

        if intersect_xmax < intersect_xmin or intersect_ymax < intersect_ymin:
            return 0.0

        intersect_area = (intersect_xmax - intersect_xmin) * (intersect_ymax - intersect_ymin)

        # Calculate union
        box1_area = (x1_max - x1_min) * (y1_max - y1_min)
        box2_area = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = box1_area + box2_area - intersect_area

        return intersect_area / union_area if union_area > 0 else 0

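    # Usage sketch (descriptive comment, not in the original file): two axis-aligned
    # boxes [0, 0, 10, 10] and [5, 0, 15, 10] intersect over a 5x10 strip, so
    # calculate_iou() returns 50 / (100 + 100 - 50) = 1/3. Because 1/3 < the 0.4
    # default in remove_duplicate_detections(), both boxes would survive NMS; with
    # more overlap, the lower-confidence box is dropped.
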
    def detect_nsfw_content(self, image):
        """Enhanced NSFW detection, gated on person detection"""
        detections = []

        try:
            if len(image.shape) == 3 and image.shape[2] == 3:
                rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            else:
                rgb_image = image

            # Stage 1: detect persons first (optimization)
            persons = self.detect_persons(image)

            if not persons:
                # No persons detected, skip detailed NSFW analysis
                return detections

            print(f"👤 Found {len(persons)} person(s), analyzing for NSFW content...")

            # Stage 2: overall NSFW classification
            if self.nsfw_classifier:
                try:
                    pil_image = Image.fromarray(rgb_image)
                    nsfw_result = self.nsfw_classifier(pil_image)

                    if nsfw_result[0]['label'] == 'nsfw':
                        confidence = nsfw_result[0]['score']
                        if confidence > self.config['nsfw_detection']['confidence_threshold']:
                            detections.append({
                                'type': 'nsfw',
                                'class': 'inappropriate_content',
                                'confidence': confidence,
                                'bbox': [0, 0, image.shape[1], image.shape[0]],
                                'method': 'classification'
                            })
                except Exception as e:
                    print(f"⚠️ NSFW classifier error: {e}")

            # Stage 3: person-specific skin analysis
            if self.config['nsfw_detection']['skin_detection']:
                for person in persons:
                    person_detections = self.analyze_person_skin(image, person)
                    detections.extend(person_detections)

            # Stage 4: regional skin analysis (only if no person-specific detections)
            if self.config['nsfw_detection']['region_analysis'] and len(detections) == 0:
                skin_detections = self.detect_skin_regions(image)
                detections.extend(skin_detections)

            return detections

        except Exception as e:
            print(f"❌ NSFW detection error: {e}")
            return []

    def analyze_person_skin(self, image, person):
        """Analyze skin exposure for a specific person"""
        detections = []

        try:
            x1, y1, x2, y2 = person['bbox']
            person_region = image[y1:y2, x1:x2]

            if person_region.size == 0:
                return detections

            # Convert to HSV for skin detection
            hsv_person = cv2.cvtColor(person_region, cv2.COLOR_BGR2HSV)

            # Skin color range
            lower_skin = np.array([0, 20, 70], dtype=np.uint8)
            upper_skin = np.array([20, 255, 255], dtype=np.uint8)

            # Create skin mask
            skin_mask = cv2.inRange(hsv_person, lower_skin, upper_skin)

            # Calculate skin percentage
            total_person_pixels = person_region.shape[0] * person_region.shape[1]
            skin_pixels = cv2.countNonZero(skin_mask)
            skin_ratio = skin_pixels / total_person_pixels if total_person_pixels > 0 else 0

            # Threshold for suspicious skin exposure
            if skin_ratio > 0.4:  # More than 40% of the person region is skin
                confidence = min(skin_ratio * 2, 1.0)

                detections.append({
                    'type': 'nsfw',
                    'class': 'excessive_skin_exposure',
                    'confidence': confidence,
                    'bbox': [x1, y1, x2, y2],
                    'method': 'person_skin_analysis',
                    'skin_ratio': skin_ratio
                })

                print(f"🚨 Excessive skin exposure detected: {skin_ratio:.2f} ratio")

            return detections

        except Exception as e:
            print(f"❌ Person skin analysis error: {e}")
            return []

    def detect_skin_regions(self, image):
        """Detect large skin-colored regions"""
        try:
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

            # Define skin color range
            lower_skin = np.array([0, 20, 70], dtype=np.uint8)
            upper_skin = np.array([20, 255, 255], dtype=np.uint8)

            # Create skin mask
            skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)

            # Apply morphological operations
            kernel = np.ones((3, 3), np.uint8)
            skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_OPEN, kernel)
            skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_CLOSE, kernel)

            # Find contours
            contours, _ = cv2.findContours(skin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            detections = []
            image_area = image.shape[0] * image.shape[1]

            for contour in contours:
                area = cv2.contourArea(contour)

                # Flag skin regions that are suspiciously large
                if area > image_area * 0.3:
                    x, y, w, h = cv2.boundingRect(contour)
                    confidence = min(area / image_area, 1.0)

                    detections.append({
                        'type': 'nsfw',
                        'class': 'large_skin_region',
                        'confidence': confidence,
                        'bbox': [x, y, x + w, y + h],
                        'method': 'skin_detection'
                    })

            return detections

        except Exception as e:
            print(f"❌ Skin detection error: {e}")
            return []

    def setup_nsfw_detector(self):
        """Set up NSFW detection components (optimized for CPU)"""
        try:
            print("🔞 Loading NSFW detection components...")

            # 1. NSFW classifier (optimized for CPU)
            device_id = 0 if self.device == 'cuda' else -1
            try:
                self.nsfw_classifier = pipeline(
                    "image-classification",
                    model="Falconsai/nsfw_image_detection",
                    device=device_id,
                    use_fast=True
                )
                print("✅ NSFW classifier loaded")
            except Exception as nsfw_error:
                print(f"⚠️ NSFW classifier failed: {nsfw_error}")
                print("   Trying backup method...")
                try:
                    # Fallback without specifying use_fast
                    self.nsfw_classifier = pipeline(
                        "image-classification",
                        model="Falconsai/nsfw_image_detection",
                        device=device_id
                    )
                    print("✅ NSFW classifier loaded (fallback)")
                except Exception:
                    print("❌ NSFW classifier completely failed")
                    self.nsfw_classifier = None

            # 2. Pose detection (guarded import with fallbacks)
            if self.config['nsfw_detection']['pose_analysis'] and MEDIAPIPE_AVAILABLE:
                try:
                    import mediapipe as mp
                    try:
                        mp_pose = mp.solutions.pose
                        self.pose_detector = mp_pose.Pose(
                            static_image_mode=True,
                            model_complexity=0,
                            min_detection_confidence=0.5
                        )
                        print("✅ Pose detector loaded (legacy API)")
                    except AttributeError:
                        print("⚠️ MediaPipe API not available")
                        self.pose_detector = None
                        self.config['nsfw_detection']['pose_analysis'] = False

                except Exception as pose_error:
                    print(f"⚠️ Pose detection failed: {pose_error}")
                    self.pose_detector = None
                    self.config['nsfw_detection']['pose_analysis'] = False
            else:
                self.pose_detector = None
                if not MEDIAPIPE_AVAILABLE:
                    print("⚠️ MediaPipe not available - pose analysis disabled")

        except Exception as e:
            print(f"❌ Error loading NSFW components: {e}")
            print("💡 Falling back to skin detection only")

    def process_image(self, image_path):
        """Process a single image with enhanced detection, including fights"""
        try:
            # Load image (path or ndarray)
            if isinstance(image_path, str):
                image = cv2.imread(image_path)
                if image is None:
                    raise ValueError(f"Could not load image: {image_path}")
                cache_key = f"file_{image_path}"
            else:
                image = image_path
                cache_key = f"array_{hash(image.tobytes())}"

            # Check cache
            import time
            current_time = time.time()
            if cache_key in self.detection_cache:
                cached_result, timestamp = self.detection_cache[cache_key]
                if current_time - timestamp < self.cache_ttl:
                    return cached_result

            print(f"📸 Processing image: {image.shape}")

            # Run detections
            all_detections = []

            # Weapon and fight detection
            if self.config['weapon_detection']['enabled']:
                weapon_fight_detections = self.detect_weapons(image)
                all_detections.extend(weapon_fight_detections)

                weapon_detections = [d for d in weapon_fight_detections if d['type'] == 'weapon']
                fight_detections = [d for d in weapon_fight_detections if d['type'] == 'fight']

                print(f"🔫 Found {len(weapon_detections)} weapon(s)")
                print(f"👊 Found {len(fight_detections)} fight(s)")

                # Show detailed breakdown
                if weapon_detections:
                    knife_detections = [d for d in weapon_detections if d['weapon_type'] == 'blade']
                    if knife_detections:
                        print(f"  🔪 Including {len(knife_detections)} knife (dao) detection(s)")

                if fight_detections:
                    for fight in fight_detections:
                        fight_type = fight.get('fight_type', 'unknown')
                        aggression = fight.get('aggression_level', 'unknown')
                        print(f"  👊 Fight: {fight_type} (aggression: {aggression})")

            # NSFW detection
            if self.config['nsfw_detection']['enabled']:
                nsfw_detections = self.detect_nsfw_content(image)
                all_detections.extend(nsfw_detections)
                print(f"🔞 Found {len(nsfw_detections)} NSFW detection(s)")

            # Generate the result
            result = {
                'timestamp': datetime.now().isoformat(),
                'image_path': image_path if isinstance(image_path, str) else 'array',
                'detections': all_detections,
                'total_threats': len(all_detections),
                'risk_level': self.calculate_risk_level(all_detections),
                'action_required': len(all_detections) > 0,
                'processing_method': 'enhanced_dual_model_with_fight',
                'detection_breakdown': {
                    'weapons': len([d for d in all_detections if d['type'] == 'weapon']),
                    'fights': len([d for d in all_detections if d['type'] == 'fight']),
                    'nsfw': len([d for d in all_detections if d['type'] == 'nsfw'])
                }
            }

            # Cache the result
            self.detection_cache[cache_key] = (result, current_time)

            # Clean old cache entries
            self.clean_cache(current_time)

            # Save detection history
            self.detection_history.append(result)

            # Draw detections
            if self.config['output']['draw_boxes'] and all_detections:
                annotated_image = self.draw_detections(image.copy(), all_detections)
                result['annotated_image'] = annotated_image

            return result

        except Exception as e:
            print(f"❌ Error processing image: {e}")
            return None

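    # Result shape (descriptive comment, not in the original file): process_image()
    # returns a dict roughly like
    #   {'timestamp': '...', 'image_path': '...', 'detections': [...],
    #    'total_threats': 2, 'risk_level': 'high', 'action_required': True,
    #    'processing_method': 'enhanced_dual_model_with_fight',
    #    'detection_breakdown': {'weapons': 1, 'fights': 1, 'nsfw': 0}}
    # plus an 'annotated_image' ndarray when draw_boxes is on and something was found,
    # or None on failure.
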
    def clean_cache(self, current_time):
        """Remove expired cache entries"""
        try:
            expired_keys = []
            for key, value in self.detection_cache.items():
                # Check the tuple structure
                if isinstance(value, tuple) and len(value) == 2:
                    _, timestamp = value
                    if timestamp is not None and current_time - timestamp > self.cache_ttl:
                        expired_keys.append(key)
                else:
                    # Invalid cache entry, remove it
                    expired_keys.append(key)

            for key in expired_keys:
                del self.detection_cache[key]

        except Exception as e:
            print(f"⚠️ Cache cleanup error: {e}")

    def get_model_status(self):
        """Get the status of all models"""
        status = {
            'custom_weapon_fight_model': self.weapon_model_custom is not None,
            'general_model': self.weapon_model_general is not None,
            'nsfw_classifier': self.nsfw_classifier is not None,
            'pose_detector': self.pose_detector is not None,
            'device': self.device,
            'cache_size': len(self.detection_cache),
            'knife_enhancement': self.config['weapon_detection']['use_enhancement'],
            'knife_boost': self.config['weapon_detection']['boost_knife_detection'],
            'fight_detection': self.config['weapon_detection']['fight_detection'],
            'fight_analysis': self.config['weapon_detection']['fight_analysis']
        }

        if self.weapon_model_custom and hasattr(self.weapon_model_custom, 'names'):
            status['custom_classes'] = list(self.weapon_model_custom.names.values())

        return status

    def calculate_risk_level(self, detections):
        """Calculate the overall risk level, including fights"""
        if not detections:
            return 'safe'

        max_confidence = max(det['confidence'] for det in detections)
        threat_types = set(det['type'] for det in detections)

        # Check for critical combinations
        has_weapons = 'weapon' in threat_types
        has_fights = 'fight' in threat_types
        has_nsfw = 'nsfw' in threat_types

        # Fights + weapons = critical
        if has_weapons and has_fights:
            return 'critical'

        # High-confidence fights are critical
        fight_detections = [d for d in detections if d['type'] == 'fight']
        if fight_detections:
            max_fight_confidence = max(f['confidence'] for f in fight_detections)
            if max_fight_confidence > 0.8:
                return 'critical'
            elif max_fight_confidence > 0.65:
                return 'high'

        # Existing weapon logic
        if has_weapons and max_confidence > 0.8:
            return 'critical'
        elif has_weapons or has_fights or max_confidence > 0.9:
            return 'high'
        elif max_confidence > 0.7:
            return 'medium'
        else:
            return 'low'

    def draw_detections(self, image, detections):
        """Draw detection boxes and labels with enhanced visualization for fights"""
        try:
            colors = {
                'weapon': (0, 0, 255),    # Red
                'fight': (0, 165, 255),   # Orange for fights
                'nsfw': (255, 0, 255),    # Magenta
            }

            # Special colors for weapon types
            weapon_colors = {
                'blade': (0, 100, 255),        # Orange-red for knives
                'firearm': (0, 0, 255),        # Red for guns
                'blunt_weapon': (100, 0, 255)  # Purple for blunt weapons
            }

            # Special colors for fight types
            fight_colors = {
                'physical_combat': (0, 140, 255),  # Orange
                'martial_arts': (0, 200, 255),     # Light orange
                'wrestling': (0, 165, 255),        # Medium orange
                'group_violence': (0, 69, 255),    # Dark orange
                'general_fight': (0, 165, 255)     # Default orange
            }

            for det in detections:
                x1, y1, x2, y2 = det['bbox']

                # Choose the color based on type
                if det['type'] == 'weapon' and 'weapon_type' in det:
                    color = weapon_colors.get(det['weapon_type'], colors['weapon'])
                elif det['type'] == 'fight' and 'fight_type' in det:
                    color = fight_colors.get(det['fight_type'], colors['fight'])
                else:
                    color = colors.get(det['type'], (0, 255, 0))

                # Draw the rectangle, with a thicker line for high-threat detections
                if det.get('threat_level') == 'critical':
                    thickness = 4
                elif det['type'] in ['weapon', 'fight']:
                    thickness = 3
                else:
                    thickness = 2
                cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness)

                # Create a detailed label
                if det['type'] == 'weapon':
                    label = f"{det['class']} ({det['confidence']:.2f})"
                    if 'threat_level' in det:
                        label += f" [{det['threat_level']}]"
                elif det['type'] == 'fight':
                    label = f"FIGHT: {det['class']} ({det['confidence']:.2f})"
                    if 'threat_level' in det:
                        label += f" [{det['threat_level']}]"
                    if 'aggression_level' in det:
                        label += f" {det['aggression_level']}"
                else:
                    label = f"{det['type']}: {det['class']} ({det['confidence']:.2f})"

                # Draw the label background
                label_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)[0]
                cv2.rectangle(image, (x1, y1 - 25), (x1 + label_size[0] + 5, y1), color, -1)

                # Draw the label text
                cv2.putText(image, label, (x1 + 2, y1 - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

                # Add additional context for fights
                if det['type'] == 'fight':
                    context_text = []
                    if 'people_involved' in det and det['people_involved'] > 0:
                        context_text.append(f"People: {det['people_involved']}")
                    if 'context_flags' in det and det['context_flags']:
                        context_text.append(f"Flags: {', '.join(det['context_flags'])}")

                    if context_text:
                        context_label = " | ".join(context_text)
                        cv2.putText(image, context_label, (x1, y2 + 15),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)

                # Add a small detection-method indicator
                if 'detection_method' in det:
                    method = det['detection_method'].split('_')[-1]
                    cv2.putText(image, method, (x1, y2 + 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)

            return image

        except Exception as e:
            print(f"❌ Error drawing detections: {e}")
            return image

    def process_video(self, video_path, output_path=None):
        """Process a video file with enhanced detection, including fights - processes every frame"""
        try:
            cap = cv2.VideoCapture(video_path)
            frame_count = 0
            total_detections = []
            fight_timeline = []  # Track fights over time

            # Rank aggression labels by severity so max() compares meaningfully
            # (a plain string max() would order labels alphabetically)
            aggression_rank = {'low': 0, 'moderate': 1, 'high': 2, 'extreme': 3}

            if output_path:
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                fps = cap.get(cv2.CAP_PROP_FPS)
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1

                # Process every frame
                result = self.process_image(frame)
                if result and result['detections']:
                    # Add the frame number to each detection for tracking
                    for detection in result['detections']:
                        detection['frame'] = frame_count

                    total_detections.extend(result['detections'])

                    # Track the fight timeline
                    fight_detections = [d for d in result['detections'] if d['type'] == 'fight']
                    if fight_detections:
                        timestamp = frame_count / cap.get(cv2.CAP_PROP_FPS)
                        fight_timeline.append({
                            'timestamp': timestamp,
                            'frame': frame_count,
                            'fights': len(fight_detections),
                            'max_aggression': max(
                                (f.get('aggression_level', 'low') for f in fight_detections),
                                key=lambda a: aggression_rank.get(a, 0))
                        })

                    print(f"⚠️ Frame {frame_count}: {len(result['detections'])} threats detected")

                    breakdown = result.get('detection_breakdown', {})
                    if breakdown.get('fights', 0) > 0:
                        print(f"  👊 Fights: {breakdown['fights']}")

                    if output_path and 'annotated_image' in result:
                        out.write(result['annotated_image'])
                    elif output_path:
                        out.write(frame)
                else:
                    if output_path:
                        out.write(frame)

            cap.release()
            if output_path:
                out.release()

            # Analysis of fight patterns
            fight_analysis = {}
            if fight_timeline:
                fight_analysis = {
                    'total_fight_incidents': len(fight_timeline),
                    'first_fight_time': fight_timeline[0]['timestamp'],
                    'last_fight_time': fight_timeline[-1]['timestamp'],
                    'peak_aggression_time': max(
                        fight_timeline,
                        key=lambda x: aggression_rank.get(x['max_aggression'], 0))['timestamp'],
                    'fight_duration_coverage': (fight_timeline[-1]['timestamp'] - fight_timeline[0]['timestamp']
                                                if len(fight_timeline) > 1 else 0)
                }

            return {
                'total_frames_processed': frame_count,
                'total_detections': len(total_detections),
                'detections': total_detections,
                'fight_timeline': fight_timeline,
                'fight_analysis': fight_analysis,
                'detection_breakdown': {
                    'weapons': len([d for d in total_detections if d['type'] == 'weapon']),
                    'fights': len([d for d in total_detections if d['type'] == 'fight']),
                    'nsfw': len([d for d in total_detections if d['type'] == 'nsfw'])
                }
            }

        except Exception as e:
            print(f"❌ Error processing video: {e}")
            return None

    def save_report(self, filename="detection_report.json"):
        """Save the detection history to a file"""
        try:
            with open(filename, 'w') as f:
                json.dump(self.detection_history, f, indent=2, default=str)
            print(f"📊 Report saved to {filename}")
        except Exception as e:
            print(f"❌ Error saving report: {e}")

    def get_memory_usage(self):
        """Get the current GPU memory usage"""
        if torch.cuda.is_available():
            allocated = torch.cuda.memory_allocated() / 1024 ** 3
            cached = torch.cuda.memory_reserved() / 1024 ** 3
            return f"GPU Memory: {allocated:.2f}GB allocated, {cached:.2f}GB cached"
        return "CPU mode"

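# Minimal usage sketch (not part of the original file; the module defines main() below
# but never calls it, so a caller would do something like):
#
#   moderator = ContentModerator()
#   result = moderator.process_image("suspect_frame.jpg")  # hypothetical path
#   if result and result['action_required']:
#       print(result['risk_level'], result['detection_breakdown'])
#   moderator.save_report("report.json")
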
def main():
    """Enhanced example usage with knife and fight detection improvements - processes every frame"""

    # Initialize the system
    moderator = ContentModerator()

    # Show enhanced system information
    print("\n" + "=" * 60)
    print("🎯 ENHANCED DUAL MODEL SYSTEM WITH FIGHT DETECTION")
    print("=" * 60)

    status = moderator.get_model_status()

    if status['custom_weapon_fight_model']:
        print("✅ Custom YOLO11 Model (dao + súng + fight): LOADED")
        if 'custom_classes' in status:
            print(f"📊 Custom classes: {status['custom_classes']}")
    else:
        print("❌ Custom weapon+fight model: NOT FOUND")

    if status['general_model']:
        print("✅ General YOLO11n Model (person detection): LOADED")
    else:
        print("❌ General model: FAILED")

    if status['nsfw_classifier']:
        print("✅ NSFW Classifier: LOADED")
    else:
        print("❌ NSFW Classifier: FAILED")

    print(f"🖥️ Device: {status['device']}")
    print(f"🗄️ Cache system: ENABLED")
    print(f"🔪 Knife enhancement: {'ENABLED' if status['knife_enhancement'] else 'DISABLED'}")
    print(f"📈 Knife confidence boost: {'ENABLED' if status['knife_boost'] else 'DISABLED'}")
    print(f"👊 Fight detection: {'ENABLED' if status['fight_detection'] else 'DISABLED'}")
    print(f"🧠 Fight analysis: {'ENABLED' if status['fight_analysis'] else 'DISABLED'}")

    # Enhanced features info
    print("\n" + "=" * 60)
    print("✨ ENHANCED DETECTION FEATURES")
    print("=" * 60)
    print("🔧 Image Enhancement:")
    print("   - Contrast & brightness optimization")
    print("   - Edge sharpening for metallic objects")
    print("   - CLAHE for local contrast")
    print("📊 Confidence Boosting:")
    print("   - Geometric analysis (knives)")
    print("   - Motion blur analysis (fights)")
    print("   - Edge strength analysis")
    print("🎯 Multi-pass Detection:")
    print("   - Dedicated threshold pass for knives (0.50)")
    print("   - Normal threshold for guns (0.50)")
    print("   - Lower threshold for fights (0.45)")
    print("👊 Fight Analysis:")
    print("   - Multi-person fight detection")
    print("   - Aggression level assessment")
    print("   - Context-aware threat escalation")

    # Example 1: process a single image
    print("\n" + "=" * 50)
    print("🖼️ SINGLE IMAGE PROCESSING")
    print("=" * 50)

    test_image = "test_image.jpg"

    if os.path.exists(test_image):
        result = moderator.process_image(test_image)
        if result:
            print(f"\n📊 DETECTION RESULTS:")
            print(f"Risk Level: {result['risk_level']}")
            print(f"Total Threats: {result['total_threats']}")
            print(f"Processing Method: {result.get('processing_method', 'standard')}")

            breakdown = result.get('detection_breakdown', {})
            if breakdown:
                print(f"\n📈 BREAKDOWN:")
                print(f"  Weapons: {breakdown.get('weapons', 0)}")
                print(f"  Fights: {breakdown.get('fights', 0)}")
                print(f"  NSFW: {breakdown.get('nsfw', 0)}")

            # Show weapon-specific results
            weapon_detections = [d for d in result['detections'] if d['type'] == 'weapon']
            if weapon_detections:
                print(f"\n🔫 WEAPON DETECTIONS: {len(weapon_detections)}")
                for i, detection in enumerate(weapon_detections):
                    method = detection.get('detection_method', 'unknown')
                    print(f"  Weapon {i + 1} ({method}):")
                    print(f"    Class: {detection['class']}")
                    print(f"    Type: {detection['weapon_type']}")
                    print(f"    Confidence: {detection['confidence']:.3f}")
                    print(f"    Threat Level: {detection['threat_level']}")

            # Show fight-specific results
            fight_detections = [d for d in result['detections'] if d['type'] == 'fight']
            if fight_detections:
                print(f"\n👊 FIGHT DETECTIONS: {len(fight_detections)}")
                for i, detection in enumerate(fight_detections):
                    method = detection.get('detection_method', 'unknown')
                    print(f"  Fight {i + 1} ({method}):")
                    print(f"    Class: {detection['class']}")
                    print(f"    Type: {detection.get('fight_type', 'unknown')}")
                    print(f"    Confidence: {detection['confidence']:.3f}")
                    print(f"    Threat Level: {detection['threat_level']}")
                    print(f"    Aggression: {detection.get('aggression_level', 'unknown')}")
                    if 'people_involved' in detection:
                        print(f"    People Involved: {detection['people_involved']}")
                    if 'context_flags' in detection and detection['context_flags']:
                        print(f"    Context: {', '.join(detection['context_flags'])}")

            # Show NSFW results
            nsfw_detections = [d for d in result['detections'] if d['type'] == 'nsfw']
            if nsfw_detections:
                print(f"\n🔞 NSFW DETECTIONS: {len(nsfw_detections)}")
                for i, detection in enumerate(nsfw_detections):
                    method = detection.get('method', 'unknown')
                    print(f"  NSFW {i + 1} ({method}):")
                    print(f"    Class: {detection['class']}")
                    print(f"    Confidence: {detection['confidence']:.3f}")
                    if 'skin_ratio' in detection:
                        print(f"    Skin Ratio: {detection['skin_ratio']:.2f}")
    else:
        print(f"⚠️ Test image not found: {test_image}")
        print("Creating a test pattern to demonstrate detection...")

        # Create a synthetic test image
        test_img = np.ones((640, 640, 3), dtype=np.uint8) * 128
        cv2.putText(test_img, "Test Pattern", (200, 320),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)

        result = moderator.process_image(test_img)
        print("✅ Test pattern processed successfully")

    # Example 2: enhanced webcam processing with fight detection - processes every frame
    print("\n" + "=" * 60)
    print("📹 ENHANCED WEBCAM PROCESSING WITH FIGHT DETECTION")
    print("=" * 60)
    print("Starting enhanced detection on webcam...")
    print("🎮 Controls:")
    print("  - Press 'q' to quit")
    print("  - Press 's' to save frame")
    print("  - Press 'i' to show model info")
    print("  - Press 'e' to toggle enhancement")
    print("  - Press 'b' to toggle knife confidence boost")
    print("  - Press 'f' to toggle fight analysis")
    print("  - Press 'h' for help")

    try:
        cap = cv2.VideoCapture(0)

        if not cap.isOpened():
            print("❌ Cannot open webcam. Check if a camera is connected.")
        else:
            print("✅ Enhanced webcam processing started")

            frame_count = 0
            detection_stats = {
                'weapons': 0,
                'knives': 0,
                'guns': 0,
                'fights': 0,
                'nsfw': 0,
                'total_frames': 0,
                'fight_incidents': 0
            }

            while True:
                ret, frame = cap.read()
                if not ret:
                    print("❌ Cannot read from webcam")
                    break

                frame_count += 1
                detection_stats['total_frames'] = frame_count
                frame = cv2.flip(frame, 1)

                # Add status overlay
                y_offset = frame.shape[0] - 120
                cv2.putText(frame,
                            f"Enhancement: {'ON' if moderator.config['weapon_detection']['use_enhancement'] else 'OFF'}",
                            (10, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                cv2.putText(frame,
                            f"Knife Boost: {'ON' if moderator.config['weapon_detection']['boost_knife_detection'] else 'OFF'}",
                            (10, y_offset + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                cv2.putText(frame,
                            f"Fight Analysis: {'ON' if moderator.config['weapon_detection']['fight_analysis'] else 'OFF'}",
                            (10, y_offset + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                model_info = "Models: Custom+General" if moderator.weapon_model_custom else "General Only"
                cv2.putText(frame, model_info, (10, y_offset + 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                # Process every frame
                result = moderator.process_image(frame)

                if result and result['action_required']:
                    # Count detections by type
                    for detection in result['detections']:
                        if detection['type'] == 'weapon':
                            detection_stats['weapons'] += 1
                            if detection['weapon_type'] == 'blade':
                                detection_stats['knives'] += 1
                            elif detection['weapon_type'] == 'firearm':
                                detection_stats['guns'] += 1
                        elif detection['type'] == 'fight':
                            detection_stats['fights'] += 1
                            if detection.get('aggression_level') in ['high', 'extreme']:
                                detection_stats['fight_incidents'] += 1
                        elif detection['type'] == 'nsfw':
                            detection_stats['nsfw'] += 1

                    print(f"⚠️ Frame {frame_count}: {result['risk_level']} risk - {result['total_threats']} threats!")

                    # Show specific detections with fight info
                    for detection in result['detections']:
                        if detection['type'] == 'weapon':
                            icon = "🔪" if detection['weapon_type'] == 'blade' else "🔫"
                            method = detection.get('detection_method', 'unknown').split('_')[-1]
                            print(f"  {icon} {detection['class']} ({detection['confidence']:.3f}) [{method}]")
                        elif detection['type'] == 'fight':
                            fight_type = detection.get('fight_type', 'general')
                            aggression = detection.get('aggression_level', 'unknown')
                            people = detection.get('people_involved', 0)
                            method = detection.get('detection_method', 'unknown').split('_')[-1]
                            print(f"  👊 FIGHT: {fight_type} ({detection['confidence']:.3f}) [{method}]")
                            print(f"     Aggression: {aggression}, People: {people}")

                    # Use the annotated frame when available
                    if 'annotated_image' in result:
                        cv2.imshow('Enhanced Detection System (Weapons + Fights)', result['annotated_image'])
                    else:
                        # Add a threat counter
                        breakdown = result.get('detection_breakdown', {})
                        threat_text = f"THREATS: W:{breakdown.get('weapons', 0)} F:{breakdown.get('fights', 0)} N:{breakdown.get('nsfw', 0)}"
                        cv2.putText(frame, threat_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        cv2.imshow('Enhanced Detection System (Weapons + Fights)', frame)
                else:
                    cv2.putText(frame, "SAFE", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    cv2.imshow('Enhanced Detection System (Weapons + Fights)', frame)

                # Handle key presses
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    print("🛑 Webcam stopped by user")
                    break
                elif key == ord('s'):
                    filename = f"enhanced_detection_{frame_count}.jpg"
                    cv2.imwrite(filename, frame)
                    print(f"💾 Frame saved as {filename}")
                elif key == ord('i'):
                    print(f"\n📊 Model Status:")
                    current_status = moderator.get_model_status()
                    for k, v in current_status.items():
                        print(f"  {k}: {v}")
                elif key == ord('e'):
                    # Toggle enhancement
                    moderator.config['weapon_detection']['use_enhancement'] = \
                        not moderator.config['weapon_detection']['use_enhancement']
                    print(
                        f"🔧 Enhancement: {'ON' if moderator.config['weapon_detection']['use_enhancement'] else 'OFF'}")
                elif key == ord('b'):
                    # Toggle knife boost
                    moderator.config['weapon_detection']['boost_knife_detection'] = \
                        not moderator.config['weapon_detection']['boost_knife_detection']
                    print(
                        f"📈 Knife Boost: {'ON' if moderator.config['weapon_detection']['boost_knife_detection'] else 'OFF'}")
                elif key == ord('f'):
                    # Toggle fight analysis
                    moderator.config['weapon_detection']['fight_analysis'] = \
                        not moderator.config['weapon_detection']['fight_analysis']
                    print(
                        f"👊 Fight Analysis: {'ON' if moderator.config['weapon_detection']['fight_analysis'] else 'OFF'}")
                elif key == ord('h'):
                    print("\n🎮 Controls:")
                    print("  'q': quit")
                    print("  's': save frame")
                    print("  'i': model info")
                    print("  'e': toggle enhancement")
                    print("  'b': toggle knife confidence boost")
                    print("  'f': toggle fight analysis")
                    print("  'h': help")

            # Show comprehensive session statistics
            print(f"\n📈 Session Statistics:")
            print(f"  Total frames: {detection_stats['total_frames']}")
            print(f"  Total weapon detections: {detection_stats['weapons']}")
            print(f"    - Knives (dao): {detection_stats['knives']}")
            print(f"    - Guns: {detection_stats['guns']}")
            print(f"  Total fight detections: {detection_stats['fights']}")
            print(f"    - High-aggression incidents: {detection_stats['fight_incidents']}")
            print(f"  NSFW detections: {detection_stats['nsfw']}")

            if detection_stats['total_frames'] > 0:
                total_detections = detection_stats['weapons'] + detection_stats['fights'] + detection_stats['nsfw']
                detection_rate = (total_detections / detection_stats['total_frames'] * 100)
                print(f"  Overall detection rate: {detection_rate:.1f}%")

            if detection_stats['weapons'] > 0:
                knife_ratio = detection_stats['knives'] / detection_stats['weapons'] * 100
                print(f"  Knife detection ratio: {knife_ratio:.1f}% of weapons")

            if detection_stats['fights'] > 0:
                incident_ratio = detection_stats['fight_incidents'] / detection_stats['fights'] * 100
                print(f"  High-aggression fight ratio: {incident_ratio:.1f}% of fights")

        cap.release()
        cv2.destroyAllWindows()
        print("✅ Enhanced webcam session completed")

    except Exception as e:
        print(f"❌ Webcam error: {e}")

    # Show the final system status
    print(f"\n💾 {moderator.get_memory_usage()}")
    print(f"🗄️ Final cache size: {len(moderator.detection_cache)} entries")

    # Save the enhanced report
    moderator.save_report("enhanced_detection_with_fights_report.json")

    print("\n✅ Enhanced Content Moderation System with Fight Detection completed!")
    print("💡 New fight detection capabilities:")
    print("   - Behavioral fight pattern recognition")
    print("   - Multi-person fight analysis")
    print("   - Aggression level assessment")
    print("   - Context-aware threat escalation")
    print("   - Fight timeline tracking for videos")
    print("💡 Enhanced weapon detection:")
    print("   - Image enhancement preprocessing")
    print("   - Dynamic confidence thresholds")
    print("   - Geometric feature analysis")
    print("   - Multi-pass detection strategy")
    print("💡 Processing mode: EVERY FRAME (no skipping)")