astrosbd commited on
Commit
827d2e8
Β·
verified Β·
1 Parent(s): 0826094

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +770 -573
app.py CHANGED
@@ -1,685 +1,882 @@
1
  #!/usr/bin/env python3
 
2
  import os
3
  import sys
4
- import traceback
5
- from typing import Optional, Tuple, Dict, Any, List
6
- import warnings
7
-
8
- import importlib.util
9
  import time
10
  import cv2
11
  import torch
12
  import numpy as np
13
  import gradio as gr
14
- from PIL import Image, ImageOps
 
15
  import torch.nn as nn
16
  import torch.nn.functional as F
 
 
17
  from transformers import AutoModel, CLIPImageProcessor
18
  import joblib
 
19
  import json
 
 
 
20
 
21
- # Suppress warnings
22
- warnings.filterwarnings("ignore", message="Couldn't find the key")
23
- warnings.filterwarnings("ignore", category=UserWarning)
24
 
25
- # --------------------------------------------------------------------------------------
26
- # Environment Setup
27
- # --------------------------------------------------------------------------------------
 
 
 
 
28
 
29
- # Check for Detectron2
30
  DETECTRON2_AVAILABLE = False
31
  try:
 
32
  from detectron2.engine import DefaultPredictor
33
  from detectron2.config import get_cfg
 
34
  from detectron2 import model_zoo
 
35
  DETECTRON2_AVAILABLE = True
36
- print("βœ… Detectron2 available")
37
- except ImportError:
38
- print("⚠️ Detectron2 not available - Stage 1 will use simulator")
 
39
 
40
- # Download classifier from HF if available
41
  huggingface_model_path = None
42
  try:
43
  from huggingface_hub import hf_hub_download
44
-
45
- repo = os.getenv('PRIVATE_REPO', 'fallback')
46
- token = os.getenv('key')
47
-
48
- if repo != 'fallback' and token:
49
- huggingface_model_path = hf_hub_download(
50
- repo_id=repo,
51
- filename="V1.pkl",
52
- token=token
53
- )
54
- print(f"βœ… Classifier downloaded: {huggingface_model_path}")
55
  except Exception as e:
56
- print(f"⚠️ Could not download classifier: {e}")
 
 
 
 
 
 
57
 
58
- # Device setup
59
  if torch.backends.mps.is_available():
60
- DEVICE = torch.device("mps")
61
- print("πŸ–₯️ Using MPS (Metal Performance Shaders)")
62
  elif torch.cuda.is_available():
63
- DEVICE = torch.device("cuda")
64
- print("πŸ–₯️ Using CUDA")
65
  else:
66
- DEVICE = torch.device("cpu")
67
- print("πŸ–₯️ Using CPU")
68
 
69
- # --------------------------------------------------------------------------------------
70
- # Global Models
71
- # --------------------------------------------------------------------------------------
72
-
73
- image_processor = None
74
- radio_model = None
75
  ai_detection_classifier = None
76
- _preloaded = False
77
 
78
- # Default paths
79
- DEFAULT_AI_DETECTION_MODEL_PATH = "./output/V1.pkl"
80
- DEFAULT_DAMAGE_MODEL_PATH = "./output/model_final.pth"
81
 
82
- # --------------------------------------------------------------------------------------
83
- # C-RADIOv3-g Model Loading (matching your working implementation)
84
- # --------------------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
- def preload_radio_model():
87
- """Load C-RADIOv3-g model for feature extraction"""
88
- global image_processor, radio_model, _preloaded
89
-
90
- if _preloaded:
91
- print("βœ… Models already loaded")
92
- return True
93
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  try:
95
- # Use C-RADIOv3-g (not v3-B!)
96
- hf_repo = os.getenv('MODEL_REPO', 'nvidia/C-RADIOv3-g')
97
- if hf_repo == 'fallback':
98
- hf_repo = 'nvidia/C-RADIOv3-g'
99
-
100
- print(f"οΏ½οΏ½οΏ½οΏ½ Loading {hf_repo}...")
101
-
102
- # Load image processor
103
- image_processor = CLIPImageProcessor.from_pretrained(hf_repo)
104
-
105
- # Load model with trust_remote_code
106
- radio_model = AutoModel.from_pretrained(hf_repo, trust_remote_code=True)
107
- radio_model = radio_model.to(DEVICE)
108
- radio_model.eval()
109
-
110
- print(f"βœ… {hf_repo} loaded successfully")
111
- _preloaded = True
112
- return True
113
-
114
- except KeyError as ke:
115
- if "ls1.gamma" in str(ke) or "ls1.grandma" in str(ke):
116
- print(f"⚠️ Known layer scaling issue, trying workaround...")
117
-
118
- # Try with low_cpu_mem_usage=False
119
- try:
120
- radio_model = AutoModel.from_pretrained(
121
- hf_repo,
122
- trust_remote_code=True,
123
- low_cpu_mem_usage=False
124
- )
125
- radio_model = radio_model.to(DEVICE)
126
- radio_model.eval()
127
- _preloaded = True
128
- print("βœ… Loaded with workaround")
129
- return True
130
- except:
131
- pass
132
-
133
  except Exception as e:
134
- print(f"❌ Failed to load RADIO model: {e}")
135
-
136
- # Fallback to CLIP if needed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  try:
138
- print("πŸ“¦ Trying fallback CLIP model...")
139
- from transformers import CLIPModel, CLIPProcessor
140
-
141
- clip_model = "openai/clip-vit-base-patch32"
142
- image_processor = CLIPProcessor.from_pretrained(clip_model)
143
- radio_model = CLIPModel.from_pretrained(clip_model)
144
- radio_model = radio_model.to(DEVICE)
145
- radio_model.eval()
146
-
147
- print("βœ… Using CLIP fallback (Note: results may differ)")
148
- _preloaded = True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  return True
150
-
151
  except Exception as e:
152
- print(f"❌ All model loading attempts failed: {e}")
153
  return False
154
 
155
- # --------------------------------------------------------------------------------------
156
- # Feature Extraction (matching your working implementation)
157
- # --------------------------------------------------------------------------------------
158
 
159
- def extract_radio_features(image):
160
- """
161
- Extract RADIO features from image - matching your working implementation
162
- CRITICAL: This must output 1,119,744 features to match the classifier
163
- """
164
- global image_processor, radio_model
165
-
166
- if image_processor is None or radio_model is None:
167
- raise Exception("RADIO model not initialized")
168
-
169
- # Convert to PIL if needed
170
- if not isinstance(image, Image.Image):
171
- if isinstance(image, np.ndarray):
172
- image = Image.fromarray(image.astype('uint8'))
173
- else:
174
- raise ValueError(f"Unsupported image type: {type(image)}")
175
-
176
- # CRITICAL: Resize to 512x512 (not 224x224!)
177
- # This matches your working implementation
178
- image = image.convert("RGB").resize((512, 512))
179
-
180
- # Process image with CLIP processor
181
- pixel_values = image_processor(
182
- images=image,
183
- return_tensors='pt',
184
- do_resize=True
185
- ).pixel_values
186
- pixel_values = pixel_values.to(DEVICE)
187
-
188
- # Extract features
189
  with torch.no_grad():
190
- # Check if using CLIP fallback
191
- if hasattr(radio_model, 'get_image_features'):
192
- # CLIP model - different output format
193
- features = radio_model.get_image_features(pixel_values)
194
- # CLIP features are much smaller, need to handle this
195
- features = features.detach().flatten()
196
- else:
197
- # C-RADIOv3 returns tuple: (summary, features)
198
- outputs = radio_model(pixel_values)
199
-
200
- if isinstance(outputs, tuple):
201
- summary, features = outputs
202
- else:
203
- # Handle other possible formats
204
- features = outputs
205
-
206
- # Flatten ALL dimensions - no pooling or reduction!
207
- features = features.detach().flatten()
208
-
209
- # L2 normalize (matching your working code)
210
  features = F.normalize(features, p=2, dim=-1).cpu().flatten()
211
-
212
- features_np = features.numpy()
213
-
214
- print(f" βœ“ Extracted features shape: {features_np.shape}")
215
-
216
- # Check if dimensions match expected
217
- if features_np.shape[0] != 1119744 and features_np.shape[0] > 1000:
218
- print(f" ⚠️ Warning: Expected 1,119,744 features but got {features_np.shape[0]}")
219
-
220
- return features_np
221
 
222
- # --------------------------------------------------------------------------------------
223
- # Classifier Loading
224
- # --------------------------------------------------------------------------------------
225
 
226
  def load_ai_detection_classifier(model_path):
227
- """Load the PassiveAggressiveClassifier"""
228
  global ai_detection_classifier
229
-
230
- if ai_detection_classifier is not None:
231
- print("βœ… Classifier already loaded")
232
- return ai_detection_classifier
233
-
234
- if not os.path.exists(model_path):
235
- print(f"❌ Classifier not found at: {model_path}")
236
  return None
237
-
238
  try:
239
  ai_detection_classifier = joblib.load(model_path)
240
-
241
- # Check expected features
242
- if hasattr(ai_detection_classifier, 'n_features_in_'):
243
- expected = ai_detection_classifier.n_features_in_
244
- print(f"βœ… Classifier loaded - expects {expected:,} features")
245
- else:
246
- print(f"βœ… Classifier loaded: {type(ai_detection_classifier).__name__}")
247
-
248
  return ai_detection_classifier
249
-
250
  except Exception as e:
251
- print(f"❌ Error loading classifier: {e}")
252
  return None
253
 
254
- # --------------------------------------------------------------------------------------
255
- # Prediction
256
- # --------------------------------------------------------------------------------------
257
 
258
- def predict_with_classifier(features):
259
- """Make prediction with loaded classifier"""
260
- global ai_detection_classifier
261
-
262
- if ai_detection_classifier is None:
263
- return None
264
-
265
- # Reshape for sklearn
266
- features = features.reshape(1, -1)
267
-
268
- # Check dimensions
269
- if hasattr(ai_detection_classifier, 'n_features_in_'):
270
- expected = ai_detection_classifier.n_features_in_
271
- actual = features.shape[1]
272
-
273
- if expected != actual:
274
- print(f"❌ Dimension mismatch: classifier expects {expected:,} but got {actual:,} features")
275
-
276
- # If using CLIP fallback with wrong dimensions
277
- if actual < 10000: # CLIP features are much smaller
278
- print(" Note: Using CLIP fallback - results will be unreliable")
279
- # Pad with zeros to match expected size (not ideal but allows demo)
280
- features = np.pad(features, ((0, 0), (0, expected - actual)), 'constant')
281
- print(f" Padded features to {features.shape[1]:,} dimensions")
282
-
283
- # Make prediction
284
- try:
285
- pred = ai_detection_classifier.predict(features)[0]
286
-
287
- # Get confidence
288
- confidence = None
289
- if hasattr(ai_detection_classifier, 'decision_function'):
290
- confidence = float(ai_detection_classifier.decision_function(features)[0])
291
- elif hasattr(ai_detection_classifier, 'predict_proba'):
292
- proba = ai_detection_classifier.predict_proba(features)[0]
293
- confidence = float(max(proba))
294
-
295
- # Convert decision function to probability using sigmoid
296
- if confidence is not None:
297
- probability = 1 / (1 + np.exp(-confidence))
298
- else:
299
- probability = float(pred)
300
-
301
  return {
302
- "prediction": int(pred),
303
- "label": "AI-Generated" if pred == 1 else "Real",
304
- "probability": probability,
305
- "confidence": confidence,
306
- "confidence_score": abs(confidence) if confidence else 0
 
 
 
 
 
307
  }
308
-
309
- except Exception as e:
310
- print(f"❌ Prediction error: {e}")
311
- traceback.print_exc()
312
- return None
313
 
314
- # --------------------------------------------------------------------------------------
315
- # Damage Detection (Stage 1) - Optional
316
- # --------------------------------------------------------------------------------------
317
 
318
- _damage_predictor = None
 
 
 
319
 
320
- def load_damage_model(model_path):
321
- """Load Detectron2 damage detection model"""
322
- global _damage_predictor
323
-
324
- if _damage_predictor is not None:
325
- return _damage_predictor
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
326
 
327
- if not DETECTRON2_AVAILABLE or not os.path.exists(model_path):
328
- print("ℹ️ Damage detection not available")
329
- return None
330
 
 
331
  try:
332
- cfg = get_cfg()
333
- cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
334
- cfg.MODEL.WEIGHTS = model_path
335
- cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
336
- cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
337
- cfg.MODEL.DEVICE = str(DEVICE)
338
-
339
- _damage_predictor = DefaultPredictor(cfg)
340
- print("βœ… Damage detection model loaded")
341
- return _damage_predictor
342
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
343
  except Exception as e:
344
- print(f"❌ Could not load damage model: {e}")
345
- return None
 
 
 
 
346
 
347
- def simulate_damage_detection(image):
348
- """Simulate damage detection for demo"""
349
- import hashlib, random
350
-
351
- if isinstance(image, Image.Image):
352
- arr = np.array(image)
353
  else:
354
- arr = image
355
-
356
- h, w = arr.shape[:2] if arr.ndim >= 2 else (100, 100)
357
-
358
- # Deterministic random based on image
359
- seed = int(hashlib.md5(arr.tobytes()).hexdigest()[:8], 16) % 10000
360
- random.seed(seed)
361
-
362
- n_damages = random.randint(0, 3)
363
- damages = []
364
-
365
- for i in range(n_damages):
366
- x1 = random.randint(0, max(1, w - 100))
367
- y1 = random.randint(0, max(1, h - 100))
368
- x2 = min(w, x1 + random.randint(50, 150))
369
- y2 = min(h, y1 + random.randint(50, 150))
370
-
371
- damages.append({
372
- "bbox": [x1, y1, x2, y2],
373
- "score": random.uniform(0.6, 0.95),
374
- "label": f"damage_{i+1}"
375
- })
376
-
377
- return damages
378
 
379
- # --------------------------------------------------------------------------------------
380
- # Main Analysis Function
381
- # --------------------------------------------------------------------------------------
382
 
383
- def analyze_image(image, enable_damage=False, damage_threshold=0.5):
384
- """Main analysis pipeline"""
385
-
386
- if image is None:
387
- return "No image provided", {}, None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
388
 
 
389
  try:
390
- # Convert numpy to PIL if needed
391
- if isinstance(image, np.ndarray):
392
- image = Image.fromarray(image.astype('uint8'))
393
-
394
- # Stage 2: AI Detection (Main functionality)
395
- ai_result = None
396
-
397
- # Ensure models are loaded
398
- if not _preloaded:
399
- if not preload_radio_model():
400
- return "Failed to load feature extractor", {"error": "Model loading failed"}, None
401
-
402
- # Load classifier
403
- classifier_path = huggingface_model_path or DEFAULT_AI_DETECTION_MODEL_PATH
404
- if ai_detection_classifier is None:
405
- load_ai_detection_classifier(classifier_path)
406
-
407
- if ai_detection_classifier is None:
408
- return "Classifier not available", {"error": "Classifier not loaded"}, None
409
-
410
- # Extract features (512x512, full dimensions)
411
- try:
412
- features = extract_radio_features(image)
413
-
414
- # Make prediction
415
- ai_result = predict_with_classifier(features)
416
-
417
- if ai_result:
418
- result_text = f"{ai_result['label']} (Confidence: {ai_result['probability']:.2%})"
419
- else:
420
- result_text = "Prediction failed"
421
-
422
- except Exception as e:
423
- print(f"❌ AI detection error: {e}")
424
- traceback.print_exc()
425
- result_text = f"Error: {str(e)}"
426
- ai_result = {"error": str(e)}
427
-
428
- # Stage 1: Damage Detection (Optional)
429
- damages = []
430
- annotated = None
431
-
432
- if enable_damage:
 
 
 
 
 
433
  try:
434
- predictor = load_damage_model(DEFAULT_DAMAGE_MODEL_PATH)
435
-
436
- if predictor:
437
- # Real detection
438
- outputs = predictor(np.array(image))
439
- instances = outputs["instances"].to("cpu")
440
-
441
- if len(instances) > 0:
442
- boxes = instances.pred_boxes.tensor.numpy()
443
- scores = instances.scores.numpy()
444
-
445
- for box, score in zip(boxes, scores):
446
- if score > damage_threshold:
447
- x1, y1, x2, y2 = map(int, box)
448
- damages.append({
449
- "bbox": [x1, y1, x2, y2],
450
- "score": float(score),
451
- "label": "damage"
452
- })
453
  else:
454
- # Use simulator
455
- damages = simulate_damage_detection(image)
456
-
457
- # Create annotated image
458
- annotated = np.array(image.convert("RGB"))
459
- for dmg in damages:
460
- x1, y1, x2, y2 = dmg["bbox"]
461
- cv2.rectangle(annotated, (x1, y1), (x2, y2), (255, 255, 0), 2)
462
- label = f"{dmg['score']:.2%}"
463
- cv2.putText(annotated, label, (x1, y1-5),
464
- cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
465
-
466
- # Add AI verdict overlay
467
- if ai_result and 'label' in ai_result:
468
- color = (0, 255, 0) if ai_result['label'] == "Real" else (0, 0, 255)
469
- cv2.putText(annotated, ai_result['label'], (30, 50),
470
- cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 3)
471
-
472
- if 'probability' in ai_result:
473
- conf_text = f"Conf: {ai_result['probability']:.1%}"
474
- cv2.putText(annotated, conf_text, (30, 90),
475
- cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)
476
-
477
- except Exception as e:
478
- print(f"⚠️ Damage detection error: {e}")
479
-
480
- # Prepare detailed results
481
- detailed_results = {
482
- "ai_detection": ai_result or {},
483
- "damage_detection": {
484
- "enabled": enable_damage,
485
- "damages_found": len(damages),
486
- "damages": damages
487
- } if enable_damage else None
488
- }
489
-
490
- return result_text, detailed_results, annotated
491
-
492
  except Exception as e:
493
- print(f"❌ Analysis error: {e}")
494
- traceback.print_exc()
495
- return f"Error: {str(e)}", {"error": str(e)}, None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
496
 
497
- # --------------------------------------------------------------------------------------
498
- # Gradio Interface
499
- # --------------------------------------------------------------------------------------
500
 
501
  def create_gradio_interface():
502
- """Create Gradio interface"""
 
 
 
 
 
 
 
 
 
503
 
504
- with gr.Blocks(title="AI Image Detection - C-RADIOv3-g") as app:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
505
  gr.HTML("""
506
- <div style="text-align: center; padding: 20px; background: linear-gradient(90deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px;">
507
- <h1>πŸ€– AI Image Detection</h1>
508
- <p>C-RADIOv3-g (512Γ—512) + PassiveAggressiveClassifier</p>
509
- </div>
510
  """)
511
-
512
  with gr.Row():
513
- with gr.Column():
514
- input_image = gr.Image(type="numpy", label="Upload Image", height=400)
 
 
 
515
 
516
  with gr.Row():
517
- analyze_btn = gr.Button("πŸ” Analyze", variant="primary", size="lg")
518
- clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary", size="lg")
 
 
 
 
 
 
519
 
520
- enable_damage = gr.Checkbox(
521
- value=False,
522
- label="Enable Damage Detection (Stage 1)",
523
- info="Optional: Detect physical damage in images"
524
  )
525
- damage_thresh = gr.Slider(
526
- 0.1, 0.95, value=0.5, step=0.05,
527
- label="Damage Score Threshold",
528
- visible=False
 
 
 
 
529
  )
530
 
531
- with gr.Column():
532
- output_text = gr.Textbox(
533
- label="AI Detection Result",
534
- placeholder="Upload an image and click Analyze",
535
- lines=2
536
  )
537
- output_json = gr.JSON(label="Detailed Analysis")
538
- annotated_image = gr.Image(label="Annotated Output", visible=False)
539
-
540
- # Show/hide damage threshold based on checkbox
541
- enable_damage.change(
542
- fn=lambda x: gr.update(visible=x),
543
- inputs=[enable_damage],
544
- outputs=[damage_thresh]
545
- )
546
 
547
- enable_damage.change(
548
- fn=lambda x: gr.update(visible=x),
549
- inputs=[enable_damage],
550
- outputs=[annotated_image]
551
- )
552
-
553
- # Analysis function
554
- def run_analysis(img, dmg_enabled, dmg_thresh):
555
- if img is None:
556
- return "No image provided", {}, None
 
 
 
 
 
 
 
557
 
558
- text, details, annotated = analyze_image(img, dmg_enabled, dmg_thresh)
 
559
 
560
- # Only show annotated if damage detection is enabled
561
- if dmg_enabled and annotated is not None:
562
- return text, details, gr.update(value=annotated, visible=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
563
  else:
564
- return text, details, gr.update(visible=False)
 
 
 
 
 
565
 
566
- # Wire up buttons
567
  analyze_btn.click(
568
- fn=run_analysis,
569
- inputs=[input_image, enable_damage, damage_thresh],
570
- outputs=[output_text, output_json, annotated_image]
 
571
  )
572
 
 
 
573
  clear_btn.click(
574
- fn=lambda: (None, "", {}, gr.update(visible=False)),
575
- outputs=[input_image, output_text, output_json, annotated_image]
576
- )
577
-
578
- # Auto-analyze on image upload
579
- input_image.change(
580
- fn=run_analysis,
581
- inputs=[input_image, enable_damage, damage_thresh],
582
- outputs=[output_text, output_json, annotated_image]
583
  )
584
-
585
- # Info section
586
- with gr.Accordion("ℹ️ Model Information", open=False):
587
- gr.Markdown("""
588
- ### Technical Details
589
- - **Feature Extractor**: nvidia/C-RADIOv3-g
590
- - **Input Size**: 512Γ—512 pixels
591
- - **Feature Dimensions**: 1,119,744
592
- - **Classifier**: PassiveAggressiveClassifier (V1.pkl)
593
- - **Device**: Automatically selected (MPS > CUDA > CPU)
594
-
595
- ### How It Works
596
- 1. Image is resized to 512Γ—512
597
- 2. C-RADIOv3-g extracts visual features
598
- 3. Features are L2-normalized
599
- 4. PassiveAggressiveClassifier predicts Real vs AI-Generated
600
-
601
- ### Optional Stage 1
602
- - Damage detection using Detectron2 (if available)
603
- - Falls back to simulation if not available
604
- """)
605
-
606
- return app
607
-
608
- # --------------------------------------------------------------------------------------
609
- # Test Function
610
- # --------------------------------------------------------------------------------------
611
 
612
- def test_system():
613
- """Test the AI detection system"""
614
- print("\n" + "="*60)
615
- print("πŸ§ͺ System Test")
616
- print("="*60)
617
-
618
- test_results = {
619
- "device": str(DEVICE),
620
- "detectron2_available": DETECTRON2_AVAILABLE,
621
- "model_loaded": radio_model is not None,
622
- "processor_loaded": image_processor is not None,
623
- "classifier_loaded": ai_detection_classifier is not None,
624
- "classifier_path": huggingface_model_path or DEFAULT_AI_DETECTION_MODEL_PATH
625
- }
626
-
627
- for key, value in test_results.items():
628
- status = "βœ…" if value not in [None, False, ""] else "❌"
629
- print(f"{status} {key}: {value}")
630
-
631
- # Test feature extraction with dummy image
632
- if radio_model and image_processor:
633
- try:
634
- test_img = Image.new('RGB', (512, 512), color='white')
635
- features = extract_radio_features(test_img)
636
- print(f"βœ… Feature extraction test: {features.shape}")
637
-
638
- if ai_detection_classifier and hasattr(ai_detection_classifier, 'n_features_in_'):
639
- expected = ai_detection_classifier.n_features_in_
640
- actual = features.shape[0]
641
- if expected == actual:
642
- print(f"βœ… Dimension match: {actual:,} features")
643
- else:
644
- print(f"⚠️ Dimension mismatch: expected {expected:,}, got {actual:,}")
645
-
646
- except Exception as e:
647
- print(f"❌ Feature extraction test failed: {e}")
648
-
649
- print("="*60 + "\n")
650
 
651
- # --------------------------------------------------------------------------------------
652
- # Main
653
- # --------------------------------------------------------------------------------------
654
 
655
  if __name__ == "__main__":
656
- print("="*60)
657
- print("πŸš€ Starting AI Image Detection Service")
658
- print("="*60)
659
- print(f"πŸ“ Device: {DEVICE}")
660
- print(f"πŸ“¦ Classifier: {huggingface_model_path or DEFAULT_AI_DETECTION_MODEL_PATH}")
661
-
662
- # Load models
663
- if preload_radio_model():
664
- print("βœ… Feature extractor loaded")
665
- else:
666
- print("⚠️ Running without feature extractor")
667
 
668
- # Load classifier
669
- classifier_path = huggingface_model_path or DEFAULT_AI_DETECTION_MODEL_PATH
670
- if load_ai_detection_classifier(classifier_path):
671
- print("βœ… Classifier loaded")
672
- else:
673
- print("⚠️ Running without classifier")
674
-
675
- # Run system test
676
- test_system()
677
 
678
- # Launch app
679
  app = create_gradio_interface()
680
  app.launch(
 
681
  server_name="0.0.0.0",
682
  server_port=7860,
683
- share=False,
684
  show_error=True
685
  )
 
1
  #!/usr/bin/env python3
2
+ import importlib.util
3
  import os
4
  import sys
 
 
 
 
 
5
  import time
6
  import cv2
7
  import torch
8
  import numpy as np
9
  import gradio as gr
10
+ from PIL import Image
11
+ from torchvision import transforms
12
  import torch.nn as nn
13
  import torch.nn.functional as F
14
+ import traceback
15
+ from torchvision.models import vit_b_16
16
  from transformers import AutoModel, CLIPImageProcessor
17
  import joblib
18
+ import zipfile
19
  import json
20
+ from datetime import datetime
21
+ import base64
22
+ import io
23
 
24
+ # Add current directory to path
25
+ if not os.getcwd() in sys.path:
26
+ sys.path.append(os.getcwd())
27
 
28
+ # Check if detectron2 is installed and attempt installation if needed
29
+ if importlib.util.find_spec("detectron") is None:
30
+ print("πŸ”„ Detectron2 not found. Attempting installation...")
31
+ print("Installing PyTorch and Detectron2...")
32
+ os.system("pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cpu")
33
+ os.system("pip install git+https://github.com/facebookresearch/detectron2.git")
34
+ print("Installation complete!")
35
 
36
+ # Optional Detectron2 import
37
  DETECTRON2_AVAILABLE = False
38
  try:
39
+ print("Attempting to import Detectron2...")
40
  from detectron2.engine import DefaultPredictor
41
  from detectron2.config import get_cfg
42
+ from detectron2.utils.visualizer import Visualizer, ColorMode
43
  from detectron2 import model_zoo
44
+
45
  DETECTRON2_AVAILABLE = True
46
+ print("βœ… Detectron2 imported successfully")
47
+ except ImportError as e:
48
+ print(f"⚠️ Detectron2 not available: {e}")
49
+ DETECTRON2_AVAILABLE = False
50
 
51
+ # Try to download model from Hugging Face
52
  huggingface_model_path = None
53
  try:
54
  from huggingface_hub import hf_hub_download
55
+
56
+ # Try to download from your repository
57
+ huggingface_model_path = hf_hub_download(
58
+ repo_id=os.getenv('PRIVATE_REPO', 'fallback'),
59
+ filename="V1.pkl",
60
+ token=os.getenv('key')
61
+ )
62
+ print(f"βœ… Model downloaded from Hugging Face: {huggingface_model_path}")
 
 
 
63
  except Exception as e:
64
+ print(f"⚠️ Could not download model from Hugging Face: {e}")
65
+ print("πŸ”„ Will use demo mode with simulated results")
66
+ huggingface_model_path = None
67
+
68
+ # Define model paths - SEQUENTIAL PIPELINE
69
+ DEFAULT_DAMAGE_MODEL_PATH = "./output/model_final.pth" # zone detection (Stage 1)
70
+ DEFAULT_AI_DETECTION_MODEL_PATH = "./output/V1.pkl" # AI detection (Stage 2)
71
 
72
+ # Initialize device for model
73
  if torch.backends.mps.is_available():
74
+ RADIO_DEVICE = torch.device("mps")
 
75
  elif torch.cuda.is_available():
76
+ RADIO_DEVICE = torch.device("cuda")
 
77
  else:
78
+ RADIO_DEVICE = torch.device("cpu")
 
79
 
80
+ # Global variables for C model
81
+ radio_l_image_processor = None
82
+ radio_l_model = None
 
 
 
83
  ai_detection_classifier = None
 
84
 
85
+ # Maximum number of tries allowed per user per day
86
+ MAX_TRIES = 10
 
87
 
88
+ # JavaScript for cookie management - Version corrigΓ©e
89
+ COOKIE_JAVASCRIPT = """
90
+ <script>
91
+ // Cookie management functions for HEDI
92
+ function setCookie(name, value, days = 1) {
93
+ try {
94
+ const expires = new Date();
95
+ expires.setTime(expires.getTime() + (days * 24 * 60 * 60 * 1000));
96
+ document.cookie = name + '=' + value + ';expires=' + expires.toUTCString() + ';path=/;SameSite=Lax';
97
+ console.log('βœ… Cookie set:', name, '=', value);
98
+ return true;
99
+ } catch (e) {
100
+ console.error('❌ Error setting cookie:', e);
101
+ return false;
102
+ }
103
+ }
104
+ function getCookie(name) {
105
+ try {
106
+ const nameEQ = name + '=';
107
+ const ca = document.cookie.split(';');
108
+ for(let i = 0; i < ca.length; i++) {
109
+ let c = ca[i];
110
+ while (c.charAt(0) == ' ') c = c.substring(1, c.length);
111
+ if (c.indexOf(nameEQ) == 0) {
112
+ const value = c.substring(nameEQ.length, c.length);
113
+ console.log('πŸ“– Cookie read:', name, '=', value);
114
+ return value;
115
+ }
116
+ }
117
+ console.log('πŸ“– Cookie not found:', name);
118
+ return null;
119
+ } catch (e) {
120
+ console.error('❌ Error reading cookie:', e);
121
+ return null;
122
+ }
123
+ }
124
+ function getHediUsage() {
125
+ try {
126
+ console.log('πŸ” Getting HEDI usage...');
127
+ const today = new Date().toISOString().split('T')[0]; // YYYY-MM-DD
128
+ const lastDate = getCookie('hedi_last_date');
129
 
130
+ // Daily reset
131
+ if (lastDate !== today) {
132
+ console.log('πŸ”„ Daily reset detected: ' + lastDate + ' β†’ ' + today);
133
+ setCookie('hedi_usage_count', '0', 1);
134
+ setCookie('hedi_last_date', today, 1);
135
+ console.log('βœ… Usage reset to 0');
136
+ return 0;
137
+ }
138
+
139
+ const usage = parseInt(getCookie('hedi_usage_count') || '0');
140
+ console.log('πŸͺ Current usage from cookies: ' + usage + '/10');
141
+ return usage;
142
+ } catch (e) {
143
+ console.error('❌ Error getting usage from cookies:', e);
144
+ return 0;
145
+ }
146
+ }
147
+ function saveHediUsage(count) {
148
+ try {
149
+ console.log('πŸ’Ύ Saving usage to cookies:', count);
150
+ const today = new Date().toISOString().split('T')[0];
151
+ const success1 = setCookie('hedi_usage_count', count.toString(), 1);
152
+ const success2 = setCookie('hedi_last_date', today, 1);
153
+
154
+ if (success1 && success2) {
155
+ console.log('βœ… Usage saved successfully: ' + count + '/10');
156
+ return true;
157
+ } else {
158
+ console.error('❌ Failed to save usage');
159
+ return false;
160
+ }
161
+ } catch (e) {
162
+ console.error('❌ Error saving usage to cookies:', e);
163
+ return false;
164
+ }
165
+ }
166
+ // Expose functions globally
167
+ window.hediCookies = {
168
+ getUsage: function() {
169
+ try {
170
+ return getHediUsage();
171
+ } catch (e) {
172
+ console.error('Fallback: Error in getUsage', e);
173
+ return 0;
174
+ }
175
+ },
176
+ saveUsage: function(count) {
177
+ try {
178
+ return saveHediUsage(count);
179
+ } catch (e) {
180
+ console.error('Fallback: Error in saveUsage', e);
181
+ return false;
182
+ }
183
+ }
184
+ };
185
+ // Initialize immediately
186
+ console.log('πŸͺ HEDI Cookies loading...');
187
+ try {
188
+ const initialUsage = getHediUsage();
189
+ console.log('πŸͺ HEDI Cookies initialized with usage:', initialUsage);
190
+ } catch (e) {
191
+ console.error('❌ Error during initialization:', e);
192
+ }
193
+ </script>
194
+ """
195
+
196
+
197
def get_usage_display_html(usage_count):
    """Render the daily-usage widget as an HTML snippet.

    Args:
        usage_count: Number of analyses performed today.

    Returns:
        HTML string with a usage counter and progress bar against MAX_TRIES.
    """
    # Fix: clamp to 100 so the bar cannot overflow its container when the
    # caller passes a count above the daily cap.
    usage_percent = min((usage_count / MAX_TRIES) * 100, 100)
    # NOTE(review): the `< 7` color breakpoint is hard-coded independently of
    # MAX_TRIES -- confirm it should not scale with the cap.
    color = "#dc2626" if usage_count >= MAX_TRIES else "#2563eb" if usage_count < 7 else "#f59e0b"

    # Fix: the original ternary had an unreachable trailing `else ''` branch
    # (any count below MAX_TRIES always hit the "remaining" branch).
    status = (
        '⚠️ Daily limit reached!'
        if usage_count >= MAX_TRIES
        else f'βœ… {MAX_TRIES - usage_count} remaining'
    )

    return f"""
    <div id="usage-display" style="background: white; border: 1px solid #e5e7eb; padding: 15px; border-radius: 8px;">
        <div style="display: flex; justify-content: space-between; margin-bottom: 10px;">
            <span>Daily Usage:</span>
            <span style="background: #dbeafe; color: #1e40af; padding: 2px 8px; border-radius: 12px;">{usage_count}/{MAX_TRIES}</span>
        </div>
        <div style="background: #e5e7eb; height: 6px; border-radius: 3px;">
            <div style="background: {color}; height: 6px; border-radius: 3px; width: {usage_percent}%; transition: width 0.3s;"></div>
        </div>
        <div style="font-size: 12px; color: #6b7280; margin-top: 5px; text-align: center;">
            {status}
        </div>
    </div>
    """
216
+
217
+
218
def preload_models():
    """Preload the feature-extraction model at startup to cut first-request latency.

    Reads the Hugging Face repo id from the MODEL_REPO environment variable and
    populates the module-level globals used by extract_radio_l_features().

    Returns:
        bool: True when the model was loaded, False otherwise (including the
        case where MODEL_REPO is unset or left at its 'fallback' default).
    """
    global radio_l_image_processor, radio_l_model

    print("πŸ”„ Preloading C model...")
    try:
        hf_repo = os.getenv('MODEL_REPO', 'fallback')
        if hf_repo and hf_repo != 'fallback':
            from transformers import AutoModel, CLIPImageProcessor
            radio_l_image_processor = CLIPImageProcessor.from_pretrained(hf_repo)
            radio_l_model = AutoModel.from_pretrained(hf_repo, trust_remote_code=True)
            radio_l_model = radio_l_model.to(RADIO_DEVICE)
            radio_l_model.eval()
            print("βœ… C model preloaded successfully!")
            return True
        # Fix: previously fell through and implicitly returned None when
        # MODEL_REPO was not configured; return an explicit bool for callers.
        print("⚠️ MODEL_REPO not configured; skipping preload")
        return False
    except Exception as e:
        print(f"⚠️ Could not preload C model: {e}")
        return False
236
+
237
+
238
def setup_device(device_str):
    """Resolve a device selector string to a concrete ``torch.device``.

    Args:
        device_str: 'auto', 'cuda', 'mps', or anything else (falls back to CPU).

    Returns:
        torch.device: the chosen computation device.
    """
    cuda_ok = torch.cuda.is_available()
    mps_ok = (
        hasattr(torch, 'backends')
        and hasattr(torch.backends, 'mps')
        and torch.backends.mps.is_available()
    )

    if device_str == 'auto':
        # Prefer CUDA, then Apple MPS, then CPU.
        if cuda_ok:
            return torch.device('cuda:0')
        if mps_ok:
            return torch.device('mps')
        return torch.device('cpu')

    if device_str == 'cuda' and cuda_ok:
        return torch.device('cuda:0')
    if device_str == 'mps' and mps_ok:
        return torch.device('mps')

    # Unknown selector, or requested accelerator unavailable.
    return torch.device('cpu')
254
+
255
+
256
def load_detectron2_damage_model(model_path, device):
    """Build the Stage-1 Detectron2 predictor for damage detection.

    Args:
        model_path: Path to the fine-tuned weights file, or None.
        device: torch.device to run inference on.

    Returns:
        A DefaultPredictor, or None when Detectron2 is unavailable, the
        weights file is missing, or construction fails.
    """
    if not DETECTRON2_AVAILABLE:
        print("❌ Detectron2 not available")
        return None

    weights_missing = model_path is None or not os.path.exists(model_path)
    if weights_missing:
        print(f"❌ Damage model not found at: {model_path}")
        return None

    try:
        base_config = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(base_config))
        cfg.MODEL.WEIGHTS = model_path
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        cfg.MODEL.DEVICE = str(device)
        # Single-class (binary damage) head -- adjust if the fine-tuned
        # checkpoint was trained with a different number of classes.
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

        damage_predictor = DefaultPredictor(cfg)
        print("βœ… Detectron2 damage detection model loaded successfully")
        return damage_predictor
    except Exception as e:
        print(f"❌ Error loading Detectron2 model: {e}")
        return None
282
+
283
+
284
def initialize_radiov3_model():
    """Lazily load the feature-extraction model into the module globals.

    Idempotent: returns immediately when the processor and model are already
    present. The HF repo id is read from the MODEL_REPO environment variable.

    Returns:
        bool: True when the model is available, False on any load failure.
    """
    global radio_l_image_processor, radio_l_model

    already_loaded = (
        radio_l_image_processor is not None and radio_l_model is not None
    )
    if already_loaded:
        print("βœ… C model already loaded, reusing...")
        return True

    try:
        print("πŸ”„ Loading model C...")
        hf_repo = os.getenv('MODEL_REPO', 'fallback')
        radio_l_image_processor = CLIPImageProcessor.from_pretrained(hf_repo)
        radio_l_model = AutoModel.from_pretrained(hf_repo, trust_remote_code=True)
        radio_l_model = radio_l_model.to(RADIO_DEVICE)
        radio_l_model.eval()
        print("βœ… C model loaded successfully")
        return True
    except Exception as e:
        print(f"❌ Error loading model: {e}")
        return False
305
 
 
 
 
306
 
307
def extract_radio_l_features(image):
    """Compute an L2-normalized feature vector for one image.

    Args:
        image: PIL.Image or HxWxC numpy array (cast to uint8 before use).

    Returns:
        1-D numpy array of normalized features.

    Raises:
        Exception: when the model globals have not been initialized.
    """
    global radio_l_image_processor, radio_l_model

    if radio_l_image_processor is None or radio_l_model is None:
        raise Exception("C model not initialized")

    # Accept raw arrays by converting to PIL first.
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image.astype('uint8'))

    # Pre-size to 224x224 as required by the downstream classifier.
    resized = image.resize((224, 224))

    pixel_values = radio_l_image_processor(
        images=resized, return_tensors='pt', do_resize=True
    ).pixel_values
    pixel_values = pixel_values.to(RADIO_DEVICE)

    with torch.no_grad():
        summary, raw_features = radio_l_model(pixel_values)
        flat = raw_features.detach().flatten()
        normalized = F.normalize(flat, p=2, dim=-1).cpu().flatten()

    return normalized.numpy()
329
+
 
330
 
331
def load_ai_detection_classifier(model_path):
    """Load the Stage-2 AI-detection classifier from disk.

    Stores the loaded estimator in the module-level
    ``ai_detection_classifier`` global as a side effect.

    Args:
        model_path: Filesystem path to the pickled classifier, or None.

    Returns:
        The loaded classifier, or None when the file is missing or loading fails.
    """
    global ai_detection_classifier

    path_missing = model_path is None or not os.path.exists(model_path)
    if path_missing:
        print(f"❌ AI detection model not found at: {model_path}")
        return None

    try:
        ai_detection_classifier = joblib.load(model_path)
        print("βœ… V1.pkl AI detection classifier loaded successfully")
    except Exception as e:
        print(f"❌ Error loading V1.pkl classifier: {e}")
        return None
    return ai_detection_classifier
346
 
 
 
 
347
 
348
+ def simulate_damage_detection(image):
349
+ """Simulate damage detection when Zone model is not available"""
350
+ import random
351
+ import hashlib
352
+
353
+ # Create deterministic "analysis" based on image content
354
+ if isinstance(image, np.ndarray):
355
+ # Use image hash to create consistent results
356
+ img_hash = hashlib.md5(image.tobytes()).hexdigest()
357
+ seed = int(img_hash[:8], 16) % 1000
358
+ random.seed(seed)
359
+
360
+ h, w = image.shape[:2]
361
+ num_damages = random.randint(1, 3)
362
+
363
+ damages = []
364
+ for i in range(num_damages):
365
+ # Generate realistic damage regions
366
+ x1 = random.randint(0, w // 2)
367
+ y1 = random.randint(0, h // 2)
368
+ x2 = x1 + random.randint(w // 6, w // 3)
369
+ y2 = y1 + random.randint(h // 6, h // 3)
370
+
371
+ # Ensure bounds
372
+ x2 = min(x2, w - 1)
373
+ y2 = min(y2, h - 1)
374
+
375
+ confidence = random.uniform(0.6, 0.95)
376
+ damage_type = random.choice(["Scratch", "Dent", "Crack", "Paint Damage"])
377
+
378
+ damages.append({
379
+ "bbox": [x1, y1, x2, y2],
380
+ "confidence": confidence,
381
+ "type": damage_type,
382
+ "area": (x2 - x1) * (y2 - y1)
383
+ })
384
+
 
 
 
 
 
 
385
  return {
386
+ "damages": damages,
387
+ "total_damages": len(damages),
388
+ "demo_mode": True
389
+ }
390
+ else:
391
+ # Default demo result
392
+ return {
393
+ "damages": [{"bbox": [100, 100, 200, 200], "confidence": 0.85, "type": "Dent", "area": 10000}],
394
+ "total_damages": 1,
395
+ "demo_mode": True
396
  }
 
 
 
 
 
397
 
 
 
 
398
 
399
+ def simulate_ai_detection(image, threshold=0.5):
400
+ """Simulate AI detection analysis when real model is not available"""
401
+ import random
402
+ import hashlib
403
 
404
+ # Create deterministic "analysis" based on image content
405
+ if isinstance(image, np.ndarray):
406
+ # Use image hash to create consistent results
407
+ img_hash = hashlib.md5(image.tobytes()).hexdigest()
408
+ seed = int(img_hash[:8], 16) % 1000
409
+ random.seed(seed)
410
+
411
+ # Generate "realistic" probabilities
412
+ ai_prob = random.uniform(0.1, 0.9)
413
+ real_prob = 1.0 - ai_prob
414
+ is_ai = ai_prob > threshold
415
+
416
+ return {
417
+ "ai_prob": ai_prob,
418
+ "real_prob": real_prob,
419
+ "is_ai": is_ai,
420
+ "prediction": 1 if is_ai else 0,
421
+ "confidence": "HIGH" if abs(ai_prob - 0.5) > 0.3 else "MEDIUM" if abs(ai_prob - 0.5) > 0.15 else "LOW",
422
+ "demo_mode": True
423
+ }
424
+ else:
425
+ # Default demo result
426
+ return {
427
+ "ai_prob": 0.3,
428
+ "real_prob": 0.7,
429
+ "is_ai": False,
430
+ "prediction": 0,
431
+ "confidence": "MEDIUM",
432
+ "demo_mode": True
433
+ }
434
+
435
+
436
def analyze_with_status(input_image, damage_threshold=0.7, ai_detection_threshold=0.5, device_str="cpu"):
    """Run the two-stage fraud-analysis pipeline on one image.

    Stage 1 detects damage regions (Detectron2 when available, otherwise the
    deterministic simulator); Stage 2 classifies the image as AI-generated or
    authentic (feature extractor + pickled classifier, with the simulator as
    fallback). Results are combined into a verdict plus an annotated image.

    Args:
        input_image: numpy RGB array, a file path string, or a dict with a
            "path" key (Gradio upload payload).
        damage_threshold: minimum Stage-1 detection score to keep a box.
        ai_detection_threshold: AI-probability cutoff; only used by the
            simulation path -- the real classifier uses its own prediction.
        device_str: device selector passed to setup_device().

    Returns:
        dict: always contains "success"; on success also "analysis_text",
        "result_image", verdict/risk fields, per-stage details, and a
        timestamp. On failure, "error" plus a user-facing message.
    """

    print(f"πŸš€ analyze_with_status called!")
    print(f"πŸ“Š Parameters: image={input_image is not None}, threshold_damage={damage_threshold}, ai_detection_threshold={ai_detection_threshold}")

    # Basic image validation
    try:
        if input_image is None:
            return {
                "success": False,
                "error": "No image provided",
                "analysis_text": "❌ Please upload an image to analyze.",
                "result_image": None
            }

        # Convert image to proper format. Arrays are assumed to arrive as RGB
        # (Gradio convention) and are converted to BGR for OpenCV.
        if isinstance(input_image, dict) and "path" in input_image:
            img = cv2.imread(input_image["path"])
            original_filename = os.path.basename(input_image["path"])
        elif isinstance(input_image, str):
            img = cv2.imread(input_image)
            original_filename = os.path.basename(input_image)
        elif isinstance(input_image, np.ndarray):
            img = input_image.copy()
            if len(img.shape) == 3 and img.shape[2] == 3:
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            original_filename = "uploaded_image"
        else:
            return {
                "success": False,
                "error": "Unsupported image format",
                "analysis_text": "❌ Unsupported image format",
                "result_image": None
            }

        if img is None:
            return {
                "success": False,
                "error": "Could not read image",
                "analysis_text": "❌ Could not read the image",
                "result_image": None
            }

    except Exception as e:
        return {
            "success": False,
            "error": str(e),
            "analysis_text": f"❌ Error loading image: {str(e)}",
            "result_image": None
        }

    # Setup processing
    device = setup_device(device_str)

    # Convert to RGB for consistent processing
    if len(img.shape) == 3 and img.shape[2] == 3:
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    else:
        rgb_img = img

    # Initialize models. NOTE(review): models are (re)loaded on every request;
    # initialize_radiov3_model() caches via globals but the Detectron2 and
    # classifier loads do not -- a startup-time cache would avoid repeat I/O.
    damage_model_path = DEFAULT_DAMAGE_MODEL_PATH
    ai_detection_model_path = huggingface_model_path or DEFAULT_AI_DETECTION_MODEL_PATH

    damage_model = None
    ai_classifier = None
    demo_mode = False

    # Stage 1: Load Damage Detection Model (Detectron2)
    if damage_model_path and os.path.exists(damage_model_path):
        damage_model = load_detectron2_damage_model(damage_model_path, device)
        if not damage_model:
            demo_mode = True
    else:
        demo_mode = True

    # Stage 2: Initialize C-RADIOv3-g model
    radiov3_initialized = initialize_radiov3_model()
    if not radiov3_initialized:
        demo_mode = True

    # Stage 2b: Load AI Detection Classifier (V1.pkl)
    if ai_detection_model_path and os.path.exists(ai_detection_model_path):
        ai_classifier = load_ai_detection_classifier(ai_detection_model_path)
        if not ai_classifier:
            demo_mode = True
    else:
        demo_mode = True

    # Set demo mode if any model failed
    if damage_model is None or not radiov3_initialized or ai_classifier is None:
        demo_mode = True

    progress_info = []
    progress_info.append("πŸ”„ SEQUENTIAL ANALYSIS PIPELINE")

    # STAGE 1: DAMAGE DETECTION -- any failure falls back to the simulator.
    try:
        if damage_model and not demo_mode:
            # Use real model
            outputs = damage_model(rgb_img)
            instances = outputs["instances"].to("cpu")

            damages = []
            boxes = instances.pred_boxes.tensor.numpy() if len(instances) > 0 else []
            scores = instances.scores.numpy() if len(instances) > 0 else []

            for i, (box, score) in enumerate(zip(boxes, scores)):
                if score > float(damage_threshold):
                    x1, y1, x2, y2 = box
                    damages.append({
                        "bbox": [int(x1), int(y1), int(x2), int(y2)],
                        "confidence": float(score),
                        "type": f"Damage_{i + 1}",
                        "area": int((x2 - x1) * (y2 - y1))
                    })

            damage_result = {
                "damages": damages,
                "total_damages": len(damages),
                "demo_mode": False
            }
        else:
            # Use simulation
            damage_result = simulate_damage_detection(rgb_img)

        # Get results
        damages = damage_result["damages"]
        total_damages = damage_result["total_damages"]

    except Exception as e:
        damage_result = simulate_damage_detection(rgb_img)
        damages = damage_result["damages"]
        total_damages = damage_result["total_damages"]

    # STAGE 2: AI DETECTION -- any failure falls back to the simulator.
    try:
        if radiov3_initialized and ai_classifier and not demo_mode:
            # Extract features using C with 224x224 resize
            features = extract_radio_l_features(rgb_img)
            features = features.reshape(1, -1)  # Reshape for single sample

            # Predict using V1.pkl classifier
            prediction = ai_classifier.predict(features)[0]

            # Get confidence/probability
            try:
                if hasattr(ai_classifier, 'predict_proba'):
                    probabilities = ai_classifier.predict_proba(features)[0]
                    prob_real = float(probabilities[0]) if len(probabilities) > 1 else 1 - prediction
                    prob_ai = float(probabilities[1]) if len(probabilities) > 1 else prediction
                else:
                    # For models with decision_function. NOTE(review): this
                    # maps the raw margin to [0, 1] heuristically and is not a
                    # calibrated probability -- confirm it matches the
                    # classifier's score range.
                    decision_score = ai_classifier.decision_function(features)[0]
                    prob_real = 0.5 + decision_score / 2 if decision_score < 0 else 0.5 - decision_score / 2
                    prob_ai = 1 - prob_real
            except Exception:
                # Probability estimation failed; keep a neutral 50/50 split.
                prob_real = 0.5
                prob_ai = 0.5

            is_ai = prediction == 1

            ai_detection_result = {
                "ai_prob": prob_ai,
                "real_prob": prob_real,
                "is_ai": is_ai,
                "prediction": int(prediction),
                "confidence": "HIGH" if abs(prob_ai - 0.5) > 0.3 else "MEDIUM" if abs(prob_ai - 0.5) > 0.15 else "LOW",
                "demo_mode": False
            }
        else:
            # Use simulation
            ai_detection_result = simulate_ai_detection(rgb_img, float(ai_detection_threshold))

        # Get results
        ai_prob = ai_detection_result["ai_prob"]
        real_prob = ai_detection_result["real_prob"]
        is_ai = ai_detection_result["is_ai"]
        ai_confidence = ai_detection_result["confidence"]

    except Exception as e:
        ai_detection_result = simulate_ai_detection(rgb_img, float(ai_detection_threshold))
        ai_prob = ai_detection_result["ai_prob"]
        real_prob = ai_detection_result["real_prob"]
        is_ai = ai_detection_result["is_ai"]
        ai_confidence = ai_detection_result["confidence"]

    # SEQUENTIAL ANALYSIS SYNTHESIS
    progress_info.append("\nπŸ”„ ANALYSIS RESULTS:")

    if demo_mode:
        progress_info.append("⚠️ Note: Using demo simulation (models not fully available)")

    # Determine final verdict based on both stages (2x2 decision matrix over
    # damage-found x AI-generated).
    if total_damages > 0 and not is_ai:
        final_verdict = "βœ… LEGITIMATE DAMAGE CLAIM"
        verdict_explanation = "Genuine vehicle damage detected in authentic image"
        recommendation = "βœ… Proceed with claim processing"
        risk_level = "LOW"
    elif total_damages > 0 and is_ai:
        final_verdict = "⚠️ POTENTIAL FRAUD - AI-GENERATED IMAGE"
        verdict_explanation = "Damage detected but image appears to be AI-generated"
        recommendation = "πŸ” Flag for manual review and investigation"
        risk_level = "HIGH"
    elif total_damages == 0 and is_ai:
        final_verdict = "🚨 FRAUD DETECTED"
        verdict_explanation = "No significant damage found and image appears to be AI-generated"
        recommendation = "❌ Reject claim - likely fraudulent"
        risk_level = "VERY HIGH"
    else:  # No damage, authentic image
        final_verdict = "⚠️ NO DAMAGE DETECTED"
        verdict_explanation = "Authentic image but no significant damage found"
        recommendation = "πŸ” Verify claim details and request additional evidence"
        risk_level = "MEDIUM"

    progress_info.append(f"\nπŸ“Š DAMAGE DETECTION:")
    progress_info.append(f"β”œβ”€ Total Damages Found: {total_damages}")
    for i, damage in enumerate(damages):
        progress_info.append(f"β”œβ”€ Damage {i+1}: {damage['type']} (Confidence: {damage['confidence']*100:.1f}%)")

    progress_info.append(f"\nπŸ€– AI DETECTION:")
    progress_info.append(f"β”œβ”€ AI Probability: {ai_prob*100:.1f}%")
    progress_info.append(f"β”œβ”€ Real Probability: {real_prob*100:.1f}%")
    progress_info.append(f"β”œβ”€ Classification: {'AI-GENERATED' if is_ai else 'AUTHENTIC'}")
    progress_info.append(f"└─ Confidence Level: {ai_confidence}")

    progress_info.append(f"\n🎯 FINAL VERDICT:")
    progress_info.append(f"β”œβ”€ Verdict: {final_verdict}")
    progress_info.append(f"β”œβ”€ Explanation: {verdict_explanation}")
    progress_info.append(f"β”œβ”€ Risk Level: {risk_level}")
    progress_info.append(f"└─ Recommendation: {recommendation}")

    # Create comprehensive visualization. NOTE(review): result_img is RGB but
    # the OpenCV color tuples below read as BGR names -- colors may be swapped
    # on screen; confirm intended rendering.
    result_img = rgb_img.copy()

    # Draw damage detection results (Stage 1)
    for i, damage in enumerate(damages):
        bbox = damage["bbox"]
        conf = damage["confidence"]
        x1, y1, x2, y2 = bbox

        # Draw bounding box for damage
        cv2.rectangle(result_img, (x1, y1), (x2, y2), (0, 255, 255), 2)  # Yellow for damage
        cv2.putText(result_img, f"Damage {i + 1}: {conf * 100:.1f}%",
                    (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

    # Add AI detection results (Stage 2)
    ai_color = (255, 0, 0) if is_ai else (0, 255, 0)  # Red for AI, green for real
    ai_text = f"{'AI-GENERATED' if is_ai else 'AUTHENTIC'}"
    ai_prob_text = f"Confidence: {(ai_prob if is_ai else real_prob) * 100:.1f}%"

    # Add text overlays
    cv2.putText(result_img, final_verdict, (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, ai_color, 3)
    cv2.putText(result_img, f"Damage Count: {total_damages}", (30, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
    cv2.putText(result_img, f"AI Detection: {ai_text}", (30, 130), cv2.FONT_HERSHEY_SIMPLEX, 0.8, ai_color, 2)
    cv2.putText(result_img, ai_prob_text, (30, 170), cv2.FONT_HERSHEY_SIMPLEX, 0.6, ai_color, 2)
    cv2.putText(result_img, f"Risk Level: {risk_level}", (30, 210), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)

    # Add pipeline info
    analysis_text = "Advanced Detection System"
    mode_text = "DEMO MODE" if demo_mode else "FULL ANALYSIS"
    cv2.putText(result_img, analysis_text, (30, 250), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (128, 128, 128), 2)
    cv2.putText(result_img, mode_text, (30, 280), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (128, 128, 128), 2)

    # Add timestamp
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
    cv2.putText(result_img, f"Analysis: {timestamp}",
                (30, result_img.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (128, 128, 128), 1)

    # analysis_text is rebound here from the overlay label to the full report.
    analysis_text = "\n".join(progress_info)

    # Return results as dictionary for API
    return {
        "success": True,
        "analysis_text": analysis_text,
        "result_image": result_img,
        "verdict": final_verdict,
        "risk_level": risk_level,
        "damage_count": total_damages,
        "damages": damages,
        "ai_probability": ai_prob,
        "real_probability": real_prob,
        "is_ai_generated": is_ai,
        "ai_confidence": ai_confidence,
        "recommendation": recommendation,
        "demo_mode": demo_mode,
        "timestamp": timestamp
    }
 
 
 
 
726
 
727
def create_gradio_interface():
    """Build the Gradio Blocks app exposing the analysis pipeline as an API.

    Wires the image input, threshold sliders, and analyze/clear buttons to
    analyze_with_status(); registers the click handler under the
    ``analyze_with_status`` API name.

    Returns:
        gr.Blocks: the configured (but not yet launched) application.
    """

    # CSS with JavaScript for cookies
    custom_css = """
    :root {
        --background-fill-primary: #ffffff !important;
        --background-fill-secondary: #f8f9fa !important;
        --border-color-primary: #e5e7eb !important;
        --body-text-color: #000000 !important;
    }

    .gradio-container {
        background-color: #ffffff !important;
        color: #000000 !important;
    }
    """ + COOKIE_JAVASCRIPT

    with gr.Blocks(
        title="HEDI - AI Fraud Detection API",
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="zinc"
        ),
        css=custom_css
    ) as app:

        # Header
        gr.HTML("""
        <div style="background: linear-gradient(90deg, #1e40af, #2563eb); color: white; padding: 20px; border-radius: 10px; margin-bottom: 20px; text-align: center;">
            <h1 style="margin: 0; color: white;">πŸ›‘οΈ HEDI - AI Fraud Detection API</h1>
            <p style="margin: 5px 0 0 0; color: white; opacity: 0.9;">Two-Stage Sequential Pipeline Analysis</p>
        </div>
        """)

        with gr.Row():
            with gr.Column(scale=2):
                input_image = gr.Image(
                    type="numpy",
                    label="Upload Image for Analysis"
                )

                with gr.Row():
                    damage_threshold = gr.Slider(
                        minimum=0.1, maximum=0.95, value=0.7, step=0.05,
                        label="πŸ” Damage Detection Sensitivity"
                    )
                    ai_detection_threshold = gr.Slider(
                        minimum=0.1, maximum=0.9, value=0.5, step=0.05,
                        label="πŸ€– AI Detection Sensitivity"
                    )

                analyze_btn = gr.Button(
                    "πŸš€ Analyze Image",
                    variant="primary",
                    size="lg"
                )

            with gr.Column(scale=3):
                # Analysis Results Display
                result_text = gr.Textbox(
                    label="πŸ“Š Analysis Results",
                    lines=20,
                    max_lines=30,
                    show_copy_button=True
                )

                result_image = gr.Image(
                    label="πŸ“Έ Annotated Result",
                    type="numpy"
                )

        # Usage display
        usage_display = gr.HTML(get_usage_display_html(0))

        # JSON Output for API
        with gr.Accordion("πŸ“„ JSON API Response", open=False):
            json_output = gr.JSON(label="API Response Data")

        # Event handler for analysis
        def process_and_display(image, damage_thresh, ai_thresh):
            """Run the pipeline and map its result dict onto the UI outputs."""
            if image is None:
                return (
                    "❌ Please upload an image",
                    None,
                    {"error": "No image provided"},
                    get_usage_display_html(0)
                )

            # Get analysis results
            results = analyze_with_status(image, damage_thresh, ai_thresh)

            # Extract visualization and text
            if results["success"]:
                # Update usage (in real implementation, integrate with cookies)
                usage_count = 1  # This would come from cookies in production

                return (
                    results["analysis_text"],
                    results["result_image"],
                    {
                        "verdict": results["verdict"],
                        "risk_level": results["risk_level"],
                        "damage_count": results["damage_count"],
                        "damages": results["damages"],
                        "ai_probability": results["ai_probability"],
                        "is_ai_generated": results["is_ai_generated"],
                        "recommendation": results["recommendation"],
                        "timestamp": results["timestamp"]
                    },
                    get_usage_display_html(usage_count)
                )
            else:
                return (
                    results["analysis_text"],
                    None,
                    {"error": results.get("error", "Analysis failed")},
                    get_usage_display_html(0)
                )

        analyze_btn.click(
            fn=process_and_display,
            inputs=[input_image, damage_threshold, ai_detection_threshold],
            outputs=[result_text, result_image, json_output, usage_display],
            api_name="analyze_with_status"  # This makes it accessible via API
        )

        # Clear button
        clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
        # Fix: the lambda previously returned 6 values for 5 output
        # components, which Gradio rejects at event time. The tuple now
        # matches the outputs list one-for-one.
        clear_btn.click(
            fn=lambda: (None, None, "", {}, get_usage_display_html(0)),
            outputs=[input_image, result_image, result_text, json_output, usage_display]
        )

    return app
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
864
 
 
 
 
865
 
866
  if __name__ == "__main__":
867
+ print("πŸš€ Starting HEDI AI Fraud Detector - API Version...")
868
+ print(f"βœ… Damage model: {'Available' if os.path.exists(DEFAULT_DAMAGE_MODEL_PATH) else 'Demo mode'}")
869
+ print(f"βœ… AI Detection Model: {'Available' if huggingface_model_path or os.path.exists(DEFAULT_AI_DETECTION_MODEL_PATH) else 'Demo mode'}")
870
+ print("πŸ”Œ API Endpoint: /analyze_with_status")
871
+ print("πŸ“Š Returns: JSON response with verdict, risk level, and detailed analysis")
 
 
 
 
 
 
872
 
873
+ # Preload models at startup
874
+ preload_models()
 
 
 
 
 
 
 
875
 
 
876
  app = create_gradio_interface()
877
  app.launch(
878
+ share=False,
879
  server_name="0.0.0.0",
880
  server_port=7860,
 
881
  show_error=True
882
  )