openfree committed on
Commit
ca83025
·
verified ·
1 Parent(s): 797737a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +209 -66
app.py CHANGED
@@ -257,6 +257,14 @@ def train_model(epochs, batch_size, img_size, device_selection):
257
  else:
258
  device = 0 if torch.cuda.is_available() else "cpu"
259
 
 
 
 
 
 
 
 
 
260
  # Initialize model - use yolov8n if yolo11n not available
261
  try:
262
  model = YOLO("yolo11n.pt")
@@ -268,9 +276,9 @@ def train_model(epochs, batch_size, img_size, device_selection):
268
  project_dir = "./xray_detection"
269
  os.makedirs(project_dir, exist_ok=True)
270
 
271
- # Train model with workers=0 to avoid multiprocessing issues on Spaces
272
  results = model.train(
273
- data=f"{dataset_path}/data.yaml",
274
  epochs=epochs,
275
  imgsz=img_size,
276
  batch=batch_size,
@@ -285,7 +293,34 @@ def train_model(epochs, batch_size, img_size, device_selection):
285
  single_cls=False,
286
  rect=False,
287
  cache=False, # Disable caching to avoid memory issues
288
- amp=True # Use automatic mixed precision for faster training
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
289
  )
290
 
291
  # Collect training result plots
@@ -304,13 +339,22 @@ def train_model(epochs, batch_size, img_size, device_selection):
304
  model_path = os.path.join(results_path, "weights", "best.pt")
305
 
306
  # Load the trained model to ensure it's ready for inference
 
 
307
  if os.path.exists(model_path):
308
- model = YOLO(model_path)
309
- class_info = f"\nTrained on {len(model.names)} classes: {', '.join(list(model.names.values())[:5])}"
310
- if len(model.names) > 5:
311
- class_info += f"... (์ด {len(model.names)} ํด๋ž˜์Šค)"
 
 
 
 
 
 
 
312
  else:
313
- class_info = ""
314
 
315
  training_in_progress = False
316
 
@@ -368,51 +412,101 @@ def run_inference(input_image, conf_threshold):
368
  try:
369
  # Check if model is trained on X-ray dataset
370
  model_info = ""
 
 
371
  try:
372
- model_info = f"Using model with {len(model.names)} classes\n"
 
 
 
373
  except:
374
  model_info = "Using loaded model\n"
375
 
376
- # Save the input image temporarily
377
  temp_path = "temp_inference.jpg"
378
- input_image.save(temp_path)
379
-
380
- # Run inference with workers=0
381
- results = model(temp_path, conf=conf_threshold, verbose=False, device=0 if torch.cuda.is_available() else 'cpu')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
382
 
383
- # Draw results
384
- annotated_image = results[0].plot()
 
 
 
 
 
 
385
 
386
  # Get detection info
387
  detections = []
388
- if results[0].boxes is not None and len(results[0].boxes) > 0:
389
- for box in results[0].boxes:
 
 
 
 
 
390
  cls = int(box.cls)
391
  conf = float(box.conf)
 
392
  try:
393
  cls_name = model.names[cls]
394
  except:
395
  cls_name = f"Class {cls}"
396
- detections.append(f"{cls_name}: {conf:.2f}")
397
-
398
- detection_text = model_info + "Detections:\n" + "\n".join(detections)
399
- else:
400
- # Check if it's because of wrong model
401
- try:
402
- if len(model.names) == 80: # COCO dataset has 80 classes
403
- detection_text = model_info + "No objects detected.\n\nโš ๏ธ Note: This appears to be a general COCO model. For X-ray baggage detection, please train the model on the X-ray dataset first."
404
- else:
405
- detection_text = model_info + "No objects detected at this confidence threshold.\nTry lowering the confidence threshold."
406
- except:
407
- detection_text = model_info + "No objects detected."
408
 
409
  # Clean up
410
  if os.path.exists(temp_path):
411
  os.remove(temp_path)
412
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
413
  return Image.fromarray(annotated_image), detection_text
414
 
415
  except Exception as e:
 
 
 
416
  return None, f"Error during inference: {str(e)}"
417
 
418
  @spaces.GPU(duration=60) # Request GPU for batch inference
@@ -454,19 +548,24 @@ def batch_inference(data_type, num_images):
454
  return [], f"No images found in {image_dir}"
455
 
456
  results_images = []
 
457
 
458
  for img_path in image_files:
459
- results = model(img_path, verbose=False)
460
  annotated = results[0].plot()
461
  results_images.append(Image.fromarray(annotated))
 
 
 
 
 
 
462
 
463
  # Check model type
464
- try:
465
- model_type = "X-ray detection model" if len(model.names) != 80 else "General COCO model"
466
- except:
467
- model_type = "Loaded model"
468
 
469
- return results_images, f"Processed {len(results_images)} images from {data_type} dataset using {model_type}"
470
 
471
  except Exception as e:
472
  return [], f"Error during batch inference: {str(e)}"
@@ -487,10 +586,19 @@ def get_dataset_info():
487
  class_names = data.get('names', [])
488
  num_classes = len(class_names)
489
 
490
- info = f"**X-ray Baggage Dataset Info:**\n"
491
- info += f"- Number of classes: {num_classes}\n"
492
- info += f"- Classes: {', '.join(class_names)}\n"
493
- info += f"\nThese are the prohibited items that the model will learn to detect."
 
 
 
 
 
 
 
 
 
494
 
495
  return info
496
  except Exception as e:
@@ -577,20 +685,21 @@ def check_model_status():
577
  if os.path.exists(trained_path):
578
  try:
579
  model = YOLO(trained_path)
580
- return f"โœ… Trained model loaded: {len(model.names)} classes"
 
 
581
  except:
582
  return "โŒ No model loaded. Please train or load a model first."
583
  return "โŒ No model loaded. Please train or load a model first."
584
  else:
585
  try:
586
  num_classes = len(model.names)
 
 
587
  if num_classes == 80:
588
  return f"โš ๏ธ Default COCO model loaded ({num_classes} classes). For X-ray detection, please train on the X-ray dataset."
589
  else:
590
- class_names = ', '.join(list(model.names.values())[:5])
591
- if len(model.names) > 5:
592
- class_names += "..."
593
- return f"โœ… Model loaded: {num_classes} classes - {class_names}"
594
  except:
595
  return "โœ… Model loaded"
596
 
@@ -706,18 +815,19 @@ with gr.Blocks(title="X-ray Baggage Anomaly Detection", theme=gr.themes.Soft())
706
  gr.Markdown("""
707
  **Note:** Training will automatically use GPU if available. This may take several minutes.
708
 
709
- **Tips for Hugging Face Spaces:**
710
- - Use smaller batch sizes (4-8) to avoid GPU memory issues
711
- - Start with fewer epochs (5-10) for testing
712
- - Image size 480 provides good balance between quality and speed
 
713
 
714
  โš ๏ธ **Important**: Models are temporary on Spaces! Download your model after training.
715
  """)
716
 
717
  with gr.Row():
718
- epochs_input = gr.Slider(1, 50, 10, step=1, label="Epochs")
719
- batch_size_input = gr.Slider(4, 32, 8, step=4, label="Batch Size (lower for limited GPU)")
720
- img_size_input = gr.Slider(320, 640, 480, step=32, label="Image Size")
721
  device_input = gr.Radio(["Auto", "GPU", "CPU"], value="Auto", label="Device")
722
 
723
  train_btn = gr.Button("Start Training", variant="primary")
@@ -775,40 +885,72 @@ with gr.Blocks(title="X-ray Baggage Anomaly Detection", theme=gr.themes.Soft())
775
 
776
  refresh_status_btn.click(check_model_status, outputs=model_status)
777
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
778
  gr.Markdown("### Single Image Inference")
779
  gr.Markdown("Upload an X-ray baggage image to detect prohibited items.")
780
 
781
  with gr.Row():
782
  with gr.Column():
783
  input_image = gr.Image(type="pil", label="Upload X-ray Image")
784
- conf_threshold = gr.Slider(0.1, 0.9, 0.5, step=0.05, label="Confidence Threshold")
 
 
 
 
 
785
 
786
  # Add example images if dataset is available
787
- def get_example_images():
788
- examples = []
789
- if dataset_path and os.path.exists(f"{dataset_path}/test/images"):
790
- test_images = glob(f"{dataset_path}/test/images/*")[:3]
791
- examples.extend(test_images)
792
- return examples if examples else None
793
 
794
- example_images = get_example_images()
795
  if example_images:
796
  gr.Examples(
797
- examples=example_images,
798
  inputs=input_image,
799
- label="Example X-ray Images"
800
  )
801
-
802
- inference_btn = gr.Button("Run Detection", variant="primary")
803
 
804
  with gr.Column():
805
  output_image = gr.Image(type="pil", label="Detection Result")
806
- detection_info = gr.Textbox(label="Detection Info", lines=5)
807
 
808
  inference_btn.click(run_inference,
809
  inputs=[input_image, conf_threshold],
810
  outputs=[output_image, detection_info])
811
 
 
 
 
 
 
 
 
 
 
 
812
  gr.Markdown("### Batch Inference")
813
  gr.Markdown("Run detection on multiple images from the test dataset.")
814
 
@@ -828,7 +970,8 @@ with gr.Blocks(title="X-ray Baggage Anomaly Detection", theme=gr.themes.Soft())
828
  gr.Markdown("---")
829
  gr.Markdown("""
830
  <div style='text-align: center; font-size: 14px; color: #666;'>
831
- ๐Ÿ’ก <b>Quick Start:</b> Download Dataset โ†’ Train Model (or Upload existing) โ†’ Run Inference<br>
 
832
  ๐Ÿš€ Built with Gradio, YOLOv8, and โค๏ธ for X-ray security
833
  </div>
834
  """)
 
257
  else:
258
  device = 0 if torch.cuda.is_available() else "cpu"
259
 
260
+ # Read dataset info
261
+ yaml_path = f"{dataset_path}/data.yaml"
262
+ with open(yaml_path, 'r') as file:
263
+ data_config = yaml.safe_load(file)
264
+
265
+ class_names = data_config.get('names', [])
266
+ print(f"Training on {len(class_names)} classes: {class_names}")
267
+
268
  # Initialize model - use yolov8n if yolo11n not available
269
  try:
270
  model = YOLO("yolo11n.pt")
 
276
  project_dir = "./xray_detection"
277
  os.makedirs(project_dir, exist_ok=True)
278
 
279
+ # Train model with optimized settings for X-ray detection
280
  results = model.train(
281
+ data=yaml_path,
282
  epochs=epochs,
283
  imgsz=img_size,
284
  batch=batch_size,
 
293
  single_cls=False,
294
  rect=False,
295
  cache=False, # Disable caching to avoid memory issues
296
+ amp=True, # Use automatic mixed precision for faster training
297
+ # Optimization settings
298
+ optimizer='AdamW',
299
+ lr0=0.001, # Initial learning rate
300
+ lrf=0.01, # Final learning rate factor
301
+ momentum=0.937,
302
+ weight_decay=0.0005,
303
+ warmup_epochs=3.0,
304
+ warmup_momentum=0.8,
305
+ warmup_bias_lr=0.1,
306
+ # Loss weights
307
+ box=7.5,
308
+ cls=0.5,
309
+ dfl=1.5,
310
+ # Augmentation settings for X-ray images
311
+ hsv_h=0.0, # No hue augmentation for X-ray
312
+ hsv_s=0.0, # No saturation augmentation
313
+ hsv_v=0.1, # Slight value augmentation
314
+ degrees=0.0, # No rotation
315
+ translate=0.1,
316
+ scale=0.5,
317
+ shear=0.0,
318
+ perspective=0.0,
319
+ flipud=0.0, # No vertical flip for X-ray
320
+ fliplr=0.5, # Horizontal flip is okay
321
+ mosaic=1.0,
322
+ mixup=0.0,
323
+ copy_paste=0.0
324
  )
325
 
326
  # Collect training result plots
 
339
  model_path = os.path.join(results_path, "weights", "best.pt")
340
 
341
  # Load the trained model to ensure it's ready for inference
342
+ model_loaded = False
343
+ class_info = ""
344
  if os.path.exists(model_path):
345
+ try:
346
+ model = YOLO(model_path)
347
+ model_loaded = True
348
+ class_info = f"\nโœ… Trained on {len(model.names)} classes: {', '.join(list(model.names.values()))}"
349
+
350
+ # Run a test inference to ensure model works
351
+ test_img = np.zeros((640, 640, 3), dtype=np.uint8)
352
+ test_results = model(test_img, verbose=False)
353
+ class_info += "\nโœ… Model test passed - ready for inference!"
354
+ except Exception as e:
355
+ class_info = f"\nโš ๏ธ Model loaded but test failed: {str(e)}"
356
  else:
357
+ class_info = "\nโŒ Model file not found!"
358
 
359
  training_in_progress = False
360
 
 
412
  try:
413
  # Check if model is trained on X-ray dataset
414
  model_info = ""
415
+ num_classes = 80 # Default COCO classes
416
+ class_list = []
417
  try:
418
+ num_classes = len(model.names)
419
+ class_list = list(model.names.values())
420
+ model_info = f"Model: {num_classes} classes - {', '.join(class_list)}\n"
421
+ model_info += f"Confidence threshold: {conf_threshold}\n\n"
422
  except:
423
  model_info = "Using loaded model\n"
424
 
425
+ # Save the input image temporarily with proper format
426
  temp_path = "temp_inference.jpg"
427
+ # Ensure image is in RGB format
428
+ if input_image.mode != 'RGB':
429
+ input_image = input_image.convert('RGB')
430
+ input_image.save(temp_path, format='JPEG', quality=95)
431
+
432
+ # Print image info for debugging
433
+ print(f"Image mode: {input_image.mode}, Size: {input_image.size}")
434
+
435
+ # Run inference with verbose output for debugging
436
+ imgsz = 640 # Processing size
437
+ print(f"Running inference with conf={conf_threshold}, imgsz={imgsz}")
438
+ results = model(
439
+ temp_path,
440
+ conf=conf_threshold,
441
+ verbose=True, # Enable verbose for debugging
442
+ device=0 if torch.cuda.is_available() else 'cpu',
443
+ imgsz=imgsz, # Ensure consistent image size
444
+ augment=False, # Disable augmentation for consistent results
445
+ agnostic_nms=False, # Use class-specific NMS
446
+ max_det=300 # Maximum detections
447
+ )
448
 
449
+ # Draw results with lower confidence for visualization
450
+ annotated_image = results[0].plot(
451
+ conf=True, # Show confidence scores
452
+ labels=True, # Show labels
453
+ boxes=True, # Show boxes
454
+ masks=False, # No masks for detection
455
+ probs=False # No classification probabilities
456
+ )
457
 
458
  # Get detection info
459
  detections = []
460
+ detection_count = 0
461
+
462
+ if results[0].boxes is not None:
463
+ detection_count = len(results[0].boxes)
464
+ print(f"Found {detection_count} detections")
465
+
466
+ for idx, box in enumerate(results[0].boxes):
467
  cls = int(box.cls)
468
  conf = float(box.conf)
469
+ xyxy = box.xyxy[0].tolist()
470
  try:
471
  cls_name = model.names[cls]
472
  except:
473
  cls_name = f"Class {cls}"
474
+
475
+ detections.append(f"{idx+1}. {cls_name}: {conf:.3f} | Box: [{int(xyxy[0])}, {int(xyxy[1])}, {int(xyxy[2])}, {int(xyxy[3])}]")
 
 
 
 
 
 
 
 
 
 
476
 
477
  # Clean up
478
  if os.path.exists(temp_path):
479
  os.remove(temp_path)
480
 
481
+ # Prepare detection text
482
+ if detections:
483
+ detection_text = model_info + f"โœ… Found {detection_count} object(s):\n\n" + "\n".join(detections)
484
+ else:
485
+ # Provide helpful debugging info
486
+ detection_text = model_info + "โŒ No objects detected.\n\n"
487
+
488
+ if num_classes == 80: # COCO model
489
+ detection_text += "โš ๏ธ This is a COCO model (general objects). For X-ray detection, please train on the X-ray dataset.\n"
490
+ else:
491
+ detection_text += "๐Ÿ’ก Suggestions:\n"
492
+ detection_text += f"โ€ข Current threshold: {conf_threshold} - try lowering to 0.1-0.3\n"
493
+ detection_text += "โ€ข Ensure you're using X-ray images (grayscale/blue-tinted)\n"
494
+ detection_text += "โ€ข Check if the model was trained with the same image size\n"
495
+ detection_text += "โ€ข The model may need more training epochs\n"
496
+
497
+ # Show what the model is looking for
498
+ detection_text += f"\n\n๐Ÿ“Š Debug Info:\n"
499
+ detection_text += f"โ€ข Image size: {input_image.size}\n"
500
+ detection_text += f"โ€ข Processing size: {imgsz}x{imgsz}\n"
501
+ detection_text += f"โ€ข Device: {'GPU' if torch.cuda.is_available() else 'CPU'}\n"
502
+ detection_text += f"โ€ข Model classes: {num_classes} - {', '.join(class_list)}"
503
+
504
  return Image.fromarray(annotated_image), detection_text
505
 
506
  except Exception as e:
507
+ print(f"Error during inference: {str(e)}")
508
+ import traceback
509
+ traceback.print_exc()
510
  return None, f"Error during inference: {str(e)}"
511
 
512
  @spaces.GPU(duration=60) # Request GPU for batch inference
 
548
  return [], f"No images found in {image_dir}"
549
 
550
  results_images = []
551
+ detection_counts = []
552
 
553
  for img_path in image_files:
554
+ results = model(img_path, verbose=False, conf=0.25, imgsz=640)
555
  annotated = results[0].plot()
556
  results_images.append(Image.fromarray(annotated))
557
+
558
+ # Count detections
559
+ if results[0].boxes is not None:
560
+ detection_counts.append(len(results[0].boxes))
561
+ else:
562
+ detection_counts.append(0)
563
 
564
  # Check model type
565
+ model_type = "X-ray detection model" if len(model.names) != 80 else "General COCO model"
566
+ avg_detections = sum(detection_counts) / len(detection_counts) if detection_counts else 0
 
 
567
 
568
+ return results_images, f"Processed {len(results_images)} images using {model_type}\nAverage detections per image: {avg_detections:.1f}"
569
 
570
  except Exception as e:
571
  return [], f"Error during batch inference: {str(e)}"
 
586
  class_names = data.get('names', [])
587
  num_classes = len(class_names)
588
 
589
+ # Count images
590
+ train_images = len(glob(f"{dataset_path}/train/images/*")) if os.path.exists(f"{dataset_path}/train/images") else 0
591
+ valid_images = len(glob(f"{dataset_path}/valid/images/*")) if os.path.exists(f"{dataset_path}/valid/images") else 0
592
+ test_images = len(glob(f"{dataset_path}/test/images/*")) if os.path.exists(f"{dataset_path}/test/images") else 0
593
+
594
+ info = f"### ๐Ÿ“Š X-ray Baggage Dataset Info\n\n"
595
+ info += f"**Classes ({num_classes}):** {', '.join(class_names)}\n\n"
596
+ info += f"**Dataset Split:**\n"
597
+ info += f"- Training: {train_images} images\n"
598
+ info += f"- Validation: {valid_images} images\n"
599
+ info += f"- Test: {test_images} images\n"
600
+ info += f"- Total: {train_images + valid_images + test_images} images\n\n"
601
+ info += f"**What to expect:** The model will learn to detect these prohibited items in X-ray scans."
602
 
603
  return info
604
  except Exception as e:
 
685
  if os.path.exists(trained_path):
686
  try:
687
  model = YOLO(trained_path)
688
+ num_classes = len(model.names)
689
+ class_names = ', '.join(list(model.names.values()))
690
+ return f"โœ… Trained model loaded: {num_classes} classes\n๐Ÿ“‹ Classes: {class_names}"
691
  except:
692
  return "โŒ No model loaded. Please train or load a model first."
693
  return "โŒ No model loaded. Please train or load a model first."
694
  else:
695
  try:
696
  num_classes = len(model.names)
697
+ class_names = ', '.join(list(model.names.values()))
698
+
699
  if num_classes == 80:
700
  return f"โš ๏ธ Default COCO model loaded ({num_classes} classes). For X-ray detection, please train on the X-ray dataset."
701
  else:
702
+ return f"โœ… Model loaded: {num_classes} classes\n๐Ÿ“‹ Classes: {class_names}"
 
 
 
703
  except:
704
  return "โœ… Model loaded"
705
 
 
815
  gr.Markdown("""
816
  **Note:** Training will automatically use GPU if available. This may take several minutes.
817
 
818
+ **Recommended Settings for X-ray Detection:**
819
+ - **Epochs:** 20-30 for good results
820
+ - **Batch Size:** 2-4 for better convergence
821
+ - **Image Size:** 640 for best quality
822
+ - **Expected time:** ~2-5 minutes for 20 epochs
823
 
824
  โš ๏ธ **Important**: Models are temporary on Spaces! Download your model after training.
825
  """)
826
 
827
  with gr.Row():
828
+ epochs_input = gr.Slider(1, 50, 20, step=1, label="Epochs (20+ recommended)")
829
+ batch_size_input = gr.Slider(2, 16, 4, step=2, label="Batch Size (lower for better results)")
830
+ img_size_input = gr.Slider(320, 640, 640, step=32, label="Image Size (640 recommended)")
831
  device_input = gr.Radio(["Auto", "GPU", "CPU"], value="Auto", label="Device")
832
 
833
  train_btn = gr.Button("Start Training", variant="primary")
 
885
 
886
  refresh_status_btn.click(check_model_status, outputs=model_status)
887
 
888
+ gr.Markdown("""
889
+ ## ๐ŸŽฏ ๋ชจ๋ธ์ด ๊ฐ์ฒด๋ฅผ ๊ฐ์ง€ํ•˜์ง€ ๋ชปํ•˜๋‚˜์š”?
890
+
891
+ **๊ถŒ์žฅ ํ•™์Šต ์„ค์ •:**
892
+ - **Epochs: 30** (์ตœ์†Œ 20 ์ด์ƒ)
893
+ - **Batch Size: 2 ๋˜๋Š” 4**
894
+ - **Image Size: 640**
895
+
896
+ **์ฒดํฌ๋ฆฌ์ŠคํŠธ:**
897
+ 1. โœ… X-ray ์ด๋ฏธ์ง€์ธ๊ฐ€? (์ผ๋ฐ˜ ์‚ฌ์ง„์€ ์ž‘๋™ ์•ˆ ํ•จ)
898
+ 2. โœ… ์ถฉ๋ถ„ํžˆ ํ•™์Šตํ–ˆ๋‚˜? (20+ epochs)
899
+ 3. โœ… Confidence threshold๋ฅผ 0.01๋กœ ๋‚ฎ์ถฐ๋ดค๋‚˜?
900
+ 4. โœ… ๋ชจ๋ธ์ด ์ œ๋Œ€๋กœ ๋กœ๋“œ๋˜์—ˆ๋‚˜? (์ƒํƒœ ํ™•์ธ)
901
+
902
+ **์„ฑ๊ณต์ ์ธ ํ•™์Šต ํ›„ ์˜ˆ์ƒ ๊ฒฐ๊ณผ:**
903
+ - Firearm (์ด๊ธฐ๋ฅ˜) ๊ฐ์ง€
904
+ - Knife (์นผ) ๊ฐ์ง€
905
+ - Pliers (ํŽœ์น˜) ๊ฐ์ง€
906
+ - Scissors (๊ฐ€์œ„) ๊ฐ์ง€
907
+ - Wrench (๋ Œ์น˜) ๊ฐ์ง€
908
+ """)
909
+
910
  gr.Markdown("### Single Image Inference")
911
  gr.Markdown("Upload an X-ray baggage image to detect prohibited items.")
912
 
913
  with gr.Row():
914
  with gr.Column():
915
  input_image = gr.Image(type="pil", label="Upload X-ray Image")
916
+ conf_threshold = gr.Slider(0.01, 0.9, 0.25, step=0.01, label="Confidence Threshold (๋‚ฎ์„์ˆ˜๋ก ๋” ๋งŽ์ด ๊ฐ์ง€)")
917
+
918
+ # Debug options
919
+ with gr.Row():
920
+ inference_btn = gr.Button("Run Detection", variant="primary")
921
+ test_btn = gr.Button("Test with 0.01 threshold", variant="secondary", scale=0)
922
 
923
  # Add example images if dataset is available
924
+ example_images = []
925
+ if dataset_path and os.path.exists(f"{dataset_path}/test/images"):
926
+ test_images = glob(f"{dataset_path}/test/images/*")[:5]
927
+ example_images.extend(test_images)
 
 
928
 
 
929
  if example_images:
930
  gr.Examples(
931
+ examples=[[img] for img in example_images],
932
  inputs=input_image,
933
+ label="Example X-ray Images (Click to load)"
934
  )
 
 
935
 
936
  with gr.Column():
937
  output_image = gr.Image(type="pil", label="Detection Result")
938
+ detection_info = gr.Textbox(label="Detection Info", lines=8)
939
 
940
  inference_btn.click(run_inference,
941
  inputs=[input_image, conf_threshold],
942
  outputs=[output_image, detection_info])
943
 
944
+ # Test with very low threshold
945
+ test_btn.click(
946
+ lambda img: run_inference(img, 0.01),
947
+ inputs=[input_image],
948
+ outputs=[output_image, detection_info]
949
+ )
950
+
951
+ # Auto-refresh model status after inference
952
+ inference_btn.click(check_model_status, outputs=model_status)
953
+
954
  gr.Markdown("### Batch Inference")
955
  gr.Markdown("Run detection on multiple images from the test dataset.")
956
 
 
970
  gr.Markdown("---")
971
  gr.Markdown("""
972
  <div style='text-align: center; font-size: 14px; color: #666;'>
973
+ ๐Ÿ’ก <b>Quick Start:</b> Download Dataset โ†’ Train Model (20+ epochs) โ†’ Run Inference<br>
974
+ ๐Ÿ” <b>No detections?</b> Try lowering threshold to 0.01 or train for more epochs<br>
975
  ๐Ÿš€ Built with Gradio, YOLOv8, and โค๏ธ for X-ray security
976
  </div>
977
  """)