openfree committed on
Commit
c2e0b70
·
verified ·
1 Parent(s): c3c2c75

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -6
app.py CHANGED
@@ -1,4 +1,7 @@
1
  import os
 
 
 
2
  import cv2
3
  import yaml
4
  import torch
@@ -253,7 +256,7 @@ def train_model(epochs, batch_size, img_size, device_selection):
253
  project_dir = "./xray_detection"
254
  os.makedirs(project_dir, exist_ok=True)
255
 
256
- # Train model
257
  results = model.train(
258
  data=f"{dataset_path}/data.yaml",
259
  epochs=epochs,
@@ -265,7 +268,12 @@ def train_model(epochs, batch_size, img_size, device_selection):
265
  exist_ok=True,
266
  verbose=True,
267
  patience=5, # Reduce patience for faster training on Spaces
268
- save_period=5 # Save checkpoints every 5 epochs
 
 
 
 
 
269
  )
270
 
271
  # Collect training result plots
@@ -310,8 +318,8 @@ def run_inference(input_image, conf_threshold):
310
  temp_path = "temp_inference.jpg"
311
  input_image.save(temp_path)
312
 
313
- # Run inference
314
- results = model(temp_path, conf=conf_threshold, verbose=False)
315
 
316
  # Draw results
317
  annotated_image = results[0].plot()
@@ -450,11 +458,18 @@ with gr.Blocks(title="X-ray Baggage Anomaly Detection", theme=gr.themes.Soft())
450
 
451
  with gr.Tab("🚀 Training"):
452
  gr.Markdown("### Train YOLOv11 Model")
453
- gr.Markdown("**Note:** Training will automatically use GPU if available. This may take several minutes.")
 
 
 
 
 
 
 
454
 
455
  with gr.Row():
456
  epochs_input = gr.Slider(1, 50, 10, step=1, label="Epochs")
457
- batch_size_input = gr.Slider(8, 64, 16, step=8, label="Batch Size")
458
  img_size_input = gr.Slider(320, 640, 480, step=32, label="Image Size")
459
  device_input = gr.Radio(["Auto", "GPU", "CPU"], value="Auto", label="Device")
460
 
 
1
  import os
2
+ # Set environment variables for Spaces compatibility
3
+ os.environ['OMP_NUM_THREADS'] = '1'
4
+ os.environ['MKL_NUM_THREADS'] = '1'
5
  import cv2
6
  import yaml
7
  import torch
 
256
  project_dir = "./xray_detection"
257
  os.makedirs(project_dir, exist_ok=True)
258
 
259
+ # Train model with workers=0 to avoid multiprocessing issues on Spaces
260
  results = model.train(
261
  data=f"{dataset_path}/data.yaml",
262
  epochs=epochs,
 
268
  exist_ok=True,
269
  verbose=True,
270
  patience=5, # Reduce patience for faster training on Spaces
271
+ save_period=5, # Save checkpoints every 5 epochs
272
+ workers=0, # Important: Set to 0 to avoid multiprocessing issues
273
+ single_cls=False,
274
+ rect=False,
275
+ cache=False, # Disable caching to avoid memory issues
276
+ amp=True # Use automatic mixed precision for faster training
277
  )
278
 
279
  # Collect training result plots
 
318
  temp_path = "temp_inference.jpg"
319
  input_image.save(temp_path)
320
 
321
+ # Run inference with workers=0
322
+ results = model(temp_path, conf=conf_threshold, verbose=False, device=0 if torch.cuda.is_available() else 'cpu')
323
 
324
  # Draw results
325
  annotated_image = results[0].plot()
 
458
 
459
  with gr.Tab("🚀 Training"):
460
  gr.Markdown("### Train YOLOv11 Model")
461
+ gr.Markdown("""
462
+ **Note:** Training will automatically use GPU if available. This may take several minutes.
463
+
464
+ **Tips for Hugging Face Spaces:**
465
+ - Use smaller batch sizes (4-8) to avoid GPU memory issues
466
+ - Start with fewer epochs (5-10) for testing
467
+ - Image size 480 provides good balance between quality and speed
468
+ """)
469
 
470
  with gr.Row():
471
  epochs_input = gr.Slider(1, 50, 10, step=1, label="Epochs")
472
+ batch_size_input = gr.Slider(4, 32, 8, step=4, label="Batch Size (lower for limited GPU)")
473
  img_size_input = gr.Slider(320, 640, 480, step=32, label="Image Size")
474
  device_input = gr.Radio(["Auto", "GPU", "CPU"], value="Auto", label="Device")
475