doniramdani820 committed on
Commit
660bc39
·
verified ·
1 Parent(s): 2899e6b

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -7
app.py CHANGED
@@ -17,7 +17,7 @@ import base64
17
  import hashlib
18
  import asyncio
19
  from datetime import datetime
20
- from typing import Optional, Dict, Any, List
21
  import logging
22
 
23
  import cv2
@@ -39,6 +39,7 @@ try:
39
  print("✅ ONNX Runtime imported successfully")
40
  except ImportError as e:
41
  print(f"❌ ONNX Runtime import failed: {e}")
 
42
 
43
  # Try PyTorch as fallback
44
  try:
@@ -179,7 +180,7 @@ class ModelManager:
179
  # Load model dengan available backend
180
  session = None
181
 
182
- if ONNX_AVAILABLE:
183
  # Load ONNX session dengan CPU optimization
184
  providers = ['CPUExecutionProvider']
185
  session_options = ort.SessionOptions()
@@ -435,8 +436,8 @@ async def handle_pick_the_challenge(data: dict) -> dict:
435
 
436
  # Non-Maximum Suppression
437
  indices = cv2.dnn.NMSBoxes(
438
- np.array(boxes),
439
- np.array(confidences),
440
  model_data['confidence'],
441
  model_data['nms']
442
  )
@@ -452,8 +453,53 @@ async def handle_pick_the_challenge(data: dict) -> dict:
452
  best_match_box = None
453
  highest_score = 0
454
 
455
- for i in indices.flatten():
456
- if class_ids[i] == target_class_id:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
457
  current_score = confidences[i]
458
  if current_score > highest_score:
459
  highest_score = current_score
@@ -522,7 +568,7 @@ async def handle_upright_challenge(data: dict) -> dict:
522
 
523
  # Determine input size for this inference (prefer model config, default to 300 for upright 3x2 grid)
524
  input_size = model_data.get('input_size')
525
- if not isinstance(input_size) or not isinstance(input_size, int) or input_size <= 0:
526
  input_size = 300
527
 
528
  input_tensor = preprocess_image(image_bytes, input_size)
 
17
  import hashlib
18
  import asyncio
19
  from datetime import datetime
20
+ from typing import Optional, Dict, Any, List, Union
21
  import logging
22
 
23
  import cv2
 
39
  print("✅ ONNX Runtime imported successfully")
40
  except ImportError as e:
41
  print(f"❌ ONNX Runtime import failed: {e}")
42
+ ort = None # Set to None when import fails
43
 
44
  # Try PyTorch as fallback
45
  try:
 
180
  # Load model dengan available backend
181
  session = None
182
 
183
+ if ONNX_AVAILABLE and ort is not None:
184
  # Load ONNX session dengan CPU optimization
185
  providers = ['CPUExecutionProvider']
186
  session_options = ort.SessionOptions()
 
436
 
437
  # Non-Maximum Suppression
438
  indices = cv2.dnn.NMSBoxes(
439
+ boxes, # Use original list instead of numpy array
440
+ confidences, # Use original list instead of numpy array
441
  model_data['confidence'],
442
  model_data['nms']
443
  )
 
453
  best_match_box = None
454
  highest_score = 0
455
 
456
+ # Handle indices properly - cv2.dnn.NMSBoxes can return different types
457
+ indices_flat: List[int] = []
458
+ if indices is not None and len(indices) > 0:
459
+ # Convert to list of integers with proper type handling
460
+ try:
461
+ # Check if it's a numpy array
462
+ if isinstance(indices, np.ndarray):
463
+ indices_flat = indices.flatten().tolist()
464
+ elif hasattr(indices, '__iter__') and not isinstance(indices, (str, bytes)):
465
+ # Handle iterable (list, tuple, etc.)
466
+ temp_list = []
467
+ for idx in indices:
468
+ if isinstance(idx, (list, tuple, np.ndarray)):
469
+ # Nested iterable - flatten it
470
+ try:
471
+ if isinstance(idx, np.ndarray):
472
+ temp_list.extend(idx.flatten().tolist())
473
+ else:
474
+ temp_list.extend([int(x) for x in idx])
475
+ except (TypeError, ValueError):
476
+ # Skip invalid nested items
477
+ continue
478
+ else:
479
+ # Single value
480
+ try:
481
+ temp_list.append(int(idx))
482
+ except (TypeError, ValueError):
483
+ # Skip invalid items
484
+ continue
485
+ indices_flat = temp_list
486
+ else:
487
+ # Handle single numeric value
488
+ try:
489
+ # Check if it's numeric
490
+ if isinstance(indices, (int, float)):
491
+ indices_flat = [int(indices)]
492
+ else:
493
+ indices_flat = []
494
+ except (TypeError, ValueError):
495
+ indices_flat = []
496
+ except Exception as e:
497
+ # fallback to empty list if conversion fails
498
+ logger.warning(f"Failed to process NMS indices: {e}")
499
+ indices_flat = []
500
+
501
+ for i in indices_flat:
502
+ if 0 <= i < len(class_ids) and class_ids[i] == target_class_id:
503
  current_score = confidences[i]
504
  if current_score > highest_score:
505
  highest_score = current_score
 
568
 
569
  # Determine input size for this inference (prefer model config, default to 300 for upright 3x2 grid)
570
  input_size = model_data.get('input_size')
571
+ if not isinstance(input_size, int) or input_size <= 0:
572
  input_size = 300
573
 
574
  input_tensor = preprocess_image(image_bytes, input_size)