Update model_loaders.py
model_loaders.py  CHANGED  (+42 -15)
@@ -64,37 +64,58 @@ def load_sam2_predictor():
     Returns None if loading fails.
     """
     try:
-
+        print("Loading SAM2 image predictor...", flush=True)
         from sam2.build_sam import build_sam2
         from sam2.sam2_image_predictor import SAM2ImagePredictor
 
+        # Determine device
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {device}", flush=True)
+
         # Try local checkpoints first
         checkpoint_path = "/home/user/app/checkpoints/sam2.1_hiera_large.pt"
         model_cfg = "/home/user/app/configs/sam2.1/sam2.1_hiera_l.yaml"
 
         if not os.path.exists(checkpoint_path) or not os.path.exists(model_cfg):
-
-            predictor = SAM2ImagePredictor.from_pretrained(
+            print("Local checkpoints not found, using Hugging Face...", flush=True)
+            predictor = SAM2ImagePredictor.from_pretrained(
+                "facebook/sam2-hiera-large",
+                device=device
+            )
         else:
             # Check available GPU memory
             memory_info = get_memory_usage()
             gpu_free = memory_info.get('gpu_free', 0)
 
-            if gpu_free < 4.0:
-
+            if device == "cuda" and gpu_free < 4.0:
+                print(f"Limited GPU memory ({gpu_free:.1f}GB), using smaller SAM2 model...", flush=True)
                 try:
-                    predictor = SAM2ImagePredictor.from_pretrained(
+                    predictor = SAM2ImagePredictor.from_pretrained(
+                        "facebook/sam2-hiera-tiny",
+                        device=device
+                    )
                 except Exception:
-                    predictor = SAM2ImagePredictor.from_pretrained(
+                    predictor = SAM2ImagePredictor.from_pretrained(
+                        "facebook/sam2-hiera-small",
+                        device=device
+                    )
            else:
                # Use local large model
-
+                sam2_model = build_sam2(model_cfg, checkpoint_path, device=device)
+                predictor = SAM2ImagePredictor(sam2_model)
+
+        # Verify model is on correct device
+        if hasattr(predictor, 'model'):
+            predictor.model.to(device)
+            print(f"Model moved to {device}", flush=True)
 
-
+        print("✅ SAM2 image predictor loaded successfully!", flush=True)
         return predictor
 
     except Exception as e:
-
+        print(f"Failed to load SAM2 predictor: {e}", flush=True)
+        import traceback
+        traceback.print_exc()
        return None
 
 # Alias for new app.py
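Note on the memory check in this hunk: get_memory_usage() is defined elsewhere in model_loaders.py and is not part of this diff. A minimal sketch of a compatible helper, assuming the loader only needs free GPU memory reported in GB under a 'gpu_free' key, could look like this (hypothetical, for illustration only):

import torch

def get_memory_usage():
    # Hypothetical stand-in for the helper the loader calls; the real one
    # lives elsewhere in model_loaders.py. Reports free/total GPU memory
    # in GB. On CPU-only hosts it returns zeros, and the loader's
    # `device == "cuda" and gpu_free < 4.0` check short-circuits anyway.
    if not torch.cuda.is_available():
        return {"gpu_free": 0.0, "gpu_total": 0.0}
    free_bytes, total_bytes = torch.cuda.mem_get_info()
    return {"gpu_free": free_bytes / 1024**3, "gpu_total": total_bytes / 1024**3}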
@@ -113,16 +134,22 @@ def load_matanyone_processor():
     Returns None if loading fails.
     """
     try:
-
+        print("Loading MatAnyone processor...", flush=True)
         from matanyone import InferenceCore
 
-
+        # Determine device
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"MatAnyone using device: {device}", flush=True)
+
+        processor = InferenceCore("PeiqingYang/MatAnyone", device=device)
 
-
+        print("✅ MatAnyone processor loaded successfully!", flush=True)
         return processor
 
     except Exception as e:
-
+        print(f"Failed to load MatAnyone: {e}", flush=True)
+        import traceback
+        traceback.print_exc()
        return None
 
 # Alias for new app.py
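For context, the processor returned here is MatAnyone's InferenceCore. Downstream usage would roughly follow the MatAnyone project's published quick-start; the sketch below assumes its process_video() interface and argument names, and all paths are placeholders rather than files in this repo:

from matanyone import InferenceCore

# Hedged sketch based on MatAnyone's documented quick-start; treat the
# method and argument names as assumptions, and the paths as placeholders.
processor = InferenceCore("PeiqingYang/MatAnyone")
foreground_path, alpha_path = processor.process_video(
    input_path="input.mp4",            # source video
    mask_path="first_frame_mask.png",  # first-frame segmentation mask
    output_path="outputs",             # directory for foreground + alpha results
)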
@@ -142,7 +169,7 @@ def load_matanyone():
         enable_segmentation=True,
         min_detection_confidence=0.5
     )
-
+    print("✅ MediaPipe Pose initialized", flush=True)
 
     # ============================================================================
     # Model Health Check
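This hunk only shows the tail of the MediaPipe Pose initialization. For reference, a typical per-frame call against a Pose instance configured with enable_segmentation=True looks like the following; the frame source is a placeholder:

import cv2
import mediapipe as mp

pose = mp.solutions.pose.Pose(
    enable_segmentation=True,
    min_detection_confidence=0.5,
)
frame_bgr = cv2.imread("frame.png")  # placeholder input frame
# MediaPipe expects RGB input; OpenCV loads BGR
results = pose.process(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
if results.segmentation_mask is not None:
    person_mask = results.segmentation_mask > 0.5  # float mask -> boolean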
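End to end, load_sam2_predictor() returns a standard SAM2ImagePredictor (or None on failure), so a quick smoke test of the loader can use SAM2's documented point-prompt API. The image and click coordinates below are placeholders:

import numpy as np

predictor = load_sam2_predictor()
if predictor is not None:
    image = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder RGB frame
    predictor.set_image(image)
    masks, scores, logits = predictor.predict(
        point_coords=np.array([[320, 240]]),  # one foreground click (x, y)
        point_labels=np.array([1]),           # 1 = positive point
    )
    print(masks.shape, scores)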