Update models/model_loaders.py

models/model_loaders.py  CHANGED  +1 -23
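Removes the unused "import mediapipe as mp" and the module-level MediaPipe Pose initialization that ran as an import-time side effect, and strips stray blank lines throughout the file; the single added line re-enters the gpu_free computation in get_memory_usage() (a formatting-only change as far as the diff shows).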
@@ -4,14 +4,12 @@
 Handles lazy loading of SAM2 and MatAnyone models with caching
 (Enhanced logging, error handling, and memory safety)
 """
-
 import os
 import gc
 import logging
 import streamlit as st
 import torch
 import psutil
-import mediapipe as mp
 from contextlib import contextmanager
 
 logging.basicConfig(level=logging.INFO)
@@ -33,7 +31,7 @@ def get_memory_usage():
     if torch.cuda.is_available():
         memory_info['gpu_allocated'] = torch.cuda.memory_allocated() / 1e9
         memory_info['gpu_reserved'] = torch.cuda.memory_reserved() / 1e9
-        memory_info['gpu_free'] = (torch.cuda.get_device_properties(0).total_memory -
+        memory_info['gpu_free'] = (torch.cuda.get_device_properties(0).total_memory -
                                    torch.cuda.memory_allocated()) / 1e9
     memory_info['ram_used'] = psutil.virtual_memory().used / 1e9
     memory_info['ram_available'] = psutil.virtual_memory().available / 1e9
@@ -55,13 +53,10 @@ def load_sam2_predictor():
         logger.info("[load_sam2_predictor] Loading SAM2 image predictor...")  # [LOG+SAFETY PATCH]
         from sam2.build_sam import build_sam2
         from sam2.sam2_image_predictor import SAM2ImagePredictor
-
         device = "cuda" if torch.cuda.is_available() else "cpu"
         logger.info(f"[load_sam2_predictor] Using device: {device}")
-
         checkpoint_path = "/home/user/app/checkpoints/sam2.1_hiera_large.pt"
         model_cfg = "/home/user/app/configs/sam2.1/sam2.1_hiera_l.yaml"
-
         if not os.path.exists(checkpoint_path) or not os.path.exists(model_cfg):
             logger.warning("[load_sam2_predictor] Local checkpoints not found, using Hugging Face.")
             predictor = SAM2ImagePredictor.from_pretrained(
@@ -88,15 +83,12 @@ def load_sam2_predictor():
             logger.info("[load_sam2_predictor] Using local large model")
             sam2_model = build_sam2(model_cfg, checkpoint_path, device=device)
             predictor = SAM2ImagePredictor(sam2_model)
-
         if hasattr(predictor, 'model'):
             predictor.model.to(device)
             predictor.model.eval()
             logger.info(f"[load_sam2_predictor] SAM2 model moved to {device} and set to eval mode")
-
         logger.info(f"✅ SAM2 loaded successfully on {device}!")
         return predictor, device
-
     except Exception as e:
         logger.error(f"❌ Failed to load SAM2 predictor: {e}", exc_info=True)
         import traceback
@@ -112,23 +104,18 @@ def load_matanyone_processor():
     try:
         logger.info("[load_matanyone_processor] Loading MatAnyone processor...")  # [LOG+SAFETY PATCH]
         from matanyone import InferenceCore
-
         device = "cuda" if torch.cuda.is_available() else "cpu"
         logger.info(f"[load_matanyone_processor] MatAnyone using device: {device}")
-
         processor = InferenceCore("PeiqingYang/MatAnyone", device=device)
         if hasattr(processor, 'model'):
             processor.model.to(device)
             processor.model.eval()
             logger.info(f"[load_matanyone_processor] MatAnyone model explicitly moved to {device}")
-
         if not hasattr(processor, 'device'):
             processor.device = device
             logger.info(f"[load_matanyone_processor] Set processor.device to {device}")
-
         logger.info(f"✅ MatAnyone loaded successfully on {device}!")
         return processor, device
-
     except Exception as e:
         logger.error(f"❌ Failed to load MatAnyone: {e}", exc_info=True)
         import traceback
@@ -139,15 +126,6 @@ def load_matanyone():
     processor, device = load_matanyone_processor()
     return processor
 
-mp_pose = mp.solutions.pose
-pose = mp_pose.Pose(
-    static_image_mode=False,
-    model_complexity=1,
-    enable_segmentation=True,
-    min_detection_confidence=0.5
-)
-logger.info("✅ MediaPipe Pose initialized",)  # [LOG+SAFETY PATCH]
-
 def test_models():
     results = {
         'sam2': {'loaded': False, 'error': None, 'device': None},
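
For reviewers, a minimal usage sketch of the module after this change. This is illustrative only and not part of the commit; it assumes the import path models.model_loaders and relies only on the return shapes and dictionary keys visible in the diff above.

from models.model_loaders import (
    load_sam2_predictor,
    load_matanyone_processor,
    get_memory_usage,
)

# Each loader returns a (model, device) pair, where device is "cuda" or "cpu".
predictor, device = load_sam2_predictor()      # SAM2 image predictor
processor, _ = load_matanyone_processor()      # MatAnyone InferenceCore

# get_memory_usage() reports sizes in GB (byte counts divided by 1e9);
# the gpu_* keys are present only when torch.cuda.is_available().
mem = get_memory_usage()
print(f"RAM used {mem['ram_used']:.2f} GB, available {mem['ram_available']:.2f} GB")
if 'gpu_allocated' in mem:
    print(f"GPU allocated {mem['gpu_allocated']:.2f} GB, free {mem['gpu_free']:.2f} GB")

Note that load_matanyone() (context lines in the last hunk) simply wraps load_matanyone_processor() and returns the processor alone.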