#!/usr/bin/env python3
"""
SAM2 loader with Hugging Face Hub integration.
Provides the SAM2Predictor class with memory management and T4 optimization features.
Checkpoints are fetched from the Hugging Face Hub instead of being downloaded directly.
(Enhanced logging and exception safety.)
"""

import gc
import torch
import logging
import numpy as np
from pathlib import Path
from typing import Optional, Dict

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class SAM2Predictor:
    """
    T4-optimized SAM2 video predictor wrapper with memory management
    """
    
    def __init__(self, device: torch.device, model_size: str = "small"):
        logger.info(f"[SAM2Predictor.__init__] device={device}, model_size={model_size}")  # [LOG+SAFETY PATCH]
        self.device = device
        self.model_size = model_size
        self.predictor = None
        self.model = None
        self._load_predictor()
        
    def _load_predictor(self):
        """Load SAM2 predictor with Hugging Face Hub integration"""
        try:
            logger.info("[SAM2Predictor._load_predictor] Loading SAM2 predictor...")  # [LOG+SAFETY PATCH]
            from sam2.build_sam import build_sam2_video_predictor
            
            checkpoint_path = self._get_hf_checkpoint()
            if not checkpoint_path:
                logger.error(f"Failed to get SAM2 {self.model_size} checkpoint from HF Hub")  # [LOG+SAFETY PATCH]
                raise RuntimeError(f"Failed to get SAM2 {self.model_size} checkpoint from HF Hub")
            
            model_cfg = self._get_model_config()
            logger.info(f"[SAM2Predictor._load_predictor] Using model_cfg: {model_cfg}")  # [LOG+SAFETY PATCH]
            
            self.predictor = build_sam2_video_predictor(model_cfg, checkpoint_path, device=self.device)
            self._optimize_for_t4()
            logger.info(f"SAM2 {self.model_size} predictor loaded successfully from HF Hub")
        except ImportError as e:
            logger.error(f"SAM2 import failed: {e}")
            raise RuntimeError("SAM2 not available - check sam2 installation")
        except Exception as e:
            logger.error(f"SAM2 loading failed: {e}", exc_info=True)
            raise
    
    def _get_hf_checkpoint(self) -> Optional[str]:
        """Download checkpoint from Hugging Face Hub"""
        try:
            logger.info(f"[SAM2Predictor._get_hf_checkpoint] Downloading checkpoint...")  # [LOG+SAFETY PATCH]
            from huggingface_hub import hf_hub_download
            
            repo_mapping = {
                "small": "facebook/sam2-hiera-small",
                "base": "facebook/sam2-hiera-base-plus", 
                "large": "facebook/sam2-hiera-large"
            }
            filename_mapping = {
                "small": "sam2_hiera_small.pt",
                "base": "sam2_hiera_base_plus.pt",
                "large": "sam2_hiera_large.pt"
            }
            if self.model_size not in repo_mapping:
                logger.error(f"Unknown model size: {self.model_size}")
                return None
            repo_id = repo_mapping[self.model_size]
            filename = filename_mapping[self.model_size]
            logger.info(f"Downloading SAM2 {self.model_size} from HF Hub: {repo_id}")
            checkpoint_path = hf_hub_download(
                repo_id=repo_id,
                filename=filename,
                cache_dir=None,
                force_download=False,
                token=None
            )
            logger.info(f"SAM2 checkpoint downloaded to: {checkpoint_path}")
            return checkpoint_path
        except Exception as e:
            logger.error(f"HF Hub download failed: {e}")
            return self._fallback_local_checkpoint()
    
    def _fallback_local_checkpoint(self) -> Optional[str]:
        """Fallback to local checkpoint files"""
        try:
            # Use the same filenames as the HF Hub repos (note: "base" maps to base_plus)
            filename_mapping = {
                "small": "sam2_hiera_small.pt",
                "base": "sam2_hiera_base_plus.pt",
                "large": "sam2_hiera_large.pt"
            }
            filename = filename_mapping.get(self.model_size, f"sam2_hiera_{self.model_size}.pt")
            checkpoint_path = Path("./checkpoints") / filename
            if checkpoint_path.exists():
                logger.info(f"Using local checkpoint: {checkpoint_path}")
                return str(checkpoint_path)
            logger.error(f"Local checkpoint not found: {checkpoint_path}")
            return None
        except Exception as e:
            logger.error(f"Local checkpoint fallback failed: {e}")
            return None
    
    def _get_model_config(self) -> str:
        """Get the appropriate model config file"""
        config_mapping = {
            "small": "sam2_hiera_s.yaml",
            "base": "sam2_hiera_b+.yaml", 
            "large": "sam2_hiera_l.yaml"
        }
        cfg = config_mapping.get(self.model_size, "sam2_hiera_s.yaml")
        logger.info(f"[SAM2Predictor._get_model_config] Returning config: {cfg}")  # [LOG+SAFETY PATCH]
        return cfg
    
    def _optimize_for_t4(self):
        """Apply T4-specific optimizations"""
        try:
            logger.info("[SAM2Predictor._optimize_for_t4] Optimizing for T4...")  # [LOG+SAFETY PATCH]
            # The video predictor may either wrap the model (predictor.model) or be
            # the nn.Module itself; handle both cases so the optimization is not
            # silently skipped.
            model = getattr(self.predictor, "model", None)
            if model is None and isinstance(self.predictor, torch.nn.Module):
                model = self.predictor
            if model is not None:
                self.model = model.half().to(self.device)
                self.model = self.model.to(memory_format=torch.channels_last)
                logger.info("SAM2: fp16 + channels_last applied for T4 optimization")
        except Exception as e:
            logger.warning(f"SAM2 T4 optimization warning: {e}", exc_info=True)
    
    def init_state(self, video_path: str):
        logger.info(f"[SAM2Predictor.init_state] Initializing video state for: {video_path}")  # [LOG+SAFETY PATCH]
        if self.predictor is None:
            logger.error("Predictor not loaded in init_state")
            raise RuntimeError("Predictor not loaded")
        try:
            state = self.predictor.init_state(video_path=video_path)
            logger.info("[SAM2Predictor.init_state] Video state initialized OK")
            return state
        except Exception as e:
            logger.error(f"Failed to initialize video state: {e}", exc_info=True)
            raise
    
    def add_new_points(self, inference_state, frame_idx: int, obj_id: int, 
                      points: np.ndarray, labels: np.ndarray):
        logger.info(f"[SAM2Predictor.add_new_points] Adding points for frame {frame_idx}, obj {obj_id}")  # [LOG+SAFETY PATCH]
        if self.predictor is None:
            logger.error("Predictor not loaded in add_new_points")
            raise RuntimeError("Predictor not loaded")
        try:
            out = self.predictor.add_new_points(
                inference_state=inference_state,
                frame_idx=frame_idx,
                obj_id=obj_id,
                points=points,
                labels=labels
            )
            logger.info(f"[SAM2Predictor.add_new_points] Points added OK")
            return out
        except Exception as e:
            logger.error(f"Failed to add new points: {e}", exc_info=True)
            raise
    
    def add_new_points_or_box(self, inference_state, frame_idx: int, obj_id: int, 
                             points: np.ndarray, labels: np.ndarray, clear_old_points: bool = True):
        logger.info(f"[SAM2Predictor.add_new_points_or_box] Adding points/box for frame {frame_idx}, obj {obj_id}")  # [LOG+SAFETY PATCH]
        if self.predictor is None:
            logger.error("Predictor not loaded in add_new_points_or_box")
            raise RuntimeError("Predictor not loaded")
        try:
            if hasattr(self.predictor, 'add_new_points_or_box'):
                out = self.predictor.add_new_points_or_box(
                    inference_state=inference_state,
                    frame_idx=frame_idx,
                    obj_id=obj_id,
                    points=points,
                    labels=labels,
                    clear_old_points=clear_old_points
                )
                logger.info(f"[SAM2Predictor.add_new_points_or_box] Used new API, points/box added OK")
                return out
            else:
                out = self.predictor.add_new_points(
                    inference_state=inference_state,
                    frame_idx=frame_idx,
                    obj_id=obj_id,
                    points=points,
                    labels=labels
                )
                logger.info(f"[SAM2Predictor.add_new_points_or_box] Used fallback, points added OK")
                return out
        except Exception as e:
            logger.error(f"Failed to add new points or box: {e}", exc_info=True)
            raise
    
    def propagate_in_video(self, inference_state, scale: float = 1.0, **kwargs):
        # NOTE: `scale` is kept for interface compatibility with callers but is not
        # forwarded to SAM2's propagate_in_video.
        logger.info("[SAM2Predictor.propagate_in_video] Propagating in video...")  # [LOG+SAFETY PATCH]
        if self.predictor is None:
            logger.error("Predictor not loaded in propagate_in_video")
            raise RuntimeError("Predictor not loaded")
        try:
            out = self.predictor.propagate_in_video(inference_state, **kwargs)
            logger.info("[SAM2Predictor.propagate_in_video] Propagation OK")
            return out
        except Exception as e:
            logger.error(f"Failed to propagate in video: {e}", exc_info=True)
            raise
    
    def prune_state(self, inference_state, keep: int):
        logger.info(f"[SAM2Predictor.prune_state] Pruning state to keep {keep} frames...")  # [LOG+SAFETY PATCH]
        try:
            # SAM2 inference states are typically dict-like; fall back to attribute
            # access for wrappers that expose the same fields.
            def _field(state, name):
                if isinstance(state, dict):
                    return state.get(name)
                return getattr(state, name, None)

            cached_features = _field(inference_state, 'cached_features')
            if cached_features and len(cached_features) > keep:
                keys_to_remove = list(cached_features.keys())[:-keep]
                for key in keys_to_remove:
                    cached_features.pop(key, None)
                logger.debug(f"Pruned {len(keys_to_remove)} old cached features")

            point_inputs_per_obj = _field(inference_state, 'point_inputs_per_obj')
            if point_inputs_per_obj:
                for obj_id in list(point_inputs_per_obj.keys()):
                    obj_inputs = point_inputs_per_obj[obj_id]
                    if len(obj_inputs) > keep:
                        recent_keys = sorted(obj_inputs.keys())[-keep:]
                        point_inputs_per_obj[obj_id] = {k: obj_inputs[k] for k in recent_keys}

            if self.device.type == 'cuda':
                torch.cuda.empty_cache()
        except Exception as e:
            logger.debug(f"State pruning warning: {e}", exc_info=True)
    
    def clear_memory(self):
        logger.info("[SAM2Predictor.clear_memory] Clearing GPU memory")  # [LOG+SAFETY PATCH]
        try:
            if self.device.type == 'cuda':
                torch.cuda.empty_cache()
                torch.cuda.synchronize()
                torch.cuda.ipc_collect()
            gc.collect()
        except Exception as e:
            logger.warning(f"Memory clearing warning: {e}", exc_info=True)
    
    def get_memory_usage(self) -> Dict[str, float]:
        logger.info("[SAM2Predictor.get_memory_usage] Checking memory usage")  # [LOG+SAFETY PATCH]
        if self.device.type != 'cuda':
            return {"allocated_gb": 0.0, "reserved_gb": 0.0, "free_gb": 0.0}
        try:
            allocated = torch.cuda.memory_allocated(self.device) / (1024**3)
            reserved = torch.cuda.memory_reserved(self.device) / (1024**3) 
            free, total = torch.cuda.mem_get_info(self.device)
            free_gb = free / (1024**3)
            return {
                "allocated_gb": allocated,
                "reserved_gb": reserved, 
                "free_gb": free_gb,
                "total_gb": total / (1024**3)
            }
        except Exception as e:
            logger.warning(f"Error checking memory usage: {e}", exc_info=True)
            return {"allocated_gb": 0.0, "reserved_gb": 0.0, "free_gb": 0.0}
    
    def __del__(self):
        logger.info("[SAM2Predictor.__del__] Cleaning up...")  # [LOG+SAFETY PATCH]
        try:
            if hasattr(self, 'predictor') and self.predictor is not None:
                del self.predictor
            if hasattr(self, 'model') and self.model is not None:
                del self.model
            self.clear_memory()
        except Exception as e:
            logger.warning(f"Error in __del__: {e}", exc_info=True)