mmrech committed on
Commit
e6d00b4
·
1 Parent(s): 3233b61

Fix: Make app startup more resilient to prevent SSE errors

Browse files

- Don't raise exception if HF_TOKEN is missing (show warning instead)
- Don't raise exception if SAM 3 model fails to load (allow app to start)
- Add graceful import handling for Sam3Processor/Sam3Model
- Add startup verification messages
- This prevents app crashes that cause 'SSE is not enabled' errors

Files changed (1)
  1. app.py +61 -25
app.py CHANGED
@@ -15,7 +15,17 @@ import torch
15
  import pydicom
16
  import numpy as np
17
  from PIL import Image, ImageEnhance, ImageDraw
18
- from transformers import Sam3Processor, Sam3Model
 
 
 
 
 
 
 
 
 
 
19
  import matplotlib.pyplot as plt
20
  from matplotlib.patches import Rectangle
21
  from scipy import ndimage
@@ -31,7 +41,9 @@ except ImportError:
31
  # Hugging Face Token (must be set as HF_TOKEN environment variable in Space settings)
32
  hf_token = os.getenv("HF_TOKEN")
33
  if not hf_token:
34
- raise ValueError("HF_TOKEN environment variable is required. Please set it in Space settings.")
 
 
35
 
36
  # Login to Hugging Face Hub
37
  try:
@@ -46,26 +58,39 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
46
  model = None
47
  processor = None
48
 
49
- # SAM 3 model identifier - matching official implementation
50
- SAM_MODEL_ID = "facebook/sam3"
51
-
52
- try:
53
- # Load model with proper dtype (float16 for GPU, float32 for CPU) - matching official implementation
54
- model = Sam3Model.from_pretrained(
55
- SAM_MODEL_ID,
56
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
57
- token=hf_token
58
- ).to(device)
59
- processor = Sam3Processor.from_pretrained(SAM_MODEL_ID, token=hf_token)
60
- model.eval()
61
- print(f"✅ SAM 3 Model Loaded Successfully! ({SAM_MODEL_ID})")
62
- except Exception as e:
63
- print(f"❌ Failed to load SAM 3 model: {e}")
64
- print("Ensure you have:")
65
- print(" 1. transformers>=4.45.0 for SAM 3 support")
66
- print(" 2. Valid Hugging Face token with access to SAM 3")
67
- print(" 3. Sufficient memory for the model")
68
- raise
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  def run_sam3_inference(pil_image, prompt_text, threshold=0.5, mask_threshold=0.5):
71
  """
@@ -81,8 +106,8 @@ def run_sam3_inference(pil_image, prompt_text, threshold=0.5, mask_threshold=0.5
81
  results dict with 'masks' and 'scores' keys, or None if failed
82
  """
83
  if model is None or processor is None:
84
- print("❌ Model not loaded")
85
- return None
86
 
87
  try:
88
  # Prepare inputs - matching official implementation
@@ -3214,4 +3239,15 @@ with gr.Blocks() as demo:
3214
  )
3215
 
3216
  if __name__ == "__main__":
3217
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
15
  import pydicom
16
  import numpy as np
17
  from PIL import Image, ImageEnhance, ImageDraw
18
+ try:
19
+ from transformers import Sam3Processor, Sam3Model
20
+ SAM3_AVAILABLE = True
21
+ except ImportError:
22
+ print("⚠️ Warning: Sam3Processor/Sam3Model not found in transformers.")
23
+ print("⚠️ Please ensure transformers>=4.45.0 is installed.")
24
+ print("⚠️ You may need: pip install transformers>=4.45.0")
25
+ SAM3_AVAILABLE = False
26
+ # Create dummy classes to prevent import errors
27
+ Sam3Processor = None
28
+ Sam3Model = None
29
  import matplotlib.pyplot as plt
30
  from matplotlib.patches import Rectangle
31
  from scipy import ndimage
 
41
  # Hugging Face Token (must be set as HF_TOKEN environment variable in Space settings)
42
  hf_token = os.getenv("HF_TOKEN")
43
  if not hf_token:
44
+ print("⚠️ WARNING: HF_TOKEN environment variable not set!")
45
+ print("⚠️ Some features may not work. Please set HF_TOKEN in Space settings.")
46
+ hf_token = None # Allow app to start, but model loading will fail gracefully
47
 
48
  # Login to Hugging Face Hub
49
  try:
 
58
  model = None
59
  processor = None
60
 
61
+ if not SAM3_AVAILABLE:
62
+ print("❌ SAM 3 classes not available in transformers library.")
63
+ print("❌ Please install: pip install transformers>=4.45.0")
64
+ print("⚠️ App will start but segmentation features will be disabled.")
65
+ else:
66
+ # SAM 3 model identifier - matching official implementation
67
+ SAM_MODEL_ID = "facebook/sam3"
68
+
69
+ if hf_token is None:
70
+ print("⚠️ Cannot load model: HF_TOKEN not set")
71
+ model = None
72
+ processor = None
73
+ else:
74
+ try:
75
+ # Load model with proper dtype (float16 for GPU, float32 for CPU) - matching official implementation
76
+ model = Sam3Model.from_pretrained(
77
+ SAM_MODEL_ID,
78
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
79
+ token=hf_token
80
+ ).to(device)
81
+ processor = Sam3Processor.from_pretrained(SAM_MODEL_ID, token=hf_token)
82
+ model.eval()
83
+ print(f"✅ SAM 3 Model Loaded Successfully! ({SAM_MODEL_ID})")
84
+ except Exception as e:
85
+ print(f"⚠️ Failed to load SAM 3 model: {e}")
86
+ print("Ensure you have:")
87
+ print(" 1. transformers>=4.45.0 for SAM 3 support")
88
+ print(" 2. Valid Hugging Face token with access to SAM 3")
89
+ print(" 3. Sufficient memory for the model")
90
+ print("⚠️ App will start but segmentation features will be disabled until model loads.")
91
+ # Don't raise - allow app to start and show error in UI
92
+ model = None
93
+ processor = None
94
 
95
  def run_sam3_inference(pil_image, prompt_text, threshold=0.5, mask_threshold=0.5):
96
  """
 
106
  results dict with 'masks' and 'scores' keys, or None if failed
107
  """
108
  if model is None or processor is None:
109
+ print("❌ Model not loaded - please check HF_TOKEN and model availability")
110
+ raise ValueError("SAM 3 model not loaded. Please check that HF_TOKEN is set correctly and the model is accessible.")
111
 
112
  try:
113
  # Prepare inputs - matching official implementation
 
3239
  )
3240
 
3241
  if __name__ == "__main__":
3242
+ # Verify model is loaded before launching
3243
+ if model is None or processor is None:
3244
+ print("⚠️ WARNING: SAM 3 model failed to load!")
3245
+ print("⚠️ The app will start but segmentation features will not work.")
3246
+ print("⚠️ Please check:")
3247
+ print(" 1. HF_TOKEN environment variable is set correctly")
3248
+ print(" 2. transformers>=4.45.0 is installed")
3249
+ print(" 3. Sufficient memory/GPU available")
3250
+ else:
3251
+ print("✅ SAM 3 model ready - app starting...")
3252
+
3253
+ demo.launch(server_name="0.0.0.0", server_port=7860)