Fred808 committed on
Commit
b70de74
·
verified ·
1 Parent(s): 2c70576

Update vision_analyzer.py

Browse files
Files changed (1) hide show
  1. vision_analyzer.py +22 -8
vision_analyzer.py CHANGED
@@ -81,22 +81,36 @@ import sys
81
  device = "cpu" # Explicitly ensure CPU usage
82
 
83
  try:
84
- # Load the model, forcing the 'eager' (CPU-compatible) attention implementation
 
85
  vision_language_model_large = Blip2ForConditionalGeneration.from_pretrained(
86
  "Salesforce/blip2-opt-2.7b",
 
 
 
87
  trust_remote_code=True
88
- ).to(device).eval()
 
89
  vision_language_processor_large = AutoProcessor.from_pretrained(
90
  "Salesforce/blip2-opt-2.7b",
91
  trust_remote_code=True
92
  )
93
- print("BLIP-2 base model and processor loaded successfully on CPU.")
94
  except Exception as e:
95
- print(f"Error loading BLIP-2 model on CPU: {e}")
96
- print("Please ensure you have enough RAM and a compatible PyTorch version.")
97
- vision_language_model_large = None
98
- vision_language_processor_large = None
99
-
 
 
 
 
 
 
 
 
 
100
 
101
  def log_message(message: str):
102
  """Log messages with timestamp"""
 
# Model/processor globals for BLIP-2 vision-language captioning.
# Loaded once at import time; both are set to None if loading fails so
# downstream code can check availability instead of crashing on NameError.
device = "cpu"  # Explicitly ensure CPU usage

try:
    # Load BLIP-2 (OPT-2.7B) pinned to the CPU.
    # NOTE: a single try is used here — the previous revision nested a second
    # bare `try:` which left the outer one without an except clause (SyntaxError).
    vision_language_model_large = Blip2ForConditionalGeneration.from_pretrained(
        "Salesforce/blip2-opt-2.7b",
        torch_dtype=torch.float32,   # Explicit fp32: fp16 ops are not supported on CPU
        device_map="cpu",            # Force CPU placement
        low_cpu_mem_usage=True,      # Stream weights to reduce peak RAM during load
        trust_remote_code=True,
    ).eval()  # Don't call .to(device): device_map already handles placement

    vision_language_processor_large = AutoProcessor.from_pretrained(
        "Salesforce/blip2-opt-2.7b",
        trust_remote_code=True,
    )
    print("BLIP-2 model loaded successfully on CPU.")
except Exception as e:
    # Deliberate best-effort: the app degrades gracefully when the model is
    # unavailable (insufficient RAM, no network, incompatible torch build).
    print(f"Error loading BLIP-2 model: {e}")
    print("Please ensure you have enough RAM and a compatible PyTorch version.")
    # NOTE(review): the previous fallback tried "Salesforce/blip2-opt-1.3b",
    # which is not a published BLIP-2 checkpoint (available OPT variants are
    # 2.7b and 6.7b), so it could never succeed — removed. It also left
    # vision_language_processor_large unassigned on that path (NameError later).
    vision_language_model_large = None
    vision_language_processor_large = None
115
  def log_message(message: str):
116
  """Log messages with timestamp"""