Factor Studios committed on
Commit
937f1ed
·
verified ·
1 Parent(s): 028bfa9

Update test_ai_integration_http.py

Browse files
Files changed (1) hide show
  1. test_ai_integration_http.py +38 -24
test_ai_integration_http.py CHANGED
@@ -113,32 +113,46 @@ def test_ai_integration_http():
113
  logger.info(f"Loading {model_name}")
114
 
115
  try:
116
- # Load processor first
117
- processor = AutoProcessor.from_pretrained(
118
- model_name,
119
- trust_remote_code=True
120
- )
121
- status['processor_loaded'] = True
122
 
123
- # Load model directly - let it handle config internally
124
- model = AutoModel.from_pretrained(
125
- model_name,
126
- trust_remote_code=True,
127
- torch_dtype=torch.float32, # Use float32 for better compatibility
128
- device_map=None, # Don't auto-map devices
129
- ignore_mismatched_sizes=True # Handle any size mismatches
130
- )
131
- status['model_loaded'] = True
132
-
133
- logger.info(f"Processor type: {type(processor).__name__}")
134
- logger.info(f"Model type: {type(model).__name__}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
 
136
- # Log model architecture and details
137
- model_size = get_model_size(model)
138
- logger.info(f"Model loaded: {model_size/1e9:.2f} GB in parameters")
139
- logger.info(f"Model architecture: {model.__class__.__name__}")
140
- if hasattr(model, 'config'):
141
- logger.info(f"Model config type: {type(model.config).__name__}")
142
  except Exception as e:
143
  logger.error(f"Model loading failed: {str(e)}")
144
  raise
 
113
  logger.info(f"Loading {model_name}")
114
 
115
  try:
116
+ # Disable transformers logging temporarily
117
+ import logging
118
+ transformers_logger = logging.getLogger("transformers")
119
+ original_level = transformers_logger.level
120
+ transformers_logger.setLevel(logging.ERROR)
 
121
 
122
+ try:
123
+ # Load processor first
124
+ processor = AutoProcessor.from_pretrained(
125
+ model_name,
126
+ trust_remote_code=True
127
+ )
128
+ status['processor_loaded'] = True
129
+
130
+ # Import the specific model class
131
+ from transformers.models.florence.modeling_florence import Florence2Model
132
+
133
+ # Load model directly with specific class
134
+ model = Florence2Model.from_pretrained(
135
+ model_name,
136
+ trust_remote_code=True,
137
+ torch_dtype=torch.float32,
138
+ device_map=None,
139
+ ignore_mismatched_sizes=True
140
+ )
141
+ status['model_loaded'] = True
142
+
143
+ # Log model details
144
+ logger.info(f"Processor type: {type(processor).__name__}")
145
+ logger.info(f"Model type: {type(model).__name__}")
146
+
147
+ # Log model architecture
148
+ model_size = get_model_size(model)
149
+ logger.info(f"Model loaded: {model_size/1e9:.2f} GB in parameters")
150
+ logger.info(f"Model architecture: {model.__class__.__name__}")
151
 
152
+ finally:
153
+ # Restore original logging level
154
+ transformers_logger.setLevel(original_level)
155
+
 
 
156
  except Exception as e:
157
  logger.error(f"Model loading failed: {str(e)}")
158
  raise