samwell committed on
Commit
1675b21
·
verified ·
1 Parent(s): 3fc00e4

Upload export_medsiglip.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. export_medsiglip.py +34 -19
export_medsiglip.py CHANGED
@@ -212,9 +212,10 @@ def load_trained_model(config: Config):
212
 
213
 
214
  def export_to_onnx(model, config: Config, quantize: bool = True):
215
- """Export model to ONNX format"""
216
  import onnx
217
- from onnxruntime.quantization import quantize_dynamic, QuantType
 
218
 
219
  config.output_dir.mkdir(parents=True, exist_ok=True)
220
 
@@ -222,22 +223,29 @@ def export_to_onnx(model, config: Config, quantize: bool = True):
222
  wrapper = LaborViewExportWrapper(model)
223
  wrapper.eval()
224
 
225
- # Create dummy input
226
  dummy_input = torch.randn(1, 3, config.image_size, config.image_size)
227
 
 
 
 
228
  # Export paths
229
  onnx_path = config.output_dir / "laborview_medsiglip.onnx"
230
  onnx_quant_path = config.output_dir / "laborview_medsiglip_int8.onnx"
231
 
232
- print(f"Exporting to ONNX: {onnx_path}")
233
 
234
- # Export to ONNX
 
 
 
 
235
  torch.onnx.export(
236
- wrapper,
237
  dummy_input,
238
  str(onnx_path),
239
  export_params=True,
240
- opset_version=config.opset_version,
241
  do_constant_folding=True,
242
  input_names=['pixel_values'],
243
  output_names=['seg_probs', 'plane_pred'],
@@ -257,20 +265,27 @@ def export_to_onnx(model, config: Config, quantize: bool = True):
257
  onnx_size = onnx_path.stat().st_size / (1024 * 1024 * 1024)
258
  print(f"ONNX model size: {onnx_size:.2f} GB")
259
 
260
- # Quantize to INT8
261
- if quantize:
262
- print(f"Quantizing to INT8: {onnx_quant_path}")
263
- quantize_dynamic(
264
- str(onnx_path),
265
- str(onnx_quant_path),
266
- weight_type=QuantType.QInt8
267
- )
 
 
 
268
 
269
- quant_size = onnx_quant_path.stat().st_size / (1024 * 1024 * 1024)
270
- print(f"Quantized model size: {quant_size:.2f} GB")
271
- print(f"Size reduction: {(1 - quant_size/onnx_size) * 100:.1f}%")
 
 
 
 
272
 
273
- return onnx_path, onnx_quant_path if quantize else None
274
 
275
 
276
  def export_to_coreml(model, config: Config):
 
212
 
213
 
214
  def export_to_onnx(model, config: Config, quantize: bool = True):
215
+ """Export model to ONNX format using TorchScript tracing"""
216
  import onnx
217
+ import os
218
+ os.environ["TORCH_ONNX_USE_DYNAMO"] = "0" # Force legacy exporter
219
 
220
  config.output_dir.mkdir(parents=True, exist_ok=True)
221
 
 
223
  wrapper = LaborViewExportWrapper(model)
224
  wrapper.eval()
225
 
226
+ # Create dummy input on CPU
227
  dummy_input = torch.randn(1, 3, config.image_size, config.image_size)
228
 
229
+ # Move model to CPU for export
230
+ wrapper = wrapper.cpu()
231
+
232
  # Export paths
233
  onnx_path = config.output_dir / "laborview_medsiglip.onnx"
234
  onnx_quant_path = config.output_dir / "laborview_medsiglip_int8.onnx"
235
 
236
+ print(f"Exporting to ONNX (legacy exporter): {onnx_path}")
237
 
238
+ # Trace the model first
239
+ with torch.no_grad():
240
+ traced = torch.jit.trace(wrapper, dummy_input)
241
+
242
+ # Export traced model to ONNX
243
  torch.onnx.export(
244
+ traced,
245
  dummy_input,
246
  str(onnx_path),
247
  export_params=True,
248
+ opset_version=14, # Use stable opset version
249
  do_constant_folding=True,
250
  input_names=['pixel_values'],
251
  output_names=['seg_probs', 'plane_pred'],
 
265
  onnx_size = onnx_path.stat().st_size / (1024 * 1024 * 1024)
266
  print(f"ONNX model size: {onnx_size:.2f} GB")
267
 
268
+ # Quantize to INT8 (with error handling)
269
+ onnx_quant_path_result = None
270
+ if quantize and onnx_size > 0.001:
271
+ try:
272
+ from onnxruntime.quantization import quantize_dynamic, QuantType
273
+ print(f"Quantizing to INT8: {onnx_quant_path}")
274
+ quantize_dynamic(
275
+ str(onnx_path),
276
+ str(onnx_quant_path),
277
+ weight_type=QuantType.QInt8
278
+ )
279
 
280
+ quant_size = onnx_quant_path.stat().st_size / (1024 * 1024 * 1024)
281
+ print(f"Quantized model size: {quant_size:.2f} GB")
282
+ print(f"Size reduction: {(1 - quant_size/onnx_size) * 100:.1f}%")
283
+ onnx_quant_path_result = onnx_quant_path
284
+ except Exception as e:
285
+ print(f"Quantization failed: {e}")
286
+ print("Continuing with FP32 model only")
287
 
288
+ return onnx_path, onnx_quant_path_result
289
 
290
 
291
  def export_to_coreml(model, config: Config):