Upload export_medsiglip.py with huggingface_hub
Browse files · export_medsiglip.py (+3 −7)
export_medsiglip.py
CHANGED
|
@@ -239,7 +239,7 @@ def export_to_onnx(model, config: Config, quantize: bool = True):
|
|
| 239 |
with torch.no_grad():
|
| 240 |
traced = torch.jit.trace(wrapper, dummy_input)
|
| 241 |
|
| 242 |
-
# Export traced model to ONNX
|
| 243 |
torch.onnx.export(
|
| 244 |
traced,
|
| 245 |
dummy_input,
|
|
@@ -248,12 +248,8 @@ def export_to_onnx(model, config: Config, quantize: bool = True):
|
|
| 248 |
opset_version=14, # Use stable opset version
|
| 249 |
do_constant_folding=True,
|
| 250 |
input_names=['pixel_values'],
|
| 251 |
-
output_names=['seg_probs', 'plane_pred'],
|
| 252 |
-
dynamic_axes={
|
| 253 |
-
'pixel_values': {0: 'batch_size'},
|
| 254 |
-
'seg_probs': {0: 'batch_size'},
|
| 255 |
-
'plane_pred': {0: 'batch_size'}
|
| 256 |
-
}
|
| 257 |
)
|
| 258 |
|
| 259 |
# Verify ONNX model
|
|
|
|
| 239 |
with torch.no_grad():
|
| 240 |
traced = torch.jit.trace(wrapper, dummy_input)
|
| 241 |
|
| 242 |
+
# Export traced model to ONNX (fixed batch size for compatibility)
|
| 243 |
torch.onnx.export(
|
| 244 |
traced,
|
| 245 |
dummy_input,
|
|
|
|
| 248 |
opset_version=14, # Use stable opset version
|
| 249 |
do_constant_folding=True,
|
| 250 |
input_names=['pixel_values'],
|
| 251 |
+
output_names=['seg_probs', 'plane_pred']
|
| 252 |
+
# No dynamic_axes - use fixed batch size of 1 for mobile
|
|
|
|
|
|
|
|
|
|
|
|
|
| 253 |
)
|
| 254 |
|
| 255 |
# Verify ONNX model
|