diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..306d6b305126a796e9f67c0b69d81ec0f22fad40 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph.py @@ -0,0 +1,35 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation.
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _edge, + _message, + _node, + _property_bag, +) + + +@dataclasses.dataclass +class Graph(object): + """A network of nodes and directed edges that describes some aspect of the structure of the code (for example, a call graph).""" + + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + edges: Optional[List[_edge.Edge]] = dataclasses.field( + default=None, metadata={"schema_property_name": "edges"} + ) + nodes: Optional[List[_node.Node]] = dataclasses.field( + default=None, metadata={"schema_property_name": "nodes"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_invocation.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_invocation.py new file mode 100644 index 0000000000000000000000000000000000000000..d9fae0d550c46ab3768372ef738922abd242955b --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_invocation.py @@ -0,0 +1,118 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Any, List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _configuration_override, + _notification, + _property_bag, +) + + +@dataclasses.dataclass +class Invocation(object): + """The runtime environment of the analysis tool run.""" + + execution_successful: bool = dataclasses.field( + metadata={"schema_property_name": "executionSuccessful"} + ) + account: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "account"} + ) + arguments: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "arguments"} + ) + command_line: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "commandLine"} + ) + end_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "endTimeUtc"} + ) + environment_variables: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "environmentVariables"} + ) + executable_location: Optional[_artifact_location.ArtifactLocation] = ( + dataclasses.field( + default=None, metadata={"schema_property_name": "executableLocation"} + ) + ) + exit_code: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitCode"} + ) + exit_code_description: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitCodeDescription"} + ) + exit_signal_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitSignalName"} + ) + exit_signal_number: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitSignalNumber"} + ) + machine: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "machine"} + ) + notification_configuration_overrides: Optional[ + List[_configuration_override.ConfigurationOverride] + ] = dataclasses.field( + default=None, + 
metadata={"schema_property_name": "notificationConfigurationOverrides"}, + ) + process_id: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "processId"} + ) + process_start_failure_message: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "processStartFailureMessage"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + response_files: Optional[List[_artifact_location.ArtifactLocation]] = ( + dataclasses.field( + default=None, metadata={"schema_property_name": "responseFiles"} + ) + ) + rule_configuration_overrides: Optional[ + List[_configuration_override.ConfigurationOverride] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "ruleConfigurationOverrides"} + ) + start_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "startTimeUtc"} + ) + stderr: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stderr"} + ) + stdin: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stdin"} + ) + stdout: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stdout"} + ) + stdout_stderr: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stdoutStderr"} + ) + tool_configuration_notifications: Optional[List[_notification.Notification]] = ( + dataclasses.field( + default=None, + metadata={"schema_property_name": "toolConfigurationNotifications"}, + ) + ) + tool_execution_notifications: Optional[List[_notification.Notification]] = ( + dataclasses.field( + default=None, + metadata={"schema_property_name": "toolExecutionNotifications"}, + ) + ) + working_directory: Optional[_artifact_location.ArtifactLocation] = ( + dataclasses.field( + default=None, metadata={"schema_property_name": "workingDirectory"} + ) + ) + + +# flake8: noqa diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component_reference.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..c7929e12bc80de9d1f6db7d7810ff22ba5f51802 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component_reference.py @@ -0,0 +1,30 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class ToolComponentReference(object): + """Identifies a particular toolComponent object, either the driver or an extension.""" + + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "name"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f8bc9517e61b4f48c6c943ab5d3dccdf413cee6 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py @@ -0,0 +1,17 @@ +__all__ = [ + "ONNXRegistry", + "ONNXProgram", + "analyze", + "export", + "exported_program_to_ir", + "export_compat", + "testing", + "verification", +] + +from . import _testing as testing, _verification as verification +from ._analysis import analyze +from ._compat import export_compat +from ._core import export, exported_program_to_ir +from ._onnx_program import ONNXProgram +from ._registration import ONNXRegistry diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..2eb5adab3a7d9dc942e59a3c3159914500c23aea --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py @@ -0,0 +1,242 @@ +"""Compatibility analyzer for PyTorch models.""" + +# mypy: allow-untyped-defs +# flake8: noqa: B950 We do not need flake8 as it complains line length +from __future__ import annotations + +import dataclasses +import textwrap +import traceback +from collections import defaultdict +from typing import TYPE_CHECKING + +import torch +import torch._export.serde.schema +from torch.export import graph_signature +from torch.onnx._internal.exporter import _dispatching, _registration + + +if TYPE_CHECKING: + import torch.fx + + +@dataclasses.dataclass +class ModelInfo: + """Information about the model.""" + + parameter_count: defaultdict[torch.dtype, int] = dataclasses.field( + default_factory=lambda: defaultdict(int) + ) + buffer_count: defaultdict[torch.dtype, int] = dataclasses.field( + default_factory=lambda: defaultdict(int) + ) + fx_node_count: int = 0 + fx_node_op_count: defaultdict[str, int] = dataclasses.field( + default_factory=lambda: defaultdict(int) + ) + fx_node_target_count: defaultdict[str, int] = dataclasses.field( + default_factory=lambda: defaultdict(int) + ) + dispatch_failures: list[tuple[torch.fx.Node, str]] = dataclasses.field( + default_factory=list + ) + inputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field( + default_factory=dict + ) + outputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field( + default_factory=dict + ) + + +def _count_weights( + exported_program: torch.export.ExportedProgram, +) -> tuple[defaultdict[torch.dtype, int], defaultdict[torch.dtype, int]]: 
+ """Count the size of the parameters in the exported program.""" + + parameter_count: defaultdict[torch.dtype, int] = defaultdict(int) + buffer_count: defaultdict[torch.dtype, int] = defaultdict(int) + for parameter in exported_program.parameters(): + dtype = parameter.dtype + parameter_count[dtype] += parameter.numel() + + for buffer in exported_program.buffers(): + dtype = buffer.dtype + buffer_count[dtype] += buffer.numel() + + return parameter_count, buffer_count + + +def _format_model_info(model_info: ModelInfo) -> str: + """Format the information about the model.""" + lines = [ + textwrap.dedent( + f"""\ + PyTorch ONNX Conversion Analysis + + ## Model Information + + The model has {sum(model_info.parameter_count.values())} parameters and {sum(model_info.buffer_count.values())} buffers (non-trainable parameters). + Number of parameters per dtype: + ```python + {model_info.parameter_count} + ``` + Number of buffers per dtype: + ```python + {model_info.buffer_count} + ``` + """ + ), + "Inputs:", + *[f"- `{name}`: `{meta}`" for name, meta in model_info.inputs.items()], + "", + "Outputs:", + *[f"- `{name}`: `{meta}`" for name, meta in model_info.outputs.items()], + "", + f"The FX graph has {model_info.fx_node_count} nodes in total. Number of FX nodes per op:", + ] + for op, count in model_info.fx_node_op_count.items(): + lines.append(f"- `{op}`: {count}") + lines.append("\n") + lines.append("Of the call_function nodes, the counts of operators used are:\n") + sorted_targets = sorted( + model_info.fx_node_target_count.items(), key=lambda x: x[1], reverse=True + ) + for target, count in sorted_targets: + lines.append(f"- `{target}`: {count}") + + lines.append("") + lines.append("## ONNX Conversion Information") + lines.append("") + + if model_info.dispatch_failures: + lines.append( + "The model contains operators the dispatcher could not find registered ONNX decompositions for. " + "This may be due to missing implementations, decompositions not registered " + "correctly, or a bug in the dispatcher." + ) + lines.append("") + lines.append("Errors grouped by operator:\n") + + target_to_nodes = defaultdict(list) + for node, _ in model_info.dispatch_failures: + target_to_nodes[str(node.target)].append(node) + + target_to_messages = {} + for node, message in model_info.dispatch_failures: + if str(node.target) not in target_to_messages: + target_to_messages[str(node.target)] = message + + for target, nodes in sorted( + target_to_nodes.items(), key=lambda x: x[0], reverse=True + ): + message = textwrap.indent( + f"{target_to_messages[target]}. Example node: `{nodes[0].format_node()}`. 
All nodes: `{nodes}`", + " ", + ) + lines.append(f"- `{target}`: {message}") + else: + lines.append("All operators in the model have registered ONNX decompositions.") + + return "\n".join(lines) + + +def _get_io_specs(exported_program: torch.export.ExportedProgram) -> tuple[dict, dict]: + """Get the input and output specs of the exported program.""" + + nodes: dict[str, torch.fx.Node] = { + node.name: node for node in exported_program.graph.nodes + } + user_inputs = [ + spec + for spec in exported_program.graph_signature.input_specs + if spec.kind == graph_signature.InputKind.USER_INPUT + ] + user_outputs = [ + spec + for spec in exported_program.graph_signature.output_specs + if spec.kind == graph_signature.OutputKind.USER_OUTPUT + ] + inputs: dict[str, torch._export.serde.schema.TensorMeta] = {} + outputs: dict[str, torch._export.serde.schema.TensorMeta] = {} + for spec in user_inputs: + if isinstance(spec.arg, graph_signature.ConstantArgument): + continue + name = spec.arg.name + # FIXME: tensor_meta is None sometimes when the exported program still knows the shape/type + inputs[name] = nodes[name].meta["tensor_meta"] + for spec in user_outputs: + if isinstance(spec.arg, graph_signature.ConstantArgument): + continue + name = spec.arg.name + outputs[name] = nodes[name].meta["tensor_meta"] + return inputs, outputs + + +def _count_fx_targets( + exported_program: torch.export.ExportedProgram, +) -> defaultdict[str, int]: + """Count the number of targets for each node in the exported program.""" + fx_node_target_count: defaultdict[str, int] = defaultdict(int) + for node in exported_program.graph.nodes: + if node.op == "call_function": + fx_node_target_count[str(node.target)] += 1 + return fx_node_target_count + + +def analyze( + exported_program: torch.export.ExportedProgram, + registry: _registration.ONNXRegistry | None = None, + file=None, +) -> None: + """Analyze the compatibility of the exported program.""" + # Get basic information about the model + model_info = ModelInfo() + model_info.parameter_count, model_info.buffer_count = _count_weights( + exported_program + ) + model_info.fx_node_count = len(exported_program.graph.nodes) + model_info.fx_node_target_count = _count_fx_targets(exported_program) + inputs, outputs = _get_io_specs(exported_program) + model_info.inputs = inputs + model_info.outputs = outputs + + if registry is None: + registry = _registration.ONNXRegistry.from_torchlib() + + # Try to find ops for every node in the graph + for node in exported_program.graph.nodes: + model_info.fx_node_op_count[node.op] += 1 + if node.op == "call_function": + try: + onnx_function, message = _dispatching.dispatch(node, registry) + except Exception as e: + message = "Critical Error in dispatcher:\n" + formatted_exception = "\n".join( + traceback.format_exception(type(e), e, e.__traceback__) + ) + message += f"```pytb\n{formatted_exception}\n```\n" + onnx_function = None + if onnx_function is None: + model_info.dispatch_failures.append((node, message)) + + # Print the results + report = _format_model_info(model_info) + print(report, file=file, flush=True) + + +def compare_ops( + program_a: torch.export.ExportedProgram, program_b: torch.export.ExportedProgram +) -> tuple[set[str], set[str]]: + """Compare and get unique ops in two exported programs. + + Args: + program_a: The first exported program. + program_b: The second exported program. 
+ + Returns: + A tuple of two sets, where the first set contains the unique ops in the first program + and the second set contains the unique ops in the second program. + """ + program_a_ops = set(_count_fx_targets(program_a)) + program_b_ops = set(_count_fx_targets(program_b)) + return program_a_ops - program_b_ops, program_b_ops - program_a_ops diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..3fddef36b8b428f671dc1e2c88932097378fa661 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py @@ -0,0 +1,216 @@ +"""Compatibility functions for the torch.onnx.export API.""" + +# mypy: allow-untyped-defs +# mypy: disable-error-code=attr-defined +from __future__ import annotations + +import inspect +import logging +from typing import Any, Mapping, Sequence, TYPE_CHECKING + +import torch +from torch.onnx._internal._lazy_import import onnxscript_apis, onnxscript_ir as ir +from torch.onnx._internal.exporter import _core, _onnx_program + + +if TYPE_CHECKING: + import os + +logger = logging.getLogger(__name__) + + +def _signature(model) -> inspect.Signature: + should_be_callable = getattr(model, "forward", model) + if callable(should_be_callable): + return inspect.signature(should_be_callable) + raise ValueError("model has no forward method and is not callable") + + +def _from_dynamic_axes_to_dynamic_shapes( + model, + *, + dynamic_axes=None, + output_names: set[str], + input_names: Sequence[str] | None = None, +) -> dict[str, Any] | None: + """ + + dynamic_axes examples: + (1) dynamic_axes = {"x": {0: "my_custom_axis_name_1"}, "y": {1: "my_custom_axis_name_2"}} + (2) dynamic_axes = {"x": [0], "y": [1]} + + these will be converted to dynamic_shapes respectively: + (1) dynamic_shapes = {"x": {0: Dim("my_custom_axis_name_1")}, "y": {1: Dim("my_custom_axis_name_2")}} + (2) dynamic_shapes = {"x": {0: Dim("x_dim_0")}, "y": {1: Dim("y_dim_1")}} # auto-generated dim names + + """ + # https://github.com/pytorch/pytorch/pull/128371 + # 1. 
The function does not need to provide dynamic_shapes to torch.export.export + if dynamic_axes is None: + return None + + if input_names is None: + input_names = [] + + sig = _signature(model) + if len(input_names) > len(sig.parameters): + raise ValueError( + f"Number of input names ({len(input_names)}) should not be greater than " + f"the number of model inputs ({len(sig.parameters)})" + ) + input_names_to_model_inputs = {} + for idx, param_name in enumerate(sig.parameters): + if idx < len(input_names): + input_names_to_model_inputs[input_names[idx]] = param_name + else: + input_names_to_model_inputs[param_name] = param_name + + # NOTE: torch.export.export does not support input names assignment, + # so we need to map input names to model inputs to create dynamic_shapes + # for the exported program + dynamic_shapes_to_exported_program = {} + for input_name, axes in dynamic_axes.items(): + if input_name in output_names: + # User specified an output name as a dynamic axis, so we skip it + continue + # input_name can be either from input_names or from the model inputs + if input_name not in input_names_to_model_inputs: + raise ValueError( + f"dynamic axis: {input_name} is not found in the input names: {input_names}" + ) + model_input_name = input_names_to_model_inputs[input_name] + if isinstance(axes, dict): + dynamic_shapes_to_exported_program[model_input_name] = { + k: torch.export.Dim(v) for k, v in axes.items() + } + elif isinstance(axes, list): + dynamic_shapes_to_exported_program[model_input_name] = { + k: torch.export.Dim(f"{model_input_name}_dim_{k}") for k in axes + } + else: + raise TypeError( + f"dynamic_axes value must be either a dict or a list, but got {type(axes)}" + ) + # torch.export.export needs static dim to present in dynamic_shapes + # for all input tensors, so we need to add them with None + for input_name in sig.parameters: + if input_name not in dynamic_shapes_to_exported_program: + dynamic_shapes_to_exported_program[input_name] = None # type: ignore[assignment] + + return dynamic_shapes_to_exported_program + + +def _get_torch_export_args( + args: tuple[Any, ...], + kwargs: dict[str, Any] | None, +) -> tuple[tuple[Any, ...], dict[str, Any] | None]: + """Obtain the arguments for torch.onnx.export from the model and the input arguments.""" + if not kwargs and args and isinstance(args[-1], dict): + kwargs = args[-1] + args = args[:-1] + return args, kwargs + + +def export_compat( + model: torch.nn.Module + | torch.export.ExportedProgram + | torch.jit.ScriptModule + | torch.jit.ScriptFunction, + args: tuple[Any, ...], + f: str | os.PathLike | None = None, + *, + kwargs: dict[str, Any] | None = None, + export_params: bool = True, + verbose: bool | None = None, + input_names: Sequence[str] | None = None, + output_names: Sequence[str] | None = None, + opset_version: int | None = None, + dynamic_axes: Mapping[str, Mapping[int, str]] + | Mapping[str, Sequence[int]] + | None = None, + dynamic_shapes: dict[str, Any] | tuple[Any, ...] 
| list[Any] | None = None, + keep_initializers_as_inputs: bool = False, + external_data: bool = True, + report: bool = False, + verify: bool = False, + profile: bool = False, + dump_exported_program: bool = False, + artifacts_dir: str | os.PathLike = ".", + fallback: bool = False, + **_, +) -> _onnx_program.ONNXProgram: + if opset_version is None: + # TODO(justinchuby): Change the hardcoded opset version for it to be flexible + opset_version = 18 + + if isinstance(model, torch.export.ExportedProgram): + # We know the model is already exported program, so the args, kwargs, and dynamic_shapes + # are not used + dynamic_shapes = dynamic_shapes or {} + else: + args, kwargs = _get_torch_export_args(args, kwargs) + if dynamic_shapes is None and dynamic_axes is not None: + dynamic_shapes = _from_dynamic_axes_to_dynamic_shapes( + model, + dynamic_axes=dynamic_axes, + input_names=input_names, + output_names=set(output_names or ()), + ) + + try: + onnx_program = _core.export( + model, + args, + kwargs, + registry=None, + dynamic_shapes=dynamic_shapes, + input_names=input_names, + output_names=output_names, + profile=profile, + report=report, + verify=verify, + dump_exported_program=dump_exported_program, + artifacts_dir=artifacts_dir, + verbose=verbose, + ) + + except Exception as e: + if fallback: + if verbose is not False: + print( + "[torch.onnx] Falling back to legacy torch.onnx.export due " + f"to the following error: {e}", + ) + if f is None: + raise TypeError("f must be provided when fallback is enabled") from e + torch.onnx.utils.export( + model, # type: ignore[arg-type] + args, + f, # type: ignore[arg-type] + kwargs=kwargs, + export_params=export_params, + input_names=input_names, + output_names=output_names, + opset_version=17, # TODO(justinchuby): Hard coded to 17 for now + dynamic_axes=dynamic_axes, + keep_initializers_as_inputs=keep_initializers_as_inputs, + ) + onnx_program = _onnx_program.ONNXProgram(ir.load(f), None) + else: + raise + + # Converter opset version and optimize + onnx_program.model = onnxscript_apis.convert_version( + onnx_program.model, opset_version + ) + onnx_program.model = onnxscript_apis.optimize(onnx_program.model) + + if f is not None: + onnx_program.save( + f, + include_initializers=export_params, + keep_initializers_as_inputs=keep_initializers_as_inputs, + external_data=external_data, + ) + + return onnx_program diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..7d49a654a9c00c9301992e3a9afd33c4af5a35ef --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py @@ -0,0 +1,1341 @@ +# mypy: allow-untyped-defs +# flake8: noqa: B950 We do not need flake8 as it complains line length +from __future__ import annotations + +import ctypes +import datetime +import inspect +import itertools +import logging +import operator +import pathlib +import textwrap +import traceback +import typing +from typing import Any, Callable, Literal, Sequence + +import onnxscript +import onnxscript.evaluator +from onnxscript import ir +from onnxscript.ir import convenience as ir_convenience + +import torch +import torch.fx +from torch.export import graph_signature +from torch.onnx._internal._lazy_import import onnxscript_apis +from torch.onnx._internal.exporter import ( + _analysis, + _building, + _capture_strategies, + _dispatching, + _errors, + _fx_passes, + _ir_passes, + 
_onnx_program, + _registration, + _reporting, + _tensors, + _verification, +) + + + if typing.TYPE_CHECKING: + import os + + import numpy as np + + +# Define utilities to convert PyTorch data types so users do not need to specify manually +_TORCH_DTYPE_TO_ONNX: dict[torch.dtype, ir.DataType] = { + torch.bfloat16: ir.DataType.BFLOAT16, + torch.bool: ir.DataType.BOOL, + torch.complex128: ir.DataType.COMPLEX128, + torch.complex64: ir.DataType.COMPLEX64, + torch.float16: ir.DataType.FLOAT16, + torch.float32: ir.DataType.FLOAT, + torch.float64: ir.DataType.DOUBLE, + torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN, + torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ, + torch.float8_e5m2: ir.DataType.FLOAT8E5M2, + torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ, + torch.int16: ir.DataType.INT16, + torch.int32: ir.DataType.INT32, + torch.int64: ir.DataType.INT64, + torch.int8: ir.DataType.INT8, + torch.uint8: ir.DataType.UINT8, + torch.uint16: ir.DataType.UINT16, + torch.uint32: ir.DataType.UINT32, + torch.uint64: ir.DataType.UINT64, +} +_BLUE = "\033[96m" +_END = "\033[0m" + +_STEP_ONE_ERROR_MESSAGE = textwrap.dedent( + f"""\ + Failed to export the model with torch.export. {_BLUE}This is step 1/2{_END} of exporting the model to ONNX. Next steps: + - Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information. + - Debug `torch.export.export` and submit a PR to PyTorch. + - Create an issue in the PyTorch GitHub repository against the {_BLUE}*torch.export*{_END} component and attach the full error stack as well as reproduction scripts.""" +) + +_STEP_TWO_ERROR_MESSAGE = textwrap.dedent( + f"""\ + Failed to convert the exported program to an ONNX model. {_BLUE}This is step 2/2{_END} of exporting the model to ONNX. Next steps: + - If there is a missing ONNX function, implement it and register it to the registry. + - If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch. + - Save the ExportedProgram as a pt2 file and create an error report with `export(..., report=True)`. Create an issue in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component.
Attach the pt2 model and the error report.""" +) + +logger = logging.getLogger(__name__) + + +def _torch_dtype_to_onnx_dtype(dtype: torch.dtype) -> ir.DataType: + return _TORCH_DTYPE_TO_ONNX[dtype] + + +class TorchTensor(ir.Tensor): + def __init__(self, tensor: torch.Tensor, name: str | None = None): + # Pass the tensor as the raw data to ir.Tensor's constructor + super().__init__( + tensor, dtype=_torch_dtype_to_onnx_dtype(tensor.dtype), name=name + ) + + def numpy(self) -> np.ndarray: + self.raw: torch.Tensor + if self.dtype == ir.DataType.BFLOAT16: + return self.raw.view(torch.uint16).numpy(force=True) + if self.dtype in { + ir.DataType.FLOAT8E4M3FN, + ir.DataType.FLOAT8E4M3FNUZ, + ir.DataType.FLOAT8E5M2, + ir.DataType.FLOAT8E5M2FNUZ, + }: + # TODO: Use ml_dtypes + return self.raw.view(torch.uint8).numpy(force=True) + return self.raw.numpy(force=True) + + def __array__(self, dtype: Any = None, copy: bool | None = None) -> np.ndarray: + del copy # Unused, but needed for the signature + if dtype is None: + return self.numpy() + return self.numpy().__array__(dtype) + + def tobytes(self) -> bytes: + # Implement tobytes to support native PyTorch types so we can use types like bfloat16 + # Reading from memory directly is also more efficient because + # it avoids copying to a NumPy array + import torch._subclasses.fake_tensor + + if isinstance(self.raw, torch._subclasses.fake_tensor.FakeTensor): + raise TypeError( + f"Cannot take content out from the FakeTensor ('{self.name}'). Please replace the tensor " + "with a tensor backed by real data using ONNXProgram.apply_weights() " + "or save the model without initializers by setting include_initializers=False." + ) + tensor = self.raw.detach().cpu().contiguous() + return bytes( + (ctypes.c_ubyte * tensor.element_size() * tensor.numel()).from_address( + tensor.data_ptr() + ) + ) + + +# https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L56C1-L62C19 +# class InputKind(Enum): +# USER_INPUT = auto() +# PARAMETER = auto() +# BUFFER = auto() +# CONSTANT_TENSOR = auto() +# CUSTOM_OBJ = auto() +# TOKEN = auto() + +# https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L89C1-L96C19 +# class OutputKind(Enum): +# USER_OUTPUT = auto() +# LOSS_OUTPUT = auto() +# BUFFER_MUTATION = auto() +# GRADIENT_TO_PARAMETER = auto() +# GRADIENT_TO_USER_INPUT = auto() +# USER_INPUT_MUTATION = auto() +# TOKEN = auto() + + +def _set_shape_types( + values: Sequence[ir.Value], + meta_vals: Sequence[torch.Tensor], + complex_to_float: bool = True, +) -> None: + if not isinstance(meta_vals, Sequence): + logger.warning( + "Expected meta_vals to be a sequence, but got %s. There may be an internal error.", + meta_vals, + ) + meta_vals = (meta_vals,) + for value, meta_val in zip(values, meta_vals): + _set_shape_type(value, meta_val, complex_to_float=complex_to_float) + + +def _set_shape_type( + value: ir.Value, + meta_val: torch.Tensor + | torch.SymBool + | torch.SymInt + | torch.SymFloat + | tuple[torch.Tensor], + complex_to_float: bool, +) -> None: + # TODO: Consider using meta["tensor_meta"] for this? Would it be faster?
+ if isinstance(meta_val, tuple): + logger.warning("Setting shape and type of tensors is not supported yet") + if isinstance(meta_val, torch.Tensor): + # FIXME: Consider shape for complex values + dims = [] + for dim in meta_val.shape: + if isinstance(dim, int): + dims.append(dim) + else: + dims.append(str(dim.node)) + value.dtype = _torch_dtype_to_onnx_dtype(meta_val.dtype) + if complex_to_float: + if meta_val.dtype == torch.complex64: + value.dtype = ir.DataType.FLOAT + # Add 2 as the last dimension if the tensor is complex to hold the real/imag parts + dims.append(2) + elif meta_val.dtype == torch.complex128: + value.dtype = ir.DataType.DOUBLE + # Add 2 as the last dimension if the tensor is complex to hold the real/imag parts + dims.append(2) + + value.shape = ir.Shape(dims) + elif isinstance(meta_val, (int, torch.SymInt)): + # aten::sym_size output is a int, not a tensor, which stands + # for the size of one dim. We treat it as a scalar. + value.dtype = ir.DataType.INT64 + value.shape = ir.Shape([]) + elif isinstance(meta_val, (bool, torch.SymBool)): + value.dtype = ir.DataType.BOOL + value.shape = ir.Shape([]) + elif isinstance(meta_val, (float, torch.SymFloat)): + value.dtype = ir.DataType.FLOAT + value.shape = ir.Shape([]) + else: + pass + + +def _get_qualified_module_name(cls: Any) -> str: + if isinstance(cls, str): + return cls + module = cls.__module__ + if module is None or module == str.__class__.__module__: + return cls.__name__ + return module + "." + cls.__name__ + + +def _get_node_namespace(node: torch.fx.Node) -> tuple[str, list[str], list[str]]: + """Get the namespace and scope of the node. + + Example:: + + { + 'L__self__': ('', ), + 'L__self___avgpool': ('avgpool', ) + } + + Will yield + + namespace: ": torchvision.models.resnet.ResNet/avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d/node_name: node_target" + class_hierarchy: ["torchvision.models.resnet.ResNet", "torch.nn.modules.pooling.AdaptiveAvgPool2d", ] + name_scopes: ["", "avgpool", ] + + Args: + node: The node to get the namespace and scope of. + + Returns: + (namespace, class_hierarchy, name_scope) + """ + nn_module_stack = node.meta.get("nn_module_stack") + logger.debug("%s", nn_module_stack) + if nn_module_stack is None: + logger.warning( + "nn_module_stack not found for node '%s'. 
Skip adding metadata...", + node.name, + ) + return f"{node.name}: {node.target}", [str(node.target)], [node.name] + namespaces = [] + class_hierarchy = [] + name_scopes = [] + for name, nn_module in nn_module_stack.values(): + name_scopes.append(name) + nn_module_name = _get_qualified_module_name(nn_module) + class_hierarchy.append(nn_module_name) + namespaces.append(f"{name}: {_get_qualified_module_name(nn_module)}") + namespaces.append(f"{node.name}: {node.target}") + class_hierarchy.append(str(node.target)) + name_scopes.append(node.name) + + return "/".join(namespaces), class_hierarchy, name_scopes + + +def _set_node_metadata(fx_node: torch.fx.Node, ir_node: ir.Node) -> None: + """Adds namespace and other node metadata to the ONNX node.""" + namespace, class_hierarchy, name_scopes = _get_node_namespace(fx_node) + ir_node.metadata_props["namespace"] = namespace + ir_node.metadata_props["pkg.torch.onnx.class_hierarchy"] = repr(class_hierarchy) + ir_node.metadata_props["pkg.torch.onnx.name_scopes"] = repr(name_scopes) + ir_node.metadata_props["pkg.torch.onnx.fx_node"] = str(fx_node.format_node()) + ir_node.metadata_props["pkg.torch.onnx.stack_trace"] = fx_node.meta.get( + "stack_trace", "" + ) + + +def _handle_getitem_node( + node: torch.fx.Node, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] +) -> ir.Value: + """Handle a getitem node. + + Add the input value it is getting to the mapping, then return the value. + + There are two cases for this node: + 1. The output is a Sequence (traced), we can simply get the value from the sequence + 2. The output is produced by a SplitToSequence node, we need to get the value from the sequence value + This function only handles the first case + """ + assert len(node.all_input_nodes) == 1 + source = node.all_input_nodes[0] + source_outputs = node_name_to_values[source.name] + assert isinstance( + source_outputs, Sequence + ), f"Expected {source.name} to output sequence, got {node_name_to_values[source.name]}" + index = typing.cast(int, node.args[1]) + value = source_outputs[index] + # Save the getitem value to the values mapping to in case + # it is one of the graph outputs + node_name_to_values[node.name] = value + # Rename the name of value with the getitem name. + value.name = node.name + return value + + +def _handle_call_function_node( + graph: ir.Graph, + node: torch.fx.Node, + node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]], +) -> None: + """Handle a call_function node. + + Args: + graph: The ONNX graph at construction. + node: The FX node to translate. + node_name_to_values: A mapping of FX node names to their produced ir.Value. 
+ """ + if node.target == operator.getitem: + _handle_getitem_node(node, node_name_to_values) + # Add op to the graph + op = str(node.target) + fx_inputs, attributes, input_names, output_names = _get_inputs_and_attributes(node) + inputs: list[ir.Value | None] = [] + for i, input_ in enumerate(fx_inputs): + if input_ is None: + inputs.append(None) + elif hasattr(input_, "name"): + if isinstance(input_, torch.fx.Node) and input_.target == operator.getitem: + actual_input = _handle_getitem_node(input_, node_name_to_values) + inputs.append(actual_input) + else: + value = node_name_to_values[input_.name] + assert not isinstance(value, Sequence) + inputs.append(value) + else: + attributes[f"arg_{i}"] = input_ + + outputs = [ir.Value(name=name) for name in output_names] + if len(outputs) > 1: + _set_shape_types(outputs, node.meta["val"], complex_to_float=False) + node_name_to_values[node.name] = outputs + else: + _set_shape_type(outputs[0], node.meta["val"], complex_to_float=False) + node_name_to_values[node.name] = outputs[0] + ir_node = ir.Node( + "pkg.torch.ops", + op, + inputs, + attributes=ir_convenience.convert_attributes(attributes), + outputs=outputs, + name=node.name, + ) + ir_node.meta["node"] = node + ir_node.metadata_props["pkg.torch.onnx.input_names"] = repr(input_names) + # Record the nn.Module stack for the node + _set_node_metadata(node, ir_node) + + graph.append(ir_node) + + +def _convert_fx_arg_to_onnx_arg( + arg, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] +) -> Any: + """Convert an FX argument to an ONNX compatible argument. + + This function + - Converts a torch dtype to an integer + - Converts a torch device/memory_format/layout to a string + - Converts a torch.fx.Node to an ir.Value + - Converts a sequence of torch.fx.Node to a sequence of ir.Value + """ + if arg is None: + # None arguments are not modified because when the arg is an ONNX input + # we need to preserve the None value; when the arg is an ONNX attribute, + # we want to drop the value. + # The actual dropping of a None attribute value is done by OpRecorder + return None + if hasattr(arg, "name"): + if isinstance(arg, torch.fx.Node) and arg.target == operator.getitem: + source = arg.all_input_nodes[0] + source_outputs = node_name_to_values[source.name] + if isinstance(source_outputs, Sequence): + # If the node is getting an input from another node, get the actual value the node is retrieving + return _handle_getitem_node(arg, node_name_to_values) + else: + # `source_outputs` is a sequence(tensor()) value and we need to + # use SequenceAt to get the value. 
This is handled by torchlib + pass + # If the input is a node, get the value from the mapping + return node_name_to_values[arg.name] + if isinstance(arg, (list, tuple)): + return [_convert_fx_arg_to_onnx_arg(elem, node_name_to_values) for elem in arg] + if isinstance(arg, (torch.device, torch.memory_format, torch.layout)): + return str(arg) + if isinstance(arg, torch.dtype): + return _torch_dtype_to_onnx_dtype(arg) + # Maybe a Python value + return arg + + +def _get_onnxscript_opset(opset_version: int) -> onnxscript.values.Opset: + return onnxscript.values.Opset("", opset_version) + + +def _handle_call_function_node_with_lowering( + model: ir.Model, + node: torch.fx.Node, + node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]], + constant_farm: dict[Any, ir.Value], + registry: _registration.ONNXRegistry, + opset: onnxscript.values.Opset, +) -> None: + if node.target == operator.getitem: + source = node.all_input_nodes[0] + source_outputs = node_name_to_values[source.name] + if isinstance(source_outputs, Sequence): + _handle_getitem_node(node, node_name_to_values) + return + else: + # `source_outputs` is a sequence(tensor()) value and we need to + # use SequenceAt to get the value. This is handled by torchlib + pass + + # Find the matching ONNX overload for the node + # NOTE: Create different registries for different ONNX opset versions + # TODO: Log the message here to expose false positives + onnx_function, message = _dispatching.dispatch(node, registry) + + if onnx_function is None: + # TODO(justinchuby): Fall back to ATen op or do something else? + raise _errors.DispatchError( + f"No ONNX function found for {node.target!r}. Failure message: {message}" + ) + + # Map FX inputs to ONNX inputs and fill optional inputs. + # torch_args and torch_kwargs are for op-level validation + fx_args = node.args + fx_kwargs = node.kwargs + + # Replace the input FX nodes with ONNX values + onnx_args = [ + _convert_fx_arg_to_onnx_arg(input_, node_name_to_values) for input_ in fx_args + ] + + onnx_kwargs = {} + for key, value in fx_kwargs.items(): + onnx_kwargs[key] = _convert_fx_arg_to_onnx_arg(value, node_name_to_values) + if key == "dtype" and onnx_kwargs[key] is None: + # Set dtype to -1 if it is None + onnx_kwargs[key] = -1 + + with onnxscript.evaluator.default_as( + tracer := _building.OpRecorder(opset, constant_farm) + ): + try: + outputs = onnx_function(*onnx_args, **onnx_kwargs) + except Exception as e: + raise _errors.GraphConstructionError( + f"Error when calling function '{onnx_function}' with args '{onnx_args}' and kwargs '{onnx_kwargs}'" + ) from e + + # NOTE: Instead of using the output names from node.target._schema, + # we always use the index if there are more than one outputs so the + # names can be programmatically reconstructed. This is useful for + # comparing values from the ONNX graph with those from the FX graph. + # + # When there are multiple outputs, the output names will be + # node_name__0, node_name__1, etc. 
+ if isinstance(outputs, Sequence): + _set_shape_types(outputs, node.meta["val"], complex_to_float=True) + node_name_to_values[node.name] = outputs + for i, output in enumerate(outputs): + output.name = f"{node.name}__{i}" + else: + _set_shape_type(outputs, node.meta["val"], complex_to_float=True) + node_name_to_values[node.name] = outputs + outputs.name = node.name + + for ir_node in tracer.nodes: + ir_node.meta["node"] = node + # Record the nn.Module stack for the node + _set_node_metadata(node, ir_node) + + # Add the traced nodes to the graph + model.graph.extend(tracer.nodes) + # Add the defined functions to the model + for identifier, onnxscript_function in tracer.functions.items(): + if identifier in model.functions: + continue + # TODO: Get IR function directly when onnxscript is updated + proto = onnxscript_function.to_function_proto() + ir_function = ir.serde.deserialize_function(proto) + model.functions[identifier] = ir_function + if ir_function.domain not in model.opset_imports: + # FIXME: Record the correct opset version of the function + model.opset_imports[ir_function.domain] = 1 + + +def _handle_placeholder_node( + node: torch.fx.Node, + node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]], + *, + lower: str, + opset: onnxscript.values.Opset, +) -> None: + # Placeholder nodes are user inputs + # We need to create a new tensor for each user input + # and add it to the graph's inputs + name = node.name + input_ = _tensors.SymbolicTensor(opset, name=name) + input_.meta["node"] = node + _set_shape_type(input_, node.meta["val"], complex_to_float=lower != "none") + node_name_to_values[name] = input_ + # The inputs will be added to the graph later + + +def _add_nodes( + exported_program: torch.export.ExportedProgram, + model: ir.Model, + lower: Literal["at_conversion", "post_conversion", "none"], + registry: _registration.ONNXRegistry, +) -> dict[str, ir.Value | Sequence[ir.Value]]: + node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] = {} + constant_farm: dict[Any, ir.Value] = {} + opset = _get_onnxscript_opset(registry.opset_version) + for node in exported_program.graph.nodes: + logger.debug( + "%s", (node.name, node.args, node.target, node.op, node.type, node.kwargs) + ) + try: + if node.op == "placeholder": + _handle_placeholder_node( + node, + node_name_to_values, + lower=lower, + opset=opset, + ) + elif node.op == "call_function": + if lower == "at_conversion": + _handle_call_function_node_with_lowering( + model, + node, + node_name_to_values, + constant_farm, + registry=registry, + opset=opset, + ) + else: + # No lowering + _handle_call_function_node(model.graph, node, node_name_to_values) + except Exception as e: + raise _errors.ConversionError( + f"Error when translating node {node.format_node()}. See the stack trace for more information." + ) from e + return node_name_to_values + + +def _torch_version_integer() -> int: + return int(torch.__version__.replace(".", "").split("dev")[0]) + + +def _get_inputs_and_attributes( + node: torch.fx.Node, +) -> tuple[list[torch.fx.Node | None], dict[str, Any], list[str], list[str]]: + """Find and Fill in the not provided kwargs with default values. 
+ + Returns: + (inputs, attributes, input_names, output_names) + """ + if inspect.isbuiltin(node.target) or isinstance(node.target, str): + inputs = list(node.args) + return inputs, {}, [], [node.name] # type: ignore[return-value] + + # The target should be an ATen operator now + assert hasattr( + node.target, "_schema" + ), f"The target should be an ATen operator now, but node target {node.target} has no schema" + node_schema: torch.FunctionSchema = node.target._schema + + # This function assumes the order of arguments in FX op is the + # same as the order of arguments in TorchScript op. + inputs: list[Any] = [] # type: ignore[no-redef] + input_names: list[str] = [] + attributes: dict[str, Any] = {} + + if inspect.isbuiltin(node.target): + inputs = list(node.args) + else: + for arg, schema_arg in zip(node.args, node_schema.arguments): + if arg is None or isinstance(arg, torch.fx.Node): + inputs.append(arg) + input_names.append(schema_arg.name) + elif isinstance(arg, Sequence) and all( + elem is None or isinstance(elem, torch.fx.Node) for elem in arg + ): + inputs.extend(arg) + input_names.extend([schema_arg.name] * len(arg)) + elif isinstance(arg, torch.device): + attributes[schema_arg.name] = str(arg) + elif isinstance(arg, torch.dtype): + attributes[schema_arg.name] = _torch_dtype_to_onnx_dtype(arg) + else: + attributes[schema_arg.name] = arg + for schema_arg in node_schema.arguments: + if schema_arg.name not in node.kwargs: + continue + kwarg = node.kwargs[schema_arg.name] + if schema_arg.name in { + "layout", + "device", + "requires_grad", + "memory_format", + "implicit", + } or isinstance(kwarg, torch.device): + attr = str(kwarg) + elif isinstance(kwarg, torch.dtype): + attr = _torch_dtype_to_onnx_dtype(kwarg) # type: ignore[assignment] + else: + attr = kwarg # type: ignore[assignment] + + attributes[schema_arg.name] = attr + + output_names = [f"{node.name}_{output.name}" for output in node_schema.returns] + + return inputs, attributes, input_names, output_names # type: ignore[return-value] + + +def _maybe_start_profiler(should_profile: bool) -> Any: + if should_profile: + import pyinstrument # type: ignore[import-not-found] + + profiler = pyinstrument.Profiler(async_mode="disabled") + profiler.start() + return profiler + return None + + +def _maybe_stop_profiler_and_get_result(profiler) -> str | None: + if profiler is None: + return None + profiler.stop() + return profiler.output_text(unicode=True) + + +def _format_exception(e: Exception) -> str: + """Format the full traceback as Python would show it.""" + return "\n".join(traceback.format_exception(type(e), e, e.__traceback__)) + + +def _summarize_exception_stack(e: BaseException) -> str: + """Format the exception stack by showing the text of each exception.""" + causes = [e] + while e.__cause__ is not None: + causes.append(e.__cause__) + e = e.__cause__ + return ( + "\n\n## Exception summary\n\n" + + "⬆️\n".join([f"{type(e)}: {e}\n" for e in reversed(causes)]) + + "\n(Refer to the full stack trace above for more information.)" + ) + + +def _format_exceptions_for_all_strategies( + results: list[_capture_strategies.Result], +) -> str: + """Format all the exceptions from the capture strategies.""" + return "\n".join( + [ + f"# ⚠️ Errors from strategy '{result.strategy}': -----------------------\n\n" + f"{_format_exception(result.exception)}\n" + for result in results + if result.exception is not None + ] + ) + + +def exported_program_to_ir( + exported_program: torch.export.ExportedProgram, + *, + registry: _registration.ONNXRegistry 
| None = None, + lower: Literal["at_conversion", "post_conversion", "none"] = "at_conversion", +) -> ir.Model: + """Convert an exported program to an ONNX IR model. + + Reference: + - ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html + + Args: + exported_program: The exported program to convert. + lower: Whether to lower the graph to core ONNX operators. + at_conversion: Lower when translating the FX graph to ONNX IR. + post_conversion: Use an IR pass to lower the graph. + none: Do not lower the graph. + registry: The registry of all ONNX Script decompositions. + """ + if registry is None: + registry = _registration.ONNXRegistry.from_torchlib() + if lower != "none": + exported_program = _prepare_exported_program_for_export( + exported_program, registry=registry + ) + return _exported_program_to_onnx_program( + exported_program, registry=registry, lower=lower + ).model + + +def _prepare_exported_program_for_export( + exported_program: torch.export.ExportedProgram, + *, + registry: _registration.ONNXRegistry, +) -> torch.export.ExportedProgram: + """Decompose and apply pre-export transformations to the exported program.""" + # Decompose the graph given the implemented torch ops in ONNX + exported_program = _fx_passes.decompose_with_registry(exported_program, registry) + + graph_module = exported_program.graph_module + # Include explicit type promotion nodes + graph_module = _fx_passes.insert_type_promotion_nodes(graph_module) + graph_module = _fx_passes.remove_assertion_nodes(graph_module) + # TODO(justinchuby): Reassigning the graph module to save some runtime. + # If this does not work, we need to retrace the module with torch.export + exported_program._graph_module = graph_module + return exported_program + + +def _exported_program_to_onnx_program( + exported_program: torch.export.ExportedProgram, + *, + registry: _registration.ONNXRegistry, + lower: Literal["at_conversion", "post_conversion", "none"] = "at_conversion", +) -> _onnx_program.ONNXProgram: + """Convert an exported program to an ONNX Program. + + The exported_program field in the returned ONNXProgram is one that is after + decompositions have been applied. + + Reference: + - ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html + + Args: + exported_program: The exported program to convert. The exported program + should be the one that is after decompositions have been applied. + lower: Whether to lower the graph to core ONNX operators. + at_conversion: Lower when translating the FX graph to ONNX IR. + post_conversion: Use an IR pass to lower the graph. + none: Do not lower the graph. + registry: The registry of all ONNX Script decompositions. + """ + model = ir.Model( + graph=ir.Graph( + [], + [], + nodes=[], + opset_imports={ + "": registry.opset_version, + }, + name="main_graph", + metadata_props={ + "pkg.torch.export.ExportedProgram.graph_signature": str( + exported_program.graph_signature + ), + "pkg.torch.export.ExportedProgram.range_constraints": str( + exported_program.range_constraints + ), + }, + ), + ir_version=9, + producer_name="pytorch", + producer_version=torch.__version__, + ) + + if lower == "none": + # Add the opset import for the torch ops + model.opset_imports["pkg.torch.ops"] = _torch_version_integer() + # NOTE: Function domains are added when translating nodes when lower="at_conversion" + + # 1. Add all nodes to the graph and create a dictionary of values + values = _add_nodes(exported_program, model, lower=lower, registry=registry) + + # 2.
Add user inputs and all parameters/buffers to the graph. + # Since the node names and the tensor names are different, we need to rename + # the nodes to match the tensor names later. For now we will just use the node names. + user_inputs = [ + spec + for spec in exported_program.graph_signature.input_specs + if spec.kind == graph_signature.InputKind.USER_INPUT + ] + non_user_inputs = [ + spec + for spec in exported_program.graph_signature.input_specs + if spec.kind != graph_signature.InputKind.USER_INPUT + ] + + for spec in itertools.chain(user_inputs, non_user_inputs): + # Put the user inputs first and then the parameters/buffers + if isinstance(spec.arg, graph_signature.ConstantArgument): + logger.debug("Skipping constant argument %s", spec.arg) + continue + value_name = spec.arg.name + input_kind = spec.kind + persistent = spec.persistent + value = values[value_name] + + assert not isinstance( + value, Sequence + ), f"Input '{value_name}' should not be a sequence. This is unexpected." + + value.metadata_props["pkg.torch.export.graph_signature.InputSpec.kind"] = ( + input_kind.name + ) + value.metadata_props[ + "pkg.torch.export.graph_signature.InputSpec.persistent" + ] = str(persistent) + + if input_kind == graph_signature.InputKind.USER_INPUT: + # Add only user inputs to the graph + # Subsequent passes can decide if they want to add initializers as inputs + model.graph.inputs.append(value) + else: + model.graph.initializers[value_name] = value + + # 3. Add user outputs to the graph and assign metadata to all outputs + user_outputs = [ + spec + for spec in exported_program.graph_signature.output_specs + if spec.kind == graph_signature.OutputKind.USER_OUTPUT + ] + non_user_outputs = [ + spec + for spec in exported_program.graph_signature.output_specs + if spec.kind != graph_signature.OutputKind.USER_OUTPUT + ] + for spec in itertools.chain(user_outputs, non_user_outputs): + if isinstance(spec.arg, graph_signature.ConstantArgument): + logger.warning("Skipping constant argument %s", spec.arg) + continue + value_name = spec.arg.name + output_kind = spec.kind + value = values[value_name] + + if not isinstance(value, (ir.Value, Sequence)): + raise TypeError( + f"Output '{value_name}' should be an ir.Value. Actual type is '{type(value)}': {value!r}. " + "This may be due to an incorrect implementation of the ONNX function that produced this output." + ) + + # The output value may be a sequence, meaning the operator has multiple outputs + _values = (value,) if not isinstance(value, Sequence) else value + + if len(_values) > 1: + logger.warning( + "Model output '%s' has multiple values: %s (output spec: %s). Please make sure this is expected.", + value_name, + _values, + spec, + ) + + for value in _values: + value.metadata_props["pkg.torch.export.graph_signature.OutputSpec.kind"] = ( + output_kind.name + ) + if output_kind == graph_signature.OutputKind.USER_OUTPUT: + model.graph.outputs.append(value) + + # 4. 
Rename the initializers to match the tensor names + for name, param_name in itertools.chain( + exported_program.graph_signature.inputs_to_parameters.items(), + exported_program.graph_signature.inputs_to_buffers.items(), + exported_program.graph_signature.inputs_to_lifted_tensor_constants.items(), + ): + initializer = model.graph.initializers.pop(name) + initializer.name = param_name + # Record the original name so users can search the metadata and correspond + # with the FX graph + initializer.metadata_props["pkg.torch.onnx.original_node_name"] = name + model.graph.initializers[param_name] = initializer + + # 5. Add initializers to the graph + # ExportedProgram stores parameters and buffers in state_dict, + # but non_persistent_buffers and lifted_tensor_constants are not there + # so we need to get them from the name_* apis. + for name, torch_tensor in itertools.chain( + exported_program.named_parameters(), + exported_program.named_buffers(), + exported_program.constants.items(), + ): + initializer = model.graph.initializers.get(name) # type: ignore[assignment] + if initializer is None: + logger.warning("Tensor '%s' is not one of the initializers", name) + continue + if not isinstance(torch_tensor, torch.Tensor): + raise NotImplementedError( + f"Tensor '{name}' should be a torch.Tensor. Actual type is '{type(torch_tensor)}': {torch_tensor!r}. " + "This is unexpected and not yet supported." + ) + ir_tensor = TorchTensor(torch_tensor, name=name) + initializer.const_value = ir_tensor + _set_shape_type( + initializer, + torch_tensor, + complex_to_float=lower != "none", + ) + + # TODO: Decide if we should keep mutated buffers as inputs/outputs + + # TODO(justinchuby): Remove the hack + _ir_passes.add_torchlib_common_imports(model) + + return _onnx_program.ONNXProgram(model, exported_program) + + +def _verbose_printer(verbose: bool | None) -> Callable[..., None]: + """Prints messages based on `verbose`.""" + if verbose is False: + return lambda *_, **__: None + return lambda *args, **kwargs: print("[torch.onnx]", *args, **kwargs) + + +def export( + model: torch.nn.Module + | torch.export.ExportedProgram + | torch.fx.GraphModule + | torch.jit.ScriptModule + | torch.jit.ScriptFunction, + args: tuple[Any, ...] = (), + kwargs: dict[str, Any] | None = None, + *, + registry: _registration.ONNXRegistry | None = None, + dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None, + input_names: Sequence[str] | None = None, + output_names: Sequence[str] | None = None, + report: bool = False, + verify: bool = False, + profile: bool = False, + dump_exported_program: bool = False, + artifacts_dir: str | os.PathLike = ".", + verbose: bool | None = None, +) -> _onnx_program.ONNXProgram: + """Export a PyTorch model to ONNXProgram. + + Args: + model: The model to export. This can be a PyTorch nn.Module or an ExportedProgram. + args: The arguments to pass to the model. + kwargs: The keyword arguments to pass to the model. + registry: The registry of all ONNX decompositions. + dynamic_shapes: Dynamic shapes in the graph. + input_names: If provided, rename the inputs. + output_names: If provided, rename the outputs. + report: Whether to generate an error report if the export fails. + verify: Whether to verify the ONNX model after exporting. + profile: Whether to profile the export process. When report is True, + the profile result will be saved in the report. Otherwise, the profile + result will be printed. + dump_exported_program: Whether to save the exported program to a file. 
+ artifacts_dir: The directory to save the exported program and error reports. + verbose: Whether to print verbose messages. If None (default), some messages will be printed. + + Returns: + The ONNXProgram with the exported IR graph. + + Raises: + TorchExportError: If the export process fails with torch.export. + ConversionError: If the ExportedProgram to ONNX translation fails. + """ + # Set up the error reporting facilities + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f") + profiler = _maybe_start_profiler(profile) + + # Create the artifacts directory if it does not exist + artifacts_dir = pathlib.Path(artifacts_dir) + if report or profile or dump_exported_program: + artifacts_dir.mkdir(parents=True, exist_ok=True) + + verbose_print = _verbose_printer(verbose) + export_status = _reporting.ExportStatus() + failed_results: list[_capture_strategies.Result] = [] + + program: torch.export.ExportedProgram | None = None + # Step 1: Export the model with torch.export.export if the model is not already an ExportedProgram + if isinstance(model, torch.export.ExportedProgram): + # We know the model is already exported program, so the args, kwargs, and dynamic_shapes + # are not used. + program = model + export_status.torch_export = True + else: + # Convert an nn.Module to an ExportedProgram + # Try everything 🐰 (all paths for getting an ExportedProgram) + # When input is a JIT module, the last strategy will succeed so it is handled + result: _capture_strategies.Result | None = None + for strategy_class in _capture_strategies.CAPTURE_STRATEGIES: + strategy = strategy_class( # type: ignore[abstract] + verbose=verbose is not False, # Treat None as verbose + dump=dump_exported_program, + artifacts_dir=artifacts_dir, + timestamp=timestamp, + ) + result = strategy(model, args, kwargs, dynamic_shapes=dynamic_shapes) + + # Record the status + if strategy_class is _capture_strategies.TorchExportStrategy: + export_status.torch_export = result.success + elif strategy_class is _capture_strategies.TorchExportNonStrictStrategy: + export_status.torch_export_non_strict = result.success + elif strategy_class is _capture_strategies.JitTraceConvertStrategy: + export_status.torch_jit = result.success + + if result.exported_program is not None: + program = result.exported_program + break + else: + failed_results.append(result) + + assert result is not None + if result.exported_program is None: + # If all strategies fail, produce an error report and raise the first error + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + if report: + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + + try: + _reporting.create_torch_export_error_report( + report_path, + _format_exceptions_for_all_strategies(failed_results), + export_status=export_status, + profile_result=profile_result, + ) + except Exception as e_report: + verbose_print( + f"Failed to save error report due to an error: {e_report}" + ) + else: + report_path = None + + first_error = failed_results[0].exception + assert first_error is not None + + # NOTE: We only throw the torch.export (first) exception because we want to + # focus on the torch.export.export error. Errors from other strategies like + # torch.jit.trace is due to the fallback and can be confusing to users. + # We save all errors in the error report. + raise _errors.TorchExportError( + _STEP_ONE_ERROR_MESSAGE + + ( + f"\nError report has been saved to '{report_path}'." 
+ if report + else "" + ) + + _summarize_exception_stack(first_error) + ) from first_error + + assert program is not None + + if dump_exported_program: + verbose_print("Dumping ExportedProgram because `dump_exported_program=True`...") + program_path = artifacts_dir / f"onnx_export_{timestamp}.pt2" + try: + torch.export.save(program, program_path) + except Exception as e: + verbose_print(f"Failed to save ExportedProgram due to an error: {e}") + else: + verbose_print(f"ExportedProgram has been saved to '{program_path}'.") + + # Step 2: Convert the exported program to an ONNX model + verbose_print("Translate the graph into ONNX...") + + # Step 2a: Decompose the exported program and insert type promotion nodes + try: + # Build the ONNX function registry + if registry is None: + registry = _registration.ONNXRegistry.from_torchlib() + + # Process the exported program to run decompositions and type promotions etc. + decomposed_program = _prepare_exported_program_for_export( + program, registry=registry + ) + except Exception as e: + export_status.onnx_translation = False + verbose_print("Translate the graph into ONNX... ❌") + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + if report: + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + + # Run the analysis to get the error report + try: + _reporting.create_onnx_export_report( + report_path, + f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}", + program, + export_status=export_status, + profile_result=profile_result, + registry=registry, + ) + except Exception: + logger.exception("Failed to save report due to an error.") + else: + report_path = None + + raise _errors.ConversionError( + _STEP_TWO_ERROR_MESSAGE + + (f"\nError report has been saved to '{report_path}'." if report else "") + + _summarize_exception_stack(e) + ) from e + + # Step 2b: Translate the decomposed program to ONNX and produce ONNXProgram + if report or profile: + pre_decomp_unique_ops, post_decomp_unique_ops = _analysis.compare_ops( + program, decomposed_program + ) + else: + pre_decomp_unique_ops = None + post_decomp_unique_ops = None + + try: + # Convert the exported program to an ONNX model + onnx_program = _exported_program_to_onnx_program( + decomposed_program, registry=registry + ) + + # Run the ONNX passes + if input_names: + _ir_passes.rename_inputs(onnx_program.model, input_names) + if output_names: + _ir_passes.rename_outputs(onnx_program.model, output_names) + + # TODO(justinchuby): Remove the hack + _ir_passes.add_torchlib_common_imports(onnx_program.model) + + export_status.onnx_translation = True + verbose_print("Translate the graph into ONNX... ✅") + except Exception as e: + export_status.onnx_translation = False + verbose_print("Translate the graph into ONNX... 
❌") + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + if report: + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + + # Run the analysis to get the error report + _reporting.create_onnx_export_report( + report_path, + f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}", + program, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + export_status=export_status, + profile_result=profile_result, + registry=registry, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + else: + report_path = None + + raise _errors.ConversionError( + _STEP_TWO_ERROR_MESSAGE + + (f"\nError report has been saved to '{report_path}'." if report else "") + + _summarize_exception_stack(e) + ) from e + + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + assert onnx_program.exported_program is not None + + if not verify: + # Return if verification is not requested + if report: + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + _reporting.create_onnx_export_report( + report_path, + "No errors" + if not failed_results + else _format_exceptions_for_all_strategies(failed_results), + onnx_program.exported_program, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + export_status=export_status, + profile_result=profile_result, + model=onnx_program.model, + registry=registry, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + elif profile and profile_result is not None: + verbose_print("Profile result:") + verbose_print(profile_result) + return onnx_program + + # Step 3: (verify=True) Check the ONNX model with ONNX checker + try: + verbose_print("Check the ONNX model...") + onnxscript_apis.check_model(onnx_program.model) + export_status.onnx_checker = True + verbose_print("Check the ONNX model... ✅") + except Exception as e: + export_status.onnx_checker = False + verbose_print("Check the ONNX model... ❌") + if report: + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + _reporting.create_onnx_export_report( + report_path, + f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}", + onnx_program.exported_program, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + export_status=export_status, + profile_result=profile_result, + model=onnx_program.model, + registry=registry, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + logger.warning( + "Conversion successful but the ONNX model fails ONNX checker. " # noqa: G004 + "Please create an issue " + f"in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component and " + "attach the full error stack as well as reproduction scripts. 
", + exc_info=e, + ) + return onnx_program + + # Step 4: (verify=True) Execute the model with ONNX Runtime + try: + verbose_print("Execute the model with ONNX Runtime...") + verification_results = _verification.verify_onnx_program(onnx_program) + verbose_print("Execute the model with ONNX Runtime... ✅") + export_status.onnx_runtime = True + onnx_runtime_error_message = None + except Exception as e: + verbose_print("Execute the model with ONNX Runtime... ❌") + export_status.onnx_runtime = False + onnx_runtime_error_message = _format_exception(e) + verification_message = None + + else: + # Step 5: (verify=True) Validate the output values + verbose_print("Verify output accuracy...") + export_status.output_accuracy = True + for verification_result in verification_results: + # TODO(justinchuby): The threshold is arbitrary right now + if verification_result.max_abs_diff >= 5e-3: + logger.warning( + "Output '%s' has a large absolute difference of %f. ", + verification_result.name, + verification_result.max_abs_diff, + ) + export_status.output_accuracy = False + if verification_result.max_rel_diff >= 1e-1: + logger.warning( + "Output '%s' has a large relative difference of %f. ", + verification_result.name, + verification_result.max_rel_diff, + ) + export_status.output_accuracy = False + if export_status.output_accuracy: + verbose_print("Verify output accuracy... ✅") + else: + verbose_print("Verify output accuracy... ❌") + verification_message = _reporting.format_verification_infos( + verification_results + ) + + if report: + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + + traceback_lines = [] + if failed_results: + traceback_lines.append( + _format_exceptions_for_all_strategies(failed_results) + ) + if onnx_runtime_error_message: + traceback_lines.append("# ⚠️ ONNX Runtime error -----------------------") + traceback_lines.append(onnx_runtime_error_message) + if not traceback_lines: + traceback_lines.append("No errors") + + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + _reporting.create_onnx_export_report( + report_path, + "\n\n".join(traceback_lines), + onnx_program.exported_program, + profile_result=profile_result, + export_status=export_status, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + model=onnx_program.model, + registry=registry, + verification_result=verification_message, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + + # Release the inference session created during verification + onnx_program.release() + return onnx_program diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbff757e92ef20987b5314ad9a69e73594f6766 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py @@ -0,0 +1,100 @@ +"""Build decomp table from PyTorch.""" + +# mypy: allow-untyped-defs +from __future__ import annotations + +from typing import Callable, TYPE_CHECKING + +import torch +import torch._ops + + +if TYPE_CHECKING: + from torch.onnx._internal.exporter import _registration + + +def get_onnx_implemented_overloads( + registry: _registration.ONNXRegistry, +) -> list[torch._ops.OperatorBase]: + """ + Creates a set of 
OperatorBase and Callable objects that represent ONNX-supported PyTorch operations. + + Args: + registry: The ONNX registry for PyTorch. + + Returns: + A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations. + """ + registered_ops: list[torch._ops.OperatorBase] = [] + for op_namespace in (torch.ops.aten, torch.ops.prims): + op_names = dir(op_namespace) + for op_name in op_names: + op_overload_packet = getattr(op_namespace, op_name) + if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket): + continue + + for overload_name in op_overload_packet.overloads(): + op_overload = getattr(op_overload_packet, overload_name) + if registry.is_registered(op_overload): + registered_ops.append(op_overload) + return registered_ops + + +def get_preserve_ops() -> set[torch._ops.OpOverload]: + """Return a set of CompositeImplicitAutograd ops that should be preserved.""" + aten = torch.ops.aten + # NOTE: Keep this list sorted + # NOTE: Do _not_ retain aten.linear as its decomposition is addmm, which is Gemm and is preferable for accuracy + return { + aten._upsample_bilinear2d_aa.default, + aten._upsample_nearest_exact1d.vec, + aten._upsample_nearest_exact2d.vec, + aten._upsample_nearest_exact3d.vec, + aten.group_norm.default, + aten.instance_norm.default, + aten.upsample_bilinear2d.default, + aten.upsample_bilinear2d.vec, + aten.upsample_linear1d.default, + aten.upsample_linear1d.vec, + aten.upsample_nearest1d.default, + aten.upsample_nearest1d.vec, + aten.upsample_nearest2d.default, + aten.upsample_nearest2d.vec, + aten.upsample_nearest3d.default, + aten.upsample_nearest3d.vec, + aten.upsample_trilinear3d.default, + aten.upsample_trilinear3d.vec, + } + + +def create_onnx_friendly_decomposition_table( + onnx_registered_ops: set[torch._ops.OperatorBase], +) -> dict[torch._ops.OperatorBase, Callable]: + """ + This function creates a dictionary of op overloads and their decomposition functions + for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function, + its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's + built-in aten-to-aten decomposition. + + Args: + onnx_registered_ops: All ops that have an ONNX decomposition implemented. + + Returns: + Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding + decomposition functions. + """ + decomposition_table: dict[torch._ops.OperatorBase, Callable] = {} + + # NOTE: If we import torch._decomp, we will get RuntimeError: Only a single + # TORCH_LIBRARY can be used to register the namespace nvprims; please put all of your + # definitions in a single TORCH_LIBRARY block. + for op_overload, decomp_fn in torch._decomp.decomposition_table.items(): # type: ignore[attr-defined] + # Skip decomposition for op_overload as long as that op_overload has a corresponding ONNX + # symbolic function. + # NOTE: Do not skip torch._refs decomps. They are fine because otherwise the model is + # not exportable anyways. 
+ if op_overload in onnx_registered_ops: + continue + decomposition_table[op_overload] = decomp_fn + + return decomposition_table diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..ff41bbe695fe7d0ebe60c40014332abc36430d0f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py @@ -0,0 +1,21 @@ +"""Error classes for the ONNX exporter.""" + +from __future__ import annotations + +import torch.onnx.errors + + +class TorchExportError(torch.onnx.errors.OnnxExporterError): + """Error during graph capturing using torch.export.""" + + +class ConversionError(torch.onnx.errors.OnnxExporterError): + """Error during ExportedProgram to ONNX conversion.""" + + +class DispatchError(ConversionError): + """Error during ONNX Function dispatching.""" + + +class GraphConstructionError(ConversionError): + """Error during ONNX graph construction.""" diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py new file mode 100644 index 0000000000000000000000000000000000000000..7e8748443e2b1318f4d57d2e034897581c90f24a --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py @@ -0,0 +1,41 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import logging +from typing import Sequence + +from onnxscript import ir + + +logger = logging.getLogger(__name__) + + +def rename_inputs(model: ir.Model, new_names: Sequence[str]) -> None: + # TODO: Ensure the names do not have duplicates + for input, new_name in zip(model.graph.inputs, new_names): + input.metadata_props["pkg.torch.onnx.original_node_name"] = str(input.name) + input.name = new_name + + +def rename_outputs(model: ir.Model, new_names: Sequence[str]) -> None: + for output, new_name in zip(model.graph.outputs, new_names): + output.metadata_props["pkg.torch.onnx.original_node_name"] = str(output.name) + output.name = new_name + + +def add_torchlib_common_imports(model: ir.Model) -> None: + """Hack to add torchlib common imports to the model.""" + + try: + # TODO(justinchuby): Remove this hack and improved onnxscript + from onnxscript.function_libs.torch_lib.ops import common as common_ops + + model.opset_imports["pkg.onnxscript.torch_lib.common"] = 1 + rank_func = ir.serde.deserialize_function(common_ops.Rank.to_function_proto()) + is_scalar_func = ir.serde.deserialize_function( + common_ops.IsScalar.to_function_proto() + ) + model.functions[rank_func.identifier()] = rank_func + model.functions[is_scalar_func.identifier()] = is_scalar_func + except Exception: + logger.exception("Failed to add torchlib common imports to the model.") diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_reporting.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_reporting.py new file mode 100644 index 0000000000000000000000000000000000000000..86c79df97430a6b91faf41bf8f40637020d8779f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_reporting.py @@ -0,0 +1,194 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import dataclasses +import re +from typing import TYPE_CHECKING + +from torch.onnx._internal.exporter import _analysis, _registration, _verification + + +if TYPE_CHECKING: + import 
os + + from onnxscript import ir + + import torch + + +@dataclasses.dataclass +class ExportStatus: + # Whether torch.export.export() succeeds + torch_export: bool | None = None + # Whether torch.export.export(..., strict=False) succeeds + torch_export_non_strict: bool | None = None + # Whether torch.jit.trace succeeds + torch_jit: bool | None = None + # Whether ONNX translation succeeds + onnx_translation: bool | None = None + # Whether ONNX model passes onnx.checker.check_model + onnx_checker: bool | None = None + # Whether ONNX model runs successfully with ONNX Runtime + onnx_runtime: bool | None = None + # Whether the output of the ONNX model is accurate + output_accuracy: bool | None = None + + +def _status_emoji(status: bool | None) -> str: + if status is None: + return "⚪" + return "✅" if status else "❌" + + +def _format_export_status(status: ExportStatus) -> str: + return ( + f"```\n" + f"{_status_emoji(status.torch_export)} Obtain model graph with `torch.export.export`\n" + f"{_status_emoji(status.torch_export_non_strict)} Obtain model graph with `torch.export.export(..., strict=False)`\n" + f"{_status_emoji(status.torch_jit)} Obtain model graph with `torch.jit.trace`\n" + f"{_status_emoji(status.onnx_translation)} Translate the graph into ONNX\n" + f"{_status_emoji(status.onnx_checker)} Run `onnx.checker` on the ONNX model\n" + f"{_status_emoji(status.onnx_runtime)} Execute the model with ONNX Runtime\n" + f"{_status_emoji(status.output_accuracy)} Validate model output accuracy\n" + f"```\n\n" + ) + + +def _strip_color_from_string(text: str) -> str: + # This regular expression matches ANSI escape codes + # https://github.com/pytorch/pytorch/blob/9554a9af8788c57e1c5222c39076a5afcf0998ae/torch/_dynamo/utils.py#L2785-L2788 + ansi_escape = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]") + return ansi_escape.sub("", text) + + +def _format_exported_program(exported_program: torch.export.ExportedProgram) -> str: + # Adapted from https://github.com/pytorch/pytorch/pull/128476 + # to remove colors + # Even though we can call graph_module.print_readable directly, since the + # colored option was added only recently, we can't guarantee that the + # version of PyTorch used by the user has this option. Therefore, we + # still call str(ExportedProgram) + text = f"```python\n{_strip_color_from_string(str(exported_program))}\n```\n\n" + return text + + +def construct_report_file_name(timestamp: str, status: ExportStatus) -> str: + # Status could be None. So we need to check for False explicitly. + if not (status.torch_export or status.torch_export_non_strict or status.torch_jit): + # All strategies failed + postfix = "pt_export" + elif status.onnx_translation is False: + postfix = "conversion" + elif status.onnx_checker is False: + postfix = "checker" + elif status.onnx_runtime is False: + postfix = "runtime" + elif status.output_accuracy is False: + postfix = "accuracy" + elif status.torch_export is False or status.torch_export_non_strict is False: + # Some strategies failed + postfix = "strategies" + else: + postfix = "success" + return f"onnx_export_{timestamp}_{postfix}.md" + + +def format_decomp_comparison( + pre_decomp_unique_ops: set[str], + post_decomp_unique_ops: set[str], +) -> str: + """Format the decomposition comparison result. + + Args: + pre_decomp_unique_ops: The unique ops in the ExportedProgram before decomposition. + post_decomp_unique_ops: The unique ops in the ExportedProgram after decomposition. + + Returns: + The formatted comparison result.
+ """ + return ( + f"Ops exist only in the ExportedProgram before decomposition: `{sorted(pre_decomp_unique_ops)}`\n\n" + f"Ops exist only in the ExportedProgram after decomposition: `{sorted(post_decomp_unique_ops)}`\n" + ) + + +def format_verification_infos( + verification_infos: list[_verification.VerificationInfo], +) -> str: + """Format the verification result. + + Args: + verification_infos: The verification result. + + Returns: + The formatted verification result. + """ + return "\n".join( + f"`{info.name}`: `max_abs_diff={info.max_abs_diff:e}`, `max_rel_diff={info.max_rel_diff:e}`, " + f"`abs_diff_hist={info.abs_diff_hist}`, `rel_diff_hist={info.rel_diff_hist}`" + for info in verification_infos + ) + + +def create_torch_export_error_report( + filename: str | os.PathLike, + formatted_traceback: str, + *, + export_status: ExportStatus, + profile_result: str | None, +): + with open(filename, "w", encoding="utf-8") as f: + f.write("# PyTorch ONNX Conversion Error Report\n\n") + f.write(_format_export_status(export_status)) + f.write("Error message:\n\n") + f.write("```pytb\n") + f.write(formatted_traceback) + f.write("```\n\n") + if profile_result is not None: + f.write("## Profiling result\n\n") + f.write("```\n") + f.write(profile_result) + f.write("```\n") + + +def create_onnx_export_report( + filename: str | os.PathLike, + formatted_traceback: str, + program: torch.export.ExportedProgram, + *, + decomp_comparison: str | None = None, + export_status: ExportStatus, + profile_result: str | None, + model: ir.Model | None = None, + registry: _registration.ONNXRegistry | None = None, + verification_result: str | None = None, +): + with open(filename, "w", encoding="utf-8") as f: + f.write("# PyTorch ONNX Conversion Report\n\n") + f.write(_format_export_status(export_status)) + f.write("## Error messages\n\n") + f.write("```pytb\n") + f.write(formatted_traceback) + f.write("\n```\n\n") + f.write("## Exported program\n\n") + f.write(_format_exported_program(program)) + if model is not None: + f.write("## ONNX model\n\n") + f.write("```python\n") + f.write(str(model)) + f.write("\n```\n\n") + f.write("## Analysis\n\n") + _analysis.analyze(program, file=f, registry=registry) + if decomp_comparison is not None: + f.write("\n## Decomposition comparison\n\n") + f.write(decomp_comparison) + f.write("\n") + if verification_result is not None: + f.write("\n## Verification results\n\n") + f.write(verification_result) + f.write("\n") + if profile_result is not None: + f.write("\n## Profiling result\n\n") + f.write("```\n") + f.write(profile_result) + f.write("```\n") diff --git a/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_verification.py b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..c4eec16da4990f135d3c5958bd9ebcc04fce743f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_verification.py @@ -0,0 +1,111 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + + +__all__ = [ + "VerificationInfo", + "verify_onnx_program", +] + +import dataclasses +import math +from typing import Any, TYPE_CHECKING + +import torch +from torch.utils import _pytree + + +if TYPE_CHECKING: + from torch.onnx._internal.exporter import _onnx_program + + +@dataclasses.dataclass +class VerificationInfo: + name: str + max_abs_diff: float + max_rel_diff: float + abs_diff_hist: tuple[torch.Tensor, torch.Tensor] + rel_diff_hist: tuple[torch.Tensor, 
torch.Tensor] + expected_dtype: torch.dtype + actual_dtype: torch.dtype + # NOTE: We don't need to include shape because the expected shape is already known + # and checked by the runtime + + +def _compare_tensors( + expected: torch.Tensor, + actual: torch.Tensor, +) -> tuple[float, float, torch.Tensor, torch.Tensor]: + # Move tensors to the same device + expected = expected.detach().cpu() + actual = actual.detach().cpu() + if expected.numel() == 0 or actual.numel() == 0: + return math.inf, math.inf, torch.tensor(math.inf), torch.tensor(math.inf) + if expected.dtype == torch.bool: + expected = expected.to(torch.float32) + actual = actual.to(torch.float32) + abs_diff = torch.abs(expected - actual) + eps = 1e-7 + normalizer = torch.abs(expected) + eps + rel_diff = abs_diff / normalizer + + max_absolute_difference = abs_diff.max().item() + max_relative_difference = rel_diff.max().item() + + return max_absolute_difference, max_relative_difference, abs_diff, rel_diff + + +def verify_onnx_program( + onnx_program: _onnx_program.ONNXProgram, + args: tuple[Any, ...] | None = None, + kwargs: dict[str, Any] | None = None, +) -> list[VerificationInfo]: + exported_program = onnx_program.exported_program + if exported_program is None: + raise ValueError( + "The ONNX program does not contain an exported_program. " + "Please provide an exported_program to verify the ONNX program." + ) + if args is None and kwargs is None: + # User did not provide example inputs, use the default example inputs + if exported_program.example_inputs is None: + raise ValueError( + "No example inputs provided and the exported_program does not contain example inputs. " + "Please provide arguments to verify the ONNX program." + ) + args, kwargs = exported_program.example_inputs + if args is None: + args = () + if kwargs is None: + kwargs = {} + torch_module = exported_program.module() + torch_outputs, _ = _pytree.tree_flatten(torch_module(*args, **kwargs)) + onnx_outputs = onnx_program(*args, **kwargs) + results = [] + for torch_output, onnx_output, output_val in zip( + torch_outputs, onnx_outputs, onnx_program.model.graph.outputs + ): + name = output_val.name + max_abs_diff, max_rel_diff, abs_diff, rel_diff = _compare_tensors( + torch_output, onnx_output + ) + abs_diff = abs_diff.flatten() + rel_diff = rel_diff.flatten() + bins = torch.tensor( + [0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0, 10, 1000000], + dtype=abs_diff.dtype, + ) + abs_diff_hist = torch.histogram(abs_diff, bins=bins) + rel_diff_hist = torch.histogram(rel_diff, bins=bins) + results.append( + VerificationInfo( + name=str(name), + max_abs_diff=max_abs_diff, + max_rel_diff=max_rel_diff, + abs_diff_hist=abs_diff_hist, + rel_diff_hist=rel_diff_hist, + expected_dtype=torch_output.dtype, + actual_dtype=onnx_output.dtype, + ) + ) + return results