diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4eef5e7541b0a6c07a1166d6610baa3eba8d9ddf
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_exporter_legacy.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_exporter_legacy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3480849fd724dba005fac3a2e3166a0b52e741ce
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_exporter_legacy.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_lazy_import.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_lazy_import.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b8b632774292c741cd436c4c1c1f5f9a176d8cb
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_lazy_import.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c64ef5282ce51b34fafb7dacd901e18a15e40123
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93a6867a5f5c640f457d9472baafb73a7b6e5b43
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ebaad51297d51be765d69f94f4af2e0941daf9a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f15ace376617a500f8fdb86d8b916f81aad18176
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e573bfc0cb3827550051c346c490d7b7bc66923b
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6afdbd9032763241b2b2b9cdfa4b92b6417cd870
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a53c94d5485af2d96f145c64532c1a9a3bc8131
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c488da15190580869f725b52021f74569027d6a4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98f20ce0241a6e88d16e8dabb5b03f9234539e57
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/_infra.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/_infra.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66dc140515d0f66dd14c3347545339f63f0a856c
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/_infra.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/context.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/context.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..669d3503c726d92749f933721d0d91656c73b453
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/context.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/decorator.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/decorator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54d2f1255758bb9041f79bb28883dd79f1d9fbe9
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/decorator.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/formatter.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/formatter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2234ecf8ed2c868ce17880b2368ac4f57cf3a18
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/formatter.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cee7df921213264b8775cb0adf6accfc3332cd32
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/utils.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa03224c4fa72a110c7415c27b0befbee7870219
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_artifact_change.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_artifact_change.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b21c51d34a4093a964694f12ddd86a4e5033004c
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_artifact_change.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_code_flow.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_code_flow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4aac58baac43894b5c481ef933db431c9f797612
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_code_flow.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_edge.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_edge.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bf2185e85d484ca038bb47a8680ca86501a6a78
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_edge.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_exception.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_exception.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e59a2b371f82b76e00d6ea7bb5d5e13e338b6825
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_exception.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9ddd494f7e4a8da749fb33527a625523038c594
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_region.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_region.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9927e4ea61c476bbe80d7c6438c85a4e5806b6c0
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_region.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_suppression.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_suppression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..954ba69b67daa668d4d35b8c403c36de26f93a34
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_suppression.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_tool.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_tool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a52266878a5440c91bc4edcfb06bc20c65fadf08
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_tool.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8bc9517e61b4f48c6c943ab5d3dccdf413cee6
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py
@@ -0,0 +1,17 @@
+__all__ = [
+    "ONNXRegistry",
+    "ONNXProgram",
+    "analyze",
+    "export",
+    "exported_program_to_ir",
+    "export_compat",
+    "testing",
+    "verification",
+]
+
+from . import _testing as testing, _verification as verification
+from ._analysis import analyze
+from ._compat import export_compat
+from ._core import export, exported_program_to_ir
+from ._onnx_program import ONNXProgram
+from ._registration import ONNXRegistry
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0bad56b88bb32a3b24adbcf77359a9defe21717
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_analysis.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_analysis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9c97c833864e3d0abea8649e86376ec9e5fe173
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_analysis.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_capture_strategies.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_capture_strategies.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..055c1519fa59efcb07b333c3585c245b06b45dc7
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_capture_strategies.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_compat.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f3bb68fb1cfcced9a30a345605414421a967931
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_compat.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_core.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f41de28ea0c7925a82f42e7dc46911c17d238acd
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_core.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_decomp.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_decomp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..428566ca0843a0c2134efc37f92fcb45dd7b2bab
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_decomp.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_dispatching.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_dispatching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1615e41b9f7528930b91e798411f87fb5c47d54
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_dispatching.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_fx_passes.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_fx_passes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32dfcdd989281b878b8d0a37230d7dde49fd96a2
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_fx_passes.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_isolated.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_isolated.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..808ec54f9c2e2dbe3be94ca98198a5befe994008
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_isolated.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_onnx_program.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_onnx_program.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58da098ec6d9665363bdba67acddae3ae7a3bb22
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_onnx_program.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_reporting.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_reporting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69ee35f033d18ce94c66ba8a5ee38bb4b8e452fb
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_reporting.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_schemas.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_schemas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57aad502e1c0b9a9ce8bd678b9e79deb2bca45bd
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_schemas.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_tensors.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_tensors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aae44f883ceba1642ca70ba74dbf6692f8925e6c
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_tensors.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_testing.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb5f80e67d5353ea7fda6c49f8c6fad17a1f2d13
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_testing.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_verification.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_verification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86af8c7293c06f20b7cd668ca0e8b42d98497c58
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_verification.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eb5adab3a7d9dc942e59a3c3159914500c23aea
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py
@@ -0,0 +1,242 @@
+"""Compatibility analyzer for PyTorch models."""
+
+# mypy: allow-untyped-defs
+# flake8: noqa: B950 We do not need flake8 as it complains line length
+from __future__ import annotations
+
+import dataclasses
+import textwrap
+import traceback
+from collections import defaultdict
+from typing import TYPE_CHECKING
+
+import torch
+import torch._export.serde.schema
+from torch.export import graph_signature
+from torch.onnx._internal.exporter import _dispatching, _registration
+
+
+if TYPE_CHECKING:
+    import torch.fx
+
+
+@dataclasses.dataclass
+class ModelInfo:
+    """Information about the model."""
+
+    parameter_count: defaultdict[torch.dtype, int] = dataclasses.field(
+        default_factory=lambda: defaultdict(int)
+    )
+    buffer_count: defaultdict[torch.dtype, int] = dataclasses.field(
+        default_factory=lambda: defaultdict(int)
+    )
+    fx_node_count: int = 0
+    fx_node_op_count: defaultdict[str, int] = dataclasses.field(
+        default_factory=lambda: defaultdict(int)
+    )
+    fx_node_target_count: defaultdict[str, int] = dataclasses.field(
+        default_factory=lambda: defaultdict(int)
+    )
+    dispatch_failures: list[tuple[torch.fx.Node, str]] = dataclasses.field(
+        default_factory=list
+    )
+    inputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(
+        default_factory=dict
+    )
+    outputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(
+        default_factory=dict
+    )
+
+
+def _count_weights(
+    exported_program: torch.export.ExportedProgram,
+) -> tuple[defaultdict[torch.dtype, int], defaultdict[torch.dtype, int]]:
+    """Count the size of the parameters in the exported program."""
+
+    parameter_count: defaultdict[torch.dtype, int] = defaultdict(int)
+    buffer_count: defaultdict[torch.dtype, int] = defaultdict(int)
+    for parameter in exported_program.parameters():
+        dtype = parameter.dtype
+        parameter_count[dtype] += parameter.numel()
+
+    for buffer in exported_program.buffers():
+        dtype = buffer.dtype
+        buffer_count[dtype] += buffer.numel()
+
+    return parameter_count, buffer_count
+
+
+def _format_model_info(model_info: ModelInfo) -> str:
+    """Format the information about the model."""
+    lines = [
+        textwrap.dedent(
+            f"""\
+            PyTorch ONNX Conversion Analysis
+
+            ## Model Information
+
+            The model has {sum(model_info.parameter_count.values())} parameters and {sum(model_info.buffer_count.values())} buffers (non-trainable parameters).
+            Number of parameters per dtype:
+            ```python
+            {model_info.parameter_count}
+            ```
+            Number of buffers per dtype:
+            ```python
+            {model_info.buffer_count}
+            ```
+            """
+        ),
+        "Inputs:",
+        *[f"- `{name}`: `{meta}`" for name, meta in model_info.inputs.items()],
+        "",
+        "Outputs:",
+        *[f"- `{name}`: `{meta}`" for name, meta in model_info.outputs.items()],
+        "",
+        f"The FX graph has {model_info.fx_node_count} nodes in total. Number of FX nodes per op:",
+    ]
+    for op, count in model_info.fx_node_op_count.items():
+        lines.append(f"- `{op}`: {count}")
+    lines.append("\n")
+    lines.append("Of the call_function nodes, the counts of operators used are:\n")
+    sorted_targets = sorted(
+        model_info.fx_node_target_count.items(), key=lambda x: x[1], reverse=True
+    )
+    for target, count in sorted_targets:
+        lines.append(f"- `{target}`: {count}")
+
+    lines.append("")
+    lines.append("## ONNX Conversion Information")
+    lines.append("")
+
+    if model_info.dispatch_failures:
+        lines.append(
+            "The model contains operators the dispatcher could not find registered ONNX decompositions for. "
+            "This may be due to missing implementations, decompositions not registered "
+            "correctly, or a bug in the dispatcher."
+        )
+        lines.append("")
+        lines.append("Errors grouped by operator:\n")
+
+        target_to_nodes = defaultdict(list)
+        for node, _ in model_info.dispatch_failures:
+            target_to_nodes[str(node.target)].append(node)
+
+        target_to_messages = {}
+        for node, message in model_info.dispatch_failures:
+            if str(node.target) not in target_to_messages:
+                target_to_messages[str(node.target)] = message
+
+        for target, nodes in sorted(
+            target_to_nodes.items(), key=lambda x: x[0], reverse=True
+        ):
+            message = textwrap.indent(
+                f"{target_to_messages[target]}. Example node: `{nodes[0].format_node()}`. All nodes: `{nodes}`",
+                "    ",
+            )
+            lines.append(f"- `{target}`: {message}")
+    else:
+        lines.append("All operators in the model have registered ONNX decompositions.")
+
+    return "\n".join(lines)
+
+
+def _get_io_specs(exported_program: torch.export.ExportedProgram) -> tuple[dict, dict]:
+    """Get the input and output specs of the exported program."""
+
+    nodes: dict[str, torch.fx.Node] = {
+        node.name: node for node in exported_program.graph.nodes
+    }
+    user_inputs = [
+        spec
+        for spec in exported_program.graph_signature.input_specs
+        if spec.kind == graph_signature.InputKind.USER_INPUT
+    ]
+    user_outputs = [
+        spec
+        for spec in exported_program.graph_signature.output_specs
+        if spec.kind == graph_signature.OutputKind.USER_OUTPUT
+    ]
+    inputs: dict[str, torch._export.serde.schema.TensorMeta] = {}
+    outputs: dict[str, torch._export.serde.schema.TensorMeta] = {}
+    for spec in user_inputs:
+        if isinstance(spec.arg, graph_signature.ConstantArgument):
+            continue
+        name = spec.arg.name
+        # FIXME: tensor_meta is None sometimes when the exported program still knows the shape/type
+        inputs[name] = nodes[name].meta["tensor_meta"]
+    for spec in user_outputs:
+        if isinstance(spec.arg, graph_signature.ConstantArgument):
+            continue
+        name = spec.arg.name
+        outputs[name] = nodes[name].meta["tensor_meta"]
+    return inputs, outputs
+
+
+def _count_fx_targets(
+    exported_program: torch.export.ExportedProgram,
+) -> defaultdict[str, int]:
+    """Count the number of targets for each node in the exported program."""
+    fx_node_target_count: defaultdict[str, int] = defaultdict(int)
+    for node in exported_program.graph.nodes:
+        if node.op == "call_function":
+            fx_node_target_count[str(node.target)] += 1
+    return fx_node_target_count
+
+
+def analyze(
+    exported_program: torch.export.ExportedProgram,
+    registry: _registration.ONNXRegistry | None = None,
+    file=None,
+) -> None:
+    """Analyze the compatibility of the exported program."""
+    # Get basic information about the model
+    model_info = ModelInfo()
+    model_info.parameter_count, model_info.buffer_count = _count_weights(
+        exported_program
+    )
+    model_info.fx_node_count = len(exported_program.graph.nodes)
+    model_info.fx_node_target_count = _count_fx_targets(exported_program)
+    inputs, outputs = _get_io_specs(exported_program)
+    model_info.inputs = inputs
+    model_info.outputs = outputs
+
+    if registry is None:
+        registry = _registration.ONNXRegistry.from_torchlib()
+
+    # Try to find ops for every node in the graph
+    for node in exported_program.graph.nodes:
+        model_info.fx_node_op_count[node.op] += 1
+        if node.op == "call_function":
+            try:
+                onnx_function, message = _dispatching.dispatch(node, registry)
+            except Exception as e:
+                message = "Critical Error in dispatcher:\n"
+                formatted_exception = "\n".join(
+                    traceback.format_exception(type(e), e, e.__traceback__)
+                )
+                message += f"```pytb\n{formatted_exception}\n```\n"
+                onnx_function = None
+            if onnx_function is None:
+                model_info.dispatch_failures.append((node, message))
+
+    # Print the results
+    report = _format_model_info(model_info)
+    print(report, file=file, flush=True)
+
+
+def compare_ops(
+    program_a: torch.export.ExportedProgram, program_b: torch.export.ExportedProgram
+) -> tuple[set[str], set[str]]:
+    """Compare and get unique ops in two exported programs.
+
+    Args:
+        program_a: The first exported program.
+        program_b: The second exported program.
+
+    Returns:
+        A tuple of two sets, where the first set contains the unique ops in the first program
+        and the second set contains the unique ops in the second program.
+    """
+    program_a_ops = set(_count_fx_targets(program_a))
+    program_b_ops = set(_count_fx_targets(program_b))
+    return program_a_ops - program_b_ops, program_b_ops - program_a_ops
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_capture_strategies.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_capture_strategies.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cec92854ea8584e10d3ff83b9c4c7a66c8f98da
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_capture_strategies.py
@@ -0,0 +1,361 @@
+"""Strategies for capturing ExportedPrograms."""
+
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import abc
+import dataclasses
+import datetime
+import pathlib
+from typing import Any, Callable, TYPE_CHECKING
+
+import torch
+from torch._export import converter as _torchscript_converter
+from torch.utils import _pytree
+
+
+if TYPE_CHECKING:
+    import os
+
+
+def _verbose_printer(verbose: bool | None) -> Callable[..., None]:
+    """Prints messages based on `verbose`."""
+    if verbose is False:
+        return lambda *_, **__: None
+    return lambda *args, **kwargs: print("[torch.onnx]", *args, **kwargs)
+
+
+def _take_first_line(text: str) -> str:
+    """Take the first line of a text."""
+    lines = text.split("\n", maxsplit=1)
+    first_line = lines[0]
+    if len(lines) > 1:
+        first_line += "[...]"
+    return first_line
+
+
+@dataclasses.dataclass
+class Result:
+    exported_program: torch.export.ExportedProgram | None
+    strategy: str
+    exception: Exception | None = None
+
+    @property
+    def success(self) -> bool:
+        return self.exported_program is not None
+
+
+class CaptureStrategy(abc.ABC):
+    """Strategy for capturing a module as ExportedProgram.
+
+    To use a strategy, create an instance and call it with the model, args, kwargs, and dynamic_shapes.
+
+    Example::
+
+        strategy = TorchExportStrategy(verbose=True)
+        result = strategy(model, args, kwargs, dynamic_shapes)
+    """
+
+    def __init__(
+        self,
+        *,
+        verbose: bool = False,
+        dump: bool = False,
+        artifacts_dir: str | os.PathLike = ".",
+        timestamp: str | None = None,
+    ):
+        """Initialize the strategy.
+
+        Args:
+            verbose: Whether to print verbose messages.
+            dump: Whether to dump the intermediate artifacts to a file.
+        """
+        self._verbose_print = _verbose_printer(verbose)
+        self._dump = dump
+        self._artifacts_dir = pathlib.Path(artifacts_dir)
+        self._timestamp = timestamp or datetime.datetime.now().strftime(
+            "%Y-%m-%d_%H-%M-%S-%f"
+        )
+
+    def __call__(
+        self,
+        model: torch.nn.Module | torch.jit.ScriptFunction,
+        args: tuple[Any, ...],
+        kwargs: dict[str, Any] | None,
+        dynamic_shapes,
+    ) -> Result:
+        self._enter(model)
+        if kwargs is None:
+            kwargs = {}
+        try:
+            exported_program = self._capture(model, args, kwargs, dynamic_shapes)
+        except Exception as e:
+            self._failure(model, e)
+            return Result(
+                exported_program=None,
+                strategy=self.__class__.__name__,
+                exception=e,
+            )
+        self._success(model)
+        return Result(exported_program, strategy=self.__class__.__name__)
+
+    @abc.abstractmethod
+    def _capture(
+        self, model, args, kwargs, dynamic_shapes
+    ) -> torch.export.ExportedProgram:
+        raise NotImplementedError
+
+    def _enter(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
+        return
+
+    def _success(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
+        return
+
+    def _failure(
+        self, model: torch.nn.Module | torch.jit.ScriptFunction, e: Exception
+    ) -> None:
+        return
+
+
+class TorchExportStrategy(CaptureStrategy):
+    def _capture(
+        self, model, args, kwargs, dynamic_shapes
+    ) -> torch.export.ExportedProgram:
+        try:
+            return torch.export.export(
+                model, args, kwargs=kwargs, dynamic_shapes=dynamic_shapes
+            )
+        except torch._dynamo.exc.UserError as exc:
+            # Refine the dynamic shapes based on the suggested fixes.
+            try:
+                new_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
+                    exc.msg, dynamic_shapes
+                )
+            except Exception:
+                # If the dynamic shapes cannot be refined, re-raise the exception.
+                raise exc from None
+            return torch.export.export(
+                model, args, kwargs=kwargs, dynamic_shapes=new_shapes
+            )
+
+    def _enter(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with `torch.export.export`..."
+        )
+
+    def _success(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with `torch.export.export`... ✅"
+        )
+
+    def _failure(self, model, e) -> None:
+        del e  # Unused
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with `torch.export.export`... ❌"
+        )
+
+
+class TorchExportNonStrictStrategy(CaptureStrategy):
+    def _capture(
+        self, model, args, kwargs, dynamic_shapes
+    ) -> torch.export.ExportedProgram:
+        try:
+            return torch.export.export(
+                model, args, kwargs=kwargs, dynamic_shapes=dynamic_shapes, strict=False
+            )
+        except torch._dynamo.exc.UserError as exc:
+            # Refine the dynamic shapes based on the suggested fixes.
+            try:
+                new_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
+                    exc.msg, dynamic_shapes
+                )
+            except Exception:
+                # If the dynamic shapes cannot be refined, re-raise the exception.
+                raise exc from None
+            return torch.export.export(
+                model, args, kwargs=kwargs, dynamic_shapes=new_shapes, strict=False
+            )
+
+    def _enter(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`..."
+        )
+
+    def _success(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`... ✅"
+        )
+
+    def _failure(self, model, e) -> None:
+        del e  # Unused
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`... ❌"
+        )
+
+
+class JitTraceConvertStrategy(CaptureStrategy):
+    def _capture(
+        self, model, args, kwargs, dynamic_shapes
+    ) -> torch.export.ExportedProgram:
+        del dynamic_shapes  # Unused
+
+        flattened_args, spec = _pytree.tree_flatten((args, kwargs))
+        flattened_args = tuple(flattened_args)
+
+        # Since torch.jit.trace only accepts Tensors as inputs, we filter
+        # out non-Tensor arguments and reconstruct the arguments after entering
+        # the WrappedModel.
+        tensor_placeholder = object()
+        non_tensor_args = [
+            arg if not isinstance(arg, torch.Tensor) else tensor_placeholder
+            for arg in flattened_args
+        ]
+        tensor_args = tuple(
+            arg for arg in flattened_args if isinstance(arg, torch.Tensor)
+        )
+
+        class WrappedModel(torch.nn.Module):
+            """Wrap the model so that it takes flattened arguments."""
+
+            def __init__(self, m):
+                super().__init__()
+                self.model = m
+
+            def forward(self, *_args):
+                # Take the non-Tensor arguments list as a starting point and
+                # replace the tensor_placeholder with the actual tensor arguments
+                # from _args.
+                reconstructed_flattened_args = non_tensor_args.copy()
+                _args_iter = iter(_args)
+                for i, arg in enumerate(reconstructed_flattened_args):
+                    if arg is tensor_placeholder:
+                        reconstructed_flattened_args[i] = next(_args_iter)
+                # Unflatten the arguments and kwargs to pass to the model.
+                unflattened_args, unflattened_kwargs = _pytree.tree_unflatten(
+                    reconstructed_flattened_args, spec
+                )
+                results = self.model(*unflattened_args, **unflattened_kwargs)
+                if not isinstance(results, tuple):
+                    results = (results,)
+                flattened_results, _ = _pytree.tree_flatten(results)
+                if len(flattened_results) == 1:
+                    return flattened_results[0]
+                return tuple(flattened_results)
+
+        jit_model = torch.jit.trace(
+            WrappedModel(model),
+            example_inputs=tensor_args,
+            check_trace=False,
+            strict=False,
+        )
+        if self._dump:
+            program_path = self._artifacts_dir / f"onnx_export_{self._timestamp}.pt"
+            try:
+                torch.jit.save(jit_model, program_path)
+            except Exception as e:
+                self._verbose_print(
+                    f"Failed to save Torch Script model due to an error: {e}"
+                )
+            else:
+                self._verbose_print(
+                    f"Torch Script model has been saved to '{program_path}'."
+                )
+        return _torchscript_converter.TS2EPConverter(
+            jit_model, flattened_args
+        ).convert()
+
+    def _enter(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with Torch Script..."
+        )
+
+    def _success(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with Torch Script... ✅"
+        )
+
+    def _failure(self, model, e) -> None:
+        del e  # Unused
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with Torch Script... ❌"
+        )
+
+
+class LegacyDynamoStrategy(CaptureStrategy):
+    """Strategy implemented by the ONNX team using internal dynamo APIs and custom fx passes."""
+
+    def _capture(
+        self, model, args, kwargs, dynamic_shapes
+    ) -> torch.export.ExportedProgram:
+        # NOTE: Import here to prevent circular dependency
+        from torch.onnx._internal.fx import diagnostics, passes
+
+        graph_module, _ = torch._dynamo.export(
+            model,
+            tracing_mode="symbolic",
+            dynamic_shapes=dynamic_shapes,
+        )(
+            *args,
+            **kwargs,
+        )
+        torch._dynamo.reset()
+
+        diagnostic_context = diagnostics.DiagnosticContext(
+            "torch.onnx.export",
+            torch.__version__,
+        )
+
+        flattened_args, _ = _pytree.tree_flatten((args, kwargs))
+        flattened_args = tuple(flattened_args)
+
+        # ONNX does not support views and mutations.
+        # Functionalize to get a semantically equivalent graph without mutations.
+        graph_module = passes.Functionalize(
+            diagnostic_context,
+            graph_module,
+            enable_dynamic_axes=bool(dynamic_shapes),
+        ).run(*flattened_args)
+
+        # Input mutations are detected and distilled after `Functionalize` pass.
+        # Remove them since ONNX inference does not need them.
+        graph_module = passes.RemoveInputMutation(diagnostic_context, graph_module).run(
+            *flattened_args
+        )
+
+        # Use torch.export to recapture the GraphModule into an ExportedProgram.
+        return torch.export.export(graph_module, flattened_args)
+
+    def _enter(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with internal Dynamo apis..."
+        )
+
+    def _success(self, model) -> None:
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with internal Dynamo apis... ✅"
+        )
+
+    def _failure(self, model, e) -> None:
+        del e  # Unused
+        model_repr = _take_first_line(repr(model))
+        self._verbose_print(
+            f"Obtain model graph for `{model_repr}` with internal Dynamo apis... ❌"
+        )
❌" + ) + + +CAPTURE_STRATEGIES = ( + TorchExportStrategy, + TorchExportNonStrictStrategy, + JitTraceConvertStrategy, + LegacyDynamoStrategy, +) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..3fddef36b8b428f671dc1e2c88932097378fa661 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py @@ -0,0 +1,216 @@ +"""Compatibility functions for the torch.onnx.export API.""" + +# mypy: allow-untyped-defs +# mypy: disable-error-code=attr-defined +from __future__ import annotations + +import inspect +import logging +from typing import Any, Mapping, Sequence, TYPE_CHECKING + +import torch +from torch.onnx._internal._lazy_import import onnxscript_apis, onnxscript_ir as ir +from torch.onnx._internal.exporter import _core, _onnx_program + + +if TYPE_CHECKING: + import os + +logger = logging.getLogger(__name__) + + +def _signature(model) -> inspect.Signature: + should_be_callable = getattr(model, "forward", model) + if callable(should_be_callable): + return inspect.signature(should_be_callable) + raise ValueError("model has no forward method and is not callable") + + +def _from_dynamic_axes_to_dynamic_shapes( + model, + *, + dynamic_axes=None, + output_names: set[str], + input_names: Sequence[str] | None = None, +) -> dict[str, Any] | None: + """ + + dynamic_axes examples: + (1) dynamic_axes = {"x": {0: "my_custom_axis_name_1"}, "y": {1: "my_custom_axis_name_2"}} + (2) dynamic_axes = {"x": [0], "y": [1]} + + these will be converted to dynamic_shapes respectively: + (1) dynamic_shapes = {"x": {0: Dim("my_custom_axis_name_1")}, "y": {1: Dim("my_custom_axis_name_2")}} + (2) dynamic_shapes = {"x": {0: Dim("x_dim_0")}, "y": {1: Dim("y_dim_1")}} # auto-generated dim names + + """ + # https://github.com/pytorch/pytorch/pull/128371 + # 1. 
The function does not need to provide dynamic_shapes to torch.export.export + if dynamic_axes is None: + return None + + if input_names is None: + input_names = [] + + sig = _signature(model) + if len(input_names) > len(sig.parameters): + raise ValueError( + f"Number of input names ({len(input_names)}) should not be greater than " + f"the number of model inputs ({len(sig.parameters)})" + ) + input_names_to_model_inputs = {} + for idx, param_name in enumerate(sig.parameters): + if idx < len(input_names): + input_names_to_model_inputs[input_names[idx]] = param_name + else: + input_names_to_model_inputs[param_name] = param_name + + # NOTE: torch.export.export does not support input names assignment, + # so we need to map input names to model inputs to create dynamic_shapes + # for the exported program + dynamic_shapes_to_exported_program = {} + for input_name, axes in dynamic_axes.items(): + if input_name in output_names: + # User specified an output name as a dynamic axis, so we skip it + continue + # input_name can be either from input_names or from the model inputs + if input_name not in input_names_to_model_inputs: + raise ValueError( + f"dynamic axis: {input_name} is not found in the input names: {input_names}" + ) + model_input_name = input_names_to_model_inputs[input_name] + if isinstance(axes, dict): + dynamic_shapes_to_exported_program[model_input_name] = { + k: torch.export.Dim(v) for k, v in axes.items() + } + elif isinstance(axes, list): + dynamic_shapes_to_exported_program[model_input_name] = { + k: torch.export.Dim(f"{model_input_name}_dim_{k}") for k in axes + } + else: + raise TypeError( + f"dynamic_axes value must be either a dict or a list, but got {type(axes)}" + ) + # torch.export.export needs static dim to present in dynamic_shapes + # for all input tensors, so we need to add them with None + for input_name in sig.parameters: + if input_name not in dynamic_shapes_to_exported_program: + dynamic_shapes_to_exported_program[input_name] = None # type: ignore[assignment] + + return dynamic_shapes_to_exported_program + + +def _get_torch_export_args( + args: tuple[Any, ...], + kwargs: dict[str, Any] | None, +) -> tuple[tuple[Any, ...], dict[str, Any] | None]: + """Obtain the arguments for torch.onnx.export from the model and the input arguments.""" + if not kwargs and args and isinstance(args[-1], dict): + kwargs = args[-1] + args = args[:-1] + return args, kwargs + + +def export_compat( + model: torch.nn.Module + | torch.export.ExportedProgram + | torch.jit.ScriptModule + | torch.jit.ScriptFunction, + args: tuple[Any, ...], + f: str | os.PathLike | None = None, + *, + kwargs: dict[str, Any] | None = None, + export_params: bool = True, + verbose: bool | None = None, + input_names: Sequence[str] | None = None, + output_names: Sequence[str] | None = None, + opset_version: int | None = None, + dynamic_axes: Mapping[str, Mapping[int, str]] + | Mapping[str, Sequence[int]] + | None = None, + dynamic_shapes: dict[str, Any] | tuple[Any, ...] 
| list[Any] | None = None, + keep_initializers_as_inputs: bool = False, + external_data: bool = True, + report: bool = False, + verify: bool = False, + profile: bool = False, + dump_exported_program: bool = False, + artifacts_dir: str | os.PathLike = ".", + fallback: bool = False, + **_, +) -> _onnx_program.ONNXProgram: + if opset_version is None: + # TODO(justinchuby): Change the hardcoded opset version for it to be flexible + opset_version = 18 + + if isinstance(model, torch.export.ExportedProgram): + # We know the model is already exported program, so the args, kwargs, and dynamic_shapes + # are not used + dynamic_shapes = dynamic_shapes or {} + else: + args, kwargs = _get_torch_export_args(args, kwargs) + if dynamic_shapes is None and dynamic_axes is not None: + dynamic_shapes = _from_dynamic_axes_to_dynamic_shapes( + model, + dynamic_axes=dynamic_axes, + input_names=input_names, + output_names=set(output_names or ()), + ) + + try: + onnx_program = _core.export( + model, + args, + kwargs, + registry=None, + dynamic_shapes=dynamic_shapes, + input_names=input_names, + output_names=output_names, + profile=profile, + report=report, + verify=verify, + dump_exported_program=dump_exported_program, + artifacts_dir=artifacts_dir, + verbose=verbose, + ) + + except Exception as e: + if fallback: + if verbose is not False: + print( + "[torch.onnx] Falling back to legacy torch.onnx.export due " + f"to the following error: {e}", + ) + if f is None: + raise TypeError("f must be provided when fallback is enabled") from e + torch.onnx.utils.export( + model, # type: ignore[arg-type] + args, + f, # type: ignore[arg-type] + kwargs=kwargs, + export_params=export_params, + input_names=input_names, + output_names=output_names, + opset_version=17, # TODO(justinchuby): Hard coded to 17 for now + dynamic_axes=dynamic_axes, + keep_initializers_as_inputs=keep_initializers_as_inputs, + ) + onnx_program = _onnx_program.ONNXProgram(ir.load(f), None) + else: + raise + + # Converter opset version and optimize + onnx_program.model = onnxscript_apis.convert_version( + onnx_program.model, opset_version + ) + onnx_program.model = onnxscript_apis.optimize(onnx_program.model) + + if f is not None: + onnx_program.save( + f, + include_initializers=export_params, + keep_initializers_as_inputs=keep_initializers_as_inputs, + external_data=external_data, + ) + + return onnx_program diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..7d49a654a9c00c9301992e3a9afd33c4af5a35ef --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py @@ -0,0 +1,1341 @@ +# mypy: allow-untyped-defs +# flake8: noqa: B950 We do not need flake8 as it complains line length +from __future__ import annotations + +import ctypes +import datetime +import inspect +import itertools +import logging +import operator +import pathlib +import textwrap +import traceback +import typing +from typing import Any, Callable, Literal, Sequence + +import onnxscript +import onnxscript.evaluator +from onnxscript import ir +from onnxscript.ir import convenience as ir_convenience + +import torch +import torch.fx +from torch.export import graph_signature +from torch.onnx._internal._lazy_import import onnxscript_apis +from torch.onnx._internal.exporter import ( + _analysis, + _building, + _capture_strategies, + _dispatching, + _errors, + _fx_passes, + _ir_passes, + 
+    _onnx_program,
+    _registration,
+    _reporting,
+    _tensors,
+    _verification,
+)
+
+
+if typing.TYPE_CHECKING:
+    import os
+
+    import numpy as np
+
+
+# Define utilities to convert PyTorch data types so users do not need to specify manually
+_TORCH_DTYPE_TO_ONNX: dict[torch.dtype, ir.DataType] = {
+    torch.bfloat16: ir.DataType.BFLOAT16,
+    torch.bool: ir.DataType.BOOL,
+    torch.complex128: ir.DataType.COMPLEX128,
+    torch.complex64: ir.DataType.COMPLEX64,
+    torch.float16: ir.DataType.FLOAT16,
+    torch.float32: ir.DataType.FLOAT,
+    torch.float64: ir.DataType.DOUBLE,
+    torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
+    torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
+    torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
+    torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
+    torch.int16: ir.DataType.INT16,
+    torch.int32: ir.DataType.INT32,
+    torch.int64: ir.DataType.INT64,
+    torch.int8: ir.DataType.INT8,
+    torch.uint8: ir.DataType.UINT8,
+    torch.uint16: ir.DataType.UINT16,
+    torch.uint32: ir.DataType.UINT32,
+    torch.uint64: ir.DataType.UINT64,
+}
+_BLUE = "\033[96m"
+_END = "\033[0m"
+
+_STEP_ONE_ERROR_MESSAGE = textwrap.dedent(
+    f"""\
+    Failed to export the model with torch.export. {_BLUE}This is step 1/2{_END} of exporting the model to ONNX. Next steps:
+    - Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
+    - Debug `torch.export.export` and submit a PR to PyTorch.
+    - Create an issue in the PyTorch GitHub repository against the {_BLUE}*torch.export*{_END} component and attach the full error stack as well as reproduction scripts."""
+)
+
+_STEP_TWO_ERROR_MESSAGE = textwrap.dedent(
+    f"""\
+    Failed to convert the exported program to an ONNX model. {_BLUE}This is step 2/2{_END} of exporting the model to ONNX. Next steps:
+    - If there is a missing ONNX function, implement it and register it to the registry.
+    - If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
+    - Save the ExportedProgram as a pt2 file and create an error report with `export(..., report=True)`. Create an issue in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component. Attach the pt2 model and the error report."""
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _torch_dtype_to_onnx_dtype(dtype: torch.dtype) -> ir.DataType:
+    return _TORCH_DTYPE_TO_ONNX[dtype]
+
+
+class TorchTensor(ir.Tensor):
+    def __init__(self, tensor: torch.Tensor, name: str | None = None):
+        # Pass the tensor as the raw data to ir.Tensor's constructor
+        super().__init__(
+            tensor, dtype=_torch_dtype_to_onnx_dtype(tensor.dtype), name=name
+        )
+
+    def numpy(self) -> np.ndarray:
+        self.raw: torch.Tensor
+        if self.dtype == ir.DataType.BFLOAT16:
+            return self.raw.view(torch.uint16).numpy(force=True)
+        if self.dtype in {
+            ir.DataType.FLOAT8E4M3FN,
+            ir.DataType.FLOAT8E4M3FNUZ,
+            ir.DataType.FLOAT8E5M2,
+            ir.DataType.FLOAT8E5M2FNUZ,
+        }:
+            # TODO: Use ml_dtypes
+            return self.raw.view(torch.uint8).numpy(force=True)
+        return self.raw.numpy(force=True)
+
+    def __array__(self, dtype: Any = None, copy: bool | None = None) -> np.ndarray:
+        del copy  # Unused, but needed for the signature
+        if dtype is None:
+            return self.numpy()
+        return self.numpy().__array__(dtype)
+
+    def tobytes(self) -> bytes:
+        # Implement tobytes to support native PyTorch types so we can use types like bfloat16
+        # Reading from memory directly is also more efficient because
+        # it avoids copying to a NumPy array
+        import torch._subclasses.fake_tensor
+
+        if isinstance(self.raw, torch._subclasses.fake_tensor.FakeTensor):
+            raise TypeError(
+                f"Cannot take content out from the FakeTensor ('{self.name}'). Please replace the tensor "
+                "with a tensor backed by real data using ONNXProgram.apply_weights() "
+                "or save the model without initializers by setting include_initializers=False."
+            )
+        tensor = self.raw.detach().cpu().contiguous()
+        return bytes(
+            (ctypes.c_ubyte * tensor.element_size() * tensor.numel()).from_address(
+                tensor.data_ptr()
+            )
+        )
+
+
+# https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L56C1-L62C19
+# class InputKind(Enum):
+#     USER_INPUT = auto()
+#     PARAMETER = auto()
+#     BUFFER = auto()
+#     CONSTANT_TENSOR = auto()
+#     CUSTOM_OBJ = auto()
+#     TOKEN = auto()
+
+# https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L89C1-L96C19
+# class OutputKind(Enum):
+#     USER_OUTPUT = auto()
+#     LOSS_OUTPUT = auto()
+#     BUFFER_MUTATION = auto()
+#     GRADIENT_TO_PARAMETER = auto()
+#     GRADIENT_TO_USER_INPUT = auto()
+#     USER_INPUT_MUTATION = auto()
+#     TOKEN = auto()
+
+
+def _set_shape_types(
+    values: Sequence[ir.Value],
+    meta_vals: Sequence[torch.Tensor],
+    complex_to_float: bool = True,
+) -> None:
+    if not isinstance(meta_vals, Sequence):
+        logger.warning(
+            "Expected meta_vals to be a sequence, but got %s. There may be an internal error.",
+            meta_vals,
+        )
+        meta_vals = (meta_vals,)
+    for value, meta_val in zip(values, meta_vals):
+        _set_shape_type(value, meta_val, complex_to_float=complex_to_float)
+
+
+def _set_shape_type(
+    value: ir.Value,
+    meta_val: torch.Tensor
+    | torch.SymBool
+    | torch.SymInt
+    | torch.SymFloat
+    | tuple[torch.Tensor],
+    complex_to_float: bool,
+) -> None:
+    # TODO: Consider using meta["tensor_meta"] for this? Would it be faster?
+    if isinstance(meta_val, tuple):
+        logger.warning("Setting shape and type of tensors is not supported yet")
+    if isinstance(meta_val, torch.Tensor):
+        # FIXME: Consider shape for complex values
+        dims = []
+        for dim in meta_val.shape:
+            if isinstance(dim, int):
+                dims.append(dim)
+            else:
+                dims.append(str(dim.node))
+        value.dtype = _torch_dtype_to_onnx_dtype(meta_val.dtype)
+        if complex_to_float:
+            if meta_val.dtype == torch.complex64:
+                value.dtype = ir.DataType.FLOAT
+                # Add 2 as the last dimension if the tensor is complex to hold the real/imag parts
+                dims.append(2)
+            elif meta_val.dtype == torch.complex128:
+                value.dtype = ir.DataType.DOUBLE
+                # Add 2 as the last dimension if the tensor is complex to hold the real/imag parts
+                dims.append(2)
+
+        value.shape = ir.Shape(dims)
+    elif isinstance(meta_val, (int, torch.SymInt)):
+        # aten::sym_size output is an int, not a tensor, which stands
+        # for the size of one dim. We treat it as a scalar.
+        value.dtype = ir.DataType.INT64
+        value.shape = ir.Shape([])
+    elif isinstance(meta_val, (bool, torch.SymBool)):
+        value.dtype = ir.DataType.BOOL
+        value.shape = ir.Shape([])
+    elif isinstance(meta_val, (float, torch.SymFloat)):
+        value.dtype = ir.DataType.FLOAT
+        value.shape = ir.Shape([])
+    else:
+        pass
+
+
+def _get_qualified_module_name(cls: Any) -> str:
+    if isinstance(cls, str):
+        return cls
+    module = cls.__module__
+    if module is None or module == str.__class__.__module__:
+        return cls.__name__
+    return module + "." + cls.__name__
+
+
+def _get_node_namespace(node: torch.fx.Node) -> tuple[str, list[str], list[str]]:
+    """Get the namespace and scope of the node.
+
+    Example::
+
+        {
+            'L__self__': ('', <class 'torchvision.models.resnet.ResNet'>),
+            'L__self___avgpool': ('avgpool', <class 'torch.nn.modules.pooling.AdaptiveAvgPool2d'>)
+        }
+
+        Will yield
+
+        namespace: ": torchvision.models.resnet.ResNet/avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d/node_name: node_target"
+        class_hierarchy: ["torchvision.models.resnet.ResNet", "torch.nn.modules.pooling.AdaptiveAvgPool2d", <node_target>]
+        name_scopes: ["", "avgpool", <node_name>]
+
+    Args:
+        node: The node to get the namespace and scope of.
+
+    Returns:
+        (namespace, class_hierarchy, name_scope)
+    """
+    nn_module_stack = node.meta.get("nn_module_stack")
+    logger.debug("%s", nn_module_stack)
+    if nn_module_stack is None:
+        logger.warning(
+            "nn_module_stack not found for node '%s'. Skip adding metadata...",
+            node.name,
+        )
+        return f"{node.name}: {node.target}", [str(node.target)], [node.name]
+    namespaces = []
+    class_hierarchy = []
+    name_scopes = []
+    for name, nn_module in nn_module_stack.values():
+        name_scopes.append(name)
+        nn_module_name = _get_qualified_module_name(nn_module)
+        class_hierarchy.append(nn_module_name)
+        namespaces.append(f"{name}: {_get_qualified_module_name(nn_module)}")
+    namespaces.append(f"{node.name}: {node.target}")
+    class_hierarchy.append(str(node.target))
+    name_scopes.append(node.name)
+
+    return "/".join(namespaces), class_hierarchy, name_scopes
+
+
+def _set_node_metadata(fx_node: torch.fx.Node, ir_node: ir.Node) -> None:
+    """Adds namespace and other node metadata to the ONNX node."""
+    namespace, class_hierarchy, name_scopes = _get_node_namespace(fx_node)
+    ir_node.metadata_props["namespace"] = namespace
+    ir_node.metadata_props["pkg.torch.onnx.class_hierarchy"] = repr(class_hierarchy)
+    ir_node.metadata_props["pkg.torch.onnx.name_scopes"] = repr(name_scopes)
+    ir_node.metadata_props["pkg.torch.onnx.fx_node"] = str(fx_node.format_node())
+    ir_node.metadata_props["pkg.torch.onnx.stack_trace"] = fx_node.meta.get(
+        "stack_trace", ""
+    )
+
+
+def _handle_getitem_node(
+    node: torch.fx.Node, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]]
+) -> ir.Value:
+    """Handle a getitem node.
+
+    Add the input value it is getting to the mapping, then return the value.
+
+    There are two cases for this node:
+    1. The output is a Sequence (traced), we can simply get the value from the sequence
+    2. The output is produced by a SplitToSequence node, we need to get the value from the sequence value
+    This function only handles the first case
+    """
+    assert len(node.all_input_nodes) == 1
+    source = node.all_input_nodes[0]
+    source_outputs = node_name_to_values[source.name]
+    assert isinstance(
+        source_outputs, Sequence
+    ), f"Expected {source.name} to output sequence, got {node_name_to_values[source.name]}"
+    index = typing.cast(int, node.args[1])
+    value = source_outputs[index]
+    # Save the getitem value to the values mapping in case
+    # it is one of the graph outputs
+    node_name_to_values[node.name] = value
+    # Rename the value with the getitem node's name.
+    value.name = node.name
+    return value
+
+
+def _handle_call_function_node(
+    graph: ir.Graph,
+    node: torch.fx.Node,
+    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
+) -> None:
+    """Handle a call_function node.
+
+    Args:
+        graph: The ONNX graph at construction.
+        node: The FX node to translate.
+        node_name_to_values: A mapping of FX node names to their produced ir.Value.
+ """ + if node.target == operator.getitem: + _handle_getitem_node(node, node_name_to_values) + # Add op to the graph + op = str(node.target) + fx_inputs, attributes, input_names, output_names = _get_inputs_and_attributes(node) + inputs: list[ir.Value | None] = [] + for i, input_ in enumerate(fx_inputs): + if input_ is None: + inputs.append(None) + elif hasattr(input_, "name"): + if isinstance(input_, torch.fx.Node) and input_.target == operator.getitem: + actual_input = _handle_getitem_node(input_, node_name_to_values) + inputs.append(actual_input) + else: + value = node_name_to_values[input_.name] + assert not isinstance(value, Sequence) + inputs.append(value) + else: + attributes[f"arg_{i}"] = input_ + + outputs = [ir.Value(name=name) for name in output_names] + if len(outputs) > 1: + _set_shape_types(outputs, node.meta["val"], complex_to_float=False) + node_name_to_values[node.name] = outputs + else: + _set_shape_type(outputs[0], node.meta["val"], complex_to_float=False) + node_name_to_values[node.name] = outputs[0] + ir_node = ir.Node( + "pkg.torch.ops", + op, + inputs, + attributes=ir_convenience.convert_attributes(attributes), + outputs=outputs, + name=node.name, + ) + ir_node.meta["node"] = node + ir_node.metadata_props["pkg.torch.onnx.input_names"] = repr(input_names) + # Record the nn.Module stack for the node + _set_node_metadata(node, ir_node) + + graph.append(ir_node) + + +def _convert_fx_arg_to_onnx_arg( + arg, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] +) -> Any: + """Convert an FX argument to an ONNX compatible argument. + + This function + - Converts a torch dtype to an integer + - Converts a torch device/memory_format/layout to a string + - Converts a torch.fx.Node to an ir.Value + - Converts a sequence of torch.fx.Node to a sequence of ir.Value + """ + if arg is None: + # None arguments are not modified because when the arg is an ONNX input + # we need to preserve the None value; when the arg is an ONNX attribute, + # we want to drop the value. + # The actual dropping of a None attribute value is done by OpRecorder + return None + if hasattr(arg, "name"): + if isinstance(arg, torch.fx.Node) and arg.target == operator.getitem: + source = arg.all_input_nodes[0] + source_outputs = node_name_to_values[source.name] + if isinstance(source_outputs, Sequence): + # If the node is getting an input from another node, get the actual value the node is retrieving + return _handle_getitem_node(arg, node_name_to_values) + else: + # `source_outputs` is a sequence(tensor()) value and we need to + # use SequenceAt to get the value. 
This is handled by torchlib + pass + # If the input is a node, get the value from the mapping + return node_name_to_values[arg.name] + if isinstance(arg, (list, tuple)): + return [_convert_fx_arg_to_onnx_arg(elem, node_name_to_values) for elem in arg] + if isinstance(arg, (torch.device, torch.memory_format, torch.layout)): + return str(arg) + if isinstance(arg, torch.dtype): + return _torch_dtype_to_onnx_dtype(arg) + # Maybe a Python value + return arg + + +def _get_onnxscript_opset(opset_version: int) -> onnxscript.values.Opset: + return onnxscript.values.Opset("", opset_version) + + +def _handle_call_function_node_with_lowering( + model: ir.Model, + node: torch.fx.Node, + node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]], + constant_farm: dict[Any, ir.Value], + registry: _registration.ONNXRegistry, + opset: onnxscript.values.Opset, +) -> None: + if node.target == operator.getitem: + source = node.all_input_nodes[0] + source_outputs = node_name_to_values[source.name] + if isinstance(source_outputs, Sequence): + _handle_getitem_node(node, node_name_to_values) + return + else: + # `source_outputs` is a sequence(tensor()) value and we need to + # use SequenceAt to get the value. This is handled by torchlib + pass + + # Find the matching ONNX overload for the node + # NOTE: Create different registries for different ONNX opset versions + # TODO: Log the message here to expose false positives + onnx_function, message = _dispatching.dispatch(node, registry) + + if onnx_function is None: + # TODO(justinchuby): Fall back to ATen op or do something else? + raise _errors.DispatchError( + f"No ONNX function found for {node.target!r}. Failure message: {message}" + ) + + # Map FX inputs to ONNX inputs and fill optional inputs. + # torch_args and torch_kwargs are for op-level validation + fx_args = node.args + fx_kwargs = node.kwargs + + # Replace the input FX nodes with ONNX values + onnx_args = [ + _convert_fx_arg_to_onnx_arg(input_, node_name_to_values) for input_ in fx_args + ] + + onnx_kwargs = {} + for key, value in fx_kwargs.items(): + onnx_kwargs[key] = _convert_fx_arg_to_onnx_arg(value, node_name_to_values) + if key == "dtype" and onnx_kwargs[key] is None: + # Set dtype to -1 if it is None + onnx_kwargs[key] = -1 + + with onnxscript.evaluator.default_as( + tracer := _building.OpRecorder(opset, constant_farm) + ): + try: + outputs = onnx_function(*onnx_args, **onnx_kwargs) + except Exception as e: + raise _errors.GraphConstructionError( + f"Error when calling function '{onnx_function}' with args '{onnx_args}' and kwargs '{onnx_kwargs}'" + ) from e + + # NOTE: Instead of using the output names from node.target._schema, + # we always use the index if there are more than one outputs so the + # names can be programmatically reconstructed. This is useful for + # comparing values from the ONNX graph with those from the FX graph. + # + # When there are multiple outputs, the output names will be + # node_name__0, node_name__1, etc. 
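+    # Editor's illustration (assuming a hypothetical FX node named "chunk"
+    # with two outputs): the ONNX values would be named "chunk__0" and
+    # "chunk__1", so the producing node and the output index can be recovered
+    # programmatically by splitting the name on "__".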
+    if isinstance(outputs, Sequence):
+        _set_shape_types(outputs, node.meta["val"], complex_to_float=True)
+        node_name_to_values[node.name] = outputs
+        for i, output in enumerate(outputs):
+            output.name = f"{node.name}__{i}"
+    else:
+        _set_shape_type(outputs, node.meta["val"], complex_to_float=True)
+        node_name_to_values[node.name] = outputs
+        outputs.name = node.name
+
+    for ir_node in tracer.nodes:
+        ir_node.meta["node"] = node
+        # Record the nn.Module stack for the node
+        _set_node_metadata(node, ir_node)
+
+    # Add the traced nodes to the graph
+    model.graph.extend(tracer.nodes)
+    # Add the defined functions to the model
+    for identifier, onnxscript_function in tracer.functions.items():
+        if identifier in model.functions:
+            continue
+        # TODO: Get IR function directly when onnxscript is updated
+        proto = onnxscript_function.to_function_proto()
+        ir_function = ir.serde.deserialize_function(proto)
+        model.functions[identifier] = ir_function
+        if ir_function.domain not in model.opset_imports:
+            # FIXME: Record the correct opset version of the function
+            model.opset_imports[ir_function.domain] = 1
+
+
+def _handle_placeholder_node(
+    node: torch.fx.Node,
+    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
+    *,
+    lower: str,
+    opset: onnxscript.values.Opset,
+) -> None:
+    # Placeholder nodes are user inputs
+    # We need to create a new tensor for each user input
+    # and add it to the graph's inputs
+    name = node.name
+    input_ = _tensors.SymbolicTensor(opset, name=name)
+    input_.meta["node"] = node
+    _set_shape_type(input_, node.meta["val"], complex_to_float=lower != "none")
+    node_name_to_values[name] = input_
+    # The inputs will be added to the graph later
+
+
+def _add_nodes(
+    exported_program: torch.export.ExportedProgram,
+    model: ir.Model,
+    lower: Literal["at_conversion", "post_conversion", "none"],
+    registry: _registration.ONNXRegistry,
+) -> dict[str, ir.Value | Sequence[ir.Value]]:
+    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] = {}
+    constant_farm: dict[Any, ir.Value] = {}
+    opset = _get_onnxscript_opset(registry.opset_version)
+    for node in exported_program.graph.nodes:
+        logger.debug(
+            "%s", (node.name, node.args, node.target, node.op, node.type, node.kwargs)
+        )
+        try:
+            if node.op == "placeholder":
+                _handle_placeholder_node(
+                    node,
+                    node_name_to_values,
+                    lower=lower,
+                    opset=opset,
+                )
+            elif node.op == "call_function":
+                if lower == "at_conversion":
+                    _handle_call_function_node_with_lowering(
+                        model,
+                        node,
+                        node_name_to_values,
+                        constant_farm,
+                        registry=registry,
+                        opset=opset,
+                    )
+                else:
+                    # No lowering
+                    _handle_call_function_node(model.graph, node, node_name_to_values)
+        except Exception as e:
+            raise _errors.ConversionError(
+                f"Error when translating node {node.format_node()}. See the stack trace for more information."
+            ) from e
+    return node_name_to_values
+
+
+def _torch_version_integer() -> int:
+    return int(torch.__version__.replace(".", "").split("dev")[0])
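+# Editor's note: illustrative values, not from the original source. The helper
+# above collapses the version string into an integer, e.g.:
+#
+#   "2.5.1"              -> 251
+#   "2.6.0.dev20240101"  -> 260   (the "dev" suffix and everything after it is dropped)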
+def _get_inputs_and_attributes(
+    node: torch.fx.Node,
+) -> tuple[list[torch.fx.Node | None], dict[str, Any], list[str], list[str]]:
+    """Find the inputs and attributes of the node, filling in kwargs not provided with their default values.
+
+    Returns:
+        (inputs, attributes, input_names, output_names)
+    """
+    if inspect.isbuiltin(node.target) or isinstance(node.target, str):
+        inputs = list(node.args)
+        return inputs, {}, [], [node.name]  # type: ignore[return-value]
+
+    # The target should be an ATen operator now
+    assert hasattr(
+        node.target, "_schema"
+    ), f"The target should be an ATen operator now, but node target {node.target} has no schema"
+    node_schema: torch.FunctionSchema = node.target._schema
+
+    # This function assumes the order of arguments in FX op is the
+    # same as the order of arguments in TorchScript op.
+    inputs: list[Any] = []  # type: ignore[no-redef]
+    input_names: list[str] = []
+    attributes: dict[str, Any] = {}
+
+    if inspect.isbuiltin(node.target):
+        inputs = list(node.args)
+    else:
+        for arg, schema_arg in zip(node.args, node_schema.arguments):
+            if arg is None or isinstance(arg, torch.fx.Node):
+                inputs.append(arg)
+                input_names.append(schema_arg.name)
+            elif isinstance(arg, Sequence) and all(
+                elem is None or isinstance(elem, torch.fx.Node) for elem in arg
+            ):
+                inputs.extend(arg)
+                input_names.extend([schema_arg.name] * len(arg))
+            elif isinstance(arg, torch.device):
+                attributes[schema_arg.name] = str(arg)
+            elif isinstance(arg, torch.dtype):
+                attributes[schema_arg.name] = _torch_dtype_to_onnx_dtype(arg)
+            else:
+                attributes[schema_arg.name] = arg
+        for schema_arg in node_schema.arguments:
+            if schema_arg.name not in node.kwargs:
+                continue
+            kwarg = node.kwargs[schema_arg.name]
+            if schema_arg.name in {
+                "layout",
+                "device",
+                "requires_grad",
+                "memory_format",
+                "implicit",
+            } or isinstance(kwarg, torch.device):
+                attr = str(kwarg)
+            elif isinstance(kwarg, torch.dtype):
+                attr = _torch_dtype_to_onnx_dtype(kwarg)  # type: ignore[assignment]
+            else:
+                attr = kwarg  # type: ignore[assignment]
+
+            attributes[schema_arg.name] = attr
+
+    output_names = [f"{node.name}_{output.name}" for output in node_schema.returns]
+
+    return inputs, attributes, input_names, output_names  # type: ignore[return-value]
+
+
+def _maybe_start_profiler(should_profile: bool) -> Any:
+    if should_profile:
+        import pyinstrument  # type: ignore[import-not-found]
+
+        profiler = pyinstrument.Profiler(async_mode="disabled")
+        profiler.start()
+        return profiler
+    return None
+
+
+def _maybe_stop_profiler_and_get_result(profiler) -> str | None:
+    if profiler is None:
+        return None
+    profiler.stop()
+    return profiler.output_text(unicode=True)
+
+
+def _format_exception(e: Exception) -> str:
+    """Format the full traceback as Python would show it."""
+    return "\n".join(traceback.format_exception(type(e), e, e.__traceback__))
+
+
+def _summarize_exception_stack(e: BaseException) -> str:
+    """Format the exception stack by showing the text of each exception."""
+    causes = [e]
+    while e.__cause__ is not None:
+        causes.append(e.__cause__)
+        e = e.__cause__
+    return (
+        "\n\n## Exception summary\n\n"
+        + "⬆️\n".join([f"{type(e)}: {e}\n" for e in reversed(causes)])
+        + "\n(Refer to the full stack trace above for more information.)"
+    )
+
+
+def _format_exceptions_for_all_strategies(
+    results: list[_capture_strategies.Result],
+) -> str:
+    """Format all the exceptions from the capture strategies."""
+    return "\n".join(
+        [
+            f"# ⚠️ Errors from strategy '{result.strategy}': -----------------------\n\n"
+            f"{_format_exception(result.exception)}\n"
+            for result in results
+            if result.exception is not None
+        ]
+    )
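+# Editor's note: a sketch of the summary produced by _summarize_exception_stack
+# for a hypothetical two-level exception chain (root cause listed first):
+#
+#   ## Exception summary
+#
+#   <class 'ValueError'>: the original failure
+#   ⬆️
+#   <class 'torch.onnx._internal.exporter._errors.ConversionError'>: the wrapper
+#
+#   (Refer to the full stack trace above for more information.)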
+def exported_program_to_ir(
+    exported_program: torch.export.ExportedProgram,
+    *,
+    registry: _registration.ONNXRegistry | None = None,
+    lower: Literal["at_conversion", "post_conversion", "none"] = "at_conversion",
+) -> ir.Model:
+    """Convert an exported program to an ONNX IR model.
+
+    Reference:
+    - ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html
+
+    Args:
+        exported_program: The exported program to convert.
+        lower: Whether to lower the graph to core ONNX operators.
+            at_conversion: Lower when translating the FX graph to ONNX IR.
+            post_conversion: Use an IR pass to lower the graph.
+            none: Do not lower the graph.
+        registry: The registry of all ONNX Script decompositions.
+    """
+    if registry is None:
+        registry = _registration.ONNXRegistry.from_torchlib()
+    if lower != "none":
+        exported_program = _prepare_exported_program_for_export(
+            exported_program, registry=registry
+        )
+    return _exported_program_to_onnx_program(
+        exported_program, registry=registry, lower=lower
+    ).model
+
+
+def _prepare_exported_program_for_export(
+    exported_program: torch.export.ExportedProgram,
+    *,
+    registry: _registration.ONNXRegistry,
+) -> torch.export.ExportedProgram:
+    """Decompose and apply pre-export transformations to the exported program."""
+    # Decompose the graph given the implemented torch ops in ONNX
+    exported_program = _fx_passes.decompose_with_registry(exported_program, registry)
+
+    graph_module = exported_program.graph_module
+    # Include explicit type promotion nodes
+    graph_module = _fx_passes.insert_type_promotion_nodes(graph_module)
+    graph_module = _fx_passes.remove_assertion_nodes(graph_module)
+    # TODO(justinchuby): Reassigning the graph module to save some runtime.
+    # If this does not work, we need to retrace the module with torch.export
+    exported_program._graph_module = graph_module
+    return exported_program
+
+
+def _exported_program_to_onnx_program(
+    exported_program: torch.export.ExportedProgram,
+    *,
+    registry: _registration.ONNXRegistry,
+    lower: Literal["at_conversion", "post_conversion", "none"] = "at_conversion",
+) -> _onnx_program.ONNXProgram:
+    """Convert an exported program to an ONNX Program.
+
+    The exported_program field in the returned ONNXProgram is one that is after
+    decompositions have been applied.
+
+    Reference:
+    - ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html
+
+    Args:
+        exported_program: The exported program to convert. The exported program
+            should be the one that is after decompositions have been applied.
+        lower: Whether to lower the graph to core ONNX operators.
+            at_conversion: Lower when translating the FX graph to ONNX IR.
+            post_conversion: Use an IR pass to lower the graph.
+            none: Do not lower the graph.
+        registry: The registry of all ONNX Script decompositions.
+    """
+    model = ir.Model(
+        graph=ir.Graph(
+            [],
+            [],
+            nodes=[],
+            opset_imports={
+                "": registry.opset_version,
+            },
+            name="main_graph",
+            metadata_props={
+                "pkg.torch.export.ExportedProgram.graph_signature": str(
+                    exported_program.graph_signature
+                ),
+                "pkg.torch.export.ExportedProgram.range_constraints": str(
+                    exported_program.range_constraints
+                ),
+            },
+        ),
+        ir_version=9,
+        producer_name="pytorch",
+        producer_version=torch.__version__,
+    )
+
+    if lower == "none":
+        # Add the opset import for the torch ops
+        model.opset_imports["pkg.torch.ops"] = _torch_version_integer()
+        # NOTE: Function domains are added when translating nodes when lower="at_conversion"
+
+    # 1. Add all nodes to the graph and create a dictionary of values
+    values = _add_nodes(exported_program, model, lower=lower, registry=registry)
+
+    # 2.
Add user inputs and all parameters/buffers to the graph. + # Since the node names and the tensor names are different, we need to rename + # the nodes to match the tensor names later. For now we will just use the node names. + user_inputs = [ + spec + for spec in exported_program.graph_signature.input_specs + if spec.kind == graph_signature.InputKind.USER_INPUT + ] + non_user_inputs = [ + spec + for spec in exported_program.graph_signature.input_specs + if spec.kind != graph_signature.InputKind.USER_INPUT + ] + + for spec in itertools.chain(user_inputs, non_user_inputs): + # Put the user inputs first and then the parameters/buffers + if isinstance(spec.arg, graph_signature.ConstantArgument): + logger.debug("Skipping constant argument %s", spec.arg) + continue + value_name = spec.arg.name + input_kind = spec.kind + persistent = spec.persistent + value = values[value_name] + + assert not isinstance( + value, Sequence + ), f"Input '{value_name}' should not be a sequence. This is unexpected." + + value.metadata_props["pkg.torch.export.graph_signature.InputSpec.kind"] = ( + input_kind.name + ) + value.metadata_props[ + "pkg.torch.export.graph_signature.InputSpec.persistent" + ] = str(persistent) + + if input_kind == graph_signature.InputKind.USER_INPUT: + # Add only user inputs to the graph + # Subsequent passes can decide if they want to add initializers as inputs + model.graph.inputs.append(value) + else: + model.graph.initializers[value_name] = value + + # 3. Add user outputs to the graph and assign metadata to all outputs + user_outputs = [ + spec + for spec in exported_program.graph_signature.output_specs + if spec.kind == graph_signature.OutputKind.USER_OUTPUT + ] + non_user_outputs = [ + spec + for spec in exported_program.graph_signature.output_specs + if spec.kind != graph_signature.OutputKind.USER_OUTPUT + ] + for spec in itertools.chain(user_outputs, non_user_outputs): + if isinstance(spec.arg, graph_signature.ConstantArgument): + logger.warning("Skipping constant argument %s", spec.arg) + continue + value_name = spec.arg.name + output_kind = spec.kind + value = values[value_name] + + if not isinstance(value, (ir.Value, Sequence)): + raise TypeError( + f"Output '{value_name}' should be an ir.Value. Actual type is '{type(value)}': {value!r}. " + "This may be due to an incorrect implementation of the ONNX function that produced this output." + ) + + # The output value may be a sequence, meaning the operator has multiple outputs + _values = (value,) if not isinstance(value, Sequence) else value + + if len(_values) > 1: + logger.warning( + "Model output '%s' has multiple values: %s (output spec: %s). Please make sure this is expected.", + value_name, + _values, + spec, + ) + + for value in _values: + value.metadata_props["pkg.torch.export.graph_signature.OutputSpec.kind"] = ( + output_kind.name + ) + if output_kind == graph_signature.OutputKind.USER_OUTPUT: + model.graph.outputs.append(value) + + # 4. 
Rename the initializers to match the tensor names + for name, param_name in itertools.chain( + exported_program.graph_signature.inputs_to_parameters.items(), + exported_program.graph_signature.inputs_to_buffers.items(), + exported_program.graph_signature.inputs_to_lifted_tensor_constants.items(), + ): + initializer = model.graph.initializers.pop(name) + initializer.name = param_name + # Record the original name so users can search the metadata and correspond + # with the FX graph + initializer.metadata_props["pkg.torch.onnx.original_node_name"] = name + model.graph.initializers[param_name] = initializer + + # 5. Add initializers to the graph + # ExportedProgram stores parameters and buffers in state_dict, + # but non_persistent_buffers and lifted_tensor_constants are not there + # so we need to get them from the name_* apis. + for name, torch_tensor in itertools.chain( + exported_program.named_parameters(), + exported_program.named_buffers(), + exported_program.constants.items(), + ): + initializer = model.graph.initializers.get(name) # type: ignore[assignment] + if initializer is None: + logger.warning("Tensor '%s' is not one of the initializers", name) + continue + if not isinstance(torch_tensor, torch.Tensor): + raise NotImplementedError( + f"Tensor '{name}' should be a torch.Tensor. Actual type is '{type(torch_tensor)}': {torch_tensor!r}. " + "This is unexpected and not yet supported." + ) + ir_tensor = TorchTensor(torch_tensor, name=name) + initializer.const_value = ir_tensor + _set_shape_type( + initializer, + torch_tensor, + complex_to_float=lower != "none", + ) + + # TODO: Decide if we should keep mutated buffers as inputs/outputs + + # TODO(justinchuby): Remove the hack + _ir_passes.add_torchlib_common_imports(model) + + return _onnx_program.ONNXProgram(model, exported_program) + + +def _verbose_printer(verbose: bool | None) -> Callable[..., None]: + """Prints messages based on `verbose`.""" + if verbose is False: + return lambda *_, **__: None + return lambda *args, **kwargs: print("[torch.onnx]", *args, **kwargs) + + +def export( + model: torch.nn.Module + | torch.export.ExportedProgram + | torch.fx.GraphModule + | torch.jit.ScriptModule + | torch.jit.ScriptFunction, + args: tuple[Any, ...] = (), + kwargs: dict[str, Any] | None = None, + *, + registry: _registration.ONNXRegistry | None = None, + dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None, + input_names: Sequence[str] | None = None, + output_names: Sequence[str] | None = None, + report: bool = False, + verify: bool = False, + profile: bool = False, + dump_exported_program: bool = False, + artifacts_dir: str | os.PathLike = ".", + verbose: bool | None = None, +) -> _onnx_program.ONNXProgram: + """Export a PyTorch model to ONNXProgram. + + Args: + model: The model to export. This can be a PyTorch nn.Module or an ExportedProgram. + args: The arguments to pass to the model. + kwargs: The keyword arguments to pass to the model. + registry: The registry of all ONNX decompositions. + dynamic_shapes: Dynamic shapes in the graph. + input_names: If provided, rename the inputs. + output_names: If provided, rename the outputs. + report: Whether to generate an error report if the export fails. + verify: Whether to verify the ONNX model after exporting. + profile: Whether to profile the export process. When report is True, + the profile result will be saved in the report. Otherwise, the profile + result will be printed. + dump_exported_program: Whether to save the exported program to a file. 
+ artifacts_dir: The directory to save the exported program and error reports. + verbose: Whether to print verbose messages. If None (default), some messages will be printed. + + Returns: + The ONNXProgram with the exported IR graph. + + Raises: + TorchExportError: If the export process fails with torch.export. + ConversionError: If the ExportedProgram to ONNX translation fails. + """ + # Set up the error reporting facilities + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f") + profiler = _maybe_start_profiler(profile) + + # Create the artifacts directory if it does not exist + artifacts_dir = pathlib.Path(artifacts_dir) + if report or profile or dump_exported_program: + artifacts_dir.mkdir(parents=True, exist_ok=True) + + verbose_print = _verbose_printer(verbose) + export_status = _reporting.ExportStatus() + failed_results: list[_capture_strategies.Result] = [] + + program: torch.export.ExportedProgram | None = None + # Step 1: Export the model with torch.export.export if the model is not already an ExportedProgram + if isinstance(model, torch.export.ExportedProgram): + # We know the model is already exported program, so the args, kwargs, and dynamic_shapes + # are not used. + program = model + export_status.torch_export = True + else: + # Convert an nn.Module to an ExportedProgram + # Try everything 🐰 (all paths for getting an ExportedProgram) + # When input is a JIT module, the last strategy will succeed so it is handled + result: _capture_strategies.Result | None = None + for strategy_class in _capture_strategies.CAPTURE_STRATEGIES: + strategy = strategy_class( # type: ignore[abstract] + verbose=verbose is not False, # Treat None as verbose + dump=dump_exported_program, + artifacts_dir=artifacts_dir, + timestamp=timestamp, + ) + result = strategy(model, args, kwargs, dynamic_shapes=dynamic_shapes) + + # Record the status + if strategy_class is _capture_strategies.TorchExportStrategy: + export_status.torch_export = result.success + elif strategy_class is _capture_strategies.TorchExportNonStrictStrategy: + export_status.torch_export_non_strict = result.success + elif strategy_class is _capture_strategies.JitTraceConvertStrategy: + export_status.torch_jit = result.success + + if result.exported_program is not None: + program = result.exported_program + break + else: + failed_results.append(result) + + assert result is not None + if result.exported_program is None: + # If all strategies fail, produce an error report and raise the first error + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + if report: + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + + try: + _reporting.create_torch_export_error_report( + report_path, + _format_exceptions_for_all_strategies(failed_results), + export_status=export_status, + profile_result=profile_result, + ) + except Exception as e_report: + verbose_print( + f"Failed to save error report due to an error: {e_report}" + ) + else: + report_path = None + + first_error = failed_results[0].exception + assert first_error is not None + + # NOTE: We only throw the torch.export (first) exception because we want to + # focus on the torch.export.export error. Errors from other strategies like + # torch.jit.trace is due to the fallback and can be confusing to users. + # We save all errors in the error report. + raise _errors.TorchExportError( + _STEP_ONE_ERROR_MESSAGE + + ( + f"\nError report has been saved to '{report_path}'." 
+ if report + else "" + ) + + _summarize_exception_stack(first_error) + ) from first_error + + assert program is not None + + if dump_exported_program: + verbose_print("Dumping ExportedProgram because `dump_exported_program=True`...") + program_path = artifacts_dir / f"onnx_export_{timestamp}.pt2" + try: + torch.export.save(program, program_path) + except Exception as e: + verbose_print(f"Failed to save ExportedProgram due to an error: {e}") + else: + verbose_print(f"ExportedProgram has been saved to '{program_path}'.") + + # Step 2: Convert the exported program to an ONNX model + verbose_print("Translate the graph into ONNX...") + + # Step 2a: Decompose the exported program and insert type promotion nodes + try: + # Build the ONNX function registry + if registry is None: + registry = _registration.ONNXRegistry.from_torchlib() + + # Process the exported program to run decompositions and type promotions etc. + decomposed_program = _prepare_exported_program_for_export( + program, registry=registry + ) + except Exception as e: + export_status.onnx_translation = False + verbose_print("Translate the graph into ONNX... ❌") + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + if report: + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + + # Run the analysis to get the error report + try: + _reporting.create_onnx_export_report( + report_path, + f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}", + program, + export_status=export_status, + profile_result=profile_result, + registry=registry, + ) + except Exception: + logger.exception("Failed to save report due to an error.") + else: + report_path = None + + raise _errors.ConversionError( + _STEP_TWO_ERROR_MESSAGE + + (f"\nError report has been saved to '{report_path}'." if report else "") + + _summarize_exception_stack(e) + ) from e + + # Step 2b: Translate the decomposed program to ONNX and produce ONNXProgram + if report or profile: + pre_decomp_unique_ops, post_decomp_unique_ops = _analysis.compare_ops( + program, decomposed_program + ) + else: + pre_decomp_unique_ops = None + post_decomp_unique_ops = None + + try: + # Convert the exported program to an ONNX model + onnx_program = _exported_program_to_onnx_program( + decomposed_program, registry=registry + ) + + # Run the ONNX passes + if input_names: + _ir_passes.rename_inputs(onnx_program.model, input_names) + if output_names: + _ir_passes.rename_outputs(onnx_program.model, output_names) + + # TODO(justinchuby): Remove the hack + _ir_passes.add_torchlib_common_imports(onnx_program.model) + + export_status.onnx_translation = True + verbose_print("Translate the graph into ONNX... ✅") + except Exception as e: + export_status.onnx_translation = False + verbose_print("Translate the graph into ONNX... 
❌") + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + if report: + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + + # Run the analysis to get the error report + _reporting.create_onnx_export_report( + report_path, + f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}", + program, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + export_status=export_status, + profile_result=profile_result, + registry=registry, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + else: + report_path = None + + raise _errors.ConversionError( + _STEP_TWO_ERROR_MESSAGE + + (f"\nError report has been saved to '{report_path}'." if report else "") + + _summarize_exception_stack(e) + ) from e + + profile_result = _maybe_stop_profiler_and_get_result(profiler) + + assert onnx_program.exported_program is not None + + if not verify: + # Return if verification is not requested + if report: + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + _reporting.create_onnx_export_report( + report_path, + "No errors" + if not failed_results + else _format_exceptions_for_all_strategies(failed_results), + onnx_program.exported_program, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + export_status=export_status, + profile_result=profile_result, + model=onnx_program.model, + registry=registry, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + elif profile and profile_result is not None: + verbose_print("Profile result:") + verbose_print(profile_result) + return onnx_program + + # Step 3: (verify=True) Check the ONNX model with ONNX checker + try: + verbose_print("Check the ONNX model...") + onnxscript_apis.check_model(onnx_program.model) + export_status.onnx_checker = True + verbose_print("Check the ONNX model... ✅") + except Exception as e: + export_status.onnx_checker = False + verbose_print("Check the ONNX model... ❌") + if report: + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + _reporting.create_onnx_export_report( + report_path, + f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}", + onnx_program.exported_program, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + export_status=export_status, + profile_result=profile_result, + model=onnx_program.model, + registry=registry, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + logger.warning( + "Conversion successful but the ONNX model fails ONNX checker. " # noqa: G004 + "Please create an issue " + f"in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component and " + "attach the full error stack as well as reproduction scripts. 
", + exc_info=e, + ) + return onnx_program + + # Step 4: (verify=True) Execute the model with ONNX Runtime + try: + verbose_print("Execute the model with ONNX Runtime...") + verification_results = _verification.verify_onnx_program(onnx_program) + verbose_print("Execute the model with ONNX Runtime... ✅") + export_status.onnx_runtime = True + onnx_runtime_error_message = None + except Exception as e: + verbose_print("Execute the model with ONNX Runtime... ❌") + export_status.onnx_runtime = False + onnx_runtime_error_message = _format_exception(e) + verification_message = None + + else: + # Step 5: (verify=True) Validate the output values + verbose_print("Verify output accuracy...") + export_status.output_accuracy = True + for verification_result in verification_results: + # TODO(justinchuby): The threshold is arbitrary right now + if verification_result.max_abs_diff >= 5e-3: + logger.warning( + "Output '%s' has a large absolute difference of %f. ", + verification_result.name, + verification_result.max_abs_diff, + ) + export_status.output_accuracy = False + if verification_result.max_rel_diff >= 1e-1: + logger.warning( + "Output '%s' has a large relative difference of %f. ", + verification_result.name, + verification_result.max_rel_diff, + ) + export_status.output_accuracy = False + if export_status.output_accuracy: + verbose_print("Verify output accuracy... ✅") + else: + verbose_print("Verify output accuracy... ❌") + verification_message = _reporting.format_verification_infos( + verification_results + ) + + if report: + try: + assert pre_decomp_unique_ops is not None + assert post_decomp_unique_ops is not None + + traceback_lines = [] + if failed_results: + traceback_lines.append( + _format_exceptions_for_all_strategies(failed_results) + ) + if onnx_runtime_error_message: + traceback_lines.append("# ⚠️ ONNX Runtime error -----------------------") + traceback_lines.append(onnx_runtime_error_message) + if not traceback_lines: + traceback_lines.append("No errors") + + report_path = artifacts_dir / _reporting.construct_report_file_name( + timestamp, export_status + ) + _reporting.create_onnx_export_report( + report_path, + "\n\n".join(traceback_lines), + onnx_program.exported_program, + profile_result=profile_result, + export_status=export_status, + decomp_comparison=_reporting.format_decomp_comparison( + pre_decomp_unique_ops, post_decomp_unique_ops + ), + model=onnx_program.model, + registry=registry, + verification_result=verification_message, + ) + verbose_print(f"Export report has been saved to '{report_path}'.") + except Exception: + logger.exception("Failed to save report due to an error.") + + # Release the inference session created during verification + onnx_program.release() + return onnx_program diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbff757e92ef20987b5314ad9a69e73594f6766 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py @@ -0,0 +1,100 @@ +"""Build decomp table from PyTorch.""" + +# mypy: allow-untyped-defs +from __future__ import annotations + +from typing import Callable, TYPE_CHECKING + +import torch +import torch._ops + + +if TYPE_CHECKING: + from torch.onnx._internal.exporter import _registration + + +def get_onnx_implemented_overloads( + registry: _registration.ONNXRegistry, +) -> list[torch._ops.OperatorBase]: + """ + Creates a set of 
OperatorBase and Callable objects that represent ONNX-supported PyTorch operations. + + Args: + registry: The ONNX registry for PyTorch. + + Returns: + A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations. + """ + registered_ops: list[torch._ops.OperatorBase] = [] + for op_namespace in (torch.ops.aten, torch.ops.prims): + op_names = dir(op_namespace) + for op_name in op_names: + op_overload_packet = getattr(op_namespace, op_name) + if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket): + continue + + for overload_name in op_overload_packet.overloads(): + op_overload = getattr(op_overload_packet, overload_name) + if registry.is_registered(op_overload): + registered_ops.append(op_overload) + return registered_ops + + +def get_preserve_ops() -> set[torch._ops.OpOverload]: + """Return a set of CompositeImplicitAutograd ops that should be preserved.""" + aten = torch.ops.aten + # NOTE: Keep this list sorted + # NOTE: Do _not_ retain aten.linear as its decomposition is addmm, which is Gemm and is preferable for accuracy + return { + aten._upsample_bilinear2d_aa.default, + aten._upsample_nearest_exact1d.vec, + aten._upsample_nearest_exact2d.vec, + aten._upsample_nearest_exact3d.vec, + aten.group_norm.default, + aten.instance_norm.default, + aten.upsample_bilinear2d.default, + aten.upsample_bilinear2d.vec, + aten.upsample_linear1d.default, + aten.upsample_linear1d.vec, + aten.upsample_nearest1d.default, + aten.upsample_nearest1d.vec, + aten.upsample_nearest2d.default, + aten.upsample_nearest2d.vec, + aten.upsample_nearest3d.default, + aten.upsample_nearest3d.vec, + aten.upsample_trilinear3d.default, + aten.upsample_trilinear3d.vec, + } + + +def create_onnx_friendly_decomposition_table( + onnx_registered_ops: set[torch._ops.OperatorBase], +) -> dict[torch._ops.OperatorBase, Callable]: + """ + This function creates a dictionary of op overloads and their decomposition functions + for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function, + its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's + built-in aten-to-aten decomposition. + + Args: + onnx_registered_ops: All ops that have an ONNX decomposition implemented. + + Returns: + Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding + decomposition functions. + """ + decomposition_table: dict[torch._ops.OperatorBase, Callable] = {} + + # NOTE: If we import torch._decomp, we will get RuntimeError: Only a single + # TORCH_LIBRARY can be used to register the namespace nvprims; please put all of your + # definitions in a single TORCH_LIBRARY block. + for op_overload, decomp_fn in torch._decomp.decomposition_table.items(): # type: ignore[attr-defined] + # Skip decomposition for op_overload as long as that op_overload has a corresponding ONNX + # symbolic function. + # NOTE: Do not skip torch._refs decomps. They are fine because otherwise the model is + # not exportable anyways. 
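+        # Editor's illustration (not from the original source): an overload
+        # such as `aten.add.Tensor` that has an ONNX function registered is
+        # skipped here and exported directly; an overload without an ONNX
+        # function keeps its aten-to-aten decomposition so it can be rewritten
+        # into ops the exporter does support.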
+ if op_overload in onnx_registered_ops: + continue + decomposition_table[op_overload] = decomp_fn + + return decomposition_table diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_dispatching.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_dispatching.py new file mode 100644 index 0000000000000000000000000000000000000000..11ed1af17aaade760a265daae33e88df0fbaee05 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_dispatching.py @@ -0,0 +1,362 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import logging +from typing import Callable, Sequence + +from onnxscript import ir + +import torch +import torch.fx +from torch.onnx._internal.exporter import _registration, _schemas + + +logger = logging.getLogger(__name__) + +# Define utilities to convert PyTorch data types so users do not need to specify manually +_TORCH_DTYPE_TO_ONNX_COMPATIBLE: dict[torch.dtype, ir.DataType] = { + torch.bfloat16: ir.DataType.BFLOAT16, + torch.bool: ir.DataType.BOOL, + torch.complex128: ir.DataType.DOUBLE, + torch.complex64: ir.DataType.FLOAT, + torch.float16: ir.DataType.FLOAT16, + torch.float32: ir.DataType.FLOAT, + torch.float64: ir.DataType.DOUBLE, + torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN, + torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ, + torch.float8_e5m2: ir.DataType.FLOAT8E5M2, + torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ, + torch.int16: ir.DataType.INT16, + torch.int32: ir.DataType.INT32, + torch.int64: ir.DataType.INT64, + torch.int8: ir.DataType.INT8, + torch.uint8: ir.DataType.UINT8, +} + + +def _torch_dtype_to_onnx_compatible_dtype(dtype: torch.dtype) -> ir.DataType: + return _TORCH_DTYPE_TO_ONNX_COMPATIBLE[dtype] + + +def _attribute_type_compatible_with_arg( + attr: _schemas.AttributeParameter, + value: ir.Value | int | float | bool | Sequence[int] | Sequence[float] | None, +) -> bool: + """Check if the attribute type is compatible with the argument.""" + if isinstance(value, bool): + return attr.type is ir.AttributeType.INT + if isinstance(value, str): + return attr.type is ir.AttributeType.STRING + if isinstance(value, int): + return attr.type in {ir.AttributeType.INT, ir.AttributeType.FLOAT} + if isinstance(value, float): + return attr.type is ir.AttributeType.FLOAT + if isinstance(value, complex): + return False + if isinstance(value, Sequence): + if attr.type is ir.AttributeType.INTS: + return all(isinstance(i, int) for i in value) + if attr.type is ir.AttributeType.FLOATS: + return all(isinstance(i, (int, float)) for i in value) + if isinstance(value, torch.dtype): + return attr.type is ir.AttributeType.INT + if isinstance(value, (torch.device, torch.memory_format, torch.layout)): + return attr.type is ir.AttributeType.STRING + if value is None and not attr.required: + # An optional attribute is not supplied + return True + return False + + +def _param_type_compatible_with_arg( + param: _schemas.Parameter, + value: ir.TypeProtocol + | str + | int + | float + | complex + | Sequence[int] + | Sequence[float] + | None, + assigned_types: dict[str, ir.TypeProtocol], +) -> bool: + # Handle Python types first + if isinstance(value, bool): # noqa: SIM102 + if param.type_constraint.allowed_types & {ir.TensorType(ir.DataType.BOOL)}: + return True + if isinstance(value, int) and param.type_constraint.allowed_types & { + ir.TensorType(ir.DataType.INT4), + ir.TensorType(ir.DataType.INT8), + ir.TensorType(ir.DataType.INT16), + ir.TensorType(ir.DataType.INT32), + 
ir.TensorType(ir.DataType.INT64), + # Int inputs can be casted to a float too + ir.TensorType(ir.DataType.FLOAT8E4M3FN), + ir.TensorType(ir.DataType.FLOAT8E4M3FNUZ), + ir.TensorType(ir.DataType.FLOAT8E5M2), + ir.TensorType(ir.DataType.FLOAT8E5M2FNUZ), + ir.TensorType(ir.DataType.FLOAT16), + ir.TensorType(ir.DataType.FLOAT), + ir.TensorType(ir.DataType.DOUBLE), + }: + return True + if isinstance(value, float) and param.type_constraint.allowed_types & { + ir.TensorType(ir.DataType.FLOAT8E4M3FN), + ir.TensorType(ir.DataType.FLOAT8E4M3FNUZ), + ir.TensorType(ir.DataType.FLOAT8E5M2), + ir.TensorType(ir.DataType.FLOAT8E5M2FNUZ), + ir.TensorType(ir.DataType.FLOAT16), + ir.TensorType(ir.DataType.FLOAT), + ir.TensorType(ir.DataType.DOUBLE), + }: + return True + if isinstance(value, complex) and param.type_constraint.allowed_types & { + ir.TensorType(ir.DataType.FLOAT), + ir.TensorType(ir.DataType.DOUBLE), + ir.TensorType(ir.DataType.COMPLEX64), + ir.TensorType(ir.DataType.COMPLEX128), + }: + return True + if isinstance(value, str): # noqa: SIM102 + if param.type_constraint.allowed_types & {ir.TensorType(ir.DataType.STRING)}: + return True + if isinstance(value, (list, tuple)): + if param.type_constraint.allowed_types & { + ir.TensorType(ir.DataType.INT32), + ir.TensorType(ir.DataType.INT64), + ir.TensorType(ir.DataType.FLOAT), + ir.TensorType(ir.DataType.DOUBLE), + ir.SequenceType(ir.TensorType(ir.DataType.INT32)), + ir.SequenceType(ir.TensorType(ir.DataType.INT64)), + ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)), + ir.SequenceType(ir.TensorType(ir.DataType.DOUBLE)), + } and all(isinstance(i, (int)) for i in value): + # We will just allow any fx node and trust that the overload handles it + return True + if param.type_constraint.allowed_types & { + ir.TensorType(ir.DataType.FLOAT), + ir.TensorType(ir.DataType.DOUBLE), + ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)), + ir.SequenceType(ir.TensorType(ir.DataType.DOUBLE)), + } and all(isinstance(i, (int, float)) for i in value): + # We will just allow any fx node and trust that the overload handles it + return True + if value is None and not param.required: + # An optional parameter is not supplied + return True + + if not isinstance(value, ir.TypeProtocol): + return False + + # Then check tensor types + if param.type_constraint.name in assigned_types: + # If a typevar is already bound, check if the value has the same type + assigned_type = assigned_types[param.type_constraint.name] + return assigned_type == value + # If the typevar is not bound, bind it to the value type + if value in param.type_constraint.allowed_types: + # TODO: Maybe just check dtype? 
Being more strict here for now + assigned_types[param.type_constraint.name] = value + return True + return False + + +def _get_type_from_tensor( + tensor: torch.Tensor + | torch.SymBool + | torch.SymInt + | torch.SymFloat + | Sequence[torch.Tensor], +) -> ir.TypeProtocol: + if isinstance(tensor, torch.Tensor): + return ir.TensorType(_torch_dtype_to_onnx_compatible_dtype(tensor.dtype)) + if isinstance(tensor, torch.SymBool): + return ir.TensorType(ir.DataType.BOOL) + if isinstance(tensor, torch.SymInt): + return ir.TensorType(ir.DataType.INT64) + if isinstance(tensor, torch.SymFloat): + return ir.TensorType(ir.DataType.FLOAT) + + # Handle sequences + first_tensor = next((item for item in tensor if item is not None), None) + if first_tensor is None: + return ir.SequenceType(ir.TensorType(ir.DataType.UNDEFINED)) + return ir.SequenceType( + ir.TensorType(_torch_dtype_to_onnx_compatible_dtype(first_tensor.dtype)) + ) + + +def _get_first_tensor_in_node_list( + nodes: Sequence[torch.fx.Node | None], +) -> torch.Tensor | None: + for node in nodes: + if ( + node is not None + and "val" in node.meta + and isinstance(node.meta["val"], torch.Tensor) + ): + return node.meta["val"] + return None + + +def _get_named_fx_node_args(node: torch.fx.Node) -> dict[str, torch.fx.node.Argument]: + assert hasattr(node.target, "_schema") + torch_schema: torch.FunctionSchema = node.target._schema # type: ignore[union-attr] + node_args = {} + for arg, schema_arg in zip(node.args, torch_schema.arguments): + node_args[schema_arg.name] = arg + + node_args.update(node.kwargs) + return node_args + + +def get_matching_overload( + node: torch.fx.Node, + overloads: Sequence[Callable], +) -> tuple[Callable | None, str]: + """Get the overload that matches the node's arguments. + + Args: + node: The node to match. + overloads: The overloads to match against. + + Returns: + A tuple containing the matched overload and a string describing the reason for failure or success. + """ + if not hasattr(node.target, "_schema"): + # FIXME(justinchuby): When the target is a builtin, we should instead + # Match only the inputs positionally. Figure out how to do that as right + # now we assume all inputs are named. + return overloads[ + 0 + ], "The node target does not have a schema. Return the first one." + named_args = _get_named_fx_node_args(node) + # FIXME: Handle when we don't know the names of the arguments + schema_args: dict[str, torch.Argument] = { + arg.name: arg + for arg in node.target._schema.arguments # type: ignore[union-attr] + } + failure_messages: list[str] = [] + for overload in overloads: + assigned_types: dict[str, ir.TypeProtocol] = {} + fail_reason = "" + if not hasattr(overload, "signature"): + # When an overload does not have a signature, we assume it is a custom op and should be matched + return ( + overload, + "The overload does not have a signature. Assuming it is a custom op and matching it.", + ) + for param in overload.signature: + if param.name not in schema_args and param.required: + # We don't need to handle variadic inputs as there is none. + # A required parameter is not supplied. 
+ fail_reason = "Required parameter not supplied" + break + + # Get the argument + if param.name in named_args: + # Provided in Node args + arg = named_args[param.name] + elif ( + param.name in schema_args + and schema_args[param.name].has_default_value() + ): + # Provided in schema args + arg = schema_args[param.name].default_value + elif param.has_default(): + # Provided in the ONNX op definition + arg = param.default + else: + fail_reason = "Parameter not provided" + break + + if isinstance(param, _schemas.Parameter): + if isinstance(arg, torch.Tensor): + arg = _get_type_from_tensor(arg) # type: ignore[assignment] + if isinstance(arg, (list, tuple)) and any( + isinstance(t, torch.fx.Node) for t in arg + ): + first_tensor = _get_first_tensor_in_node_list(arg) + assert first_tensor is not None + # FIXME: Handle symfloat here + arg = ir.SequenceType(_get_type_from_tensor(first_tensor)) # type: ignore[assignment] + elif isinstance(arg, torch.fx.Node): + meta_val = arg.meta["val"] + arg = _get_type_from_tensor(meta_val) # type: ignore[assignment] + # TODO: Handle None attributes + # FIXME: Handle symfloat etc. + # Handle tensors and Python values + if not _param_type_compatible_with_arg(param, arg, assigned_types): # type: ignore[arg-type] + fail_reason = ( + f"Parameter type not compatible with argument: param=`{param}`, " + f"assigned_types=`{assigned_types}`, arg=`{arg}`" + ) + break + elif isinstance(param, _schemas.AttributeParameter): + if not _attribute_type_compatible_with_arg(param, arg): # type: ignore[arg-type] + fail_reason = f"Attribute type not compatible with argument: param=`{param}`, arg=`{arg}`" + break + if not fail_reason: + return overload, "Successfully matched overload" + else: + failure_messages.append( + f"- Failed to match overload `{overload}`: {fail_reason}" + ) + return ( + None, + f"All overloads did not match the node `{node.format_node()}`.\n" + + "\n".join(failure_messages), + ) + + +def _arg_has_complex_dtype(arg) -> bool: + """Check if the node has complex dtype recursively.""" + if ( + isinstance(arg, torch.fx.Node) + and "val" in arg.meta + and isinstance(arg.meta["val"], torch.Tensor) + and torch.is_complex(arg.meta["val"]) + ): + return True + elif isinstance(arg, list): + return any(_arg_has_complex_dtype(item) for item in arg) + return False + + +def dispatch( + node: torch.fx.Node, registry: _registration.ONNXRegistry +) -> tuple[Callable | None, str]: + """Dispatch a node to an ONNX function based on the node's target and the ONNX registry. + + Args: + node: The node to dispatch. + registry: The ONNX registry to use for dispatching. + + Returns: + A tuple containing the matched ONNX function and a string describing the reason for failure or success. + """ + # TODO: Handle when node does not have a target + decomp_metas = registry.get_decomps(node.target) # type: ignore[arg-type] + # Determine if the node has complex inputs. 
+ is_complex = any(_arg_has_complex_dtype(arg) for arg in node.args) or any( + _arg_has_complex_dtype(arg) for arg in node.kwargs.values() + ) + if is_complex: + decomp_metas = [decomp for decomp in decomp_metas if decomp.is_complex] + if not decomp_metas: + return None, "No decompositions registered for the complex-valued input" + else: + decomp_metas = [decomp for decomp in decomp_metas if not decomp.is_complex] + if not decomp_metas: + return None, "No decompositions registered for the real-valued input" + + if len(decomp_metas) == 1: + return ( + decomp_metas[0].onnx_function, + "Fast path: Only one decomposition is defined", + ) + + overload, message = get_matching_overload( + node, [decomp.onnx_function for decomp in decomp_metas] + ) + return overload, message diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..ff41bbe695fe7d0ebe60c40014332abc36430d0f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py @@ -0,0 +1,21 @@ +"""Error classes for the ONNX exporter.""" + +from __future__ import annotations + +import torch.onnx.errors + + +class TorchExportError(torch.onnx.errors.OnnxExporterError): + """Error during graph capturing using torch.export.""" + + +class ConversionError(torch.onnx.errors.OnnxExporterError): + """Error during ExportedProgram to ONNX conversion.""" + + +class DispatchError(ConversionError): + """Error during ONNX Function dispatching.""" + + +class GraphConstructionError(ConversionError): + """Error during ONNX graph construction.""" diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py new file mode 100644 index 0000000000000000000000000000000000000000..7e8748443e2b1318f4d57d2e034897581c90f24a --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py @@ -0,0 +1,41 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import logging +from typing import Sequence + +from onnxscript import ir + + +logger = logging.getLogger(__name__) + + +def rename_inputs(model: ir.Model, new_names: Sequence[str]) -> None: + # TODO: Ensure the names do not have duplicates + for input, new_name in zip(model.graph.inputs, new_names): + input.metadata_props["pkg.torch.onnx.original_node_name"] = str(input.name) + input.name = new_name + + +def rename_outputs(model: ir.Model, new_names: Sequence[str]) -> None: + for output, new_name in zip(model.graph.outputs, new_names): + output.metadata_props["pkg.torch.onnx.original_node_name"] = str(output.name) + output.name = new_name + + +def add_torchlib_common_imports(model: ir.Model) -> None: + """Hack to add torchlib common imports to the model.""" + + try: + # TODO(justinchuby): Remove this hack and improved onnxscript + from onnxscript.function_libs.torch_lib.ops import common as common_ops + + model.opset_imports["pkg.onnxscript.torch_lib.common"] = 1 + rank_func = ir.serde.deserialize_function(common_ops.Rank.to_function_proto()) + is_scalar_func = ir.serde.deserialize_function( + common_ops.IsScalar.to_function_proto() + ) + model.functions[rank_func.identifier()] = rank_func + model.functions[is_scalar_func.identifier()] = is_scalar_func + except Exception: + logger.exception("Failed to add torchlib common imports to the model.") diff 
--git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_onnx_program.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_onnx_program.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb62a5961f87516a12657a41fc777f0b7ddd8b1 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_onnx_program.py @@ -0,0 +1,317 @@ +# mypy: allow-untyped-defs +# mypy: disable-error-code="attr-defined,name-defined" +from __future__ import annotations + + +__all__ = ["ONNXProgram"] + +import copy +import gc +import logging +import os +import tempfile +import textwrap +import warnings +from typing import Callable, Sequence, TYPE_CHECKING + +import torch +from torch.onnx._internal._lazy_import import onnx, onnxscript_apis, onnxscript_ir as ir +from torch.utils import _pytree + + +# NOTE: DO NOT import module from torch.onnx._internal to this module in the global scope +# because ONNXProgram is exposed to the public API + +if TYPE_CHECKING: + import onnxruntime as ort + +_LARGE_MODEL_THRESHOLD = 1536 * 1024 * 1024 # 1536MB + +logger = logging.getLogger(__name__) + + +def _ort_session_initializer(model: str | bytes) -> ort.InferenceSession: + """Initialize an ONNX Runtime inference session with the specified model.""" + import onnxruntime as ort + + session_options = ort.SessionOptions() + session_options.log_severity_level = 3 # 3: Error + possible_providers = ( + "CUDAExecutionProvider", + "CPUExecutionProvider", + ) + available_providers = set(ort.get_available_providers()) + providers = [ + provider for provider in possible_providers if provider in available_providers + ] + return ort.InferenceSession( + model, providers=providers, sess_options=session_options + ) + + +def _count_initializer_size(graph: ir.Graph) -> int: + """Count the total size of the initializers in bytes.""" + return sum( + v.const_value.nbytes + for v in graph.initializers.values() + if v.const_value is not None + ) + + +class ONNXProgram: + """A class to represent an ONNX program that is callable with torch tensors.""" + + def __init__( + self, model: ir.Model, exported_program: torch.export.ExportedProgram | None + ): + """Initialize the ONNX program with the specified model and exported program. + Args: + model: The ONNX model. + exported_program: The exported program that produced the ONNX model. Optional. 
+ """ + self.model: ir.Model = model + self.exported_program = exported_program + self._inference_session: ort.InferenceSession | None = None + self._tempdir: tempfile.TemporaryDirectory | None = None + + def __repr__(self) -> str: + return f"""\ +ONNXProgram( + model= +{textwrap.indent(str(self.model), ' ' * 8)} + , + exported_program= +{textwrap.indent(str(self.exported_program), ' ' * 8)} +) +""" + + def __call__(self, *args, **kwargs) -> Sequence[torch.Tensor]: + """Run the ONNX model with the same arguments you would provide to the GraphModule.""" + import onnxruntime as ort + + flatten_args = _process_args(args, kwargs) + + if self._inference_session is None: + self.initialize_inference_session() + + assert self._inference_session is not None + + # We don't expect non-tensor as inputs + ort_input = { + k.name: v.numpy(force=True) + for k, v in zip(self.model.graph.inputs, flatten_args) + } + run_options = ort.RunOptions() + run_options.log_severity_level = 3 # 3: Error + logger.debug("Running the inference session with %s arguments.", len(ort_input)) + outputs = self._inference_session.run(None, ort_input, run_options=run_options) + logger.debug("Inference session run completed.") + # TODO(justinchuby): Maybe output complex tensors as needed + return tuple(torch.from_numpy(output) for output in outputs) + + @property + def model_proto(self) -> onnx.ModelProto: + """Compatibility property for `torch.onnx.ONNXProgram.model_proto`.""" + return ir.serde.serialize_model(self.model) + + def save( + self, + destination: str | os.PathLike, + *, + include_initializers: bool = True, + keep_initializers_as_inputs: bool = False, + external_data: bool | None = None, + **_, + ): + """Save the ONNX model to the specified destination. + + When `external_data` is `True` or the model is larger than 2GB, + the weights are saved as external data in a separate file. + + Initializer (model weights) serialization behaviors: + - include_initializers=True, keep_initializers_as_inputs=False (default): + The initializers are included in the saved model. + - include_initializers=True, keep_initializers_as_inputs=True: + The initializers are included in the saved model and kept as model inputs. + Choose this option if you want the ability to override the model weights + during inference. + - include_initializers=False, keep_initializers_as_inputs=False: + The initializers are not included in the saved model and are not listed + as model inputs. Choose this option if you want to attach the initializers + to the ONNX model in a separate, post-processing, step. + - include_initializers=False, keep_initializers_as_inputs=True: + The initializers are not included in the saved model but are listed as model + inputs. Choose this option if you want to supply the initializers during + inference and want to minimize the size of the saved model. + + Args: + destination: The path to save the ONNX model to. + include_initializers: Whether to include the initializers in the saved model. + keep_initializers_as_inputs: Whether to keep the initializers as inputs in the saved model. + If `True`, the initializers are added as inputs to the model which means they can be overwritten. + by providing the initializers as model inputs. + external_data: Whether to save the weights as external data in a separate file. + + Raises: + TypeError: If `external_data` is `True` and `destination` is not a file path. 
+ """ + original_initializers = copy.copy(self.model.graph.initializers) + original_inputs = copy.copy(self.model.graph.inputs) + + # Adjust the model based on options + if not include_initializers: + self.model.graph.initializers.clear() + if keep_initializers_as_inputs: + self.model.graph.inputs.extend(original_initializers.values()) # type: ignore[arg-type] + + # Save the model to disk + if ( + external_data + or _count_initializer_size(self.model.graph) > _LARGE_MODEL_THRESHOLD + ): + onnxscript_apis.save_model_with_external_data(self.model, destination) + else: + ir.save(self.model, destination) + + # Revert the changes to the model + if not include_initializers: + self.model.graph.initializers.update(original_initializers) + if keep_initializers_as_inputs: + self.model.graph.inputs.clear() + self.model.graph.inputs.extend(original_inputs) + + def apply_weights(self, state_dict: dict[str, torch.Tensor]) -> None: + """Apply the weights from the specified state dict to the ONNX model. + Args: + state_dict: The state dict containing the weights to apply to the ONNX model. + """ + from torch.onnx._internal.exporter import _core + + for name, tensor in state_dict.items(): + if name in self.model.graph.initializers: + self.model.graph.initializers[name].const_value = _core.TorchTensor( + tensor, name + ) + else: + warnings.warn( + f"Weight '{name}' not found in the model. Skipped applying.", + category=torch.onnx.errors.OnnxExporterWarning, + stacklevel=1, + ) + + def initialize_inference_session( + self, + initializer: Callable[ + [str | bytes], ort.InferenceSession + ] = _ort_session_initializer, + ) -> None: + """Initialize the ONNX Runtime inference session. + + Args: + initializer: The function to initialize the ONNX Runtime inference + session with the specified model. By default, it uses the + :func:`_ort_session_initializer` function. + """ + # TODO(justinchuby): Allow different inference options + logger.debug("Initializing the inference session.") + if ( + byte_size := _count_initializer_size(self.model.graph) + ) > _LARGE_MODEL_THRESHOLD: + logger.debug("The model initializers is larger than 1.5GB (%s).", byte_size) + # Save the model to a temporary file if too large + self._tempdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True) + model_path = os.path.join(self._tempdir.name, "model.onnx") + self.save(model_path, external_data=True) + model = model_path + else: + model = self.model_proto.SerializeToString() # type: ignore[assignment] + + self._inference_session = initializer(model) + logger.debug("Inference session initialized.") + + def release(self) -> None: + """Release the inference session. + + You may call this method to release the resources used by the inference session. 
+ """ + # Release the inference session first so that the model file can be deleted + if self._inference_session is not None: + self._inference_session = None + gc.collect() + if self._tempdir is not None: + self._tempdir.cleanup() + self._tempdir = None + + +def _process_args(args, kwargs) -> tuple[torch.Tensor, ...]: + """Process input arguments for the ONNX model.""" + args = _flatten_inputs(args, kwargs) + args = _remove_none_from_inputs(args) + args = _remove_non_tensor(args) + args = _convert_complex_to_real_representation(args) + return args + + +def _flatten_inputs(model_args, model_kwargs): + flattened_args, _ = _pytree.tree_flatten((model_args, model_kwargs)) + return flattened_args + + +def _remove_none_from_inputs(model_args): + return tuple(arg for arg in model_args if arg is not None) + + +def _remove_non_tensor(model_args): + """Remove the non-tensor input arguments. + + Dynamo does not support non-tensor input arguments (https://github.com/pytorch/pytorch/issues/99534). + + Specifically, it does put the input into graph with an empty node, but consumed by no ones. + The concrete value is embedded into the graph as a constant arg of a target node. Meta + suggests in this case that one should rewrite the model code to make it tensor if the + input value is supposed to change at runtime. We might need to further investigate + the feasibility of that suggestion. + + For example, + + def func(x, b=1.0): + y = x + b + z = y.relu() + return (y, z) + + x = torch.randn(1, 1, 2, dtype=torch.float32) + gm_fun, _ = dynamo.export(func, x, b=8.0, aten_graph=True, tracing_mode="real") + + # class GraphModule(torch.nn.Module): + # def forward(self, x, b): + # arg0: f32[1, 1, 2], arg1, = fx_pytree.tree_flatten_spec(([x, b], {}), self._in_spec) + # # File: path/to/pytorch/test_constant_input.py:5, code: y = x + b + # add_tensor: f32[1, 1, 2] = torch.ops.aten.add.Tensor(arg0, 8.0); arg0 = None + + # # File: path/to/pytorch/test_constant_input.py:6, code: z = y.relu() + # relu_default: f32[1, 1, 2] = torch.ops.aten.relu.default(add_tensor) + # return pytree.tree_unflatten([add_tensor, relu_default], self._out_spec) + + Empty torch.fx.Node input leading to a mismatched number of input with PyTorch, as + it's ignored in ONNX graph. Thus, we delete the useless input here. + + """ + + return tuple( + arg for arg in model_args if not isinstance(arg, (int, float, bool, str)) + ) + + +def _convert_complex_to_real_representation(model_args): + """Convert complex dtype tensors to real representation tensors. + + ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors + to real representation tensors (i.e., float dtype tensors with an extra dimension + representing the real and imaginary parts of the complex number). 
+ """ + return tuple( + torch.view_as_real(arg.resolve_conj()) + if isinstance(arg, torch.Tensor) and arg.is_complex() + else arg + for arg in model_args + ) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_reporting.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_reporting.py new file mode 100644 index 0000000000000000000000000000000000000000..86c79df97430a6b91faf41bf8f40637020d8779f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_reporting.py @@ -0,0 +1,194 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import dataclasses +import re +from typing import TYPE_CHECKING + +from torch.onnx._internal.exporter import _analysis, _registration, _verification + + +if TYPE_CHECKING: + import os + + from onnxscript import ir + + import torch + + +@dataclasses.dataclass +class ExportStatus: + # Whether torch.export.export.export() succeeds + torch_export: bool | None = None + # Whether torch.export.export.export(..., strict=False) succeeds + torch_export_non_strict: bool | None = None + # Whether torch.jit.trace succeeds + torch_jit: bool | None = None + # Whether ONNX translation succeeds + onnx_translation: bool | None = None + # Whether ONNX model passes onnx.checker.check_model + onnx_checker: bool | None = None + # Whether ONNX model runs successfully with ONNX Runtime + onnx_runtime: bool | None = None + # Whether the output of the ONNX model is accurate + output_accuracy: bool | None = None + + +def _status_emoji(status: bool | None) -> str: + if status is None: + return "⚪" + return "✅" if status else "❌" + + +def _format_export_status(status: ExportStatus) -> str: + return ( + f"```\n" + f"{_status_emoji(status.torch_export)} Obtain model graph with `torch.export.export`\n" + f"{_status_emoji(status.torch_export_non_strict)} Obtain model graph with `torch.export.export(..., strict=False)`\n" + f"{_status_emoji(status.torch_jit)} Obtain model graph with `torch.jit.trace`\n" + f"{_status_emoji(status.onnx_translation)} Translate the graph into ONNX\n" + f"{_status_emoji(status.onnx_checker)} Run `onnx.checker` on the ONNX model\n" + f"{_status_emoji(status.onnx_runtime)} Execute the model with ONNX Runtime\n" + f"{_status_emoji(status.output_accuracy)} Validate model output accuracy\n" + f"```\n\n" + ) + + +def _strip_color_from_string(text: str) -> str: + # This regular expression matches ANSI escape codes + # https://github.com/pytorch/pytorch/blob/9554a9af8788c57e1c5222c39076a5afcf0998ae/torch/_dynamo/utils.py#L2785-L2788 + ansi_escape = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]") + return ansi_escape.sub("", text) + + +def _format_exported_program(exported_program: torch.export.ExportedProgram) -> str: + # Adapted from https://github.com/pytorch/pytorch/pull/128476 + # to remove colors + # Even though we can call graph_module.print_readable directly, since the + # colored option was added only recently, we can't guarantee that the + # version of PyTorch used by the user has this option. Therefore, we + # still call str(ExportedProgram) + text = f"```python\n{_strip_color_from_string(str(exported_program))}\n```\n\n" + return text + + +def construct_report_file_name(timestamp: str, status: ExportStatus) -> str: + # Status could be None. So we need to check for False explicitly. 
+    if not (status.torch_export or status.torch_export_non_strict or status.torch_jit):
+        # All strategies failed
+        postfix = "pt_export"
+    elif status.onnx_translation is False:
+        postfix = "conversion"
+    elif status.onnx_checker is False:
+        postfix = "checker"
+    elif status.onnx_runtime is False:
+        postfix = "runtime"
+    elif status.output_accuracy is False:
+        postfix = "accuracy"
+    elif status.torch_export is False or status.torch_export_non_strict is False:
+        # Some strategies failed
+        postfix = "strategies"
+    else:
+        postfix = "success"
+    return f"onnx_export_{timestamp}_{postfix}.md"
+
+
+def format_decomp_comparison(
+    pre_decomp_unique_ops: set[str],
+    post_decomp_unique_ops: set[str],
+) -> str:
+    """Format the decomposition comparison result.
+
+    Args:
+        pre_decomp_unique_ops: The ops that appear only in the program before decomposition.
+        post_decomp_unique_ops: The ops that appear only in the program after decomposition.
+
+    Returns:
+        The formatted comparison result.
+    """
+    return (
+        f"Ops that exist only in the ExportedProgram before decomposition: `{sorted(pre_decomp_unique_ops)}`\n\n"
+        f"Ops that exist only in the ExportedProgram after decomposition: `{sorted(post_decomp_unique_ops)}`\n"
+    )
+
+
+def format_verification_infos(
+    verification_infos: list[_verification.VerificationInfo],
+) -> str:
+    """Format the verification results.
+
+    Args:
+        verification_infos: The verification results to format.
+
+    Returns:
+        The formatted verification results.
+    """
+    return "\n".join(
+        f"`{info.name}`: `max_abs_diff={info.max_abs_diff:e}`, `max_rel_diff={info.max_rel_diff:e}`, "
+        f"`abs_diff_hist={info.abs_diff_hist}`, `rel_diff_hist={info.rel_diff_hist}`"
+        for info in verification_infos
+    )
+
+
+def create_torch_export_error_report(
+    filename: str | os.PathLike,
+    formatted_traceback: str,
+    *,
+    export_status: ExportStatus,
+    profile_result: str | None,
+):
+    with open(filename, "w", encoding="utf-8") as f:
+        f.write("# PyTorch ONNX Conversion Error Report\n\n")
+        f.write(_format_export_status(export_status))
+        f.write("Error message:\n\n")
+        f.write("```pytb\n")
+        f.write(formatted_traceback)
+        f.write("```\n\n")
+        if profile_result is not None:
+            f.write("## Profiling result\n\n")
+            f.write("```\n")
+            f.write(profile_result)
+            f.write("```\n")
+
+
+def create_onnx_export_report(
+    filename: str | os.PathLike,
+    formatted_traceback: str,
+    program: torch.export.ExportedProgram,
+    *,
+    decomp_comparison: str | None = None,
+    export_status: ExportStatus,
+    profile_result: str | None,
+    model: ir.Model | None = None,
+    registry: _registration.ONNXRegistry | None = None,
+    verification_result: str | None = None,
+):
+    with open(filename, "w", encoding="utf-8") as f:
+        f.write("# PyTorch ONNX Conversion Report\n\n")
+        f.write(_format_export_status(export_status))
+        f.write("## Error messages\n\n")
+        f.write("```pytb\n")
+        f.write(formatted_traceback)
+        f.write("\n```\n\n")
+        f.write("## Exported program\n\n")
+        f.write(_format_exported_program(program))
+        if model is not None:
+            f.write("## ONNX model\n\n")
+            f.write("```python\n")
+            f.write(str(model))
+            f.write("\n```\n\n")
+        f.write("## Analysis\n\n")
+        _analysis.analyze(program, file=f, registry=registry)
+        if decomp_comparison is not None:
+            f.write("\n## Decomposition comparison\n\n")
+            f.write(decomp_comparison)
+            f.write("\n")
+        if verification_result is not None:
+            f.write("\n## Verification results\n\n")
+            f.write(verification_result)
+            f.write("\n")
+        if profile_result is not None:
+            f.write("\n## Profiling result\n\n")
+            f.write("```\n")
+            f.write(profile_result)
+            f.write("```\n")
diff --git 
a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_tensors.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_tensors.py new file mode 100644 index 0000000000000000000000000000000000000000..2fdafacbe06f4b811d21d6de7720d95f278c0c92 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_tensors.py @@ -0,0 +1,95 @@ +"""Subclass of ir.Value that supports Python operators.""" + +# mypy: allow-untyped-defs +from __future__ import annotations + +import onnxscript +from onnxscript import ir + + +class SymbolicTensor(ir.Value): + """A subclass of ir.Value that supports Python operators.""" + + def __init__( + self, + opset: onnxscript.values.Opset, + name: str | None = None, + shape: ir.Shape | None = None, + type: ir.TypeProtocol | None = None, + doc_string: str | None = None, + const_value: ir.TensorProtocol | None = None, + ): + super().__init__( + name=name, + shape=shape, + type=type, + doc_string=doc_string, + const_value=const_value, + ) + self._opset = opset + + @property + def rank(self) -> int | None: + if self.shape is None: + return None + return len(self.shape) + + # TODO: Implement indexing + + def __mod__(self, other): + if self.dtype in { + ir.DataType.FLOAT, + ir.DataType.DOUBLE, + ir.DataType.FLOAT16, + ir.DataType.BFLOAT16, + }: + return self._opset.Mod(self, other, fmod=1) + return self._opset.Mod(self, other) + + def __ne__(self, other): + return self._opset.Not(self._opset.Equal(self, other)) + + def __neg__(self): + return self._opset.Neg(self) + + def __add__(self, other): + return self._opset.Add(self, other) + + def __radd__(self, other): + return self._opset.Add(other, self) + + def __rand__(self, other): + return self._opset.And(other, self) + + def __mul__(self, other): + return self._opset.Mul(self, other) + + def __rmul__(self, other): + return self._opset.Mul(other, self) + + def __matmul__(self, other): + return self._opset.MatMul(self, other) + + def __pow__(self, other): + return self._opset.Pow(self, other) + + def __sub__(self, other): + return self._opset.Sub(self, other) + + def __rsub__(self, other): + return self._opset.Sub(other, self) + + def __truediv__(self, other): + return self._opset.Div(self, other) + + def __lt__(self, other): + return self._opset.Less(self, other) + + def __le__(self, other): + return self._opset.LessOrEqual(self, other) + + def __ge__(self, other): + return self._opset.GreaterOrEqual(self, other) + + def __gt__(self, other): + return self._opset.Greater(self, other) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_testing.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..19f0c73734839d9a5abe5ee5d4bdc945d45dd64e --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_testing.py @@ -0,0 +1,66 @@ +"""Test utilities for ONNX export.""" + +from __future__ import annotations + + +__all__ = ["assert_onnx_program"] + +from typing import Any, TYPE_CHECKING + +import torch +from torch.utils import _pytree + + +if TYPE_CHECKING: + from torch.onnx._internal.exporter import _onnx_program + + +def assert_onnx_program( + program: _onnx_program.ONNXProgram, + *, + rtol: float | None = None, + atol: float | None = None, + args: tuple[Any, ...] | None = None, + kwargs: dict[str, Any] | None = None, +) -> None: + """Assert that the ONNX model produces the same output as the PyTorch ExportedProgram. 
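+
+    The ONNX model is run with ONNX Runtime on the provided (or example) inputs,
+    and its outputs are compared against those of the eager PyTorch module using
+    ``torch.testing.assert_close``.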
+ Args: + program: The ``ONNXProgram`` to verify. + rtol: Relative tolerance. + atol: Absolute tolerance. + args: The positional arguments to pass to the program. + If None, the default example inputs in the ExportedProgram will be used. + kwargs: The keyword arguments to pass to the program. + If None, the default example inputs in the ExportedProgram will be used. + """ + exported_program = program.exported_program + if exported_program is None: + raise ValueError( + "The ONNXProgram does not contain an ExportedProgram. " + "To verify the ONNX program, initialize ONNXProgram with an ExportedProgram, " + "or assign the ExportedProgram to the ONNXProgram.exported_program attribute." + ) + if args is None and kwargs is None: + # User did not provide example inputs, use the default example inputs + if exported_program.example_inputs is None: + raise ValueError( + "No example inputs provided and the exported_program does not contain example inputs. " + "Please provide arguments to verify the ONNX program." + ) + args, kwargs = exported_program.example_inputs + if args is None: + args = () + if kwargs is None: + kwargs = {} + torch_module = exported_program.module() + torch_outputs, _ = _pytree.tree_flatten(torch_module(*args, **kwargs)) + onnx_outputs = program(*args, **kwargs) + # TODO(justinchuby): Include output names in the error message + torch.testing.assert_close( + tuple(onnx_outputs), + tuple(torch_outputs), + rtol=rtol, + atol=atol, + equal_nan=True, + check_device=False, + ) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/__init__.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b5716bdafced78fc8e6eb9aa07c3799b6893cfe1 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/__init__.py @@ -0,0 +1,8 @@ +from .patcher import ONNXTorchPatcher +from .serialization import save_model_with_external_data + + +__all__ = [ + "save_model_with_external_data", + "ONNXTorchPatcher", +] diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/_pass.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..5246788756f3ed67cd697b489d89045767ffde5e --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/_pass.py @@ -0,0 +1,323 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +import contextlib +import dataclasses +import difflib +import io +import logging +import sys +from typing import Any, Callable, TYPE_CHECKING + +import torch +import torch.fx +from torch._subclasses.fake_tensor import unset_fake_temporarily +from torch.onnx._internal.fx import diagnostics, onnxfunction_dispatcher + + +if TYPE_CHECKING: + from torch._subclasses import fake_tensor + + +@dataclasses.dataclass +class PackageInfo: + package_name: str + version: str | None + commit_hash: str | None + + def to_onnx_domain_string(self) -> str: + return ".".join( + filter(None, ("pkg", self.package_name, self.version, self.commit_hash)) + ) + + @classmethod + def from_python_class(cls, python_class_name: type | str) -> PackageInfo: + if isinstance(python_class_name, type): + python_class_name = python_class_name.__module__ + package_name = python_class_name.split(".")[0] + package = __import__(package_name) + version = getattr(package, "__version__", None) + # TODO: Figure out how to retrieve commit hash. 
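+        # Until a reliable lookup exists, leave the hash unset;
+        # to_onnx_domain_string() drops None fields when building the domain.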
+ commit_hash = None + return cls(package_name, version, commit_hash) + + +@dataclasses.dataclass +class GraphModuleOnnxMeta: + package_info: PackageInfo + + +@contextlib.contextmanager +def _patch_difflib_sequence_matcher_init(): + """Context patching `difflib.SequenceMatcher` for fx readable graph. + + Under this context, the `autojunk` argument of `difflib.SequenceMatcher` will always + be considered as `False`. This is to prevent `difflib.SequenceMatcher` recognizing + stacktrace messages in fx readable graph as junk, as these messages tend to be long (>200) + and repeat multiple times, which falls under the junk filter criteria. + + `difflib.SequenceMatcher` is used underneath by all sorts of diffing functions + in `difflib`, including `difflib.unified_diff`, `difflib.ndiff`, `difflib.context_diff`. + Unfortunately, there is no way to pass `autojunk` argument to these functions, and + they all default to `True`. This context patching will affect all of them. + + `Reference: Automatic junk heuristic `_ + """ + original_init = difflib.SequenceMatcher.__init__ + + def patched_init(self, isjunk=None, a="", b="", autojunk=True): + original_init(self, isjunk, a, b, autojunk=False) + + difflib.SequenceMatcher.__init__ = patched_init # type: ignore[assignment] + try: + yield + finally: + difflib.SequenceMatcher.__init__ = original_init # type: ignore[assignment] + + +def _unified_diff(a: str, b: str) -> str: + """Return a string containing the unified diff of two strings. + + This function calls a patched version of `difflib.unified_diff` with `autojunk` set + to `False` for `difflib.SequenceMatcher` class. More details can be found in + `_patch_difflib_sequence_matcher_init` function. + + Args: + a: The first string. + b: The second string. + + Returns: + The unified diff of the two strings. If there is no diff, return "". + + Example:: + + >>> a = '''class GraphModule(torch.nn.Module): + ... def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor): + ... # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1]) + ... view = input_ids.view(-1, 3); input_ids = None + ... ''' + >>> b = '''class (torch.nn.Module): + ... def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]): + ... # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1]) + ... view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None + ... ''' + >>> print(_unified_diff(a, b)) + --- + +++ + @@ -1,4 +1,4 @@ + -class GraphModule(torch.nn.Module): + - def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor): + +class (torch.nn.Module): + + def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]): + # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1]) + - view = input_ids.view(-1, 3); input_ids = None + + view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None + """ + + a_list = a.splitlines(keepends=True) + b_list = b.splitlines(keepends=True) + + with _patch_difflib_sequence_matcher_init(): + # Set `n` to `sys.maxsize` to show entire graph when there is a diff. + diff = "".join(difflib.unified_diff(a_list, b_list, n=sys.maxsize)) + + if not diff: + return "" + return diff + + +def _transform_diagnose_call_message_formatter( + run: Callable, + self: Transform, + *args: Any, + **kwargs: Any, +) -> str: + return f"Running {self.__class__.__name__} pass. 
" + + +def maybe_fx_graph_tabular(graph: torch.fx.Graph) -> str | None: + """Return the Graph nodes in tabular format. Equivalent to stdout of `graph.print_tabular()`. + If `tabulate` is not installed, return `None`. + + Args: + graph: The Graph to print. + + Returns: + The Graph printed in a tabular format. None if `tabulate` is not installed. + """ + f = io.StringIO() + with contextlib.redirect_stdout(f): + try: + graph.print_tabular() + except ImportError: + return None + return f.getvalue() + + +class Transform(abc.ABC): + """Base class for FX graph transformations to be used by FX-ONNX exporter. + + Similar to `FX Interpreter `_, + specializations of this class execute the FX graph Node-by-Node. + Methods in the `Transform` class can be overridden to customize the behavior of the model. + This pattern can be useful for many things, including writing code transformations as well as analysis passes. + + The following methods can be overridden:: + + _run() + +-- run_node() + +-- placeholder() + +-- get_attr() + +-- call_function() + +-- call_method() + +-- call_module() + +-- output() + + One important aspect to note is that if the transformation modifies the model input and/or output signature, + (e.g. additional inputs/outputs are added to the model), :class:`InputAdaptStep` and/or :class:`OutputAdaptStep` + are needed to reconcile :attr:`ONNXProgram.model_proto`. + That is, the model signature and the model representation must match. + + As an additional feature, this class provides builtin support for transformation recording using the diagnostics. + The granularity of overriding is up to the user. And it affects the granularity of + the diagnostics information. For example, if `_run()` is overridden, the + diagnostics information will only contain graph level transformation. Instead, + if `call_function()` is overridden, the diagnostics information will additionally + contain the node level information of `call_function()`. + + TODO(bowbao): Add more overridable methods in call hierarchy + TODO(bowbao): Create an example once more overridable methods are added. + """ + + diagnostic_context: diagnostics.DiagnosticContext + """The diagnostic context for recording diagnostics.""" + + module: torch.fx.GraphModule + """The module to be transformed.""" + + fake_mode: fake_tensor.FakeTensorMode | None + """The existing fake mode detected from `self.module`.""" + + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + module: torch.fx.GraphModule, + ): + """Initialize the transform. + + Args: + diagnostic_context: The diagnostic context for recording diagnostics. + module: The module to be transformed. + """ + self.diagnostic_context = diagnostic_context + self.module = module + self.fake_mode = self._detect_fake_mode() + + def _detect_fake_mode(self) -> fake_tensor.FakeTensorMode | None: + """Detect fake mode from the graph. + + Scan through all nodes in graph and their meta['val'] to detect fake mode. + """ + fake_tensors = [node.meta.get("val") for node in self.module.graph.nodes] + with unset_fake_temporarily(): + return torch._dynamo.utils.detect_fake_mode(fake_tensors) + + def _maybe_fakefy_args( + self, fake_mode: fake_tensor.FakeTensorMode | None, *args: Any + ) -> tuple[Any, ...]: + if fake_mode is None: + return args + # NB: This should hit the cache if tensors were fakefied before. + # E.g., when the fx graph is produced by Dynamo. 
+ return tuple( + fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args + ) + + @abc.abstractmethod + def _run(self, *args, **kwargs) -> torch.fx.GraphModule: ... + + @diagnostics.diagnose_call( + diagnostics.rules.fx_pass, + diagnostic_message_formatter=_transform_diagnose_call_message_formatter, + ) + def run(self, *args, **kwargs) -> torch.fx.GraphModule: + """Run the transform on `self.module`. + + Note that this method may or may not mutate `self.module`, and the returned + `GraphModule` could be either `self.module` or a new `GraphModule`. + + Args: + *args: Positional arguments for `self.module` to run. + **kwargs: Keyword arguments for `self.module` to run. + """ + diagnostic = self.diagnostic_context.inflight_diagnostic( + rule=diagnostics.rules.fx_pass + ) + diagnostic.info( + "For detailed logging of graph modifications by this pass, either set " + "`DiagnosticOptions.verbosity_level` to `logging.DEBUG` or use the environment variable " + "`TORCH_LOGS='onnx_diagnostics'`." + ) + + # Gather graph information before transform. + graph_diff_log_level = logging.DEBUG + if diagnostic.logger.isEnabledFor(graph_diff_log_level): + # Cannot use LazyString because the graph may have been mutated at evaluation time. + old_readable_graph = self.module.print_readable(print_output=False) + old_tabular = maybe_fx_graph_tabular(self.module.graph) + else: + # Set to empty string to avoid unbound warning. This value should never be + # used since the log level is not enabled. + old_readable_graph = "" + old_tabular = "" + + module = self._run(*args, **kwargs) + + # Gather graph information after transform. + if diagnostic.logger.isEnabledFor(graph_diff_log_level): + new_readable_graph = module.print_readable(print_output=False) + new_tabular = maybe_fx_graph_tabular(module.graph) + + with diagnostic.log_section(graph_diff_log_level, "Graph diff:"): + diagnostic.log( + graph_diff_log_level, + "```\n%s\n```", + diagnostics.LazyString( + _unified_diff, old_readable_graph, new_readable_graph + ), + ) + + with diagnostic.log_section(graph_diff_log_level, "Tabular diff:"): + if old_tabular is None or new_tabular is None: + diagnostic.log( + graph_diff_log_level, + "Tabular diff is not available because `tabulate` is not installed.", + ) + else: + diagnostic.log( + graph_diff_log_level, + "```\n%s\n```", + diagnostics.LazyString(_unified_diff, old_tabular, new_tabular), + ) + + return module + + +class AnalysisResult(abc.ABC): # noqa: B024 + ... + + +class Analysis(abc.ABC): + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + module: torch.fx.GraphModule, + onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, + ): + self.diagnostic_context = diagnostic_context + self.module = module + self.onnxfunction_dispatcher = onnxfunction_dispatcher + + @abc.abstractmethod + def analyze(self, diagnostic_level: diagnostics.infra.Level) -> AnalysisResult: ... diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/decomposition_skip.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/decomposition_skip.py new file mode 100644 index 0000000000000000000000000000000000000000..4849616e412aaa1736db2754bc4093d6c05e66f6 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/decomposition_skip.py @@ -0,0 +1,238 @@ +# mypy: allow-untyped-defs +"""A context manager that disables the decomposition of certain ops during dynamo tracing. 
+ +The approach is to temporarily hijack the operator callable with PT2 custom operator. +The custom operator will not be decomposed and will show up as a single node to be exported to ONNX. + +For the time being the decomposition of these ops is otherwise unavoidable. + +https://github.com/pytorch/pytorch/issues/116684 +https://github.com/pytorch/pytorch/issues/115883 + +This solution will no longer be required once the issue is resolved. +""" + +from __future__ import annotations + +import abc +import contextlib +from typing import Callable, Sequence + +from onnxscript.function_libs.torch_lib.ops import ( # type: ignore[import-not-found] + core as torchlib_core, + nn as torchlib_nn, +) + +import torch +from torch._decomp import decompositions + + +_NEW_OP_NAMESPACE: str = "onnx_export" +"""The namespace for the custom operator.""" + + +class DecompSkip(abc.ABC): + op_callable: Callable + """The original operator callable to skip decomposition.""" + onnxscript_function: Callable + """The ONNXScript function to be registered for exporting the custom operator.""" + + new_op_name: str + """The name for the custom operator.""" + new_op_schema: str + """The schema for the custom operator. This should match with the signature of the original operator.""" + + @classmethod + @abc.abstractmethod + def register(cls, export_options: torch.onnx.ExportOptions): + """Registers the custom operator and overrides the original operator. + + It should do the following steps in order: + + 1. Register the custom operator. + 2. Override the original operator with the replacement callable. + 3. Register the ONNXScript function for exporting the custom operator. + """ + ... + + @classmethod + @abc.abstractmethod + def unregister(cls): + """Restores the original operator callable.""" + ... + + @classmethod + @abc.abstractmethod + def abstract(cls, *args, **kwargs): + """An abstract impl (meta kernel) for the operator.""" + ... + + @classmethod + def register_custom_op(cls): + """Registers the custom operator.""" + new_op_qualname = f"{_NEW_OP_NAMESPACE}::{cls.new_op_name}" + torch.library.define(new_op_qualname, cls.new_op_schema) + torch.library.impl(new_op_qualname, "default", cls.replacement) + torch.library.register_fake(new_op_qualname, cls.abstract) + + @classmethod + def replacement(cls, *args, **kwargs): + """A replacement callable for the operator to be hijacked. + + This has the same signature and eager behavior as the original operator. + """ + return cls.op_callable(*args, **kwargs) + + +class UpsampleBilinear2DDecompSkip(DecompSkip): + op_callable = torch._C._nn.upsample_bilinear2d # type: ignore[attr-defined] + onnxscript_function = torchlib_nn.aten_upsample_bilinear2d_vec # type: ignore[attr-defined] + new_op_name = "upsample_bilinear2d" + new_op_schema = "(Tensor self, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> (Tensor)" + + @classmethod + def register(cls, export_options: torch.onnx.ExportOptions): + if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr( + torch.ops.onnx_export, cls.new_op_name + ): + cls.register_custom_op() + torch._C._nn.upsample_bilinear2d = torch.ops.onnx_export.upsample_bilinear2d # type: ignore[attr-defined] + if export_options.onnx_registry is None: + export_options.onnx_registry = torch.onnx.OnnxRegistry() + registry = export_options.onnx_registry + registry.register_op( + function=cls.onnxscript_function, + namespace=_NEW_OP_NAMESPACE, + op_name=cls.new_op_name, + ) + + @classmethod + def unregister(cls): + torch._C._nn.upsample_bilinear2d = cls.op_callable # type: ignore[attr-defined] + + @classmethod + def abstract(cls, input, output_size, align_corners, scale_factors): + osize = decompositions.upsample_compute_output_size( + input.size(), output_size, scale_factors + ) + return torch.empty( + (input.size(0), input.size(1), *osize), + dtype=input.dtype, + device=input.device, + ) + + +class UpsampleTrilinear3DDecompSkip(DecompSkip): + op_callable = torch._C._nn.upsample_trilinear3d # type: ignore[attr-defined] + onnxscript_function = torchlib_nn.aten_upsample_trilinear3d_vec # type: ignore[attr-defined] + new_op_name = "upsample_trilinear3d" + new_op_schema = "(Tensor self, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)" + + @classmethod + def register(cls, export_options: torch.onnx.ExportOptions): + if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr( + torch.ops.onnx_export, cls.new_op_name + ): + cls.register_custom_op() + torch._C._nn.upsample_trilinear3d = torch.ops.onnx_export.upsample_trilinear3d # type: ignore[attr-defined] + if export_options.onnx_registry is None: + export_options.onnx_registry = torch.onnx.OnnxRegistry() + registry = export_options.onnx_registry + registry.register_op( + function=cls.onnxscript_function, + namespace=_NEW_OP_NAMESPACE, + op_name=cls.new_op_name, + ) + + @classmethod + def unregister(cls): + torch._C._nn.upsample_trilinear3d = cls.op_callable # type: ignore[attr-defined] + + @classmethod + def abstract(cls, input, output_size, align_corners, scale_factors): + osize = decompositions.upsample_compute_output_size( + input.size(), output_size, scale_factors + ) + return torch.empty( + (input.size(0), input.size(1), input.size(2), *osize), + dtype=input.dtype, + device=input.device, + ) + + +class InstanceNormDecompSkip(DecompSkip): + op_callable = torch.instance_norm # type: ignore[attr-defined] + onnxscript_function = torchlib_core.aten_instance_norm # type: ignore[attr-defined] + new_op_name = "instance_norm" + new_op_schema = ( + "(Tensor input, Tensor? weight, Tensor? bias, " + "Tensor? running_mean, Tensor? 
running_var, " + "bool use_input_stats, float momentum, float eps, " + "bool cudnn_enabled) -> Tensor" + ) + + @classmethod + def register(cls, export_options: torch.onnx.ExportOptions): + if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr( + torch.ops.onnx_export, cls.new_op_name + ): + cls.register_custom_op() + + torch.instance_norm = torch.ops.onnx_export.instance_norm # type: ignore[attr-defined] + if export_options.onnx_registry is None: + export_options.onnx_registry = torch.onnx.OnnxRegistry() + registry = export_options.onnx_registry + registry.register_op( + function=cls.onnxscript_function, + namespace=_NEW_OP_NAMESPACE, + op_name=cls.new_op_name, + ) + + @classmethod + def unregister(cls): + torch.instance_norm = cls.op_callable # type: ignore[attr-defined] + + @classmethod + def abstract( + cls, + input, + weight, + bias, + running_mean, + running_var, + use_input_stats: bool, + momentum: float, + eps: float, + cudnn_enabled: bool, + ): + return torch.empty( + input.size(), + dtype=input.dtype, + device=input.device, + ) + + +_DEFAULT_SKIP_LIST = [ + UpsampleBilinear2DDecompSkip, + InstanceNormDecompSkip, + UpsampleTrilinear3DDecompSkip, +] + + +@contextlib.contextmanager +def enable_decomposition_skips( + export_options: torch.onnx.ExportOptions, + skips: Sequence[type[DecompSkip]] = _DEFAULT_SKIP_LIST, +): + """A context manager that enables the decomposition skips. + + The original operator callables that are otherwise decomposed are replaced with custom operators. + The ONNXScript functions for exporting the custom operators are added to the ONNX registry inside export_options. + """ + try: + for skip in skips: + skip.register(export_options) + yield + finally: + for skip in skips: + skip.unregister() diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/decomposition_table.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/decomposition_table.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca128da11e4955ff900d806ed4f7b4c6d349c5c --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/decomposition_table.py @@ -0,0 +1,116 @@ +# mypy: allow-untyped-defs +"""Dispatcher for AtenLib functions from onnx-script.""" + +from __future__ import annotations + +from typing import Callable + +import torch +import torch._ops +import torch.fx +from torch.onnx._internal.fx import registration + + +def _create_onnx_supports_op_overload_table( + registry, +) -> set[torch._ops.OperatorBase | Callable]: + """ + Creates a set of OperatorBase and Callable objects that represent ONNX-supported PyTorch operations. + + Args: + registry (OnnxRegistry): The ONNX registry for PyTorch. + + Returns: + A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations. + """ + table: set[torch._ops.OperatorBase | Callable] = set() + + # Some ops in `torch.ops.aten` are not discoverable through `dir(torch.ops.aten)`, + # but retrievable via explicit lookup. + # https://github.com/pytorch/pytorch/issues/99681 + # This is a workaround to make sure we register ONNX symbolic functions for these. 
+    onnx_supported_aten_lookup_table = [
+        k.split("::")[1].split(".")[0]
+        for k in registry._all_registered_ops()
+        if k.startswith("aten::")
+    ]
+
+    for op_namespace in (torch.ops.aten, torch.ops.prims):
+        attr_names = dir(op_namespace)
+        if op_namespace is torch.ops.aten:
+            attr_names += onnx_supported_aten_lookup_table
+        for attr_name in attr_names:
+            if not hasattr(op_namespace, attr_name):
+                # torchlib owns some attributes that are not aten ops.
+                continue
+            op_overload_packet = getattr(op_namespace, attr_name)
+            if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket):
+                continue
+
+            for overload_name in op_overload_packet.overloads():
+                op_overload = getattr(op_overload_packet, overload_name)
+                internal_op_name = registration.OpName.from_qualified_name(
+                    qualified_name=op_overload.name()
+                )
+                # NOTE: If the overload is supported in the registry, or its default
+                # overload is supported in the registry, we add it to the table.
+                if registry.is_registered_op(
+                    namespace=internal_op_name.namespace,
+                    op_name=internal_op_name.op_name,
+                    overload=internal_op_name.overload,
+                ) or registry.is_registered_op(
+                    namespace=internal_op_name.namespace,
+                    op_name=internal_op_name.op_name,
+                    overload=None,
+                ):
+                    # This maps torch.ops.aten.add.Tensor, torch.ops.aten.add.Scalar,
+                    # torch.ops.aten.add.out, etc. to "aten::add". This means the exporter
+                    # for "aten::add" is used for all overloads of "aten::add".
+                    # This is applied to all ops under torch.ops.aten.
+                    table.add(op_overload)
+    return table
+
+
+def create_onnx_friendly_decomposition_table(
+    registry,
+) -> dict[torch._ops.OperatorBase, Callable]:
+    """
+    This function creates a dictionary of op overloads and their decomposition functions
+    for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function,
+    its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's
+    built-in aten-to-aten decomposition.
+
+    Args:
+        registry (torch.onnx.OnnxRegistry): The ONNX registry for PyTorch.
+
+    Returns:
+        Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding
+        decomposition functions.
+    """
+    decomposition_table: dict[torch._ops.OperatorBase, Callable] = {}
+    # Set of op overloads (e.g., torch.ops.aten.add.Tensor) that have ONNX symbolic
+    # functions registered, so decomposing them is unnecessary.
+    _ONNX_SUPPORT_OP_OVERLOADS = _create_onnx_supports_op_overload_table(registry)
+
+    # NOTE: If we import torch._decomp, we will get RuntimeError: Only a single
+    # TORCH_LIBRARY can be used to register the namespace nvprims; please put all of your
+    # definitions in a single TORCH_LIBRARY block.
+    for op_overload, decomp_fn in torch._decomp.decomposition_table.items():  # type: ignore[attr-defined]
+        # Skip decomposition into "prim::*" ops (defined in 'torch._refs'), because they
+        # are not generally supported by ONNX.
+        # Skip decomposition for op_overload as long as that op_overload has a corresponding ONNX
+        # symbolic function.
+        if (
+            "torch._refs" in decomp_fn.__module__
+            or op_overload in _ONNX_SUPPORT_OP_OVERLOADS
+        ):
+            continue
+        decomposition_table[op_overload] = decomp_fn
+
+    # NOTE: There are ops in core ATen and under torch._refs that are not decomposed
+    # into prim ops. We need to add them back.
+    for op_overload, decomp_fn in torch._decomp.core_aten_decompositions().items():
+        if op_overload in _ONNX_SUPPORT_OP_OVERLOADS:
+            continue
+        decomposition_table[op_overload] = decomp_fn
+    return decomposition_table
diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/diagnostics.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/diagnostics.py
new file mode 100644
index 0000000000000000000000000000000000000000..8617afa5f440a37ab300706b6ee8edc8ebd4ccdf
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/diagnostics.py
@@ -0,0 +1,261 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import dataclasses
+import functools
+from typing import Any, TYPE_CHECKING
+
+import onnxscript  # type: ignore[import]
+from onnxscript.function_libs.torch_lib import graph_building  # type: ignore[import]
+
+import torch
+import torch.fx
+from torch.onnx._internal import diagnostics
+from torch.onnx._internal.diagnostics import infra
+from torch.onnx._internal.diagnostics.infra import decorator, formatter
+from torch.onnx._internal.fx import registration, type_utils as fx_type_utils
+
+
+if TYPE_CHECKING:
+    import logging
+
+# NOTE: The following limits are for the number of items to display in diagnostics for
+# a list, tuple or dict. The limit is picked such that common useful scenarios such as
+# operator arguments are covered, while preventing excessive processing load on
+# considerably large containers such as the dictionary mapping from fx to onnx nodes.
+_CONTAINER_ITEM_LIMIT: int = 10
+
+# NOTE(bowbao): This is a shim over `torch.onnx._internal.diagnostics`, which is
+# used in `torch.onnx`, and loaded with `torch`. Hence anything related to `onnxscript`
+# cannot be put there.
+
+# [NOTE: `dynamo_export` diagnostics logging]
+# The 'dynamo_export' diagnostics leverages the PT2 artifact logger to handle the verbosity
+# level of logs that are recorded in each SARIF log diagnostic. In addition to the SARIF log,
+# terminal logging is by default disabled. Terminal logging can be activated by setting
+# the environment variable `TORCH_LOGS="onnx_diagnostics"`. When the environment variable
+# is set, it also fixes the logging level to `logging.DEBUG`, overriding the verbosity level
+# specified in the diagnostic options.
+# See `torch/_logging/__init__.py` for more on PT2 logging.
+_ONNX_DIAGNOSTICS_ARTIFACT_LOGGER_NAME = "onnx_diagnostics"
+diagnostic_logger = torch._logging.getArtifactLogger(
+    "torch.onnx", _ONNX_DIAGNOSTICS_ARTIFACT_LOGGER_NAME
+)
+
+
+def is_onnx_diagnostics_log_artifact_enabled() -> bool:
+    return torch._logging._internal.log_state.is_artifact_enabled(
+        _ONNX_DIAGNOSTICS_ARTIFACT_LOGGER_NAME
+    )
+
+
+@functools.singledispatch
+def _format_argument(obj: Any) -> str:
+    return formatter.format_argument(obj)
+
+
+def format_argument(obj: Any) -> str:
+    formatter = _format_argument.dispatch(type(obj))
+    return formatter(obj)
+
+
+# NOTE: EDITING BELOW? READ THIS FIRST!
+#
+# The functions below register `format_argument` handlers for different types via
+# the `functools.singledispatch` registry. They are invoked by the diagnostics
+# system when recording function arguments and return values as part of a diagnostic.
+# Hence, code with a heavy workload should be avoided. Things to avoid, for example:
+# `torch.fx.GraphModule.print_readable()`.
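+# A formatter for a new type follows the same pattern; for instance, a cheap
+# formatter for a hypothetical `MyTensorWrapper` type (a sketch, not an
+# existing type in this module) would look like:
+#
+#     @_format_argument.register
+#     def _my_tensor_wrapper(obj: MyTensorWrapper) -> str:
+#         return f"MyTensorWrapper({format_argument(obj.tensor)})"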
+ + +@_format_argument.register +def _torch_nn_module(obj: torch.nn.Module) -> str: + return f"torch.nn.Module({obj.__class__.__name__})" + + +@_format_argument.register +def _torch_fx_graph_module(obj: torch.fx.GraphModule) -> str: + return f"torch.fx.GraphModule({obj.__class__.__name__})" + + +@_format_argument.register +def _torch_fx_node(obj: torch.fx.Node) -> str: + node_string = f"fx.Node({obj.target})[{obj.op}]:" + if "val" not in obj.meta: + return node_string + "None" + return node_string + format_argument(obj.meta["val"]) + + +@_format_argument.register +def _torch_fx_symbolic_bool(obj: torch.SymBool) -> str: + return f"SymBool({obj})" + + +@_format_argument.register +def _torch_fx_symbolic_int(obj: torch.SymInt) -> str: + return f"SymInt({obj})" + + +@_format_argument.register +def _torch_fx_symbolic_float(obj: torch.SymFloat) -> str: + return f"SymFloat({obj})" + + +@_format_argument.register +def _torch_tensor(obj: torch.Tensor) -> str: + return f"Tensor({fx_type_utils.from_torch_dtype_to_abbr(obj.dtype)}{_stringify_shape(obj.shape)})" + + +@_format_argument.register +def _int(obj: int) -> str: + return str(obj) + + +@_format_argument.register +def _float(obj: float) -> str: + return str(obj) + + +@_format_argument.register +def _bool(obj: bool) -> str: + return str(obj) + + +@_format_argument.register +def _str(obj: str) -> str: + return obj + + +@_format_argument.register +def _registration_onnx_function(obj: registration.ONNXFunction) -> str: + # TODO: Compact display of `param_schema`. + return f"registration.ONNXFunction({obj.op_full_name}, is_custom={obj.is_custom}, is_complex={obj.is_complex})" + + +@_format_argument.register +def _list(obj: list) -> str: + list_string = f"List[length={len(obj)}](\n" + if not obj: + return list_string + "None)" + for i, item in enumerate(obj): + if i >= _CONTAINER_ITEM_LIMIT: + # NOTE: Print only first _CONTAINER_ITEM_LIMIT items. + list_string += "...,\n" + break + list_string += f"{format_argument(item)},\n" + return list_string + ")" + + +@_format_argument.register +def _tuple(obj: tuple) -> str: + tuple_string = f"Tuple[length={len(obj)}](\n" + if not obj: + return tuple_string + "None)" + for i, item in enumerate(obj): + if i >= _CONTAINER_ITEM_LIMIT: + # NOTE: Print only first _CONTAINER_ITEM_LIMIT items. + tuple_string += "...,\n" + break + tuple_string += f"{format_argument(item)},\n" + return tuple_string + ")" + + +@_format_argument.register +def _dict(obj: dict) -> str: + dict_string = f"Dict[length={len(obj)}](\n" + if not obj: + return dict_string + "None)" + for i, (key, value) in enumerate(obj.items()): + if i >= _CONTAINER_ITEM_LIMIT: + # NOTE: Print only first _CONTAINER_ITEM_LIMIT items. 
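+            # Same truncation scheme as _list and _tuple above: emit an
+            # ellipsis entry and stop iterating once the limit is reached.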
+ dict_string += "...\n" + break + dict_string += f"{key}: {format_argument(value)},\n" + return dict_string + ")" + + +@_format_argument.register +def _torch_nn_parameter(obj: torch.nn.Parameter) -> str: + return f"Parameter({format_argument(obj.data)})" + + +@_format_argument.register +def _onnxscript_torch_script_tensor(obj: graph_building.TorchScriptTensor) -> str: + return f"`TorchScriptTensor({fx_type_utils.from_torch_dtype_to_abbr(obj.dtype)}{_stringify_shape(obj.shape)})`" # type: ignore[arg-type] # noqa: B950 + + +@_format_argument.register +def _onnxscript_onnx_function(obj: onnxscript.OnnxFunction) -> str: + return f"`OnnxFunction({obj.name})`" + + +@_format_argument.register +def _onnxscript_traced_onnx_function(obj: onnxscript.TracedOnnxFunction) -> str: + return f"`TracedOnnxFunction({obj.name})`" + + +# from torch/fx/graph.py to follow torch format +def _stringify_shape(shape: torch.Size | None) -> str: + if shape is None: + return "" + return f"[{', '.join(str(x) for x in shape)}]" + + +rules = diagnostics.rules +levels = diagnostics.levels +RuntimeErrorWithDiagnostic = infra.RuntimeErrorWithDiagnostic +LazyString = formatter.LazyString +DiagnosticOptions = infra.DiagnosticOptions + + +@dataclasses.dataclass +class Diagnostic(infra.Diagnostic): + logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger) + + def log(self, level: int, message: str, *args, **kwargs) -> None: + if self.logger.isEnabledFor(level): + formatted_message = message % args + if is_onnx_diagnostics_log_artifact_enabled(): + # Only log to terminal if artifact is enabled. + # See [NOTE: `dynamo_export` diagnostics logging] for details. + self.logger.log(level, formatted_message, **kwargs) + + self.additional_messages.append(formatted_message) + + +@dataclasses.dataclass +class DiagnosticContext(infra.DiagnosticContext[Diagnostic]): + logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger) + _bound_diagnostic_type: type[Diagnostic] = dataclasses.field( + init=False, default=Diagnostic + ) + + def __enter__(self): + self._previous_log_level = self.logger.level + # Adjust the logger level based on `options.verbosity_level` and the environment + # variable `TORCH_LOGS`. See [NOTE: `dynamo_export` diagnostics logging] for details. + if not is_onnx_diagnostics_log_artifact_enabled(): + return super().__enter__() + else: + return self + + +diagnose_call = functools.partial( + decorator.diagnose_call, + diagnostic_type=Diagnostic, + format_argument=format_argument, +) + + +@dataclasses.dataclass +class UnsupportedFxNodeDiagnostic(Diagnostic): + unsupported_fx_node: torch.fx.Node | None = None + + def __post_init__(self) -> None: + super().__post_init__() + # NOTE: This is a hack to make sure that the additional fields must be set and + # not None. Ideally they should not be set as optional. But this is a known + # limitation with `dataclasses`. Resolvable in Python 3.10 with `kw_only=True`. 
+ # https://stackoverflow.com/questions/69711886/python-dataclasses-inheritance-and-default-values + if self.unsupported_fx_node is None: + raise ValueError("unsupported_fx_node must be specified.") diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/dynamo_graph_extractor.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/dynamo_graph_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..54af3142cc230dd09a3c3b4805ff0f98bd50a485 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/dynamo_graph_extractor.py @@ -0,0 +1,228 @@ +# mypy: allow-untyped-defs +# NOTE: This file is referenced by name at +# /opt/pytorch/torch/_dynamo/eval_frame.py::DONT_WRAP_FILES. +# introduced by https://github.com/pytorch/pytorch/pull/98894. +# If this file is renamed, moved, etc please update the reference there! + +from __future__ import annotations + +import contextlib +import functools +import inspect +from typing import Any, Callable, Mapping, Sequence + +import torch._dynamo +import torch.export as torch_export +import torch.fx +import torch.onnx +from torch.onnx._internal import _exporter_legacy, io_adapter +from torch.utils import _pytree as pytree + + +class _PyTreeExtensionContext: + """Context manager to register PyTree extension.""" + + _extensions: dict[type, tuple[pytree.FlattenFunc, pytree.UnflattenFunc]] + + def __init__(self) -> None: + self._extensions = {} + # Register PyTree extension for HuggingFace model output. + self._register_huggingface_model_output_extension() + + def __enter__(self): + for class_type, (flatten_func, unflatten_func) in self._extensions.items(): + pytree._private_register_pytree_node( + class_type, + flatten_func, + unflatten_func, + ) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + for class_type in self._extensions: + pytree.SUPPORTED_NODES.pop(class_type) + + def register_pytree_node( + self, + class_type: type, + flatten_func: pytree.FlattenFunc, + unflatten_func: pytree.UnflattenFunc, + ): + """Register PyTree extension for a custom python type. + + Args: + class_type: The custom python type. + flatten_func: The flatten function. + unflatten_func: The unflatten function. + + Raises: + AssertionError: If the custom python type is already registered. + """ + if class_type in pytree.SUPPORTED_NODES or class_type in self._extensions: + # PyTree node already registered. + # E.g., `huggingface/transformer` registers `ModelOutput` as PyTree node after + # https://github.com/huggingface/transformers/pull/25358. + return + self._extensions[class_type] = (flatten_func, unflatten_func) + + def _register_huggingface_model_output_extension(self): + try: + from transformers import modeling_outputs # type: ignore[import] + except ImportError as e: + return + + def model_output_flatten( + output: modeling_outputs.ModelOutput, + ) -> tuple[list[Any], pytree.Context]: + return list(output.values()), (type(output), list(output.keys())) + + def model_output_unflatten( + values: list[Any], context: pytree.Context + ) -> modeling_outputs.ModelOutput: + output_type, keys = context + return output_type(**dict(zip(keys, values))) + + # All 'ModelOutput' subclasses are defined under module 'modeling_outputs'. 
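+        # Enumerate all concrete subclasses via inspect; the base ModelOutput
+        # class itself is filtered out by the predicate below.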
+ named_model_output_classes = inspect.getmembers( + modeling_outputs, + lambda x: ( + inspect.isclass(x) + and issubclass(x, modeling_outputs.ModelOutput) + and x is not modeling_outputs.ModelOutput + ), + ) + + for _, class_type in named_model_output_classes: + self.register_pytree_node( + class_type, + model_output_flatten, + model_output_unflatten, # type: ignore[arg-type] + ) + + +class DynamoFlattenOutputStep(io_adapter.FlattenOutputStep): + """Flatten nested collections and custom Python types and return a flat list of elements. + + Extended from :class:`io_adapter.FlattenOutputStep` to support flattening arbitrary + types via pytree extension. By default this supports many common user-defined Python + types such as :class:`ModelOutput` from HuggingFace transformers. + + The pytree extension can be customized by passing in a ``_PyTreeExtensionContext`` + object. See :meth:`_PyTreeExtensionContext.register_pytree_node`. + """ + + def __init__(self, pytree_extension_context: _PyTreeExtensionContext | None = None): + super().__init__() + self._pytree_extension_context = ( + pytree_extension_context or _PyTreeExtensionContext() + ) + + def apply( + self, + model_outputs: Any, + model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None, + ) -> Sequence[Any]: + """Flatten the model outputs, under the context of pytree extension.""" + with self._pytree_extension_context: + return super().apply(model_outputs, model=model) + + +def _wrap_model_with_output_adapter( + model: torch.nn.Module | Callable, + output_adapter: DynamoFlattenOutputStep, +) -> Callable: + """Wrap model with output adapter. + + This is a helper function to enable :func:`dynamo.export` on models that produce + custom user-defined output types. It wraps the model with an output adapter to + convert the outputs to :func:`dynamo.export` compatible types, i.e. :class:`torch.Tensor`. + + The adapting logic is controlled by ``output_adapter``. + + Args: + model: PyTorch model or function. + output_adapter: Output adapter to apply to model output. + Returns: + Wrapped model. + """ + model_func = model.forward if isinstance(model, torch.nn.Module) else model + + # Preserve original function signature. + @functools.wraps(model_func) + def wrapped(*args, **kwargs): + return output_adapter.apply(model_func(*args, **kwargs), model=model) + + return wrapped + + +class DynamoExport(_exporter_legacy.FXGraphExtractor): + """Generates an FX GraphModule using the torch.dynamo.export API + Args: + aten_graph: If True, exports a graph with ATen operators. + If False, exports a graph with Python operators. + """ + + def __init__( + self, + aten_graph: bool | None = None, + ): + super().__init__() + self.aten_graph = aten_graph if aten_graph is not None else True + + def generate_fx( + self, + options: _exporter_legacy.ResolvedExportOptions, + model: torch.nn.Module | Callable, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + ) -> torch.fx.GraphModule: + # `dynamo.export` does not recognize custom user-defined classes as output types. + # Apply wrapper to adapt the outputs back to `dynamo.export` compatible types, + # i.e. :class:`torch.Tensor`. + dynamo_flatten_output_step = DynamoFlattenOutputStep() + wrapped_model = _wrap_model_with_output_adapter( + model, dynamo_flatten_output_step + ) + # Record the output adapter step. + self.output_adapter.append_step(dynamo_flatten_output_step) + + # Translate callable to FX graph.
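+ # Illustrative sketch (added comment, not in the original source): the wrapper
+ # installed above makes custom outputs pytree-flat before dynamo tracing, e.g. a
+ # HuggingFace-style ModelOutput(logits=t0, hidden_states=t1) is returned to
+ # `dynamo.export` as the flat list [t0, t1].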
+ # + fake_mode = ( + options.fake_context.fake_mode + if options.fake_context + else contextlib.nullcontext() + ) + fx_mode = "symbolic" if options.dynamic_shapes else "fake" + with fake_mode: # type: ignore[attr-defined] + graph_module, graph_guard = torch._dynamo.export( + wrapped_model, + tracing_mode=fx_mode, + )( + *model_args, + **model_kwargs, + ) + del graph_guard # Unused + torch._dynamo.reset() + + # Export FX graph to ONNX ModelProto. + self.input_adapter.append_step( + io_adapter.FlattenInputWithTreeSpecValidationInputStep() + ) + + updated_model_args = self.input_adapter.apply( + *model_args, model=model, **model_kwargs + ) + + return self.pre_export_passes(options, model, graph_module, updated_model_args) # type: ignore[return-value] + + def pre_export_passes( + self, + options: _exporter_legacy.ResolvedExportOptions, + original_model: torch.nn.Module | Callable, + fx_module: torch.fx.GraphModule, + fx_module_args: Sequence[Any], + ): + return _exporter_legacy.common_pre_export_passes( + options, original_model, fx_module, fx_module_args + ) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/fx_onnx_interpreter.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/fx_onnx_interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..b81c7254751ba8e7587c63926f746eefb453ec00 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/fx_onnx_interpreter.py @@ -0,0 +1,794 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import inspect +import logging +import operator +import re +from typing import Callable, Sequence + +import onnxscript # type: ignore[import] +from onnxscript.function_libs.torch_lib import ( # type: ignore[import] + graph_building as onnxscript_graph_building, +) + +import torch +import torch.fx +from torch.onnx import _type_utils as jit_type_utils +from torch.onnx._internal.fx import ( + _pass, + diagnostics, + onnxfunction_dispatcher, + type_utils as fx_type_utils, +) +from torch.utils import _pytree + + +def _fx_node_to_onnx_message_formatter( + fn: Callable, + self, + node: torch.fx.Node, + *args, + **kwargs, +) -> str: + return f"FX Node: {node.op}:{node.target}[name={node.name}]. " + + +def _fx_graph_to_onnx_message_formatter( + fn: Callable, + self, + fx_graph_module: torch.fx.GraphModule, + *args, + **kwargs, +) -> str: + return f"FX Graph: {fx_graph_module._get_name()}. " + + +def _location_from_fx_stack_trace( + node_stack_trace: str, +) -> diagnostics.infra.Location | None: + """Extract location from FX node stack trace. + + TODO(bowbao): Create fx utils module and move this function there. + + Args: + node_stack_trace: The stack trace of the FX node. Example: + + File "path/file.py", line 311, in <function> + <code> + | File "path/file2.py", line 389, in <function> + <code> + + Returns: + location: The location of the FX node.
+ """ + if "File" not in node_stack_trace: + return None + + lines = node_stack_trace.strip().split("\n") + idx = 0 + while idx < len(lines) and "File" not in lines[idx]: + idx += 1 + if idx + 1 >= len(lines): + return None + + pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$") + matches = pattern.match(lines[idx].strip()) + if matches: + uri = matches.group(1) + line_number = int(matches.group(2)) + snippet = lines[idx + 1].strip() + return diagnostics.infra.Location(uri=uri, line=line_number, snippet=snippet) + return None + + +def _retrieve_or_adapt_input_to_graph_set( + fx_node_arg: fx_type_utils.Argument, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + tracer: onnxscript_graph_building.TorchScriptTracingEvaluator, +): + """Map FX value to TorchScript value. + + When creating TorchScript graph from FX graph, we need a mapping from FX variable + to TorchScript variable. This function maps FX variable, fx_node_arg, to torch.jit.Value. + """ + + onnx_tensor = fx_node_arg + if isinstance(onnx_tensor, torch.fx.Node): + # 1. fx_node_arg is a torch.fx.Node, which means + # fx_node_arg stands for the output of that torch.fx.Node. + # 2. fx_node_arg (variable in torch.fx.Graph) is be mapped to + # torch.jit.Value, fx_name_to_onnxscript_value[fx_node_arg.name], + # in TorchScript graph. + return fx_name_to_onnxscript_value[onnx_tensor.name] + elif isinstance(onnx_tensor, (tuple, list)) and any( + isinstance(node, torch.fx.Node) + and fx_type_utils.is_torch_symbolic_type(node.meta.get("val")) + for node in onnx_tensor + ): + # This intends to handle dynamic axes. for example, if the input size of op.Expand + # is dynamic, each dimension would be variable (i.e., sym variable in Pytorch + # FX graph. Note that sym variable is mapped to tensor in ONNX Script world) + # calculated by other operators. + sequence_mixed_elements: list[ + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...] + | list[int] + ] = [] + # onnx_tensor contains a list of scalars which could be one of + # - tensor with empty shape, + # - tensor with tensor with shape (1,), + # - torch.SymInt, + # - int + # - ... + # They should all be promoted to tensor with shape (1,) + # in order to call ONNX's Concat. + for tensor in onnx_tensor: + # Prepare `tensor` as input of ONNX's Concat. + + if isinstance( + tensor, torch.fx.Node + ) and fx_type_utils.is_torch_symbolic_type(tensor.meta.get("val")): + # In this case, tensor is a torch.SymInt from Dynamo's perspective. + # It might be mapped to tensor with shape () or (1,) in ONNX. + element_value = fx_name_to_onnxscript_value[tensor.name] + if isinstance( + element_value, onnxscript_graph_building.TorchScriptTensor + ): + # All elements sequence_mixed_elements will be send to onnx's Concat + # as inputs. Therefore, they are required to have the same rank. + # Since tensors with rank=0 (i.e., scalar) cannot be concated, all + # scalars are promoted to tensors with shape (1,). 
+ with onnxscript.evaluator.default_as(tracer): + element_value = onnxscript.opset18.Reshape(element_value, [1]) # type: ignore[arg-type, type-var] + sequence_mixed_elements.append(element_value) + elif isinstance(tensor, int): + # NOTE: op.Concat doesn't support scalars, so we need to wrap the int in a + # list to add a dim, and onnx-script will promote it to tensor(int64) + sequence_mixed_elements.append([tensor]) + else: + raise RuntimeError( + f"Unsupported type in sequence_mixed_elements: {type(tensor)}" + ) + # Concat all the elements in the sequence. + # shapes are mapped to tensors in ONNX graph (TorchScriptGraph), + # so the list of sym_ints is concatenated to a tensor before calling the ONNX op. + + # For example: + # inputs: [[2], [4], fx.Node(SymIntA), [1], fx.Node(SymIntB)] + # outputs: op.Concat([op.Constant(2), op.Constant(4), TorchScriptTensor(A), op.Constant(1), TorchScriptTensor(B)]) + + # onnx-script auto-wraps Python numbers with op.Constant, + # so we don't need to process them specifically. + with onnxscript.evaluator.default_as(tracer): + output = onnxscript.opset18.Concat(*sequence_mixed_elements, axis=0) # type: ignore[type-var] + output.dtype = torch.int64 # type: ignore[union-attr] + output.shape = [len(sequence_mixed_elements)] # type: ignore[union-attr] + return output + elif isinstance(onnx_tensor, (tuple, list)) and all( + isinstance(node, torch.fx.Node) or node is None for node in onnx_tensor + ): + sequence_elements: list[ + onnxscript_graph_building.TorchScriptTensor + | None + | tuple[onnxscript_graph_building.TorchScriptTensor, ...] + ] = [] + for tensor in onnx_tensor: + sequence_elements.append( + fx_name_to_onnxscript_value[tensor.name] if tensor is not None else None + ) + return sequence_elements + if isinstance(onnx_tensor, torch.dtype): + onnx_tensor = int( # type: ignore[call-overload] + jit_type_utils.JitScalarType.from_dtype(onnx_tensor).onnx_type() + ) + # NOTE: if device is specified in kwargs (not consumed), it's free to be ignored. But + # if it's in args, we need to convert it to a string for the dispatcher to match the schema. + if isinstance(onnx_tensor, torch.device): + # torch.device is not supported by onnxscript (no op). We turn it into + # a string. + return str(onnx_tensor) + # In all other cases, we do nothing. + return onnx_tensor + + +def filter_incompatible_and_dtype_convert_kwargs(kwargs): + """Filter out kwargs that are not supported by onnxscript.""" + filtered = {} + for key, value in kwargs.items(): + if key in { + "layout", + "device", + "requires_grad", + "pin_memory", + "memory_format", + "implicit", + }: + continue + if key == "dtype": + if value is None: + # We omit dtype if it is not provided, because onnxscript handles the + # default case. + continue + else: + value = int(jit_type_utils.JitScalarType.from_dtype(value).onnx_type()) # type: ignore[call-overload] + filtered[key] = value + return filtered + + +def _fill_tensor_shape_type( + onnxscript_values: onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + name: str, + expected_values: fx_type_utils.META_VALUE_TYPE + | list[fx_type_utils.META_VALUE_TYPE] + | tuple[fx_type_utils.META_VALUE_TYPE | None, ...], +): + """Fill the meta information of onnxscript_values with that from the fx FakeTensor.""" + + if isinstance(expected_values, (list, tuple)) and not isinstance( + onnxscript_values, (list, tuple) + ): + # ex: aten::split - in onnx_dtype: seq(tensor) + # onnxscript_values is a single tensor, but expected_values is a list of tensors.
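+ # Hypothetical example (added comment): for aten::split(x, 2), expected_values is a
+ # list of FakeTensors while onnxscript_values is one sequence-typed value, so there
+ # is no per-element counterpart to annotate and we simply return below.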
+ return + + flat_onnxscript_values, _ = _pytree.tree_flatten(onnxscript_values) + flat_expected_values, _ = _pytree.tree_flatten(expected_values) + for i, (onnxscript_value, expected_value) in enumerate( + zip(flat_onnxscript_values, flat_expected_values) + ): + if expected_value is None: + # There is no shape/type from None. + # NOTE: according to https://github.com/pytorch/pytorch/blob/main/torch/_meta_registrations.py, + # None could be a valid value for the return type, so we need to handle it. + # e.g. the function: meta__scaled_dot_product_flash() in cpu mode. + continue + elif fx_type_utils.is_torch_symbolic_type(expected_value): + # The aten::sym_size output is an int, not a tensor, which stands + # for the size of one dim. We treat it as a 1-D tensor. + onnxscript_value.dtype = fx_type_utils.from_sym_value_to_torch_dtype( + expected_value + ) + onnxscript_value.shape = torch.Size([1]) + elif isinstance(expected_value, (int, float, bool)): + onnxscript_value.dtype = fx_type_utils.from_scalar_type_to_torch_dtype( + type(expected_value) + ) + onnxscript_value.shape = torch.Size([]) + elif isinstance(expected_value, complex): + # From complex scalar to real representation + onnxscript_value_to_torch_dtype = ( + fx_type_utils.from_scalar_type_to_torch_dtype(type(expected_value)) + ) + onnxscript_value.dtype = ( + fx_type_utils.from_complex_to_float(onnxscript_value_to_torch_dtype) + if onnxscript_value_to_torch_dtype is not None + else None + ) + onnxscript_value.shape = torch.Size([2]) + elif fx_type_utils.is_torch_complex_dtype(expected_value.dtype): + # Like torch.view_as_real, we flatten complex tensors to real tensors with + # an additional last dimension of 2 + onnxscript_value.shape = torch.Size((*expected_value.size(), 2)) + # complex64 -> float32, complex128 -> float64, etc. + onnxscript_value.dtype = fx_type_utils.from_complex_to_float( + expected_value.dtype + ) + # Dispatcher needs to know the value is complex + onnxscript_value.is_complex = True + else: + # We set node output sizes to be dynamic to continue the model conversion, + # and inputs are also set to be dynamic in add_input(). + onnxscript_value.shape = expected_value.size() + onnxscript_value.dtype = expected_value.dtype + + # naming + if i > 0: + onnxscript_value.name = f"{name}_{i}" + else: + onnxscript_value.name = name + + +def _fill_in_default_kwargs( + node: torch.fx.Node, +) -> tuple[list[fx_type_utils.Argument], dict[str, fx_type_utils.Argument]]: + """Find and fill in kwargs that were not provided, using default values.""" + + # TODO: aten::sym_size has an overload, but the fx graph is using + # the overloadpacket for some reason. + # https://github.com/pytorch/pytorch/issues/97201 + # We manually assign the overload for aten::sym_size. + if hasattr(node.target, "_schema"): + node_schema = node.target._schema # type: ignore[union-attr] + else: + node_schema = torch.ops.aten.sym_size.int._schema # type: ignore[union-attr] + + # This function assumes the order of arguments in the FX op is the + # same as the order of arguments in the TorchScript op. + complete_args: list[fx_type_utils.Argument] = [] + complete_kwargs: dict[str, fx_type_utils.Argument] = {} + + if inspect.isbuiltin(node.target): + complete_args = list(node.args) + else: + for i, expected_arg in enumerate(node_schema.arguments): + if i < len(node.args): + complete_args.append(node.args[i]) + elif expected_arg.name in node.kwargs: + complete_kwargs[expected_arg.name] = node.kwargs[expected_arg.name] + else: + # Get default from schema.
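+ # Worked example (added comment, not in the original source): for
+ # aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) called as
+ # add(x, y), this loop yields complete_args == [x, y] and
+ # complete_kwargs == {"alpha": 1}, with alpha pulled from the schema default.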
+ complete_kwargs[expected_arg.name] = expected_arg.default_value + + return complete_args, complete_kwargs + + +def _wrap_fx_args_as_onnxscript_args( + complete_args: list[fx_type_utils.Argument], + complete_kwargs: dict[str, fx_type_utils.Argument], + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + tracer: onnxscript_graph_building.TorchScriptTracingEvaluator, +) -> tuple[ + Sequence[ + onnxscript_graph_building.TorchScriptTensor + | str + | int + | float + | bool + | list + | complex + | None + ], + dict[str, fx_type_utils.Argument], +]: + """Map all FX arguments of a node to arguments in the TorchScript graph.""" + + onnxscript_args = tuple( + _retrieve_or_adapt_input_to_graph_set(arg, fx_name_to_onnxscript_value, tracer) + for arg in complete_args + ) + onnxscript_kwargs = filter_incompatible_and_dtype_convert_kwargs(complete_kwargs) + + return onnxscript_args, onnxscript_kwargs + + +class FxOnnxInterpreter: + """Stateless class to process FX graph nodes and translate them into their ONNX counterparts. + + All FX nodes described by [FX Graph](https://pytorch.org/docs/stable/fx.html#torch.fx.Graph) are supported. + Similar to the [FX Interpreter pattern](https://pytorch.org/docs/stable/fx.html#torch.fx.Interpreter), each FX node + must be implemented in its own method in this class. + + Each operator's implementation returns either an `onnxscript.OnnxFunction` or + `onnxscript.TracedOnnxFunction` instance based on the dispatch algorithm. They can + also raise RuntimeError if there are no overloaded functions available for the given FX node. + + TODO: Convert methods to @staticmethod when the diagnostic system supports it + DO NOT ADD NEW ATTRIBUTES TO THIS CLASS! + """ + + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + ): + # THIS SHOULD BE THE ONLY STATE IN THIS CLASS (constraint from the diagnostics API) + # TODO: The diagnostics API should be revised to get rid of this attribute. + # DO NOT add other class-level attributes. + self.diagnostic_context = diagnostic_context + + @diagnostics.diagnose_call( + diagnostics.rules.fx_node_to_onnx, + diagnostic_message_formatter=_fx_node_to_onnx_message_formatter, + ) + def run_node( + self, + node, + fx_graph_module: torch.fx.GraphModule, + onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, + onnxscript_graph: onnxscript_graph_building.TorchScriptGraph, + onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + ): + """Execute a single FX node to produce its ONNX counterpart. + + Args: + node: The FX node to be translated. + fx_graph_module: The FX graph module containing the node. + onnxfunction_dispatcher: The dispatcher to find the best matched ONNX op. + onnxscript_graph: The ONNX graph to be populated. + onnxscript_tracer: The tracer to trace the ONNX graph. + fx_name_to_onnxscript_value: The mapping from FX node name to ONNX Script value. + + Raises: + RuntimeError: When a node.op is not supported. + """ + # Record stack trace of node in diagnostic.
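+ # Example of the expected format (added comment; illustrative only):
+ #   File "model.py", line 10, in forward
+ #       return self.linear(x)
+ # which _location_from_fx_stack_trace parses into Location(uri, line, snippet).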
+ node_stack_trace = node.stack_trace + if node_stack_trace: + diagnostic = self.diagnostic_context.inflight_diagnostic( + rule=diagnostics.rules.fx_node_to_onnx + ) + with diagnostic.log_section(logging.INFO, "PyTorch source information"): + diagnostic.info("```\n%s\n```", node_stack_trace) + location = _location_from_fx_stack_trace(node_stack_trace) + if location is not None: + diagnostic.with_location(location) + + if node.op == "placeholder": + self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value) + elif node.op == "get_attr": + self.get_attr( + node, + onnxscript_graph, + fx_name_to_onnxscript_value, + fx_graph_module, + ) + elif node.op == "call_function": + self.call_function( + node, + onnxscript_tracer, + fx_name_to_onnxscript_value, + onnxfunction_dispatcher, + fx_graph_module, + ) + elif node.op == "call_method": + self.call_method(node) + elif node.op == "call_module": + self.call_module( + node, + onnxscript_graph, + fx_name_to_onnxscript_value, + onnxscript_tracer, + fx_graph_module, + onnxfunction_dispatcher, + ) + elif node.op == "output": + self.output(node, onnxscript_graph, fx_name_to_onnxscript_value) + else: + raise RuntimeError(f"Found node type not defined in torch.fx: {node.op}") + + @diagnostics.diagnose_call( + diagnostics.rules.fx_graph_to_onnx, + diagnostic_message_formatter=_fx_graph_to_onnx_message_formatter, + ) + def run( + self, + fx_graph_module: torch.fx.GraphModule, + onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, + parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph + | None = None, + ) -> onnxscript_graph_building.TorchScriptGraph: + """Analyze all FX nodes and trigger their ONNX translation. + + Args: + fx_graph_module: FX graph module to be translated. + onnxfunction_dispatcher: ONNX function dispatcher. + parent_onnxscript_graph: The parent TorchScript graph. Must be provided if + `fx_graph_module` is a submodule. If not provided, + `fx_graph_module` is assumed to be the root module. + """ + diagnostic = self.diagnostic_context.inflight_diagnostic() + with diagnostic.log_section(logging.DEBUG, "FX Graph:"): + diagnostic.debug( + "```\n%s\n```", + diagnostics.LazyString(fx_graph_module.print_readable, False), + ) + + if parent_onnxscript_graph is not None: + # If parent_onnxscript_graph is provided, we assume fx_graph_module is a + # submodule representing a forward call of an nn.Module. + # The package and version where the nn.Module is defined are composed into + # the domain name for the local function. + + onnx_meta: _pass.GraphModuleOnnxMeta | None = fx_graph_module.meta.get( + "onnx" + ) + if onnx_meta is None: + raise RuntimeError( + f"ONNX meta is not found in submodule {fx_graph_module._get_name()}. " + f"Only submodules produced by the `Modularize` pass are supported in ONNX export." + ) + + onnx_domain = onnx_meta.package_info.to_onnx_domain_string() + else: + # Leave as default domain name for the root module. + onnx_domain = None + + onnxscript_graph = onnxscript_graph_building.TorchScriptGraph( + parent_onnxscript_graph, domain_name=onnx_domain + ) + onnxscript_tracer = onnxscript_graph_building.TorchScriptTracingEvaluator( + onnxscript_graph + ) + # In the following loop, a TorchScript graph is created to + # represent the input FX graph with ONNX symbols (e.g., onnx::add). + # To connect the values to nodes in the TorchScript graph, we maintain + # fx_name_to_onnxscript_value.
Basically, we want to translate + # fx_tensor_x (type: torch.fx.Node) -> fx_node_1 -> fx_tensor_y (type: torch.fx.Node) + # to + # fx_name_to_onnxscript_value[fx_tensor_x.name] -> onnx_node_1 -> fx_name_to_onnxscript_value[fx_tensor_y.name] + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ] = {} + + # TODO: Fix the FakeTensorMode limitation asap + # We want to pass lists of ints and floats to the TorchScript graph correctly + # in _export_fx_to_ts, so we must disable FakeTensorMode. Otherwise, the graph may + # receive a FakeTensor, resulting in a runtime error. In addition, the TorchScript-based + # ONNX exporter used in _ts_graph_to_onnx_model_in_protobuf is not compatible + # with FakeTensorMode. + with torch.utils._mode_utils.no_dispatch(): + for node in fx_graph_module.graph.nodes: + self.run_node( + node, + fx_graph_module, + onnxfunction_dispatcher, + onnxscript_graph, + onnxscript_tracer, + fx_name_to_onnxscript_value, + ) + + with diagnostic.log_section(logging.DEBUG, "ONNX Graph:"): + diagnostic.debug("```\n%s\n```", onnxscript_graph.torch_graph) # type: ignore[attr-defined] + + return onnxscript_graph + + def placeholder( + self, + node: torch.fx.Node, + onnxscript_graph: onnxscript_graph_building.TorchScriptGraph, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + ): + # Input of graph. + # The node.meta["val"] is generated by FakeTensorProp. + # NOTE: add_input() intends to create nodes with shape/type + fake_tensor = node.meta.get("val", None) + # NOTE: During tracing, when inputs are constants, they are represented + # by nodes with node.meta['val'] being None (nn.Module to dynamo_export) + # or nodes with node.meta['val'] being a builtin value (ExportedProgram to dynamo_export). + # Nonetheless, the nodes are not consumed by others, so we don't need to + # create a TorchScriptTensor for them. + if fake_tensor is None or isinstance(fake_tensor, (int, float, bool, str)): + output = onnxscript_graph.add_input( + input_name=None, + ) + elif isinstance(fake_tensor, torch.Tensor): + # NOTE: ONNX doesn't support tensors of complex64/complex128, so we + # convert them to float32/float64 with a real representation.
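+ # Hedged illustration (added comment): a complex64 placeholder of shape (2, 3) is
+ # registered below as a float32 input of shape (2, 3, 2), matching
+ # torch.view_as_real(torch.zeros(2, 3, dtype=torch.complex64)).shape.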
+ if fx_type_utils.is_torch_complex_dtype(fake_tensor.dtype): + fake_tensor = torch.view_as_real(fake_tensor.resolve_conj()) + output = onnxscript_graph.add_input( + input_name=node.name, + shape=fake_tensor.shape, + dtype=fake_tensor.dtype, + ) + + elif fx_type_utils.is_torch_symbolic_type(fake_tensor): + output = onnxscript_graph.add_input( + input_name=node.name, + shape=torch.Size([]), + dtype=fx_type_utils.from_sym_value_to_torch_dtype(fake_tensor), + ) + else: + raise RuntimeError( + f"Unsupported type(node.meta['val']) for placeholder: {type(fake_tensor)}" + ) + assert ( + output is not None + ), f"Node creates None with target={node.target} and name={node.name}" + + assert isinstance(output, onnxscript_graph_building.TorchScriptTensor) + assert isinstance(output, onnxscript.tensor.Tensor) + + fx_name_to_onnxscript_value[node.name] = output + + def call_function( + self, + node: torch.fx.Node, + onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, + fx_graph_module: torch.fx.GraphModule, + ): + # aten ops and other stateless functions. + if node.target == operator.getitem and isinstance( + fx_name_to_onnxscript_value[node.args[0].name], # type: ignore[union-attr,index] + tuple, + ): + onnx_tensor_tuple = fx_name_to_onnxscript_value[node.args[0].name] # type: ignore[union-attr,index] + index = node.args[1] + value = onnx_tensor_tuple[index] # type: ignore[index] + assert ( + value is not None + ), f"Node creates None with target={node.target} and name={node.name}" + assert isinstance( + value, (onnxscript_graph_building.TorchScriptTensor, tuple) + ), type(value) + + fx_name_to_onnxscript_value[node.name] = value + return + + # Map FX inputs to ONNX inputs and fill optional inputs with default values. + # torch_args and torch_kwargs are for op-level validation + fx_args, fx_kwargs = _fill_in_default_kwargs(node) + + onnx_args, onnx_kwargs = _wrap_fx_args_as_onnxscript_args( + fx_args, + fx_kwargs, + fx_name_to_onnxscript_value, + onnxscript_tracer, + ) + # Dispatch to the ONNX op through OpSchema. The input argument dtypes are compared to + # the function signature in OpSchema to find the best-matched overload. + symbolic_fn = onnxfunction_dispatcher.dispatch( + node=node, + onnx_args=onnx_args, # type: ignore[arg-type] + onnx_kwargs=onnx_kwargs, + diagnostic_context=self.diagnostic_context, + ) + with onnxscript.evaluator.default_as(onnxscript_tracer): + output: ( + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...] + ) = symbolic_fn(*onnx_args, **onnx_kwargs) + assert ( + output is not None + ), f"Node creates None with target={node.target}, name={node.name}, args={onnx_args}, kwargs={onnx_kwargs}" + # Assign type and shape from fx graph. + _fill_tensor_shape_type(output, node.name, node.meta["val"]) + # One fx node could produce multiple outputs (e.g., tuple of tensors); in + # that case, `output` is a tuple of TorchScriptTensors.
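+ # e.g. (added comment, illustrative): aten::native_layer_norm produces three
+ # tensors, so `output` would be a tuple of three TorchScriptTensors and
+ # node.meta["val"] the matching tuple of FakeTensors.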
+ assert isinstance( + output, (onnxscript_graph_building.TorchScriptTensor, tuple) + ), type(output) + fx_name_to_onnxscript_value[node.name] = output + + def output( + self, + node: torch.fx.Node, + onnxscript_graph: onnxscript_graph_building.TorchScriptGraph, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + ): + if isinstance(node.args[0], torch.fx.Node): + onnx_tensor_or_tensor_tuple = fx_name_to_onnxscript_value[node.args[0].name] + onnxscript_graph.register_outputs(onnx_tensor_or_tensor_tuple) + else: + # ONNX can't represent collection types (e.g., dictionary, tuple of tuple of + # tensor, etc.), so we flatten the collection and register each element as an output. + flat_args, _ = _pytree.tree_flatten(node.args[0]) + for arg in flat_args: + assert isinstance( + arg, torch.fx.Node + ), f"arg must be a torch.fx.Node, not {type(arg)}" + onnx_tensor_or_tensor_tuple = fx_name_to_onnxscript_value[arg.name] + onnxscript_graph.register_outputs(onnx_tensor_or_tensor_tuple) + + def call_method(self, node: torch.fx.Node): + # TODO(wechi): Support call_method. + raise RuntimeError("call_method is not supported yet.") + + def call_module( + self, + node: torch.fx.Node, + parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + tracer: onnxscript_graph_building.TorchScriptTracingEvaluator, + root_fx_graph_module: torch.fx.GraphModule, + onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, + ) -> None: + """Export an fx.GraphModule submodule to an ONNXScript graph. + + The export process specifically targets `call_module` nodes that are created by + the exporter's `Modularize` pass. Each `call_module` node has an associated fx.GraphModule, + identified by `node.target`, underneath the root fx.GraphModule. These `call_module` nodes are exported as ONNX + function nodes. The related `sub_module` is then exported as an ONNX model local function, + which is represented by another `TorchScriptGraph`. This `TorchScriptGraph` sets the current + `onnxscript_graph` as its parent. + + Args: + node: The call_module node in the FX graph that represents the submodule call. + parent_onnxscript_graph: The parent ONNXScript graph to which the ONNX function and + function node belong. + fx_name_to_onnxscript_value: The mapping from FX node name to ONNXScript value. + tracer: The tracer used to trace the ONNXScript graph. + root_fx_graph_module: The root FX module. + onnxfunction_dispatcher: The dispatcher. + """ + assert isinstance( + node.target, str + ), f"node.target must be a str, not {type(node.target)} for node {node}." + + sub_module = root_fx_graph_module.get_submodule(node.target) + + assert isinstance( + sub_module, torch.fx.GraphModule + ), f"sub_module must be a torch.fx.GraphModule, not {type(sub_module)} for node {node}." + + sub_onnxscript_graph = self.run( + sub_module, onnxfunction_dispatcher, parent_onnxscript_graph + ) + + onnx_args, _ = _wrap_fx_args_as_onnxscript_args( + list(node.args), {}, fx_name_to_onnxscript_value, tracer + ) + + # TODO: We may want to consider other naming styles. The goal is to be stable and + # unique such that it can be easily identified in case of kernel substitution.
+ # An example of the current style is the combination of the qualified module class name and + # the module attribute name: `torch_nn_modules_conv_Conv2d_conv1`. + # Other naming styles, such as a uniquified qualified module class name, could also + # be considered. + unique_module_name = f"{sub_module._get_name()}_{node.target}" + + outputs: ( + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...] + ) = parent_onnxscript_graph.add_module_call( # type: ignore[assignment] + unique_module_name, sub_onnxscript_graph, onnx_args + ) + + assert isinstance( + outputs, (onnxscript_graph_building.TorchScriptTensor, tuple) + ), f"Unexpected outputs type {type(outputs)} for node {node}." + + _fill_tensor_shape_type(outputs, node.name, node.meta["val"]) + fx_name_to_onnxscript_value[node.name] = outputs + + # Skip op_level_validation for call_module. Subgraph nodes are validated individually. + + def get_attr( + self, + node: torch.fx.Node, + onnxscript_graph: onnxscript_graph_building.TorchScriptGraph, + fx_name_to_onnxscript_value: dict[ + str, + onnxscript_graph_building.TorchScriptTensor + | tuple[onnxscript_graph_building.TorchScriptTensor, ...], + ], + fx_graph_module: torch.fx.GraphModule, + ): + assert isinstance(node.target, str), f"node.target {node.target} is not a str." + attr_tensor = getattr(fx_graph_module, node.target) + assert isinstance(attr_tensor, torch.Tensor), f"{attr_tensor} is not a tensor." + + # Parameter/buffer name cannot contain "." + # Revert from "/" to restore namespace formatting. + input_ = onnxscript_graph.add_initializer( + name=node.target.replace("/", "."), + value=attr_tensor, + ) + + assert isinstance(input_, onnxscript_graph_building.TorchScriptTensor) + assert isinstance(input_, onnxscript.tensor.Tensor) + fx_name_to_onnxscript_value[node.name] = input_ diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/fx_symbolic_graph_extractor.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/fx_symbolic_graph_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..140fceb64d69de4b2b651bced7e481db2c66a12d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/fx_symbolic_graph_extractor.py @@ -0,0 +1,247 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import functools +from typing import Any, Callable, Mapping, Sequence + +import torch +import torch.fx +import torch.onnx +import torch.onnx._internal.fx.passes as passes +from torch.onnx._internal import _exporter_legacy, io_adapter + + +# Functions directly wrapped to produce torch.fx.Proxy so that symbolic +# data can flow through those functions. Python functions (e.g., `torch.arange`) +# not defined by pybind11 in C++ do not go through the Python dispatcher, so +# they are not automatically patched by FX's Python dispatcher. +# The list below means `torch.arange`, `torch.tensor`, and so on will be +# patched. +_TORCH_METHODS_TO_PATCH: tuple[str, ...] = ( + "arange", + "tensor", + "finfo", + "full", + "empty", +) + + +class ModuleExpansionTracer(torch.fx._symbolic_trace.Tracer): + """Tracer to create an ONNX-export-friendly FX graph. + + This tracer traces models into operators. That is, + the traced graph mostly contains call_function nodes and + has no call_module nodes. The call_module nodes + are problematic for the use of make_fx(...) in the ONNX + exporter.
+ """ + + def is_leaf_module( + self, module: torch.nn.Module, module_qualified_name: str + ) -> bool: + # This returns False so that all sub-modules are considered as not leaves + # and therefore expanded into operators in + # torch.fx._symbolic_trace.Tracer.call_module. + return False + + def to_bool(self, obj: torch.fx.Proxy) -> bool: + # FIXME: This is a hack to tracing through if-else Python blocks. + # It may generate incorrect ONNX graphs if the if-else block + return False + + +def _wrap_for_symbolic_trace(target: Callable) -> tuple[Callable, Callable]: + """This function wraps ```target`` for symbolic tracing. + + This function wraps ```target``` so that its wrapper produces + torch.fx.Proxy in symbolic computation. The returned values are + the wrapper and then the original function. Per `_TORCH_METHODS_TO_PATCH`, + this function shall receive `torch.arange`, `torch.tensor`, etc. as inputs. + """ + + @functools.wraps(target) + def wrapper(*args, **kwargs): + proxy = None + + def check_has_proxy(v): + if isinstance(v, torch.fx.Proxy): + nonlocal proxy + proxy = v + + torch.fx.node.map_aggregate(args, check_has_proxy) + torch.fx.node.map_aggregate(kwargs, check_has_proxy) + + if proxy is not None: + return proxy.tracer.create_proxy("call_function", target, args, kwargs) + else: + return target(*args, **kwargs) + + return wrapper, target + + +def _module_expansion_symbolic_trace( + root: torch.nn.Module | Callable[..., Any], + concrete_args: dict[str, Any] | None = None, +) -> torch.fx.GraphModule: + """Trace a callable into FX graph. + + When "root" is torch.nn.Module, calls to its submodule (type: torch.nn.Module) will be + expanded into operators (e.g., torch.matmul, torch.add, +, and -) to simplify graph + structure. + """ + # For functions doesn't support symbolic tracing, create wrappers + # which produce symbolic results during tracing. + patched_torch_methods = { + target_name: _wrap_for_symbolic_trace(getattr(torch, target_name)) + for target_name in _TORCH_METHODS_TO_PATCH + } + + # Set the symbolic-tracing friendly functions so that `tracer.trace` below + # can work. + for name, (wrapper, _) in patched_torch_methods.items(): + setattr(torch, name, wrapper) + + try: + # Set up a tracer. + tracer = ModuleExpansionTracer() + # Trace the model. + graph = tracer.trace(root, concrete_args) + name = ( + root.__class__.__name__ + if isinstance(root, torch.nn.Module) + else root.__name__ + ) + return torch.fx.GraphModule(tracer.root, graph, name) + finally: + # Revert the patches for symbolic tracing. + for name, (_, wrapped) in patched_torch_methods.items(): + # wrapped is the original version of `torch.name`. + setattr(torch, name, wrapped) + + +# TODO: Migrate to `DynamoExporter` after fake model tracing is supported. +# Proposal at https://github.com/pytorch/pytorch/issues/95900. +class FXSymbolicTracer(_exporter_legacy.FXGraphExtractor): + """Generates a FX GraphModule using torch.fx.symbolic_trace API + Args: + concrete_args: Inputs to be partially specialized + It can be used to remove control flow or data structures. + For example:: + def f(a, b): + if b == True: + return a + else: + return a*2 + FX can typically not trace through this due to the presence of control + flow. However, we can use `concrete_args` to specialize on the value of + `b` to trace through this:: + f = fx.symbolic_trace(f, concrete_args={'b': False}) + assert f(3, False) == 6 + Note that although you can still pass in different values of `b`, they will be ignored. 
+ It can also be used to eliminate data-structure handling from + our function. This will use pytrees to flatten your input. To avoid + overspecializing, pass in `fx.PH` for values that shouldn't be + specialized. For example:: + def f(x): + out = 0 + for v in x.values(): + out += v + return out + + + f = fx.symbolic_trace(f, concrete_args={"x": {"a": fx.PH, "b": fx.PH, "c": fx.PH}}) + assert f({"a": 1, "b": 2, "c": 4}) == 7 + """ + + def __init__(self, concrete_args: dict[str, Any] | None = None): + super().__init__() + # TODO: plumb ``concrete_args`` to the symbolic_trace call in ``generate_fx`` + self.concrete_args = concrete_args + + def _trace_into_fx_graph_via_fx_symbolic_trace( + self, model, model_args, model_kwargs + ) -> torch.fx.GraphModule: + # Bind model args and kwargs with the model signature to retrieve default values + # of unprovided arguments. These are then used to construct ``concrete_args``. + bind_input_step = io_adapter.BindInputStep( + torch.onnx.utils.model_signature(model) + ) + self.input_adapter.append_step(bind_input_step) + _, named_args = bind_input_step.apply(model_args, model_kwargs, model=model) + + # Create inputs to call symbolic trace (torch.fx.symbolic_trace) + # Example content of concrete_args: + # concrete_args["x"] = torch.fx._symbolic_trace.PH + # concrete_args["b"] = 1 + # where "x" and "b" are argument names in "signature". + concrete_args = {} + for param_name, param_value in named_args.items(): + if isinstance(param_value, torch.Tensor): + # param_value can be, e.g., a real tensor or a fake tensor. + # param_value is treated as a substitutable tensor symbol (aka placeholder). + concrete_args[param_name] = torch.fx._symbolic_trace.PH + else: + concrete_args[param_name] = param_value + + # Merge kwargs back into args since that is the format FX graph expects. + merge_kwargs_step = io_adapter.MergeKwargsIntoArgsInputStep() + self.input_adapter.append_step(merge_kwargs_step) + return _module_expansion_symbolic_trace(model, concrete_args=concrete_args) + + def generate_fx( + self, + options: _exporter_legacy.ResolvedExportOptions, + model: torch.nn.Module | Callable, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + ) -> torch.fx.GraphModule: + diagnostic_context = options.diagnostic_context + graph_module = self._trace_into_fx_graph_via_fx_symbolic_trace( + model, model_args, model_kwargs + ) + + # Make sure all placeholder nodes are executed before get_attr nodes. + # Otherwise, inputs can interleave with initializers in the final ModelProto.graph.input. + # Basically, we want + # ModelProto.graph.input = + # [input_0, input_1, ..., input_n, weight_0, weight_1, ..., weight_m] + # and we don't want + # ModelProto.graph.input = + # [input_0, weight_0, input_1, weight_1, ..., input_n, weight_m] + graph_module = passes.MovePlaceholderToFront( + diagnostic_context, graph_module + ).run() + # To save memory, move get_attr to input so that the generated model doesn't + # have weight tensors. "replaced_attrs" are a tuple of replaced weight tensors.
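+ # Sketch of the effect (added comment, not in the original source): a node pair
+ #   %w : get_attr[target=linear.weight]; %y = call_function[aten.matmul](%x, %w)
+ # becomes
+ #   %w : placeholder; %y = call_function[aten.matmul](%x, %w)
+ # with the original tensor recorded in replaced_attrs so the input adapter can
+ # supply it as an extra argument at call time.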
+ replace_get_attr_with_placeholder_pass = passes.ReplaceGetAttrWithPlaceholder( + diagnostic_context, graph_module + ) + graph_module = replace_get_attr_with_placeholder_pass.run() + replaced_attrs = replace_get_attr_with_placeholder_pass.replaced_attrs + append_extra_input_step = io_adapter.LiftParametersAndBuffersIntoArgsInputStep( + replaced_attrs + ) + self.input_adapter.append_step(append_extra_input_step) + # Move all newly created placeholder nodes to the front of the graph. + graph_module = passes.MovePlaceholderToFront( + diagnostic_context, graph_module + ).run() + # Finalize the graph editing. + graph_module.recompile() + + updated_model_args = self.input_adapter.apply( + *model_args, model=model, **model_kwargs + ) + + return self.pre_export_passes(options, model, graph_module, updated_model_args) # type: ignore[return-value] + + def pre_export_passes( + self, + options: _exporter_legacy.ResolvedExportOptions, + original_model: torch.nn.Module | Callable, + fx_module: torch.fx.GraphModule, + fx_module_args: Sequence[Any], + ): + return _exporter_legacy.common_pre_export_passes( + options, original_model, fx_module, fx_module_args + ) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/onnxfunction_dispatcher.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/onnxfunction_dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..964afcee9ae4657fcba9a90073fae41be94a443f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/onnxfunction_dispatcher.py @@ -0,0 +1,874 @@ +# mypy: allow-untyped-defs +"""Dispatcher for AtenLib functions from onnx-script.""" + +from __future__ import annotations + +import logging +import operator +import types +from typing import Any, Callable, Sequence, TYPE_CHECKING + +import torch +import torch._ops +import torch.fx +from torch.onnx._internal.fx import ( + diagnostics, + registration, + type_utils as fx_type_utils, +) + + +if TYPE_CHECKING: + import onnxscript # type: ignore[import] + from onnxscript.function_libs.torch_lib import ( # type: ignore[import] + graph_building as onnxscript_graph_building, + ) + + from torch.onnx import OnnxRegistry + + +def _find_opschema_matched_symbolic_function_disagnostic_message_formatter( + fn: Callable, + self, + node: torch.fx.Node, + default_and_custom_functions: list[registration.ONNXFunction], + *args, + **kwargs, +) -> str: + """Format the diagnostic message for the nearest match warning.""" + all_function_overload_names = "" + for symbolic_func in default_and_custom_functions: + overload_func = symbolic_func.onnx_function + all_function_overload_names += f"ONNX Node: {overload_func.name}[opset={overload_func.opset};is_custom={symbolic_func.is_custom}]. \n" # noqa: B950 + return f"FX Node: {node.target}. \n" f"{all_function_overload_names}" + + +def _find_operator_overloads_in_onnx_registry_disagnostic_message_formatter( + fn: Callable, + self, + node: torch.fx.Node, + *args, + **kwargs, +) -> str: + """Format the diagnostic message for the operator overload search.""" + return f"Searching operator overload: '{node.target}' in onnx registry...\n" + + +class OnnxFunctionDispatcher: + """A dispatcher that finds the best ONNX Function for ATen/Custom operators. + + It uses the `torch.ops` name to find the function. If not found, it falls back to the default. + Otherwise, the best match is found among all function overloads. An exact match has + higher precedence over the closest ones.
+ + Below is a breakdown of how the dispatch mechanism works: + + 1. Use the torch.ops name to find the function: + a. Check if the ATen overload exists in the registry. + b. If not, check if the default overload exists in the registry. + + 2. Find the nearest match among all overloaded functions: + a. If the types match perfectly, select the function. + b. Otherwise, find the nearest one with the highest matching score. Because + dtypes and attributes may be annotated incorrectly, we use the nearest + match to find the best function once the aten name is targeted. + + 3. Tie-breaker: If there are multiple nearest matches, we will select the one with + the highest matching score. + + NOTE: The nearest match `doesn't guarantee` a correct match, and a warning message is logged. + """ + + def __init__( + self, + onnx_registry: OnnxRegistry, + diagnostic_context: diagnostics.DiagnosticContext, + ): + """Initialize the ONNX Function dispatcher. + + Args: + onnx_registry: The ONNX registry. + diagnostic_context: The diagnostic context to use for reporting errors. + """ + self.onnx_registry = onnx_registry + self.diagnostic_context = diagnostic_context + + def dispatch( + self, + node: torch.fx.Node, + onnx_args: Sequence[ + fx_type_utils.TensorLike | str | int | float | bool | list | complex | None + ], + onnx_kwargs: dict[str, fx_type_utils.Argument], + diagnostic_context: diagnostics.DiagnosticContext, + ) -> onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction: + """Dispatches an ONNX function based on the given FX node, arguments, and keyword arguments. + Args: + node: The TorchFX node to dispatch the function for. + onnx_args: The arguments of the ONNX function. + onnx_kwargs: The keyword arguments of the ONNX function. + diagnostic_context: The diagnostic context to use for reporting errors. + Returns: + Either an `onnxscript.OnnxFunction` or `onnxscript.TracedOnnxFunction` instance based on the dispatch algorithm. + Raises: + RuntimeError: If there are no overloaded functions available for the given FX node. + """ + # If there are no overloaded functions available for the given FX node, raise an + # unsupported error + default_and_custom_functions = self.get_function_overloads( + node, diagnostic_context + ) + + # If there are overloaded functions available, we will find the one that perfectly + # or most nearly matches the given arguments and keyword arguments + return self._find_the_perfect_or_nearest_match_onnxfunction( + node, + default_and_custom_functions, + onnx_args, + onnx_kwargs, + diagnostic_context, + ) + + def _filter_or_keep_complex( + self, + node, + default_and_custom_functions: list[registration.ONNXFunction], + diagnostic_context: diagnostics.DiagnosticContext, + ) -> list[registration.ONNXFunction]: + """Filter the complex functions if the input has complex dtype.""" + + args_with_complex_dtype = [_is_arg_with_complex_dtype(arg) for arg in node.args] + if any(args_with_complex_dtype): + default_and_custom_functions = [ + func for func in default_and_custom_functions if func.is_complex + ] + # If we can't find any complex function, raise an error.
+ if not default_and_custom_functions: + op_full_name = self._get_aten_name( + node, diagnostic_context + ).qualified_name() + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Cannot find any COMPLEX symbolic function for {op_full_name}, " + f"which should be registered under {node.target}.", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + else: + default_and_custom_functions = [ + func for func in default_and_custom_functions if not func.is_complex + ] + # If we can't find any non-complex (real-valued) function, raise an error. + if not default_and_custom_functions: + op_full_name = self._get_aten_name( + node, diagnostic_context + ).qualified_name() + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Can ONLY find COMPLEX symbolic function for {op_full_name}, " + f"which should be registered under {node.target}.", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + return default_and_custom_functions + + @diagnostics.diagnose_call( + diagnostics.rules.find_opschema_matched_symbolic_function, + diagnostic_message_formatter=_find_opschema_matched_symbolic_function_disagnostic_message_formatter, + ) + def _find_the_perfect_or_nearest_match_onnxfunction( + self, + node: torch.fx.Node, # this is used in diagnostic_message_formatter + default_and_custom_functions: list[registration.ONNXFunction], + onnx_args: Sequence[ + fx_type_utils.TensorLike | str | int | float | bool | list | complex | None + ], + onnx_kwargs: dict[str, fx_type_utils.Argument], + diagnostic_context: diagnostics.DiagnosticContext, + ): + """Find the perfect or nearest matching OnnxFunction for the given FX node, arguments, and keyword arguments. + + Args: + default_and_custom_functions: The list includes overloaded functions, with + custom ones appearing after the default ones. + onnx_args: Arguments organized in PyTorch inputs way. + onnx_kwargs: Keyword arguments organized in PyTorch inputs way. + diagnostic_context: The diagnostic context to use for reporting errors. + + Returns: + Either an `onnxscript.OnnxFunction` or `onnxscript.TracedOnnxFunction` instance based on the dispatch algorithm. + Raises: + RuntimeError: If there are no overloaded functions available for the given FX node. + """ + overload_match_ranking: dict[registration.ONNXFunction, int | None] = {} + diagnostic = diagnostic_context.inflight_diagnostic() + + # Iterate the overloaded functions in reverse order to prioritize the custom ones + # over the default ones, and find the perfect match. + for symbolic_function in reversed(default_and_custom_functions): + function_opschema = _OnnxSchemaChecker(symbolic_function.onnx_function) + + # NOTE: 1. If the perfect match is found, return the function + if function_opschema.perfect_match_inputs( + diagnostic, onnx_args, onnx_kwargs + ): + return symbolic_function.onnx_function + # Record the match score for the nearest match if it's not the perfect match + overload_match_ranking[symbolic_function] = function_opschema.match_score + + # NOTE: 2.
If there is no perfect match, find the nearest match among the nearest match candidates. + # If there is no nearest match, raise an error + overload_match_ranking = { + k: v for k, v in overload_match_ranking.items() if v is not None + } + if not overload_match_ranking: + # If there are no overloaded functions available for the given FX node, raise an + # unsupported error + op_full_name = self._get_aten_name( + node, diagnostic_context + ).qualified_name() + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Cannot find any perfect/nearest match of symbolic function for {op_full_name}, " + f"which should be registered under {node.target}.", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + + diagnostic.warning( + "### Exact match is not found!\n" + "Cannot find a perfect match of symbolic overload, " + "but a nearest match is found. Please check the ONNX output carefully. \n", + ) + diagnostic.level = diagnostics.levels.WARNING + # NOTE: 3. Tie-breaker: if there are multiple nearest matches, we will choose the one + # that is custom first. If there are multiple custom ones, we will choose the one + # that was added last to the list. + symbolic_function_list: list[registration.ONNXFunction] = sorted( + overload_match_ranking, + key=lambda k: ( + overload_match_ranking[k], + k.is_custom, + default_and_custom_functions.index(k), + ), + reverse=True, + ) + return symbolic_function_list[0].onnx_function + + def _get_aten_name( + self, node: torch.fx.Node, diagnostic_context: diagnostics.DiagnosticContext + ) -> registration.OpName: + """Get the OpName from the target. + + Args: + node: The TorchFX node to get the aten name for. + diagnostic_context: The diagnostic context to use for reporting errors. + + Returns: + The internal op name within dataclass: registration.OpName. + """ + if node.target == operator.getitem: + return registration.OpName.from_name_parts( + namespace="aten", op_name="getitem" + ) + if isinstance(node.target, torch._ops.OpOverloadPacket): + # aten::sym_size is the only OverloadPacket that we support. + # schema: aten::sym_size(Tensor self, int dim) -> Tensor + if node.target != torch.ops.aten.sym_size: + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Unsupported OverloadPacket: {node.target}, aten.sym_size is the only allowed OverloadPacket!", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + # TODO(titaiwang): aten::sym_size has an overload, but the fx graph is using + # the overloadpacket for some reason. + # https://github.com/pytorch/pytorch/issues/97201 + aten_op_default = node.target.default + return registration.OpName.from_op_overload(op_overload=aten_op_default) # type: ignore[no-any-return] + + if isinstance(node.target, types.BuiltinFunctionType): + # Make sure it's a symint/symfloat-consuming builtin op.
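+ # e.g. (added comment, illustrative): operator.add(sym_size_node, 1) passes this
+ # check, while operator.add(tensor_node, 1) is rejected, since builtin ops are
+ # only dispatched for int/float/SymInt/SymFloat arguments.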
+ for node_arg in node.args: + if (not isinstance(node_arg, (torch.fx.Node, int, float))) or ( + isinstance(node_arg, torch.fx.Node) + and not fx_type_utils.is_torch_symbolic_type(node_arg.meta["val"]) + ): + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Unsupported node arg: {node_arg} (type {type(node_arg)}) with builtin function: {node.target}," + " only int/float/SymInt/SymFloat is supported with built-in ops!", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + return registration.OpName.from_builtin_function(node.target) + + if isinstance(node.target, torch._ops.OpOverload): + return registration.OpName.from_op_overload(op_overload=node.target) + + # Unexpected target, raise an error. + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Unknown call_function target: {node.target}", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + + @diagnostics.diagnose_call( + diagnostics.rules.find_operator_overloads_in_onnx_registry, + diagnostic_message_formatter=_find_operator_overloads_in_onnx_registry_disagnostic_message_formatter, + ) + def get_function_overloads( + self, + node: torch.fx.Node, + diagnostic_context: diagnostics.DiagnosticContext, + ) -> list[registration.ONNXFunction]: + """Get the function overloads from the registry. + + Args: + node: The node to get the function overloads for. + diagnostic_context: The diagnostic context to use for reporting errors. + + Returns: + A list of ONNXFunctions, starting with the default ones and + followed by any custom ones. + """ + + internal_opname: registration.OpName = self._get_aten_name( + node=node, diagnostic_context=diagnostic_context + ) + + # If the ATen/Custom operators are not registered, the group will be None. + # Non-registered ATen/Custom operators will trigger an error in the next step. + function_group: list[registration.ONNXFunction] | None = None + + function_group = self.onnx_registry.get_op_functions( + namespace=internal_opname.namespace, + op_name=internal_opname.op_name, + overload=internal_opname.overload, + ) + + # NOTE: Fall back to the default overload if the ONNX registry doesn't have the overload. + if function_group is None: + function_group = self.onnx_registry.get_op_functions( + namespace=internal_opname.namespace, + op_name=internal_opname.op_name, + overload=None, + ) + if function_group is not None: + op_full_name = internal_opname.qualified_name() + diagnostic = diagnostic_context.inflight_diagnostic() + diagnostic.warning( + "### The operator overload is not found in onnx registry!\n" + "Cannot find the operator overload in onnx registry, but " + "the default overload is found. Please check the ONNX output carefully. \n", + ) + diagnostic.level = diagnostics.levels.WARNING + + if function_group is not None: + # NOTE: If the input has complex dtype, we will only dispatch to the complex functions.
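+ # e.g. (added comment, illustrative): for aten::mul on complex inputs, only
+ # overloads registered with is_complex=True survive the filter below; for real
+ # inputs the complex variants are dropped instead.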
+ function_group = self._filter_or_keep_complex( + node, function_group, diagnostic_context + ) + return function_group # type: ignore[return-value] + + op_full_name = internal_opname.qualified_name() + diagnostic = diagnostics.UnsupportedFxNodeDiagnostic( + diagnostics.rules.no_symbolic_function_for_call_function, + diagnostics.levels.ERROR, + f"Cannot find symbolic function for {op_full_name}, " + f"which should be registered under {node.target}.", + unsupported_fx_node=node, + ) + diagnostic_context.log(diagnostic) + raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic) + + +class _OnnxSchemaChecker: + """ + The OnnxSchemaChecker class is a checker for ONNX OpSchema and param schema. + + It provides methods to check for input compatibility based on the OpSchema. It also + provides a matching score to indicate how well the OpSchema matches the input and + kwargs types. A function is evaluated as a perfect match, nearest match eligible, + or no match. + + Here are some common examples, by category: + + 1. [NOTE: Perfect match]: The number of inputs and attributes is exactly the same as + the OpSchema, and the types of inputs and attributes are exactly the same as the + OpSchema. + + ```python + inputs = (Tensor[2, 3], Tensor[2, 3]) + attributes = {"alpha": 1.0} + + + @torch_op("aten::op") + def aten_op(self: TReal, other: TReal, alpha: float = 1) -> TReal: ... + ``` + Result: Perfect match. + + 2. [NOTE: Optional input]: The dispatcher recognizes optional inputs. However, + the input can't be ignored; None must be provided. + + ```python + inputs = (Tensor([2, 3]), None) + attributes = {} + + aten_op(X: TTensor, Y: Optional[INT64]): + ... + ``` + Result: Perfect match. + Real example: `aten::convolution`. + + 3. [NOTE: Different attributes]: If an attribute is provided with a value, it + must match an attribute in the function signature. + ```python + inputs = (Tensor([2, 3]),) + attributes = {"a":1, "b":2} + + aten_op(X: TTensor, a: int): + ... + ``` + Result: No match. + Real example: `aten::div` vs `aten::div.Tensor_mode`. + + 4. [NOTE: Default attributes]: A default attribute fills its value into + inputs/attributes. + ```python + inputs = (Tensor([2, 3]),) + attributes = {} + + aten_op(X: TTensor, a: int = 3): + ... + ``` + Result: Perfect match. + Real example: `aten::clone`. + + 5. [NOTE: Ignore attribute with None value]: Attributes with a None value + are ignored in matching. + ```python + inputs = (Tensor([2, 3]),) + attributes = {"a": None} + + aten_op(X: TTensor): + ... + ``` + Result: Perfect match. + + ```python + inputs = (Tensor([2, 3]),) + attributes = {"a": None} + + aten_op(X: TTensor, a: int = 3): + ... + ``` + Result: Nearest match eligible. + + Real example: `aten::div` vs `aten::div.Tensor_mode`. + + Attributes: + onnxfunction: The OnnxFunction. + param_schema: The parameter schema defined in the OnnxFunction. + op_schema: The ONNX OpSchema. + type_constraints: The type constraints defined in the OpSchema. + attributes: The attributes defined in the OpSchema. + _matching_score: The matching score of the OnnxSchemaChecker. + + """ + + def __init__( + self, + onnxfunction: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction, + ): + """Initialize the OnnxSchemaChecker. + + Args: + onnxfunction: The OnnxFunction. + """ + self.onnxfunction = onnxfunction + self.param_schema = self.onnxfunction.param_schemas() + op_schema = self.onnxfunction.op_schema + # Neither `OnnxFunction` nor `TracedOnnxFunction` returns None for `op_schema`.
+ # However, their base class could, hence the return type is annotated as Optional[OpSchema]. + assert op_schema is not None + self.op_schema = op_schema + self.type_constraints = { + # "T": {"tensor(int64)"} + constraint.type_param_str: set(constraint.allowed_type_strs) + for constraint in self.op_schema.type_constraints + } + self.attributes = self.op_schema.attributes + self._matching_score: int | None = None + + @property + def match_score(self) -> int | None: + """The matching score of the OnnxSchemaChecker. + + If this remains None, the matching score has not been calculated and the + function is not a nearest match candidate. + + Returns: + The matching score of the OnnxSchemaChecker. + """ + return self._matching_score + + def perfect_match_inputs( + self, + diagnostic: diagnostics.Diagnostic, + args: Sequence[ + fx_type_utils.TensorLike | str | int | float | bool | list | complex | None + ], + kwargs: dict[str, fx_type_utils.Argument], + ) -> bool: + """Check if the inputs perfectly match the OpSchema requirements. + + The definition of a perfect match is that the input types are all in the type + constraints and the number of inputs matches the number of inputs in the + OpSchema. + + Checking steps: + 1. The function signature matches the number of inputs and the attribute names. + 2. The input/attribute types are all in the type constraints. + + A function must at least pass the first step to be eligible for + nearest matching. + + Args: + diagnostic: The diagnostic to use for logging detailed info. + args: The input arguments, organized the way PyTorch passes inputs. + kwargs: The input keyword arguments, organized the way PyTorch passes inputs. + + Returns: + True if the inputs match the requirements, False otherwise. + """ + + # NOTE: OnnxFunction does not have the same function signature as the original + # PyTorch operator. We need to separate the inputs/attributes from the arguments. + ( + function_inputs, + function_attributes, + ) = self._separate_input_attributes_from_arguments( + self.param_schema, + args, + kwargs, + fill_defaults=True, # fill defaults for optional arguments to match + ) + with diagnostic.log_section(logging.INFO, "Checking perfect match..."): + diagnostic.info( + "%s", + diagnostics.LazyString(diagnostics.format_argument, self.onnxfunction), + ) + # NOTE: 1. Check if the input number and attribute names match the + # OpSchema. If they don't, we know the function is not eligible to be a perfect + # match, nor a nearest match. + # We use is_perfect_match to postpone the return value to the end + # of the function, as we want to log all the mismatch info. + is_perfect_match = True + if len(function_inputs) != len(self.op_schema.inputs): + with diagnostic.log_section( + logging.INFO, "Failed: input number mismatch!" + ): + diagnostic.info( + "Actual %d vs expected %d", + len(function_inputs), + len(self.op_schema.inputs), + ) + diagnostic.info("The function is not a nearest match candidate.") + is_perfect_match = False + + if set(function_attributes) != set(self.attributes): + with diagnostic.log_section( + logging.INFO, "Failed: attribute mismatch!" + ): + diagnostic.info( + "%s", + diagnostics.LazyString( + lambda: f"Actual {set(function_attributes)} vs expected {set(self.attributes)}", + ), + ) + diagnostic.info("The function is not a nearest match candidate.") + is_perfect_match = False + + # If it's already not a perfect match, we can return False directly. Further + # checking is only for functions that are eligible for a nearest match.
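+ # Illustrative sketch (hypothetical schema): matching args (x,) against an + # OpSchema with inputs (X, Y) fails the input-count check above, so the + # function is excluded from nearest-match candidacy; a dtype-only mismatch + # instead passes this point and is merely scored lower in step 2 below.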
+ if not is_perfect_match: + return False + + # NOTE: 2. The dtypes of inputs and attributes should be in the + # type constraints of the OpSchema. If they are not, we know the function is not + # eligible to be a perfect match, but it can be a nearest match candidate. + for schema_input, torch_input in zip( + self.op_schema.inputs, function_inputs + ): + torch_input_compatible_types = _find_onnx_data_type(torch_input) + allowed_types = self.type_constraints[schema_input.type_str] + if not allowed_types.intersection( + torch_input_compatible_types + ) and not any( + fx_type_utils.is_optional_onnx_dtype_str(onnx_type_str) + for onnx_type_str in allowed_types + ): + # If torch_input_compatible_types has no overlap with the allowed_types + # of this input defined in the OpSchema, we know the function + # and the input are not compatible + with diagnostic.log_section( + logging.INFO, + "Failed: input type mismatch for input '%s'!", + schema_input.name, + ): + diagnostic.info( + "Actual %s vs\nExpected %s", + torch_input_compatible_types, + allowed_types, + ) + is_perfect_match = False + + for attribute_name, attribute in function_attributes.items(): + if not self._match_onnx_attribute_type(attribute_name, attribute): + # If the attribute type in the OpSchema and the given attribute type don't match, + # we know the function and the input are not compatible + with diagnostic.log_section( + logging.INFO, + "Failed: attribute '%s' type mismatch!", + attribute_name, + ): + diagnostic.info( + "Actual %s vs\nExpected %s", + type(attribute), + self.attributes[attribute_name].type, + ) + is_perfect_match = False + + # NOTE: This is still a candidate for nearest match, as the mismatches so far are dtype-only. + self._record_matching_score(function_inputs, function_attributes) + diagnostic.info("match score: %d", self.match_score) + return is_perfect_match + + def _match_onnx_attribute_type( + self, + attribute_name: str, + attribute: fx_type_utils.Argument | onnxscript_graph_building.TorchScriptTensor, + is_sequence: bool = False, + ) -> bool: + if isinstance(attribute, (int, float, bool, str)): + attribute_onnx_type = fx_type_utils.from_python_type_to_onnx_attribute_type( + type(attribute), is_sequence=is_sequence + ) + if attribute_onnx_type != self.attributes[attribute_name].type: + return False + # A non-empty list/tuple is matched by its first element; an empty list gives + # no type information, so it falls through to the mismatch branch below. + elif isinstance(attribute, (list, tuple)) and attribute: + return self._match_onnx_attribute_type( + attribute_name, attribute[0], is_sequence=True + ) + else: + # NOTE: Unrecognized attribute type + return False + return True + + def _record_matching_score( + self, + inputs: Sequence[ + fx_type_utils.TensorLike | str | int | float | bool | list | complex | None + ], + attributes: dict[str, fx_type_utils.Argument], + ): + """Calculate how well the inputs match the OpSchema requirements, to find the nearest match. + + Only functions which have the same number of inputs and attributes as the + OpSchema are eligible to be nearest match candidates. Thus, we don't need to + check the length of inputs and attributes here, and only check the types of + inputs and attributes. + + How the matching score is calculated: + score += 1 if one input/attribute type is in the type constraints. + + Limitations: + None/NoneType/[] could result in zero matches and identical scores across + overloads, which will be recorded in SARIF. + + Args: + inputs: The input arguments. + attributes: The input keyword arguments.
+ + Returns: + None. The score is recorded in ``self._matching_score``. + """ + self._matching_score = 0 + # Functions whose argument count differs from the inputs naturally score + # lower than functions whose argument count matches. + for schema_input, torch_input in zip(self.op_schema.inputs, inputs): + torch_input_compatible_types = _find_onnx_data_type(torch_input) + allowed_types = self.type_constraints[schema_input.type_str] + if allowed_types.intersection(torch_input_compatible_types): + # If torch_input_compatible_types overlaps with the allowed_types + # of this input defined in the OpSchema, we know the function + # and the input are compatible + self._matching_score += 1 + # NOTE: A penalty is applied to functions whose attribute types differ. + for attribute_name, attribute_proto in self.attributes.items(): + attribute = attributes[attribute_name] + attribute_onnx_type = fx_type_utils.from_python_type_to_onnx_attribute_type( + type(attribute) + ) + if attribute_onnx_type != attribute_proto.type: + # If the attribute type in the OpSchema and the given attribute type don't match, + # we know the function and the input are not compatible + self._matching_score -= 1 + + # NOTE: Adapted from an onnxscript internal function. + # Importing it directly would make the code less robust, as it is not a public API. + + def _separate_input_attributes_from_arguments( + self, + param_schemas: Sequence[onnxscript.values.ParamSchema], + args: Sequence[ + fx_type_utils.TensorLike | str | int | float | bool | list | complex | None + ], + kwargs: dict[str, fx_type_utils.Argument], + fill_defaults: bool = True, + ) -> tuple[list[Any], dict[str, Any]]: + """Separate Python args and kwargs into ONNX inputs and attributes. + + Extra kwargs are ignored if their values are None. For example, if the + OpSchema has an attribute "rounding_mode" and the caller provides + "rounding_mode=None", the attribute "rounding_mode" will not be included + in the returned attributes when the OnnxFunction signature doesn't have + "rounding_mode" as an attribute. + + Args: + param_schemas: The parameter schemas of an Op or an OnnxFunction. + args: The Python positional arguments supplied by the caller. + kwargs: The Python keyword arguments supplied by the caller. + fill_defaults: Whether to fill in the default values for attributes. + + Returns: + A tuple of two elements: + - A list of ONNX inputs. + - A dictionary of ONNX attribute names and values. + + Raises: + TypeError: When allow_extra_kwargs is False and there are unknown kwargs. + TypeError: When a required input is not provided.
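+ + Example (illustrative, hypothetical schema for ``aten::div``): + param_schemas = [self (input), other (input), rounding_mode (attribute)] + args = (x, y); kwargs = {"rounding_mode": None} + -> onnx_inputs == [x, y]; onnx_attributes == {} (the None attribute is dropped)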
+ """ + # args, kwargs and param_schemas should be all in order + # user may not specify all inputs or attributes + + import onnx + + onnx_inputs: list[Any] = [] + onnx_attributes: dict[str, Any] = {} + # NOTE: We need to copy kwargs because we will mutate it + copy_kwargs = kwargs.copy() + for i, param in enumerate(param_schemas): + if param.is_variadic_input: + # Exhaust all remaining args + onnx_inputs.extend(args[i:]) + args = [] + continue + if i < len(args): + if param.is_input: + onnx_inputs.append(args[i]) + else: + onnx_attributes[param.name] = args[i] + elif param.name in copy_kwargs: + if param.is_input: + # Move the input from kwargs to inputs + onnx_inputs.append(copy_kwargs[param.name]) + copy_kwargs.pop(param.name) + else: + onnx_attributes[param.name] = copy_kwargs[param.name] + elif ( + param.is_attribute + and self.attributes[param.name].default_value.type + != onnx.AttributeProto.UNDEFINED # type: ignore[attr-defined] + ): + # User did not provide the attribute + if fill_defaults: + onnx_attributes[param.name] = param.default + # optional input + elif param.is_input: + if fill_defaults: + onnx_inputs.append(None) + + # NOTE: Pick up extra kwargs if it's not None. None is not expected + # as an attribute value in torchlib. + for k, v in copy_kwargs.items(): + if k not in onnx_attributes and v is not None: + onnx_attributes[k] = v + return onnx_inputs, onnx_attributes + + +def _is_arg_with_complex_dtype(arg: fx_type_utils.Argument) -> bool: + """Check if the node has complex dtype recursively.""" + if ( + isinstance(arg, torch.fx.Node) + and "val" in arg.meta + and isinstance(arg.meta["val"], torch.Tensor) + and torch.is_complex(arg.meta["val"]) + ): + return True + elif isinstance(arg, list): + for item in arg: + return _is_arg_with_complex_dtype(item) + return False + + +def _find_onnx_data_type( + torch_input: fx_type_utils.TensorLike + | str + | int + | float + | bool + | list + | tuple + | complex + | None, +) -> set[str]: + """Convert inputs data type from torch acceptable dtype to the compatible onnx dtype string.""" + if ( + isinstance(torch_input, fx_type_utils.TensorLike) + and torch_input.dtype is not None + ): + return fx_type_utils.from_torch_dtype_to_onnx_dtype_str(torch_input.dtype) + if isinstance(torch_input, (int, float, bool, str, complex)): + return fx_type_utils.from_torch_dtype_to_onnx_dtype_str(type(torch_input)) + if isinstance(torch_input, (list, tuple)) and torch_input: # [Tensor, Tensor] + the_first_non_none_item = next( + (item for item in torch_input if item is not None), None + ) + set_dtype = _find_onnx_data_type(the_first_non_none_item) + if any(isinstance(input, fx_type_utils.TensorLike) for input in torch_input): + # NOTE: Any Tensor involved in a list would make it a seq(tensor(onnx_type)) + return {f"seq({dtype})" for dtype in set_dtype} + else: + # constant list of non-tensor type + return set_dtype + if ( + torch_input is None + or ( + isinstance(torch_input, fx_type_utils.TensorLike) + and torch_input.dtype is None + ) + or (isinstance(torch_input, (list, tuple)) and not torch_input) + ): + # NOTE: None, No dtype, and empty list are edge cases, we allow it to be any type to relax the type check + # seq(tensor) also goes to here, as it is not supported in torchscript, and it would be None in this case. 
+ return set() + + raise RuntimeError(f"Unknown input type from input: {torch_input}") diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__init__.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa04e6beb5f127a7685e9f4d7b4d1ac5842159de --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__init__.py @@ -0,0 +1,18 @@ +from .decomp import Decompose +from .functionalization import Functionalize, RemoveInputMutation +from .modularization import Modularize +from .readability import RestoreParameterAndBufferNames +from .type_promotion import InsertTypePromotion +from .virtualization import MovePlaceholderToFront, ReplaceGetAttrWithPlaceholder + + +__all__ = [ + "Decompose", + "InsertTypePromotion", + "Functionalize", + "Modularize", + "MovePlaceholderToFront", + "RemoveInputMutation", + "RestoreParameterAndBufferNames", + "ReplaceGetAttrWithPlaceholder", +] diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__pycache__/type_promotion.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__pycache__/type_promotion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62f28a322f7f6b0eca669cbdc6fa57319f493495 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__pycache__/type_promotion.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__pycache__/virtualization.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__pycache__/virtualization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d6531124a32aa7a2f3188f93ba76811937be2e Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/__pycache__/virtualization.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/_utils.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..853557362e049536d4c3ec67d8ba6dc1198bda73 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/_utils.py @@ -0,0 +1,115 @@ +# mypy: allow-untyped-defs +"""Common utility functions for FX passes. + +These functions should NOT be directly invoked outside of `passes` package. +""" + +from __future__ import annotations + +import collections +import re +from typing import Callable + +import torch.fx +import torch.fx.traceback as fx_traceback + + +def wrap_graph_module_for_node_meta_preservation( + graph_module: torch.fx.GraphModule, +) -> Callable: + """Wrap a GraphModule with contexts to preserve node meta information, such as stacktrace info. + + This is typically useful before calling `make_fx`. Without this wrapper, the + stacktrace information will be lost afterwards. 
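+ + Example (illustrative usage): + wrapped = wrap_graph_module_for_node_meta_preservation(graph_module) + new_module = proxy_tensor.make_fx(wrapped)(*example_args)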
+ """ + + def wrapped(*args): + with fx_traceback.preserve_node_meta(): + return torch.fx.Interpreter(graph_module).run(*args) + + return wrapped + + +def _get_node_base_name(node_name: str) -> tuple[str, int | None]: + pattern = r"(.*)\.(\d+)" + match = re.match(pattern, node_name) + if match is not None: + base_name, count_str = match.groups() + return base_name, int(count_str) + return node_name, None + + +def set_node_name( + node: torch.fx.Node, + new_name: str, + name_to_node_cache: dict[str, torch.fx.Node], +): + """Safely set the unique name of a node. + + If the new name is already taken by another node, the name of the other node will be + updated. If `new_name` is a string of format f"{base_name}.{count}", where `count` + is an integer, the other node will be renamed as f"{base_name}.{count+1}". If not, + the other node will be renamed as "{new_name}.1". This function will iteratively + update the names until there is no conflict. + + ``name_to_node_cache`` is required as an argument to avoid recomputation. The caller + is responsible for ensuring the cache is accurate and in sync with the owning module + of the node. The values in the cache will be updated accordingly. + + Args: + node: The node to update. + new_name: The new name to use. + name_to_node_cache: A cache of node names to nodes. + """ + module = node.graph.owning_module + node_name_to_set = collections.deque([(node, new_name)]) + + while node_name_to_set: + node, new_name = node_name_to_set.pop() + if new_name in name_to_node_cache and name_to_node_cache[new_name] != node: + base_name, postfix_count = _get_node_base_name(new_name) + if postfix_count is None: + postfix_count = 0 + node_name_to_set.append( + (name_to_node_cache[new_name], f"{base_name}.{postfix_count + 1}") + ) + node.name = new_name + name_to_node_cache[new_name] = node + + +def replace_placeholder_name_and_target( + module: torch.fx.GraphModule, reference_module: torch.fx.GraphModule +): + """Replace the argument names in module with those in reference_module. + + This function assumes the two modules have the same signature structure. + The caller is responsible for ensuring this. Otherwise, the behavior of this + function is undefined. This function only does minimal sanity check that the two + modules have the same number of arguments. + + Name conflicts between new names and existing node names in the graph are handled. + Check the documentation of :func:`set_node_name` for more details. + + Raises: + RuntimeError: If the two modules have different number of arguments. + """ + placeholders = [node for node in module.graph.nodes if node.op == "placeholder"] + reference_placeholders = [ + node for node in reference_module.graph.nodes if node.op == "placeholder" + ] + + if len(placeholders) != len(reference_placeholders): + raise RuntimeError( + "The two modules have different number of arguments. 
" + f"module: {len(placeholders)}, reference_module: {len(reference_placeholders)}" + ) + + name_to_node: dict[str, torch.fx.Node] = {} + for node in module.graph.nodes: + name_to_node[node.name] = node + + for placeholder, reference_placeholder in zip(placeholders, reference_placeholders): + placeholder.target = reference_placeholder.target + set_node_name(placeholder, reference_placeholder.name, name_to_node) + + module.recompile() diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/decomp.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/decomp.py new file mode 100644 index 0000000000000000000000000000000000000000..c7352f67b1492c54bfaad14a08baa51888cb1003 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/decomp.py @@ -0,0 +1,82 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import contextlib +from typing import Callable, Mapping, TYPE_CHECKING + +import torch +import torch._ops +from torch._dispatch import python as python_dispatch +from torch._subclasses import fake_tensor +from torch.fx.experimental import proxy_tensor +from torch.onnx._internal.fx import _pass, diagnostics +from torch.onnx._internal.fx.passes import _utils + + +if TYPE_CHECKING: + import torch.fx + + +class Decompose(_pass.Transform): + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + module: torch.fx.GraphModule, + decomposition_table: Mapping[torch._ops.OpOverload, Callable], + enable_dynamic_axes: bool, + allow_fake_constant: bool | None = False, + ): + super().__init__(diagnostic_context, module) + self.decomposition_table = decomposition_table + self.enable_dynamic_axes = enable_dynamic_axes + self.allow_fake_constant = allow_fake_constant + + def _run(self, *args, **kwargs) -> torch.fx.GraphModule: + assert not kwargs, "kwargs is not supported in Decompose." + + # To preserve stack trace info after `make_fx`. + module = _utils.wrap_graph_module_for_node_meta_preservation(self.module) + + # fake mode use static size to trace the size of tensors. while symbolic + # mode generates aten::sym_size to dynamically trace the size of tensors. + + # e.g. fake mode: + # view: f32[3, 5, 20] = torch.ops.aten.view.default(x, [3, 5, 20]) + + # e.g. symbolic mode: + # sym_size = torch.ops.aten.sym_size(x, 0) + # sym_size_1 = torch.ops.aten.sym_size(x, 1) + # sym_size_2 = torch.ops.aten.sym_size(x, 2) + # sym_size_3 = torch.ops.aten.sym_size(x, 3) + # mul = sym_size_2 * sym_size_3; sym_size_2 = sym_size_3 = None + # view: f32[3, 5, 20] = torch.ops.aten.view.default(x, [sym_size, sym_size_1, mul]) + + # Mimic `torch._dynamo.export(aten_graph=True)` behavior in invoking `make_fx`. + # TODO: May need revisit for user fake mode export + dynamic shape scenario. + fake_mode: fake_tensor.FakeTensorMode | None = self.fake_mode + maybe_fake_args = self._maybe_fakefy_args(fake_mode, *args) + if fake_mode is not None: + # Using existing fake mode as context, signal `make_fx` that it does not need + # to create a new fake mode by passing tracing_mode as "real". + tracing_mode = "real" + else: + # Existing fake mode not found, signal `make_fx` to create one. + fake_mode = contextlib.nullcontext() # type: ignore[assignment] + tracing_mode = "symbolic" if self.enable_dynamic_axes else "fake" + + # Apply decomposition table to the input graph. 
+ assert fake_mode is not None # for mypy + with fake_tensor.unset_fake_temporarily(), python_dispatch.enable_python_dispatcher(), fake_mode: + decomposed_module = proxy_tensor.make_fx( + module, + decomposition_table=self.decomposition_table, + tracing_mode=tracing_mode, + _allow_non_fake_inputs=True, + _allow_fake_constant=bool(self.allow_fake_constant), + )(*maybe_fake_args) + + # Rename placeholder targets to match the original module's signature, since + # we don't want to map forward(x, y, z) to forward(arg0, arg1, arg2). + _utils.replace_placeholder_name_and_target(decomposed_module, self.module) + + return decomposed_module diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/readability.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/readability.py new file mode 100644 index 0000000000000000000000000000000000000000..50221f47f64fffd94cd3acbb2955f864ecfca5c6 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/readability.py @@ -0,0 +1,130 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from typing import Sequence + +import torch +from torch.onnx._internal.fx import _pass, diagnostics + + +class RestoreParameterAndBufferNames(_pass.Transform): + """Restore parameter and buffer names from the original nn.module. + + This pass is useful for the readability of the exported ONNX graph. It restores the + parameter and buffer names from the original nn.module. For example, if the original + nn.module has a parameter named `root.linear.0.weight`, and the parameter is renamed to + `_param_constant9` by FX, this pass will rename it back. + + This pass must be run after the `Decompose` pass, because it is expected to be called on + an `fx.GraphModule` produced by `proxy_tensor.make_fx`, where all parameters and buffers + are registered at the root level. + """ + + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + fx_module: torch.fx.GraphModule, + original_nn_module: torch.nn.Module, + ): + super().__init__(diagnostic_context, fx_module) + self.original_nn_module = original_nn_module + + def _rename_param_and_buffer( + self, + diagnostic: diagnostics.Diagnostic, + nodes: Sequence[torch.fx.Node], + new_name: str, + ) -> None: + """Rename the parameter/buffer and replace corresponding nodes with new nodes of the updated target.""" + assert len(nodes) > 0, "`nodes` cannot be empty" + assert ( + len({node.target for node in nodes}) == 1 + ), "`nodes` must all have the same `target`" + old_name = nodes[0].target + assert isinstance(old_name, str), f"Expected str, got type({old_name})" + # Parameter/buffer name cannot contain "." + normalized_name = new_name.replace(".", "/") + attr_value = getattr(self.module, old_name) + setattr(self.module, normalized_name, attr_value) + delattr(self.module, old_name) + for node in nodes: + with self.module.graph.inserting_before(node): + new_node = self.module.graph.get_attr(normalized_name) + new_node.meta = node.meta + node.replace_all_uses_with(new_node) + self.module.graph.erase_node(node) + diagnostic.info( + "Renamed 'self.%s' to 'self.%s', " + "normalized from original parameter name '%s'.", + old_name, + normalized_name, + new_name, + ) + + def _run(self, *args, **kwargs) -> torch.fx.GraphModule: + """Restore parameter and buffer names from the original module. + + For each `get_attr` node, if the target is a str representing a parameter or buffer + under `self.module`, we rename the parameter or buffer to its original name.
+ The parameters and buffers between `self.module` and `self.original_nn_module` refer + to the same objects, allowing us to use it as key to retrieve the original name. + """ + assert len(args) == 0, "RestoreParameterAndBufferNames does not take any args" + assert ( + len(kwargs) == 0 + ), "RestoreParameterAndBufferNames does not take any kwargs" + # state_to_readable_name[parameter/buffer] returns the original readable name of + # the parameter/buffer. E.g., "self.linear.weight". + state_to_readable_name: dict[torch.nn.Parameter | torch.Tensor, str] = {} + state_to_readable_name.update( + {v: k for k, v in self.original_nn_module.named_parameters()} + ) + state_to_readable_name.update( + {v: k for k, v in self.original_nn_module.named_buffers()} + ) + diagnostic = self.diagnostic_context.inflight_diagnostic() + + # old_name_to_nodes[old_name] returns a tuple of (nodes, new_name) + # where `nodes` is a list of `get_attr` nodes with `old_name` as `target` and + # `new_name` is the new readable name. + old_name_to_nodes: dict[str, tuple[list[torch.fx.Node], str]] = {} + + for node in self.module.graph.nodes: + if node.op == "get_attr": + assert isinstance( + node.target, str + ), f"Expected str, got type({node.target})" + if node.target.find(".") != -1: + raise RuntimeError( + f"Unexpected target {node.target} in get_attr, found '.' in target. " + f"All parameters and buffers are expected to be registered at root level, " + f"i.e., self.module. " + ) + if node.target in old_name_to_nodes: + # We have already processed this parameter/buffer. + old_name_to_nodes[node.target][0].append(node) + continue + attr_value = getattr(self.module, node.target) + if ( + isinstance(attr_value, (torch.nn.Parameter, torch.Tensor)) + and attr_value in state_to_readable_name + ): + readable_name = state_to_readable_name[attr_value] + old_name_to_nodes[node.target] = ([node], readable_name) + continue + + diagnostic.info( + "Cannot find readable name for self.%s: %s. The name is unchanged.", + node.target, + type(attr_value), + ) + if isinstance(attr_value, torch.nn.Parameter): + # If it is a parameter we treat it more seriously. 
+ diagnostic.level = diagnostics.levels.WARNING + else: + diagnostic.level = diagnostics.levels.NONE + + for nodes, new_name in old_name_to_nodes.values(): + self._rename_param_and_buffer(diagnostic, nodes, new_name) + + return self.module diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/type_promotion.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/type_promotion.py new file mode 100644 index 0000000000000000000000000000000000000000..6397beb5f089a4968cf520a48fa4f900fffcb29e --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/type_promotion.py @@ -0,0 +1,1720 @@ +# mypy: allow-untyped-defs +# Owner(s): ["module: onnx"] +from __future__ import annotations + +import abc +import dataclasses +import inspect +import logging +from typing import Any, Callable, Mapping, Sequence, TYPE_CHECKING + +import torch +import torch._ops +import torch.fx +import torch.fx.traceback as fx_traceback +from torch import _prims_common, _refs +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + wrappers as _prims_common_wrappers, +) +from torch._refs import linalg as _linalg_refs, nn as _nn_refs, special as _special_refs +from torch._refs.nn import functional as _functional_refs +from torch._subclasses import fake_tensor +from torch.fx.experimental import proxy_tensor +from torch.onnx._internal.fx import _pass, diagnostics, type_utils as fx_type_utils +from torch.utils import _python_dispatch, _pytree + + +if TYPE_CHECKING: + from types import ModuleType + + +logger = logging.getLogger(__name__) + +# TODO(bowbao): move to type utils. +_SCALAR_TYPE_TENSOR_DTYPE_MAP: Mapping[type, torch.dtype] = { + bool: torch.bool, + int: torch.int64, + float: torch.float32, + complex: torch.complex32, +} + + +def _try_getclosurevars(func): + try: + return inspect.getclosurevars(func) + except TypeError as e: + return None + + +@dataclasses.dataclass +class TypePromotionSnapshot: + """Type promotion snapshot for a fx node and its inputs. + + Contains the promoted dtype for args and kwargs that needs promoting. + Contains the expected node output dtype. + """ + + args_dtypes: Mapping[int, torch.dtype] + """Mapping from arg position to dtype to promote to.""" + + kwargs_dtypes: Mapping[str, torch.dtype] + """Mapping from kwarg name to dtype to promote to.""" + + out_dtype: torch.dtype + """Expected output dtype of the node.""" + + +class TypePromotionRule(abc.ABC): + """Base class for type promotion rule per 'torch.ops.{namespace}.{op_name}'.""" + + def __init__(self, namespace: str, op_name: str): + self.namespace = namespace + self.op_name = op_name + + # Make this abstract as well because subclass needs to override __eq__(). + # A class that overrides __eq__() and does not define __hash__() will have its __hash__() implicitly set to None. + # Ref: https://docs.python.org/3/reference/datamodel.html#object.__hash__ + @abc.abstractmethod + def __hash__(self) -> int: ... + + @abc.abstractmethod + def __repr__(self): ... + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: ... + + def is_valid(self) -> bool: + """Check if the rule is valid.""" + # This always returns a module. If the module does not exist it will be created. 
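+ # Illustrative check (hypothetical rule): for ("aten", "add") this resolves + # torch.ops.aten and then torch.ops.aten.add, an OpOverloadPacket, so the + # rule is valid; a misspelled op name such as "addd" leaves py_op as None and + # the rule is rejected.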
+ module = getattr(torch.ops, self.namespace) + py_op = getattr(module, self.op_name, None) + if py_op is None: + logger.warning( + "Cannot find op: %s in module: %s", self.op_name, self.namespace + ) + return False + if not isinstance(py_op, torch._ops.OpOverloadPacket): + logger.warning( + "Op: torch.ops.%s.%s is not an OpOverloadPacket, got: %s", + self.namespace, + self.op_name, + type(py_op), + ) + return False + + return True + + @abc.abstractmethod + def preview_type_promotion( + self, args: tuple, kwargs: dict + ) -> TypePromotionSnapshot: + """Preview type promotion results for provided set of args and kwargs. + + Returns a TypePromotionSnapshot object that contains the promoted dtypes for + the arguments and the expected output dtype. + """ + ... + + +class ElementwiseTypePromotionRule(TypePromotionRule): + """Defines how to perform elementwise type promotion for 'torch.ops.{namespace}.{op_name}'.""" + + _USE_OPMATH: bool = False + """Whether to use opmath to compute the promoted input dtype. + If used, upcasts will be inserted everywhere for lower precision models. + Set to False and have torchlib handle upcasts in op implementation internally. + """ + + def __init__( + self, + namespace: str, + op_name: str, + promote_args_positions: Sequence[int], + promote_kwargs_names: Sequence[str], + promotion_kind: _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND, + ): + """Constructs a TypePromotionRule for elementwise operators. + + Args: + namespace: Namespace of the op. E.g. 'aten' in 'torch.ops.aten.add'. + op_name: Name of the op. E.g. 'add' in 'torch.ops.aten.add'. + promote_args_positions: Positions of args to promote. + promote_kwargs_names: Names of kwargs to promote. + promotion_kind: Type promotion kind. Refer to [_prims_common.elementwise_dtypes](https://github.com/pytorch/pytorch/blob/main/torch/_prims_common/__init__.py) for detail. # noqa: B950 + """ + super().__init__(namespace, op_name) + self.promote_args_positions = promote_args_positions + self.promote_kwargs_names = promote_kwargs_names + self.promotion_kind = promotion_kind + + def __repr__(self): + return ( + f"ElementwiseTypePromotionRule('{self.namespace}', '{self.op_name}', " + f"{self.promote_args_positions}, {self.promote_kwargs_names}, {self.promotion_kind})" + ) + + def __eq__(self, __value: object) -> bool: + if not isinstance(__value, ElementwiseTypePromotionRule): + return False + return ( + self.namespace == __value.namespace + and self.op_name == __value.op_name + and self.promote_args_positions == __value.promote_args_positions + and self.promote_kwargs_names == __value.promote_kwargs_names + and self.promotion_kind == __value.promotion_kind + ) + + def __hash__(self) -> int: + return f"{type(self)}:{self.namespace}.{self.op_name}".__hash__() + + def _consolidate_input_dtype( + self, computed_dtype: torch.dtype, result_dtype: torch.dtype + ) -> torch.dtype: + """ + Although opmath is the right thing to do to retain on-par precision, it inserts + upcasts everywhere in the graph. This is particularly hard for backend to optimize + since there is no way to differentiate between inserted upcasts and model code + casts. Hence we consolidate the input dtype to the result dtype to avoid this. 
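+ + Example (illustrative): for two float16 inputs under a DEFAULT promotion kind, + opmath would compute in float32 (an inserted upcast) while the result dtype + stays float16; consolidation instead promotes the inputs only to float16, the + result dtype.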
+ """ + if not self._USE_OPMATH and self.promotion_kind in ( + _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + ): + return result_dtype + return computed_dtype + + def preview_type_promotion( + self, args: tuple, kwargs: dict + ) -> TypePromotionSnapshot: + candidate_args = { + i: args[i] + for i in self.promote_args_positions + if i < len(args) and args[i] is not None + } + candidate_kwargs = { + name: kwargs[name] + for name in self.promote_kwargs_names + if name in kwargs and kwargs[name] is not None + } + + computed_dtype, result_dtype = _prims_common.elementwise_dtypes( + *_pytree.arg_tree_leaves(*candidate_args.values(), **candidate_kwargs), + type_promotion_kind=self.promotion_kind, + ) + + consolidated_input_dtype = self._consolidate_input_dtype( + computed_dtype, result_dtype + ) + + return TypePromotionSnapshot( + dict.fromkeys(candidate_args.keys(), consolidated_input_dtype), + dict.fromkeys(candidate_kwargs.keys(), consolidated_input_dtype), + result_dtype, + ) + + +class DivElementwiseTypePromotionRule(ElementwiseTypePromotionRule): + """Reference type promotion rule from torch._refs.div. + + Rule depends on the value of the `rounding_mode` argument. + """ + + def __init__(self): + super().__init__( + "aten", + "div", + promote_args_positions=(0, 1), + promote_kwargs_names=(), + promotion_kind=_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ) + + def preview_type_promotion( + self, args: tuple, kwargs: dict + ) -> TypePromotionSnapshot: + rounding_mode = kwargs.get("rounding_mode", None) + if rounding_mode is None: + # true_divide + self.promotion_kind = ( + _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ) + return super().preview_type_promotion(args, kwargs) + if rounding_mode == "trunc": + # trunc_divide + self.promotion_kind = _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + return super().preview_type_promotion(args, kwargs) + if rounding_mode == "floor": + # floor_divide + self.promotion_kind = _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + return super().preview_type_promotion(args, kwargs) + raise ValueError(f"Unknown rounding_mode: {rounding_mode}") + + +class ReductionTypePromotionRule(TypePromotionRule): + def __init__( + self, + namespace: str, + op_name: str, + promotion_kind: _prims_common.REDUCTION_OUTPUT_TYPE_KIND, + ): + """Constructs a TypePromotionRule for reduction operators. + + Args: + namespace: Namespace of the op. E.g. 'aten' in 'torch.ops.aten.sum'. + op_name: Name of the op. E.g. 'sum' in 'torch.ops.aten.sum'. + promotion_kind: Type promotion kind. Refer to [_prims_common.reduction_dtypes]((https://github.com/pytorch/pytorch/blob/main/torch/_prims_common/__init__.py)) for detail. 
# noqa: B950 + """ + super().__init__(namespace, op_name) + self.promotion_kind = promotion_kind + + def __repr__(self): + return f"ReductionTypePromotionRule('{self.namespace}', '{self.op_name}', {self.promotion_kind})" + + def __eq__(self, __value: object) -> bool: + if not isinstance(__value, ReductionTypePromotionRule): + return False + return ( + self.namespace == __value.namespace + and self.op_name == __value.op_name + and self.promotion_kind == __value.promotion_kind + ) + + def __hash__(self) -> int: + return f"{type(self)}:{self.namespace}.{self.op_name}".__hash__() + + def preview_type_promotion( + self, args: tuple, kwargs: dict + ) -> TypePromotionSnapshot: + assert ( + len(args) >= 1 + ), f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument" + arg = args[0] + assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor" + dtype: torch.dtype | None = kwargs.get("dtype", None) + + computation_dtype, result_dtype = _prims_common.reduction_dtypes( + arg, self.promotion_kind, dtype + ) + if result_dtype is None: + # Inspecting the code, this can only happen when `promotion_kind` is `KEEP_PROMOTED_TYPE`. + # Hence set it the same as computation_dtype. + result_dtype = computation_dtype + + return TypePromotionSnapshot( + {0: computation_dtype}, + {}, + result_dtype, + ) + + +class AllOrAnyReductionTypePromotionRule(ReductionTypePromotionRule): + """Reference type promotion rule from torch.ops.aten.all or torch.ops.aten.any. + + This is a special case where the computation dtype is always torch.bool. + The result dtype is torch.uint8 if the input dtype is uint8, otherwise torch.bool. + """ + + def __init__(self, op_name: str): + super().__init__( + "aten", + op_name, + _prims_common.REDUCTION_OUTPUT_TYPE_KIND.ALWAYS_BOOL, + ) + + def preview_type_promotion( + self, args: tuple, kwargs: dict + ) -> TypePromotionSnapshot: + assert ( + len(args) >= 1 + ), f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument" + arg = args[0] + assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor" + computation_dtype = torch.bool + # Preserves uint8 -- probably a legacy mask thing + result_dtype = torch.uint8 if arg.dtype == torch.uint8 else torch.bool + return TypePromotionSnapshot( + {0: computation_dtype}, + {}, + result_dtype, + ) + + +class SumLikeReductionTypePromotionRule(ReductionTypePromotionRule): + """Reference type promotion rule from torch.ops.aten.sum. + + This is a special case where the computation dtype is always torch.int64 for integral args, + unless overridden by the `dtype` kwarg. + """ + + def preview_type_promotion( + self, args: tuple, kwargs: dict + ) -> TypePromotionSnapshot: + assert ( + len(args) >= 1 + ), f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument" + arg = args[0] + assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor" + dtype: torch.dtype | None = kwargs.get("dtype", None) + # The below logic is copied from `torch/_refs/__init__.py` reduction ops impl. + if dtype is None: + if _prims_common.is_boolean_dtype( + arg.dtype + ) or _prims_common.is_integer_dtype(arg.dtype): + dtype = torch.int64 + else: + dtype = arg.dtype + return super().preview_type_promotion(args, {"dtype": dtype}) + + +# NOTE: [Update type promotion rule] +# BELOW TABLE IS GENERATED FROM `TypePromotionRuleSetGenerator.generate_from_torch_refs`. +# DO NOT EDIT MANUALLY !!! +# For missing rules or discrepancies, please +# 1.
Run `pytest test/onnx/test_fx_type_promotion.py` to validate if the generated rule set is current. +# If it is not, update with new generated set. +# 2. If discrepancies still exist, consider debugging torch._refs or report a bug. +# 3. If rules are still missing, add them to `_EXTRA_TYPE_PROMOTION_RULE_SET` or report a bug. +# Check `TypePromotionRule` class for how each rule is defined and used. +_GENERATED_ATEN_TYPE_PROMOTION_RULE_SET = { + ElementwiseTypePromotionRule( + "aten", "abs", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "abs_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "acos", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "acos_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "acosh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "acosh_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "add", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "add_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "addcdiv", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "addcdiv_", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "addcmul", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "addcmul_", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "addr", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "asin", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "asin_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "asinh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "asinh_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "atan", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "atan2", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "atan2_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "atan_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "atanh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "atanh_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_and", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_and_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", + "bitwise_left_shift", + [0, 1], + [], + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ), + ElementwiseTypePromotionRule( + "aten", + "bitwise_left_shift_", + [0, 1], + [], + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_not", 
[0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_not_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_or", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_or_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", + "bitwise_right_shift", + [0, 1], + [], + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ), + ElementwiseTypePromotionRule( + "aten", + "bitwise_right_shift_", + [0, 1], + [], + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_xor", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "bitwise_xor_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "cat", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH + ), + ElementwiseTypePromotionRule( + "aten", "cauchy", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "cauchy_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "ceil", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "ceil_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "celu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "celu_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "clamp", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "clamp_", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "copysign", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "copysign_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "cos", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "cos_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "cosh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "cosh_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "deg2rad", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "deg2rad_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "digamma", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "digamma_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "elu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "elu_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "eq", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "eq_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "erf", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "erf_", [0], 
[], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "erfc", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "erfc_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "erfinv", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "erfinv_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "exp", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "exp2", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "exp2_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "exp_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "expm1", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "expm1_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "exponential", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "exponential_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "fill", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH + ), + ElementwiseTypePromotionRule( + "aten", "floor", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "floor_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "floor_divide", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "floor_divide_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "fmax", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "fmin", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "fmod", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "fmod_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "frac", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "frac_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "gcd", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "gcd_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "ge", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "ge_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "gelu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "geometric", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "geometric_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "glu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "gt", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + 
ElementwiseTypePromotionRule( + "aten", "gt_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "hardtanh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "heaviside", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "heaviside_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "huber_loss", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "hypot", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "hypot_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "i0", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "i0_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "igamma", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "igamma_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "igammac", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "igammac_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "isfinite", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "isinf", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "isnan", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "isneginf", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "isposinf", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "isreal", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "l1_loss", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "lcm", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "lcm_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "le", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "le_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "leaky_relu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "lerp", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "lerp_", [0, 1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "lgamma", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "lgamma_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log10", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log10_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + 
ElementwiseTypePromotionRule( + "aten", "log1p", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log1p_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log2", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log2_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "log_normal", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "log_normal_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "logaddexp", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "logaddexp2", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "logical_and", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_and_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_not", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_not_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_or", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_or_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_xor", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logical_xor_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "logit", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "logsumexp", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "lt", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "lt_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "maximum", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "minimum", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "mish", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "mish_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "mse_loss", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "mul", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "mul_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "ne", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "ne_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "neg", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "neg_", [0], [], 
ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "nextafter", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH + ), + ElementwiseTypePromotionRule( + "aten", "nextafter_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH + ), + ElementwiseTypePromotionRule( + "aten", "nll_loss", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "normal", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "normal_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "pdist", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", + "poisson_nll_loss", + [0, 1], + [], + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + ), + ElementwiseTypePromotionRule( + "aten", "pow", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG + ), + ElementwiseTypePromotionRule( + "aten", "pow_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG + ), + ElementwiseTypePromotionRule( + "aten", "prelu", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "rad2deg", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "rad2deg_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "reciprocal", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "reciprocal_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "relu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "remainder", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "remainder_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "round", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "rsqrt", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "rsqrt_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "rsub", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "selu", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "selu_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "sgn", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "sgn_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "sigmoid", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sigmoid_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sign", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "sign_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "signbit", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL + ), + ElementwiseTypePromotionRule( + "aten", "sin", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sin_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT 
+ ), + ElementwiseTypePromotionRule( + "aten", "sinc", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sinc_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sinh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sinh_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", + "smooth_l1_loss", + [0, 1], + [], + ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, + ), + ElementwiseTypePromotionRule( + "aten", "softplus", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "sqrt", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "sqrt_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "square", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG + ), + ElementwiseTypePromotionRule( + "aten", "square_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG + ), + ElementwiseTypePromotionRule( + "aten", "sub", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "sub_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "tan", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "tan_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "tanh", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "tanh_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "threshold", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "threshold_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "true_divide", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "true_divide_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "trunc", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "trunc_", [0], [], ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ), + ElementwiseTypePromotionRule( + "aten", "where", [1, 2], [], ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH + ), + ElementwiseTypePromotionRule( + "aten", "xlogy", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), + ElementwiseTypePromotionRule( + "aten", "xlogy_", [0, 1], [], ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT + ), +} + +# Manually curated extra type promotion rules. Please see NOTE [Update type promotion rule] +# before adding new rules. +_EXTRA_TYPE_PROMOTION_RULE_SET = { + # torch._refs skips type promotion decoration for `clamp_min` and `clamp_max` since + # the call is routed to the decorated `aten.clamp` op. 
+ ElementwiseTypePromotionRule( + "aten", + "clamp_max", + promote_args_positions=(0, 1), + promote_kwargs_names=(), + promotion_kind=_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ), + ElementwiseTypePromotionRule( + "aten", + "clamp_min", + promote_args_positions=(0, 1), + promote_kwargs_names=(), + promotion_kind=_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + ), + # torch.ops.aten.div.Tensor_mode applies different type promotion rules + # depending on the value of the `mode` argument. + DivElementwiseTypePromotionRule(), + # Manually curating reduction ops since the logic is written inside the op reference + # implementation. + AllOrAnyReductionTypePromotionRule("all"), + AllOrAnyReductionTypePromotionRule("any"), + ReductionTypePromotionRule( + "aten", + "amax", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.SAME, + ), + ReductionTypePromotionRule( + "aten", + "amin", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.SAME, + ), + # torch.ops.aten.mean is a special case that does not need type promotion. + ReductionTypePromotionRule( + "aten", + "std", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ), + ReductionTypePromotionRule( + "aten", + "std_mean", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ), + ReductionTypePromotionRule( + "aten", + "var", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ), + SumLikeReductionTypePromotionRule( + "aten", + "cumprod", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.SAME, + ), + SumLikeReductionTypePromotionRule( + "aten", + "cumsum", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.SAME, + ), + SumLikeReductionTypePromotionRule( + "aten", + "prod", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.SAME, + ), + SumLikeReductionTypePromotionRule( + "aten", + "sum", + promotion_kind=_prims_common.REDUCTION_OUTPUT_TYPE_KIND.SAME, + ), +} + + +class ElementwiseTypePromotionRuleSetGenerator: + """Hackily distills info from reference ops decorated with an elementwise type promotion rule. + + The goal is to retrieve the decorator + + ```python + @elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=type_promotion_kind, + ) + ``` + + from the reference ops. It provides info on which arguments are promoted + and what kind of promotion is applied.
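+ + For example (an illustrative sketch): a reference op decorated as above, where `a` and `b` are its first two positional parameters, would be distilled into a rule equivalent to `ElementwiseTypePromotionRule("aten", op_name, [0, 1], [], type_promotion_kind)`.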
+ """ + + @classmethod + def generate_from_torch_refs(cls) -> set[ElementwiseTypePromotionRule]: + """Parse type promotion rules from reference ops under torch._C._refs.""" + rule_set = set() + rule_set.update(cls._parse_torch_refs(_refs)) + rule_set.update(cls._parse_torch_refs(_nn_refs)) + rule_set.update(cls._parse_torch_refs(_linalg_refs)) + rule_set.update(cls._parse_torch_refs(_special_refs)) + rule_set.update(cls._parse_torch_refs(_functional_refs)) + return rule_set + + @classmethod + def _parse_torch_refs( + cls, ref_module: ModuleType + ) -> set[ElementwiseTypePromotionRule]: + logger.info("Processing module: %s", ref_module.__name__) + rule_set = set() + for name in ref_module.__all__: + decorated_op = getattr(ref_module, name) + rule = cls._parse_type_promotion_rule_from_refs_op(decorated_op) + if rule is not None and rule.is_valid(): + rule_set.add(rule) + + return rule_set + + @classmethod + def _parse_type_promotion_rule_from_refs_op( + cls, + decorated_op: Callable, + ) -> ElementwiseTypePromotionRule | None: + """Retrieve and parse type promotion decorator from op under torch._refs.""" + fn = decorated_op + type_promo_wrapper = None + while fn_closure_vars := _try_getclosurevars(fn): + if "fn" not in fn_closure_vars.nonlocals: + break + if "self" in fn_closure_vars.nonlocals and isinstance( + fn_closure_vars.nonlocals["self"], + _prims_common_wrappers.elementwise_type_promotion_wrapper, + ): + type_promo_wrapper = fn_closure_vars.nonlocals["self"] + break + fn = fn_closure_vars.nonlocals["fn"] + + if type_promo_wrapper is not None: + signature = inspect.signature(decorated_op) + + pos = 0 + promote_args_positions = [] + promote_kwargs_names = [] + + if type_promo_wrapper.type_promoting_arg_names is not None: + for name, param in signature.parameters.items(): + if name in type_promo_wrapper.type_promoting_arg_names: + if param.kind in ( + param.POSITIONAL_OR_KEYWORD, + param.POSITIONAL_ONLY, + ): + promote_args_positions.append(pos) + elif param.kind == param.KEYWORD_ONLY: + promote_kwargs_names.append(name) + pos += 1 + + return ElementwiseTypePromotionRule( + "aten", + decorated_op.__name__, + promote_args_positions=promote_args_positions, + promote_kwargs_names=promote_kwargs_names, + promotion_kind=type_promo_wrapper.type_promotion_kind, + ) + + logger.warning( + "Cannot find type promotion rule for: %s.%s", + decorated_op.__module__, + decorated_op.__name__, + ) + return None + + +class TypePromotionTable: + """Type promotion table for torch.ops.""" + + def __init__(self): + self._rule_table = {} + for rule in _GENERATED_ATEN_TYPE_PROMOTION_RULE_SET: + self.add_rule(rule) + for rule in _EXTRA_TYPE_PROMOTION_RULE_SET: + self.add_rule(rule) + + def add_rule(self, rule: TypePromotionRule) -> None: + """Add a type promotion rule for a python op in a torch.ops module. + + Args: + rule: Type promotion rule. + module: Module containing the op. E.g. torch.ops.aten. + + Raises: + ValueError: If the rule is invalid. + """ + if not rule.is_valid(): + raise ValueError(f"Invalid type promotion rule: {rule}") + self._rule_table[f"{rule.namespace}.{rule.op_name}"] = rule + + def get_rule(self, py_op: torch._ops.OpOverloadPacket) -> TypePromotionRule | None: + """Get type promotion rule for a python op under 'torch.ops.'.""" + return self._rule_table.get(str(py_op), None) + + +def get_type_promotion_rule( + diagnostic: diagnostics.Diagnostic, + node: torch.fx.Node, + type_promotion_table: TypePromotionTable, +) -> TypePromotionRule | None: + """Get type promotion rule for a node. 
+ + Args: + diagnostic: Diagnostic object. + node: Node to get type promotion rule for. + type_promotion_table: Type promotion table. + + Returns: + Type promotion rule for the node. None if no rule is found or if the node is not + representing a torch operator. + """ + op = node.target + if not isinstance(op, torch._ops.OpOverload): + # TODO(bowbao): diagnostic.emit and diagnostic.set_message api. + diagnostic.message = ( + f"Skipped for {diagnostics.format_argument(node)}: " + f"node.target is not OpOverload. Got type: {type(op)}" + ) + return None + if (rule := type_promotion_table.get_rule(op.overloadpacket)) is None: + diagnostic.message = ( + f"Skipped for {diagnostics.format_argument(node)}: " + f"Cannot find type promotion rule for op: {op}" + ) + return None + + diagnostic.info("Found type promotion rule: %s", rule) + return rule + + +class _OpTraceDispatchMode(_python_dispatch.TorchDispatchMode): + """Trace ops that were dispatched. + + Utilize the dispatch mechanism in [`__torch_dispatch__`](https://dev-discuss.pytorch.org/t/what-and-why-is-torch-dispatch/557) + to trace op overloads that were dispatched to. This is used to find the compatible + op overload for a given op overload packet for different sets of args and kwargs. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.traced_ops = [] + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + self.traced_ops.append(func) + return func(*args, **kwargs) + + +def find_compatible_op_overload( + op: torch._ops.OpOverloadPacket, args: tuple, kwargs: dict +) -> torch._ops.OpOverload: + """Find compatible OpOverload for an OpOverloadPacket using provided args and kwargs. + + Each "call_function" fx.Node in the fx.GraphModule has a target that represents a torch._ops.OpOverload. + The OpOverload contains an OpOverloadPacket that holds all the available overloads for the operation. + + During the type promotion pass, there are cases where the types of the args and kwargs may change, + such as promoting Python numbers to tensors. Consequently, the original OpOverload might not be + compatible with the updated args and kwargs. This function is used to identify the compatible + OpOverload for the given args and kwargs. + + Args: + op: OpOverloadPacket to find compatible OpOverload for. + args: The positional arguments to consider for compatibility. + kwargs: The keyword arguments to consider for compatibility. + + Returns: + torch._ops.OpOverload: The compatible OpOverload found for the given args and kwargs. + + Raises: + AssertionError: If no compatible op overload is found. + + Examples: + >>> import torch + >>> packet = torch.ops.aten.pow + >>> args = (torch.tensor([1.0, 2.0]), 2) + >>> find_compatible_op_overload(packet, args, {})._overloadname + 'Tensor_Scalar' + >>> args = (torch.tensor([1.0, 2.0]), torch.tensor(2.0)) + >>> find_compatible_op_overload(packet, args, {})._overloadname + 'Tensor_Tensor' + """ + # Utilize the dispatch mechanism to find the compatible op overload.
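+ # Sketch of the mechanism: calling the packet under _OpTraceDispatchMode makes + # __torch_dispatch__ record each OpOverload the dispatcher selects for these + # args/kwargs; the first recorded op is the resolved overload, and the + # assertions below verify it belongs to the same packet.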
+ op_trace_dispatch_mode = _OpTraceDispatchMode() + with op_trace_dispatch_mode: + op(*args, **kwargs) + assert ( + len(op_trace_dispatch_mode.traced_ops) >= 1 + ), "Expected at least 1 traced op, got 0" + + new_op_overload = op_trace_dispatch_mode.traced_ops[0] + assert isinstance( + new_op_overload, torch._ops.OpOverload + ), f"Expected OpOverload, got {type(new_op_overload)}" + assert ( + new_op_overload.overloadpacket == op + ), f"Expected same OpOverload packet, got {new_op_overload.overloadpacket} != {op}" + + return new_op_overload + + +class _TypePromotionInterpreter(torch.fx.Interpreter): + """Interpreter that inserts type promotion for each node.""" + + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + module: torch.fx.GraphModule, + type_promotion_table: TypePromotionTable, + ): + super().__init__(module) + self.diagnostic_context = diagnostic_context + self.type_promotion_table = type_promotion_table + + def _run_node_and_set_meta(self, node) -> Any: + """Run node and set meta according to `fx_traceback.get_current_meta()`. + + This should be used on new nodes or nodes that have been modified. + By default `Interpreter.run_node` does not update `node.meta`. + Set `node.meta` to the current meta, except for `node.meta["val"]`, which is + recomputed. + """ + out = super().run_node(node) + # Update interpreter env state with new output value. + self.env[node] = out + node.meta.update( + (k, v) + for k, v in fx_traceback.get_current_meta().items() + if k not in node.meta + ) + node.meta["val"] = proxy_tensor.extract_val(out) + return out + + def _create_node( + self, + graph: torch.fx.Graph, + op_type: str, + target: torch.fx.node.Target, + args: tuple, + kwargs: dict, + ) -> torch.fx.Node: + """Create a node and set its metadata.""" + assert op_type in ( + "call_function", + "call_method", + "get_attr", + "call_module", + "placeholder", + "output", + ), f"Unexpected op_type: {op_type}" + node = getattr(graph, op_type)(target, args, kwargs) + self._run_node_and_set_meta(node) + return node + + def _rerun_node_after_type_promotion( + self, + diagnostic: diagnostics.Diagnostic, + node: torch.fx.Node, + expected_out_dtype: torch.dtype, + ) -> None: + """Rerun a node after type promotion and update node.meta["val"] with the output value.""" + node_val = node.meta.get("val", None) + assert node_val is not None, f"Node {node} node.meta['val'] is not set." + args, kwargs = self.fetch_args_kwargs_from_env(node) + target = node.target + assert isinstance( + target, torch._ops.OpOverload + ), f"Expected OpOverload, got {type(target)}" + node.target = find_compatible_op_overload(target.overloadpacket, args, kwargs) + + new_node_val = self._run_node_and_set_meta(node) + assert isinstance(new_node_val, type(node_val)), ( + f"run_node output type should not change between runs. " + f"Got {type(new_node_val)}, expect {type(node_val)}." + ) + + if isinstance(node_val, torch.Tensor): + prev_node_dtype = node_val.dtype + + assert prev_node_dtype == expected_out_dtype, ( + f"node.meta['val'].dtype({prev_node_dtype}) does not agree with " + f"type promotion rule({expected_out_dtype})." + ) + + if new_node_val.dtype != expected_out_dtype: + # With explicit type promotion, the expected result dtype may not be + # the same as the computation dtype. This is referred to as "op math". + # We need to explicitly cast the output back to the expected dtype. + # See more about "op math" topic at `_prims_common.elementwise_dtypes`. 
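+ # For instance, low-precision float inputs (e.g. float16) are often + # computed at float32 under op math while the rule still expects + # float16; the convert_element_type node below restores the dtype.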
+ graph = node.graph + with graph.inserting_after(node): + output_cast_node = self._create_node( + graph, + "call_function", + torch.ops.prims.convert_element_type.default, + (node,), + {"dtype": expected_out_dtype}, + ) + node.replace_all_uses_with(output_cast_node) + output_cast_node.args = (node,) + diagnostic.info( + "Node '%s' output dtype becomes %s due to op math. " + "Cast back to %s.", + node, + new_node_val.dtype, + expected_out_dtype, + ) + + elif fx_type_utils.is_torch_symbolic_type(node_val): + raise NotImplementedError( + "Type promotion does not support node output of sym types." + ) + elif isinstance(node_val, (list, tuple)): + raise NotImplementedError( + "Type promotion does not support node output of list or tuple." + ) + else: + raise RuntimeError(f"Unexpected node output type: {type(node_val)}.") + + def _maybe_promote_arg( + self, + diagnostic: diagnostics.Diagnostic, + node: torch.fx.Node, + fx_arg: torch.fx.node.Argument, + dtype: torch.dtype | None, + ) -> torch.fx.node.Argument: + """Promote fx_arg to dtype if necessary.""" + if dtype is None: + diagnostic.info( + "Argument %s is not promoted. Not mentioned by type promotion rule.", + fx_arg, + ) + return fx_arg + + if isinstance(fx_arg, torch.fx.Node): + arg_val = self.env[fx_arg] + if isinstance(arg_val, torch.Tensor): + if (old_dtype := arg_val.dtype) != dtype: + # Promote tensor to dtype. + graph = node.graph + with graph.inserting_before(node): + diagnostic.info( + "Argument %s(%s) is promoted to %s.", + fx_arg, + old_dtype, + dtype, + ) + return self._create_node( + graph, + "call_function", + torch.ops.prims.convert_element_type.default, + (fx_arg,), + {"dtype": dtype}, + ) + diagnostic.info( + "Argument %s is not promoted. Already %s.", fx_arg, dtype + ) + return fx_arg + elif fx_type_utils.is_torch_symbolic_type(arg_val): + arg_type = type(arg_val) + equivalent_dtype = fx_type_utils.from_scalar_type_to_torch_dtype( + arg_type + ) + assert equivalent_dtype is not None, f"Unexpected arg_type: {arg_type}" + if equivalent_dtype != dtype: + # Promote Sym number to tensor of dtype. + graph = node.graph + with graph.inserting_before(node): + diagnostic.info( + "Argument %s(Scalar of equivalent dtype: %s) " + "is promoted to %s.", + fx_arg, + equivalent_dtype, + dtype, + ) + return self._create_node( + graph, + "call_function", + torch.ops.aten.scalar_tensor.default, + (fx_arg,), + {"dtype": dtype}, + ) + diagnostic.info( + "Argument %s is not promoted. Already %s.", fx_arg, dtype + ) + return fx_arg + elif ( + equivalent_dtype := fx_type_utils.from_scalar_type_to_torch_dtype( + type(fx_arg) + ) + ) is not None: + if equivalent_dtype != dtype: + # Promote number to tensor of dtype. + # The op should have overload that supports tensor for this arg, otherwise + # the type promotion rule should not suggest promoting this arg. + graph = node.graph + with graph.inserting_before(node): + diagnostic.info( + "Argument %s(Scalar of equivalent dtype: %s) " + "is promoted to %s.", + fx_arg, + equivalent_dtype, + dtype, + ) + return self._create_node( + graph, + "call_function", + torch.ops.aten.scalar_tensor.default, + (fx_arg,), + {"dtype": dtype}, + ) + diagnostic.info("Argument %s is not promoted. Already %s.", fx_arg, dtype) + return fx_arg + elif isinstance(fx_arg, (tuple, list)): + diagnostic.info( + "Argument %s is a tuple/list. 
Promoting each element.", fx_arg + ) + return type(fx_arg)( + self._maybe_promote_arg(diagnostic, node, fx_arg_elem, dtype) + for fx_arg_elem in fx_arg + ) + + raise NotImplementedError(f"Unknown fx arg type: {type(fx_arg)}") + + def _maybe_promote_node( + self, + diagnostic: diagnostics.Diagnostic, + node: torch.fx.Node, + rule: TypePromotionRule, + ) -> torch.fx.Node: + """Promote node inputs and outputs according to type promotion rule.""" + args, kwargs = self.fetch_args_kwargs_from_env(node) + type_promotion_info = rule.preview_type_promotion(args, kwargs) + new_args = [] + new_kwargs = {} + for i, arg in enumerate(node.args): + new_args.append( + self._maybe_promote_arg( + diagnostic, node, arg, type_promotion_info.args_dtypes.get(i, None) + ) + ) + + for name, arg in node.kwargs.items(): + new_kwargs[name] = self._maybe_promote_arg( + diagnostic, node, arg, type_promotion_info.kwargs_dtypes.get(name, None) + ) + new_args = tuple(new_args) + + if node.args != new_args or node.kwargs != new_kwargs: + diagnostic.message = f"Applied type promotion for {node}. " + node.args = new_args + node.kwargs = new_kwargs + self._rerun_node_after_type_promotion( + diagnostic, node, type_promotion_info.out_dtype + ) + else: + diagnostic.message = f"Type promotion not needed for {node}. " + + return node + + @diagnostics.diagnose_call( + rule=diagnostics.rules.fx_node_insert_type_promotion, + level=diagnostics.levels.NONE, + ) + def run_node(self, node: torch.fx.Node) -> Any: + """This method is an override which inserts type promotion nodes as needed. + + For each `call_function` node, an initial check is conducted to determine if a type + promotion rule is applicable. If a relevant rule exists, type casting nodes are + introduced for the corresponding arguments. The OpOverload of the node is updated + to one that accommodates the promoted types. Should the output type be different, + a type casting node is inserted for this output. + + The call `super().run_node(node)` is guaranteed to be invoked for each node. + In the case of new or modified nodes, the result of `super().run_node(node)` is + used to update its `node.meta["val"]` value. + """ + diagnostic = self.diagnostic_context.inflight_diagnostic() + with self._set_current_node(node): + if node.op != "call_function": + diagnostic.message = f"Skipped {node}: not a call_function." + elif rule := get_type_promotion_rule( + diagnostic, node, self.type_promotion_table + ): + self._maybe_promote_node(diagnostic, node, rule) + + return super().run_node(node) + + +class InsertTypePromotion(_pass.Transform): + """Explicitly insert type promotion ops into the graph. + + This class subclasses `_pass.Transform` to provide graph-level diagnostic tracking. + Underneath, the main pass is driven by `_TypePromotionInterpreter`, which subclasses + `torch.fx.Interpreter` to interpret the fx.Graph and insert type promotion + operations. + + The interpreter is extended with the ability to track diagnostic information for each node. + + By re-running the new and modified nodes using the interpreter, we can update the + metadata, specifically the fake tensor stored under node.meta["val"], and ensure it + reflects the latest changes. + + See [FXE0015: fx_node_insert_type_promotion](https://pytorch.org/docs/main/generated/onnx_dynamo_diagnostics_rules/FXE0015%3Afx-node-insert-type-promotion.html) for more details.
# noqa: B950 + """ + + def __init__( + self, + diagnostic_context: diagnostics.DiagnosticContext, + module: torch.fx.GraphModule, + type_promotion_table: TypePromotionTable | None = None, + ): + super().__init__(diagnostic_context, module) + self.interpreter = _TypePromotionInterpreter( + diagnostic_context, module, type_promotion_table or TypePromotionTable() + ) + + def _fetch_fake_args( + self, + ) -> Sequence[ + fake_tensor.FakeTensor + | float + | int + | bool + | torch.SymInt + | torch.SymFloat + | torch.SymBool + | None + ]: + """Fetch fake args from fx graph. + + For each argument, try to fetch the fake tensor from the matching placeholder node. + """ + fake_args = [] + for node in self.module.graph.nodes: + if node.op == "placeholder": + try: + # Meta value can be torch.Tensor, int, float, bool, + # torch.SymInt, torch.SymFloat, torch.SymBool. + meta_value = node.meta.get("val", None) + except RuntimeError as e: + if not node.users: + # If the placeholder is not used, we can safely ignore it and put + # None as placeholder. + meta_value = None + else: + raise RuntimeError( + "Cannot fetch symbolic fake args from fx graph. " + "InsertTypePromotion pass needs to run with pre-existing fake args; " + "otherwise the pass will produce inaccurate dynamic shapes. " + ) from e + + fake_args.append(meta_value) + return fake_args + + def _run(self, *args, **kwargs) -> torch.fx.GraphModule: + assert not args, ( + "`InsertTypePromotion` deduces symbolic fake arguments from the graph. " + "It does not accept concrete arguments as input because this pass requires " + "re-running the graph. When executed with newly faked concrete arguments, " + "the pass loses the symbolic dynamic shape information." + ) + assert not kwargs, "`kwargs` is not supported" + + fake_args = self._fetch_fake_args() + fake_mode = self.fake_mode + assert fake_mode is not None, "Cannot detect fake_mode." + + with fake_tensor.unset_fake_temporarily(), ( + fake_mode + ), fx_traceback.preserve_node_meta(): + self.interpreter.run(*fake_args) + + return self.module diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/virtualization.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/virtualization.py new file mode 100644 index 0000000000000000000000000000000000000000..456c25fee777c81efa3580facca3b093c9471dab --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/passes/virtualization.py @@ -0,0 +1,96 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from typing import TYPE_CHECKING + +import torch +from torch.onnx._internal.fx import _pass + + +if TYPE_CHECKING: + import torch.fx + + +class MovePlaceholderToFront(_pass.Transform): + """This pass moves all placeholder nodes to the front of the graph node list. + + In torch.fx.Graph, placeholder is a special assignment node. If it's not + executed at the beginning, it could overwrite values computed by upstream + nodes.
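+ + Illustrative example: a node list ordered as [call_function_node, placeholder_node] becomes [placeholder_node, call_function_node] after this pass, so the placeholder assignment runs before any computed value it could otherwise overwrite.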
+ """ + + def _run(self, *args, **kwargs) -> torch.fx.GraphModule: + graph_module = self.module + graph = graph_module.graph + placeholders = [] + first_not_placeholder = None + for node in graph.nodes: + if node.op == "placeholder": + placeholders.append(node) + if first_not_placeholder is None and node.op != "placeholder": + first_not_placeholder = node + if first_not_placeholder is None: + return graph_module + for placeholder in placeholders: + first_not_placeholder.prepend(placeholder) + return graph_module + + +class ReplaceGetAttrWithPlaceholder(_pass.Transform): + """Replace get_attr with placeholder. + + The parameters and buffers accessed by the original get_attr are returned; + they are useful when creating random inputs for the modified graph_module. + """ + + _replaced_attrs: tuple[torch.Tensor, ...] | None + + @property + def replaced_attrs(self) -> tuple[torch.Tensor, ...]: + """The list of replaced weight tensors.""" + assert ( + self._replaced_attrs is not None + ), "Must run ReplaceGetAttrWithPlaceholder first" + return self._replaced_attrs + + def _run(self, *args, **kwargs) -> torch.fx.GraphModule: + graph_module = self.module + graph = graph_module.graph + replaced_attrs: list[torch.Tensor] = [] + for node in graph.nodes: + if node.op == "get_attr": + replaced_attr: torch.Tensor | None = None + # get_attr could retrieve either parameter or buffer, so + # we need to try both. + try: + replaced_attr = graph_module.get_parameter(node.target) + except AttributeError: + # It's possible that model author use buffer instead of + # parameter to store trainable weights. In this case, + # 1. get_parameter will throw something like + # AttributeError: `bias` is not an nn.Parameter. + # 2. get_buffer should work. + replaced_attr = graph_module.get_buffer(node.target) + + # Reassign op type so that get_attr node becomes placeholder node. + node.op = "placeholder" + # The target name in placeholder must be a valid Python identifier. + # Thus, we replace, e.g., "module.submodule.weight" with + # "module_submodule_weight". + node.target = node.target.replace(".", "_") + # Default value is None. This is needed as long as the "graph_module" + # has optional inputs. Assume the original forward signature is + # def forward(self, x, y=None) + # and the replaced get_attr node has target "z". Then, the modified + # signature should be + # def forward(self, x, y=None, z=None) + # Without the following line, the signature will be + # def forward(self, x, y=None, z) + # , which is not valid Python code. 
+ node.args = (None,) + + replaced_attrs.append(replaced_attr) + + self._replaced_attrs = tuple(replaced_attrs) + + return graph_module diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/patcher.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/patcher.py new file mode 100644 index 0000000000000000000000000000000000000000..04298159deb0622d08065f538d1d76af1496d72e --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/patcher.py @@ -0,0 +1,143 @@ +# mypy: allow-untyped-defs +import copy +import functools +from typing import List, TYPE_CHECKING, Union + +import torch + + +if TYPE_CHECKING: + import io + + +# TODO: Remove after https://github.com/huggingface/safetensors/pull/318 +@functools.lru_cache(None) +def has_safetensors_and_transformers(): + try: + # safetensors is not an exporter requirement, but needed for some huggingface models + import safetensors # type: ignore[import] # noqa: F401 + import transformers # type: ignore[import] # noqa: F401 + from safetensors import torch as safetensors_torch # noqa: F401 + + return True + except ImportError: + return False + + +class ONNXTorchPatcher: + """Context manager to temporarily patch PyTorch during FX-to-ONNX export. + + This class is a collection of "patches" required by the FX-to-ONNX exporter. + + This context overrides several torch functions to support symbolic + export of large scale models. + + torch.load: + This function is patched to record the files in which PyTorch stores model + parameters and buffers. The downstream FX-to-ONNX exporter can create + initializers from these files. + torch.fx._symbolic_trace._wrapped_methods_to_patch: + This list is extended with (torch.Tensor, "__getitem__") so that + weight[x, :, y] becomes exportable with torch.fx.symbolic_trace. + safetensors.torch.load_file: + This function is patched to allow safetensors to be loaded within + FakeTensorMode. Remove after https://github.com/huggingface/safetensors/pull/318 + + Search for ONNXTorchPatcher in test_fx_to_onnx_with_onnxruntime.py for + example usage. + + TODO: Should this really be a global patcher? Can we make it a local patcher? + A reason for splitting this into several patchers is to avoid patching one part + of the code as collateral damage of patching another part. For example, for + tracing a model with torch._dynamo.export, we don't need to patch + `torch.fx._symbolic_trace._wrapped_methods_to_patch` + """ + + def __init__(self) -> None: + # List of file paths processed by torch.load. + self.paths: List[Union[str, io.BufferedIOBase]] = [] + + def torch_load_wrapper(f, *args, **kwargs): + # Record path for later serialization into ONNX proto + self.paths.append(f) + # Then, call the original torch.load. + return self.torch_load(f, *args, **kwargs) + + # Original version of torch.load. + self.torch_load = torch.load + + # Wrapped or modified versions of torch functions.
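+ # Kept as attributes so that __enter__ can install the wrappers and + # __exit__ can restore the originals captured above.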
+ self.torch_load_wrapper = torch_load_wrapper + + if has_safetensors_and_transformers(): + import safetensors + import transformers + + def safetensors_load_file_wrapper(filename, device="cpu"): + # Record path for later serialization into ONNX proto + self.paths.append(filename) + result = {} + with safetensors.torch.safe_open( # type: ignore[attr-defined] + filename, framework="pt", device=device + ) as f: + for k in f.keys(): + fake_mode = torch._guards.detect_fake_mode() + if not fake_mode: + result[k] = f.get_tensor(k) + else: + empty_tensor = f.get_slice(k) + result[k] = torch.empty( + tuple(empty_tensor.get_shape()), + dtype=safetensors.torch._getdtype( + empty_tensor.get_dtype() + ), + ) + return result + + self.safetensors_torch_load_file = safetensors.torch.load_file + self.safetensors_torch_load_file_wrapper = safetensors_load_file_wrapper + self.transformers_modeling_utils_safe_load_file = ( + transformers.modeling_utils.safe_load_file + ) + + def __enter__(self): + torch.load = self.torch_load_wrapper + + self.torch_fx__symbolic_trace__wrapped_methods_to_patch = ( + torch.fx._symbolic_trace._wrapped_methods_to_patch + ) + desired_wrapped_methods = copy.deepcopy( + torch.fx._symbolic_trace._wrapped_methods_to_patch + ) + if (torch.Tensor, "__getitem__") not in desired_wrapped_methods: + # Adding `__getitem__` to the patching list will make tensor indexing traceable via + # torch.fx.symbolic_trace. Otherwise, `tensor[x, :, y]` cannot be traced. + # This happens because `__getitem__` is neither under torch domain nor an aten operator, + # so the patching (or similar Proxy-generating mechanism) doesn't happen automatically. + # Note that torch.fx.symbolic_trace defines FX_PATCH_GETITEM environment variable for + # enabling the line below for patching. + desired_wrapped_methods.append((torch.Tensor, "__getitem__")) + torch.fx._symbolic_trace._wrapped_methods_to_patch = desired_wrapped_methods + + if has_safetensors_and_transformers(): + import safetensors + import transformers + + safetensors.torch.load_file = self.safetensors_torch_load_file_wrapper + transformers.modeling_utils.safe_load_file = ( + self.safetensors_torch_load_file_wrapper + ) + + def __exit__(self, exc_type, exc_value, traceback): + torch.load = self.torch_load + torch.fx._symbolic_trace._wrapped_methods_to_patch = ( + self.torch_fx__symbolic_trace__wrapped_methods_to_patch + ) + if has_safetensors_and_transformers(): + import safetensors + import transformers + + safetensors.torch.load_file = self.safetensors_torch_load_file + transformers.modeling_utils.safe_load_file = ( + self.transformers_modeling_utils_safe_load_file + ) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/registration.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/registration.py new file mode 100644 index 0000000000000000000000000000000000000000..e855f98f044f6a3cc65d114c8a302bbdea993948 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/registration.py @@ -0,0 +1,87 @@ +"""Module for handling ATen to ONNX functions registration.""" + +from __future__ import annotations + +import dataclasses +from typing import TYPE_CHECKING + + +# We can only import onnx from this module in a type-checking context to ensure that +# 'import torch.onnx' continues to work without having 'onnx' installed. We fully +# 'import onnx' inside of dynamo_export (by way of _assert_dependencies). 
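+# The imports below are likewise evaluated only by static type checkers, +# which keeps onnxscript a soft dependency at runtime.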
+if TYPE_CHECKING: + import types + + import onnxscript # type: ignore[import] + + import torch._ops + + +@dataclasses.dataclass(frozen=True, eq=True) +class ONNXFunction: + """A wrapper of an onnx-script function. + + op_full_name: The qualified name of the function. In the form of '<namespace>::<op_name>.<overload>'. + onnx_function: The onnx-script function from torchlib. + is_custom: Whether the function is a custom function. + is_complex: Whether the function is a function that handles complex-valued inputs. + + """ + + onnx_function: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction + op_full_name: str + is_custom: bool = False + is_complex: bool = False + + +@dataclasses.dataclass(frozen=True, eq=True) +class OpName: + """A class representing an operator name in the internal ONNX converter.""" + + namespace: str + op_name: str + overload: str + + @classmethod + def from_name_parts( + cls, namespace: str, op_name: str, overload: str | None = None + ) -> OpName: + # NOTE: in PyTorch, the overload could be unprovided to indicate the + # default overload + if overload is None or overload == "": + overload = "default" + return cls(namespace, op_name, overload) + + @classmethod + def from_qualified_name(cls, qualified_name: str) -> OpName: + """When the name is '<namespace>::<op_name>[.<overload>]'.""" + namespace, opname_overload = qualified_name.split("::") + op_name, *overload = opname_overload.split(".", 1) + overload = overload[0] if overload else "default" + return cls(namespace, op_name, overload) + + @classmethod + def from_op_overload(cls, op_overload: torch._ops.OpOverload) -> OpName: + return cls.from_qualified_name(op_overload.name()) + + @classmethod + def from_builtin_function( + cls, builtin_function: types.BuiltinFunctionType + ) -> OpName: + """From a builtin function, e.g. operator.add, math.ceil, etc, get the OpName. + + FX graph uses built-in functions to calculate sympy expressions. This function + is used to get the OpName from a builtin function. + + Args: + builtin_function (types.BuiltinFunctionType): operator.add, math.ceil, etc. + + Returns: + OpName: The operator name parsed from the builtin function. + """ + op = builtin_function.__name__ # add, sub, etc. + module = builtin_function.__module__ # _operator or math + return cls.from_qualified_name(module + "::" + op) + + def qualified_name(self) -> str: + return f"{self.namespace}::{self.op_name}.{self.overload}" diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/serialization.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..8d01cf01c4ef18b474a7cfe880eecd18df9b92c4 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/serialization.py @@ -0,0 +1,244 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import io +import logging +import os +from typing import TYPE_CHECKING + +import torch +from torch.onnx import _type_utils as jit_type_utils + + +if TYPE_CHECKING: + import onnx + +log = logging.getLogger(__name__) + + +def _create_tensor_proto_with_external_data( + tensor: torch.Tensor, + name: str, + location: str, + basepath: str, + dtype_override: onnx.TypeProto | None = None, # type: ignore[name-defined] +) -> onnx.TensorProto: # type: ignore[name-defined] + """Create a TensorProto with external data from a PyTorch tensor. + The external data is saved to os.path.join(basepath, location). + + Args: + tensor: Tensor to be saved. + name: Name of the tensor (i.e., initializer name in ONNX graph).
+ location: Relative location of the external data file + (e.g., "/tmp/initializers/weight_0" when model is "/tmp/model_name.onnx"). + basepath: Base path of the external data file (e.g., "/tmp/external_data" while model must be in "/tmp"). + + + Reference for ONNX's external data format: + How to load? + https://github.com/onnx/onnx/blob/5dac81ac0707bdf88f56c35c0a5e8855d3534673/onnx/external_data_helper.py#L187 + How to save? + https://github.com/onnx/onnx/blob/5dac81ac0707bdf88f56c35c0a5e8855d3534673/onnx/external_data_helper.py#L43 + How to set ONNX fields? + https://github.com/onnx/onnx/blob/5dac81ac0707bdf88f56c35c0a5e8855d3534673/onnx/external_data_helper.py#L88 + """ + # FIXME: Avoid importing onnx into torch.onnx. + import onnx + + scalar_type = ( + jit_type_utils.JitScalarType.from_onnx_type( + dtype_override.tensor_type.elem_type + ) + if dtype_override is not None + else jit_type_utils.JitScalarType.from_dtype(tensor.dtype) + ) + + # Checkpoints can be stored with a different dtype than the model expects because + # the user script can explicitly cast the original type to something, or + # PyTorch's type promotion might do it. + if dtype_override is not None and scalar_type.dtype() != tensor.dtype: + tensor = tensor.to(scalar_type.dtype()) + + tensor_proto = onnx.TensorProto() # type: ignore[attr-defined] + tensor_proto.name = name + tensor_proto.data_type = scalar_type.onnx_type() # type: ignore[assignment] + + tensor_proto.dims.extend(tensor.shape) + tensor_proto.data_location = onnx.TensorProto.EXTERNAL # type: ignore[attr-defined] + + # Settings for saving one tensor per file. + # Offset is zero because there is no other tensor in the same file. + key_value_pairs = { + "location": location, + "offset": 0, + "length": tensor.untyped_storage().nbytes(), + } + for k, v in key_value_pairs.items(): + entry = tensor_proto.external_data.add() + entry.key = k + entry.value = str(v) + + # Actual path to write content of tensor. + external_data_file_path = os.path.join(basepath, location) + if os.path.exists(external_data_file_path): + os.remove(external_data_file_path) + + # Create the external data folder if it does not exist. + external_data_dir_path = os.path.dirname(external_data_file_path) + if not os.path.exists(external_data_dir_path): + # if the directory is not present + # then create it. + os.makedirs(external_data_dir_path) + + # Create a fresh file. + with open(external_data_file_path, "xb") as data_file: + # No need to call "seek" because offset is 0. + # data_file.seek(0) + # Write tensor content to the file.
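+ # numpy(force=True) detaches the tensor and moves it to CPU (resolving + # conjugation/negation if set) so the raw bytes can be written out.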
+ data_file.write(tensor.numpy(force=True).tobytes()) + + return tensor_proto + + +def _convert_safetensors_to_torch_format(safetensors_file): + # If this function is called, safetensors is guaranteed to exist + # because the HF model with safetensors was already loaded and exported to ONNX + from safetensors import safe_open # type: ignore[import-not-found] + + tensors = {} + with safe_open(safetensors_file, framework="pt", device="cpu") as f: # type: ignore[attr-defined] + for k in f.keys(): + tensors[k] = f.get_tensor(k).cpu() + return tensors + + +# TODO: generalize to allow more checkpoint formats (torch or gguf) +def save_model_with_external_data( + basepath: str, + model_location: str, + initializer_location: str, + torch_state_dicts: tuple[dict | str | io.BytesIO, ...], + onnx_model: onnx.ModelProto, # type: ignore[name-defined] + rename_initializer: bool = False, +) -> None: + """Load PyTorch tensors from files and add to "onnx_model" as external initializers. + + Output files: + ONNX model file path: os.path.join(basepath, model_location) + ONNX initializer folder: os.path.join(basepath, initializer_location) + + After running this function, you can do + ort_sess = onnxruntime.InferenceSession(os.path.join(basepath, model_location)) + to execute the model. + + Arguments: + basepath: Base path of the ONNX external data file (e.g., "/path/to/large_model/"). + model_location: Relative location of the ONNX model file. + E.g., "model.onnx" so that the model file is saved to + "<basepath>/model.onnx". + initializer_location: Relative location of the ONNX initializer folder. + E.g., "initializers" so that the initializers are saved to + "<basepath>/initializers/". + Note: When initializers are >2GB, must be the same as `model_location`. + torch_state_dicts: Dictionaries or files which contain PyTorch tensors to be saved + as ONNX initializers. For non-dict arguments, `torch.load` will be used to load them from file-like objects. + onnx_model: ONNX model to be saved with external initializers. + If an input name matches a tensor loaded from "torch_state_dicts", + the tensor will be saved as that input's external initializer. + rename_initializer: Replaces "." by "_" for all ONNX initializer names. + Not needed by the official torch.onnx.dynamo_export. This is a hack + for supporting the `FXSymbolicTracer` tracer with fake tensor mode. + In short, `FXSymbolicTracer` lifts FX parameters (self.linear_weight) + as inputs (`def forward(self, linear_weight)`) and therefore, `.` cannot be used. + """ + # FIXME: Avoid importing onnx into torch.onnx.
+ import onnx + + initializers_to_be_deleted = {} # Using dict because it is **ordered** + existing_initializers = { + k.name: idx for idx, k in enumerate(onnx_model.graph.initializer) + } + onnx_input_names = {input.name for input in onnx_model.graph.input} + for el in torch_state_dicts: + if isinstance(el, dict): + # Useful for when state_dict is loaded with torch.load(..., mmap=True, map_location="cpu") by the user + # Using torch.save wouldn't leverage mmap, leading to higher memory usage + state_dict = el + else: + if isinstance(el, str) and el.endswith(".safetensors"): + state_dict = _convert_safetensors_to_torch_format(el) + else: + try: + # Loads checkpoint using memory-map on CPU to support really large models + # The underlying torch.UntypedStorage is memory mapped, so state_dict is lazy loaded + state_dict = torch.load(el, map_location="cpu", mmap=True) + except (RuntimeError, ValueError) as e: + if "mmap can only be used with files saved with" in str( + e + ) or isinstance(el, io.BytesIO): + log.warning( + "Failed to load the checkpoint with memory-map enabled, retrying without memory-map. " + "Consider updating the checkpoint with mmap by using torch.save() on PyTorch version >= 1.6." + ) + if isinstance(el, io.BytesIO): + el.seek(0) # torch.load from `try:` has read the file. + state_dict = torch.load(el, map_location="cpu") + else: + raise e + + for name, tensor in state_dict.items(): + if rename_initializer: + # Basically, "transformer.attention.self.query.weight" is mapped + # to "transformer_attention_self_query_weight" for mimicking the + # name-modifying code in the FX-to-ONNX exporter. + # See function _replace_get_attr_with_placeholder for details. + name = name.replace(".", "_") + + # This block tries to match the onnx initializer name with the torch parameter/buffer. + # e.g. A pytorch buffer 'transformer.h.0.attn.bias' can be named 'h.0.attn.bias' in an ONNX initializer. + # For each PyTorch tensor name loaded by torch.load, + # 1. Search its best match in ONNX model. E.g., the match of + # "transformer_attention_weight" could be "attention_weight". + # 2. Set "tensor" as the initializer of the matched ONNX input. + # E.g., "tensor" is stored as the initializer of "attention_weight". + # Step 1 is required because sometimes, tensor names are stored with a prefix in the dictionary + # loaded by torch.load. + if name in onnx_input_names: + # Same input name shouldn't be matched again + onnx_input_names.remove(name) + else: + for onnx_input_name in onnx_input_names: + if onnx_input_name.endswith(name) or name.endswith(onnx_input_name): + # Found a match. Change name to the matched ONNX input name, so that we + # create the initializer with the right ONNX name. + name = onnx_input_name + onnx_input_names.remove(onnx_input_name) + break + + relative_tensor_file_path = os.path.join(initializer_location, name) + # Create one file per tensor. + # tensor_proto.raw_data is stored to external file at + # os.path.join(basepath, relative_tensor_file_path). + model_input_types = {k.name: k.type for k in onnx_model.graph.input} + + # Mark for deletion - a replacement will be appended next + if name in existing_initializers: + initializers_to_be_deleted[existing_initializers[name]] = name + tensor_proto = _create_tensor_proto_with_external_data( + tensor, + name, + relative_tensor_file_path, + basepath, + model_input_types.pop(name, None), + ) + # Add the tensor_proto to the ONNX model as an initializer with external data.
+ onnx_model.graph.initializer.append(tensor_proto) + # Remove old duplicated initializers, if any. Delete in descending order so earlier + # deletions do not invalidate the remaining indices. + initializers_to_be_deleted = dict( + sorted(initializers_to_be_deleted.items(), reverse=True) + ) + for idx in initializers_to_be_deleted.keys(): + del onnx_model.graph.initializer[idx] + + # model_location should be a pure file name such as "file_name.onnx", not "folder/file_name.onnx". + onnx.save(onnx_model, os.path.join(basepath, model_location)) # type: ignore[attr-defined] diff --git a/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/type_utils.py b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/type_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..74501e865c797120afcc951b47a95c8bdf2ba7c9 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/_internal/fx/type_utils.py @@ -0,0 +1,256 @@ +# mypy: allow-untyped-defs +"""Utilities for converting and operating on ONNX, JIT and torch types.""" + +from __future__ import annotations + +from typing import ( + Any, + Dict, + List, + Optional, + Protocol, + runtime_checkable, + Tuple, + TYPE_CHECKING, + Union, +) + +import numpy + +import onnx + +import torch +from torch._subclasses import fake_tensor + + +if TYPE_CHECKING: + import onnx.defs.OpSchema.AttrType # type: ignore[import] # noqa: TCH004 + + +# Enable both TorchScriptTensor and torch.Tensor to be tested +# for dtype in OpSchemaWrapper. +@runtime_checkable +class TensorLike(Protocol): + @property + def dtype(self) -> torch.dtype | None: ... + + +def is_torch_complex_dtype(tensor_dtype: torch.dtype) -> bool: + # NOTE: This is needed as TorchScriptTensor is not supported by torch.is_complex() + return tensor_dtype in _COMPLEX_TO_FLOAT + + +def from_complex_to_float(dtype: torch.dtype) -> torch.dtype: + return _COMPLEX_TO_FLOAT[dtype] + + +def from_sym_value_to_torch_dtype(sym_value: SYM_VALUE_TYPE) -> torch.dtype: + return _SYM_TYPE_TO_TORCH_DTYPE[type(sym_value)] + + +def is_optional_onnx_dtype_str(onnx_type_str: str) -> bool: + return onnx_type_str in _OPTIONAL_ONNX_DTYPE_STR + + +def from_torch_dtype_to_onnx_dtype_str(dtype: torch.dtype | type) -> set[str]: + return _TORCH_DTYPE_TO_COMPATIBLE_ONNX_TYPE_STRINGS[dtype] + + +def from_python_type_to_onnx_attribute_type( + dtype: type, is_sequence: bool = False +) -> onnx.defs.OpSchema.AttrType | None: + import onnx.defs # type: ignore[import] + + _PYTHON_TYPE_TO_ONNX_ATTRIBUTE_TYPE = { + float: onnx.defs.OpSchema.AttrType.FLOAT, + int: onnx.defs.OpSchema.AttrType.INT, + str: onnx.defs.OpSchema.AttrType.STRING, + bool: onnx.defs.OpSchema.AttrType.INT, + } + + _SEQUENCE_TYPE_TO_ONNX_ATTRIBUTE_TYPE = { + float: onnx.defs.OpSchema.AttrType.FLOATS, + int: onnx.defs.OpSchema.AttrType.INTS, + str: onnx.defs.OpSchema.AttrType.STRINGS, + bool: onnx.defs.OpSchema.AttrType.INTS, + } + + if is_sequence: + return _SEQUENCE_TYPE_TO_ONNX_ATTRIBUTE_TYPE.get(dtype) + return _PYTHON_TYPE_TO_ONNX_ATTRIBUTE_TYPE.get(dtype) + + +def from_python_type_to_onnx_tensor_element_type(type: type): + """ + Converts a Python type to the corresponding ONNX tensor element type. + For example, `from_python_type_to_onnx_tensor_element_type(float)` returns + `onnx.TensorProto.FLOAT`. + + Args: + type (type): The Python type to convert. + + Returns: + int: The corresponding ONNX tensor element type.
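+ Types without a mapping (e.g. str) yield None.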
+ + """ + _PYTHON_TYPE_TO_ONNX_TENSOR_ELEMENT_TYPE = { + float: onnx.TensorProto.FLOAT, # type: ignore[attr-defined] + int: onnx.TensorProto.INT64, # type: ignore[attr-defined] + bool: onnx.TensorProto.BOOL, # type: ignore[attr-defined] + } + return _PYTHON_TYPE_TO_ONNX_TENSOR_ELEMENT_TYPE.get(type) + + +def is_torch_symbolic_type(value: Any) -> bool: + return isinstance(value, (torch.SymBool, torch.SymInt, torch.SymFloat)) + + +def from_torch_dtype_to_abbr(dtype: torch.dtype | None) -> str: + if dtype is None: + return "" + return _TORCH_DTYPE_TO_ABBREVIATION.get(dtype, "") + + +def from_scalar_type_to_torch_dtype(scalar_type: type) -> torch.dtype | None: + return _SCALAR_TYPE_TO_TORCH_DTYPE.get(scalar_type) + + +# NOTE: this is a mapping from torch dtype to a set of compatible onnx types +# It's used in dispatcher to find the best match overload for the input dtypes +_TORCH_DTYPE_TO_COMPATIBLE_ONNX_TYPE_STRINGS: dict[torch.dtype | type, set[str]] = { + torch.bfloat16: {"tensor(bfloat16)"}, + torch.bool: {"tensor(bool)"}, + torch.float64: {"tensor(double)"}, + torch.float32: {"tensor(float)"}, + torch.float16: {"tensor(float16)"}, + torch.float8_e4m3fn: {"tensor(float8_e4m3fn)"}, + torch.float8_e4m3fnuz: {"tensor(float8_e4m3fnuz)"}, + torch.float8_e5m2: {"tensor(float8_e5m2)"}, + torch.float8_e5m2fnuz: {"tensor(float8_e5m2fnuz)"}, + torch.int16: {"tensor(int16)"}, + torch.int32: {"tensor(int32)"}, + torch.int64: {"tensor(int64)"}, + torch.int8: {"tensor(int8)"}, + torch.uint8: {"tensor(uint8)"}, + str: {"tensor(string)"}, + int: {"tensor(int16)", "tensor(int32)", "tensor(int64)"}, + float: {"tensor(float16)", "tensor(float)", "tensor(double)"}, + bool: {"tensor(int32)", "tensor(int64)", "tensor(bool)"}, + complex: {"tensor(float)", "tensor(double)"}, + torch.complex32: {"tensor(float16)"}, + torch.complex64: {"tensor(float)"}, + torch.complex128: {"tensor(double)"}, +} + +_OPTIONAL_ONNX_DTYPE_STR: set[str] = { + f"optional({value})" + for value_set in _TORCH_DTYPE_TO_COMPATIBLE_ONNX_TYPE_STRINGS.values() + for value in value_set +} + +_PYTHON_TYPE_TO_TORCH_DTYPE = { + bool: torch.bool, + int: torch.int64, + float: torch.float32, + complex: torch.complex64, +} + +_COMPLEX_TO_FLOAT: dict[torch.dtype, torch.dtype] = { + torch.complex32: torch.float16, + torch.complex64: torch.float32, + torch.complex128: torch.float64, # NOTE: ORT doesn't support torch.float64 +} + +_SYM_TYPE_TO_TORCH_DTYPE = { + torch.SymInt: torch.int64, + torch.SymFloat: torch.float32, + torch.SymBool: torch.bool, +} + +_SCALAR_TYPE_TO_TORCH_DTYPE: dict[type, torch.dtype] = { + **_PYTHON_TYPE_TO_TORCH_DTYPE, + **_SYM_TYPE_TO_TORCH_DTYPE, # type: ignore[dict-item] +} + +_TORCH_DTYPE_TO_ABBREVIATION = { + torch.bfloat16: "bf16", + torch.float64: "f64", + torch.float32: "f32", + torch.float16: "f16", + torch.float8_e4m3fn: "e4m3fn", + torch.float8_e4m3fnuz: "e4m3fnuz", + torch.float8_e5m2: "f8e5m2", + torch.float8_e5m2fnuz: "e5m2fnuz", + torch.complex32: "c32", + torch.complex64: "c64", + torch.complex128: "c128", + torch.int8: "i8", + torch.int16: "i16", + torch.int32: "i32", + torch.int64: "i64", + torch.bool: "b8", + torch.uint8: "u8", +} + +_TORCH_DTYPE_TO_NUMPY_DTYPE = { + torch.float16: numpy.float16, + torch.float32: numpy.float32, + torch.float64: numpy.float64, + torch.uint8: numpy.uint8, + torch.int8: numpy.int8, + torch.int16: numpy.int16, + torch.int32: numpy.int32, + torch.int64: numpy.longlong, + torch.bool: numpy.bool_, +} + +_ONNX_TENSOR_ELEMENT_TYPE_TO_TORCH_DTYPE = { + onnx.TensorProto.FLOAT: torch.float32, 
# type: ignore[attr-defined] + onnx.TensorProto.FLOAT16: torch.float16, # type: ignore[attr-defined] + onnx.TensorProto.FLOAT8E5M2: torch.float8_e5m2, # type: ignore[attr-defined] + onnx.TensorProto.FLOAT8E5M2FNUZ: torch.float8_e5m2fnuz, # type: ignore[attr-defined] + onnx.TensorProto.FLOAT8E4M3FN: torch.float8_e4m3fn, # type: ignore[attr-defined] + onnx.TensorProto.FLOAT8E4M3FNUZ: torch.float8_e4m3fnuz, # type: ignore[attr-defined] + onnx.TensorProto.DOUBLE: torch.float64, # type: ignore[attr-defined] + onnx.TensorProto.BOOL: torch.bool, # type: ignore[attr-defined] + onnx.TensorProto.UINT8: torch.uint8, # type: ignore[attr-defined] + onnx.TensorProto.INT8: torch.int8, # type: ignore[attr-defined] + onnx.TensorProto.INT16: torch.int16, # type: ignore[attr-defined] + onnx.TensorProto.INT32: torch.int32, # type: ignore[attr-defined] + onnx.TensorProto.INT64: torch.int64, # type: ignore[attr-defined] +} + +_TORCH_DTYPE_TO_ONNX_TENSOR_ELEMENT_TYPE = { + value: key for key, value in _ONNX_TENSOR_ELEMENT_TYPE_TO_TORCH_DTYPE.items() +} + +SYM_VALUE_TYPE = Union[torch.SymInt, torch.SymFloat, torch.SymBool] +META_VALUE_TYPE = Union[fake_tensor.FakeTensor, SYM_VALUE_TYPE, int, float, bool] +# NOTE: The definitions below are from torch/fx/node.py +BaseArgumentTypes = Union[ + str, + int, + float, + bool, + complex, + torch.dtype, + torch.Tensor, + torch.device, + torch.memory_format, + torch.layout, + torch._ops.OpOverload, + torch.SymInt, + torch.SymFloat, + torch.SymBool, +] +Argument = Optional[ + Union[ + Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types + List[Any], # actually Argument + Dict[str, Any], # actually Argument + slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing + range, + "torch.fx.Node", + BaseArgumentTypes, + ] +]