ZTWHHH committed on
Commit
2e5e5d8
·
verified ·
1 Parent(s): c1f1d95

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
  2. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_exporter_legacy.cpython-310.pyc +0 -0
  3. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_lazy_import.cpython-310.pyc +0 -0
  4. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc +0 -0
  5. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc +0 -0
  6. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc +0 -0
  7. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc +0 -0
  8. janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc +0 -0
  9. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc +0 -0
  10. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc +0 -0
  11. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc +0 -0
  12. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/__init__.cpython-310.pyc +0 -0
  13. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/_infra.cpython-310.pyc +0 -0
  14. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/context.cpython-310.pyc +0 -0
  15. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/decorator.cpython-310.pyc +0 -0
  16. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/formatter.cpython-310.pyc +0 -0
  17. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/utils.cpython-310.pyc +0 -0
  18. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/__init__.cpython-310.pyc +0 -0
  19. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_artifact_change.cpython-310.pyc +0 -0
  20. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_code_flow.cpython-310.pyc +0 -0
  21. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_edge.cpython-310.pyc +0 -0
  22. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_exception.cpython-310.pyc +0 -0
  23. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc +0 -0
  24. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_region.cpython-310.pyc +0 -0
  25. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_suppression.cpython-310.pyc +0 -0
  26. janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_tool.cpython-310.pyc +0 -0
  27. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py +17 -0
  28. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/__init__.cpython-310.pyc +0 -0
  29. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_analysis.cpython-310.pyc +0 -0
  30. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_capture_strategies.cpython-310.pyc +0 -0
  31. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_compat.cpython-310.pyc +0 -0
  32. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_core.cpython-310.pyc +0 -0
  33. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_decomp.cpython-310.pyc +0 -0
  34. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_dispatching.cpython-310.pyc +0 -0
  35. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_fx_passes.cpython-310.pyc +0 -0
  36. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_isolated.cpython-310.pyc +0 -0
  37. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_onnx_program.cpython-310.pyc +0 -0
  38. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_reporting.cpython-310.pyc +0 -0
  39. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_schemas.cpython-310.pyc +0 -0
  40. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_tensors.cpython-310.pyc +0 -0
  41. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_testing.cpython-310.pyc +0 -0
  42. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_verification.cpython-310.pyc +0 -0
  43. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py +242 -0
  44. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_capture_strategies.py +361 -0
  45. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py +216 -0
  46. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py +1341 -0
  47. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py +100 -0
  48. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_dispatching.py +362 -0
  49. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py +21 -0
  50. janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py +41 -0
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (171 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_exporter_legacy.cpython-310.pyc ADDED
Binary file (40.7 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_lazy_import.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc ADDED
Binary file (22.1 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc ADDED
Binary file (13.7 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc ADDED
Binary file (7.75 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc ADDED
Binary file (28 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (520 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc ADDED
Binary file (7.95 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc ADDED
Binary file (27.1 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (649 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/_infra.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/context.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/decorator.cpython-310.pyc ADDED
Binary file (4.13 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/formatter.cpython-310.pyc ADDED
Binary file (3.23 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.81 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.42 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_artifact_change.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_code_flow.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_edge.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_exception.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_region.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_suppression.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_tool.cpython-310.pyc ADDED
Binary file (988 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Public API surface of the torch.onnx._internal.exporter package.
__all__ = [
    "ONNXRegistry",
    "ONNXProgram",
    "analyze",
    "export",
    "exported_program_to_ir",
    "export_compat",
    "testing",
    "verification",
]

# Re-export private submodules and symbols under their public names.
from . import _testing as testing, _verification as verification
from ._analysis import analyze
from ._compat import export_compat
from ._core import export, exported_program_to_ir
from ._onnx_program import ONNXProgram
from ._registration import ONNXRegistry
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (575 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_analysis.cpython-310.pyc ADDED
Binary file (7.72 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_capture_strategies.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_compat.cpython-310.pyc ADDED
Binary file (5.27 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_core.cpython-310.pyc ADDED
Binary file (32.3 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_decomp.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_dispatching.cpython-310.pyc ADDED
Binary file (9.87 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_fx_passes.cpython-310.pyc ADDED
Binary file (2.02 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_isolated.cpython-310.pyc ADDED
Binary file (1.53 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_onnx_program.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_reporting.cpython-310.pyc ADDED
Binary file (5.84 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_schemas.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_tensors.cpython-310.pyc ADDED
Binary file (3.66 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_testing.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/__pycache__/_verification.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_analysis.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compatibility analyzer for PyTorch models."""
2
+
3
+ # mypy: allow-untyped-defs
4
+ # flake8: noqa: B950 We do not need flake8 as it complains line length
5
+ from __future__ import annotations
6
+
7
+ import dataclasses
8
+ import textwrap
9
+ import traceback
10
+ from collections import defaultdict
11
+ from typing import TYPE_CHECKING
12
+
13
+ import torch
14
+ import torch._export.serde.schema
15
+ from torch.export import graph_signature
16
+ from torch.onnx._internal.exporter import _dispatching, _registration
17
+
18
+
19
+ if TYPE_CHECKING:
20
+ import torch.fx
21
+
22
+
23
@dataclasses.dataclass
class ModelInfo:
    """Information about the model."""

    # Total parameter element count (numel), keyed by dtype.
    parameter_count: defaultdict[torch.dtype, int] = dataclasses.field(
        default_factory=lambda: defaultdict(int)
    )
    # Total buffer (non-trainable) element count (numel), keyed by dtype.
    buffer_count: defaultdict[torch.dtype, int] = dataclasses.field(
        default_factory=lambda: defaultdict(int)
    )
    # Total number of nodes in the FX graph.
    fx_node_count: int = 0
    # Node count keyed by FX op kind (e.g. "call_function", "placeholder").
    fx_node_op_count: defaultdict[str, int] = dataclasses.field(
        default_factory=lambda: defaultdict(int)
    )
    # call_function node count keyed by str(node.target).
    fx_node_target_count: defaultdict[str, int] = dataclasses.field(
        default_factory=lambda: defaultdict(int)
    )
    # (node, error message) pairs for nodes the dispatcher failed to resolve.
    dispatch_failures: list[tuple[torch.fx.Node, str]] = dataclasses.field(
        default_factory=list
    )
    # Tensor metadata of user inputs, keyed by graph node name.
    inputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(
        default_factory=dict
    )
    # Tensor metadata of user outputs, keyed by graph node name.
    outputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(
        default_factory=dict
    )
49
+
50
+
51
def _count_weights(
    exported_program: torch.export.ExportedProgram,
) -> tuple[defaultdict[torch.dtype, int], defaultdict[torch.dtype, int]]:
    """Count the number of weight elements per dtype in the exported program.

    Returns:
        A pair ``(parameter_count, buffer_count)`` mapping each dtype to the
        total element count (``numel``) of parameters / buffers of that dtype.
    """
    param_elems: defaultdict[torch.dtype, int] = defaultdict(int)
    buffer_elems: defaultdict[torch.dtype, int] = defaultdict(int)

    for tensor in exported_program.parameters():
        param_elems[tensor.dtype] += tensor.numel()

    for tensor in exported_program.buffers():
        buffer_elems[tensor.dtype] += tensor.numel()

    return param_elems, buffer_elems
67
+
68
+
69
def _format_model_info(model_info: ModelInfo) -> str:
    """Format the information about the model."""
    # Markdown-style report, assembled line by line and joined at the end.
    lines = [
        textwrap.dedent(
            f"""\
            PyTorch ONNX Conversion Analysis

            ## Model Information

            The model has {sum(model_info.parameter_count.values())} parameters and {sum(model_info.buffer_count.values())} buffers (non-trainable parameters).
            Number of parameters per dtype:
            ```python
            {model_info.parameter_count}
            ```
            Number of buffers per dtype:
            ```python
            {model_info.buffer_count}
            ```
            """
        ),
        "Inputs:",
        *[f"- `{name}`: `{meta}`" for name, meta in model_info.inputs.items()],
        "",
        "Outputs:",
        *[f"- `{name}`: `{meta}`" for name, meta in model_info.outputs.items()],
        "",
        f"The FX graph has {model_info.fx_node_count} nodes in total. Number of FX nodes per op:",
    ]
    # One bullet per FX op kind (call_function, placeholder, ...).
    for op, count in model_info.fx_node_op_count.items():
        lines.append(f"- `{op}`: {count}")
    lines.append("\n")
    lines.append("Of the call_function nodes, the counts of operators used are:\n")
    # Most frequently used operators first.
    sorted_targets = sorted(
        model_info.fx_node_target_count.items(), key=lambda x: x[1], reverse=True
    )
    for target, count in sorted_targets:
        lines.append(f"- `{target}`: {count}")

    lines.append("")
    lines.append("## ONNX Conversion Information")
    lines.append("")

    if model_info.dispatch_failures:
        lines.append(
            "The model contains operators the dispatcher could not find registered ONNX decompositions for. "
            "This may be due to missing implementations, decompositions not registered "
            "correctly, or a bug in the dispatcher."
        )
        lines.append("")
        lines.append("Errors grouped by operator:\n")

        # Group failing nodes by their (stringified) target.
        target_to_nodes = defaultdict(list)
        for node, _ in model_info.dispatch_failures:
            target_to_nodes[str(node.target)].append(node)

        # Keep only the first message recorded for each target.
        target_to_messages = {}
        for node, message in model_info.dispatch_failures:
            if str(node.target) not in target_to_messages:
                target_to_messages[str(node.target)] = message

        for target, nodes in sorted(
            target_to_nodes.items(), key=lambda x: x[0], reverse=True
        ):
            message = textwrap.indent(
                f"{target_to_messages[target]}. Example node: `{nodes[0].format_node()}`. All nodes: `{nodes}`",
                "    ",
            )
            lines.append(f"- `{target}`: {message}")
    else:
        lines.append("All operators in the model have registered ONNX decompositions.")

    return "\n".join(lines)
141
+
142
+
143
+ def _get_io_specs(exported_program: torch.export.ExportedProgram) -> tuple[dict, dict]:
144
+ """Get the input and output specs of the exported program."""
145
+
146
+ nodes: dict[str, torch.fx.Node] = {
147
+ node.name: node for node in exported_program.graph.nodes
148
+ }
149
+ user_inputs = [
150
+ spec
151
+ for spec in exported_program.graph_signature.input_specs
152
+ if spec.kind == graph_signature.InputKind.USER_INPUT
153
+ ]
154
+ user_outputs = [
155
+ spec
156
+ for spec in exported_program.graph_signature.output_specs
157
+ if spec.kind == graph_signature.OutputKind.USER_OUTPUT
158
+ ]
159
+ inputs: dict[str, torch._export.serde.schema.TensorMeta] = {}
160
+ outputs: dict[str, torch._export.serde.schema.TensorMeta] = {}
161
+ for spec in user_inputs:
162
+ if isinstance(spec.arg, graph_signature.ConstantArgument):
163
+ continue
164
+ name = spec.arg.name
165
+ # FIXME: tensor_meta is None sometimes when the exported program still knows the shape/type
166
+ inputs[name] = nodes[name].meta["tensor_meta"]
167
+ for spec in user_outputs:
168
+ if isinstance(spec.arg, graph_signature.ConstantArgument):
169
+ continue
170
+ name = spec.arg.name
171
+ outputs[name] = nodes[name].meta["tensor_meta"]
172
+ return inputs, outputs
173
+
174
+
175
def _count_fx_targets(
    exported_program: torch.export.ExportedProgram,
) -> defaultdict[str, int]:
    """Count how many times each call_function target appears in the graph."""
    counts: defaultdict[str, int] = defaultdict(int)
    call_function_nodes = (
        node for node in exported_program.graph.nodes if node.op == "call_function"
    )
    for node in call_function_nodes:
        counts[str(node.target)] += 1
    return counts
184
+
185
+
186
def analyze(
    exported_program: torch.export.ExportedProgram,
    registry: _registration.ONNXRegistry | None = None,
    file=None,
) -> None:
    """Analyze the compatibility of the exported program.

    Args:
        exported_program: The exported program to analyze.
        registry: Registry used for dispatching; defaults to the torchlib registry.
        file: File-like object the report is printed to (stdout when ``None``).
    """
    # Gather basic statistics about the model.
    model_info = ModelInfo()
    model_info.parameter_count, model_info.buffer_count = _count_weights(
        exported_program
    )
    model_info.fx_node_count = len(exported_program.graph.nodes)
    model_info.fx_node_target_count = _count_fx_targets(exported_program)
    model_info.inputs, model_info.outputs = _get_io_specs(exported_program)

    if registry is None:
        registry = _registration.ONNXRegistry.from_torchlib()

    # Try to find an ONNX decomposition for every call_function node.
    for node in exported_program.graph.nodes:
        model_info.fx_node_op_count[node.op] += 1
        if node.op != "call_function":
            continue
        try:
            onnx_function, message = _dispatching.dispatch(node, registry)
        except Exception as e:
            # The dispatcher itself crashed; record the traceback as the message.
            formatted_exception = "\n".join(
                traceback.format_exception(type(e), e, e.__traceback__)
            )
            message = (
                f"Critical Error in dispatcher:\n```pytb\n{formatted_exception}\n```\n"
            )
            onnx_function = None
        if onnx_function is None:
            model_info.dispatch_failures.append((node, message))

    # Emit the formatted report.
    print(_format_model_info(model_info), file=file, flush=True)
225
+
226
+
227
def compare_ops(
    program_a: torch.export.ExportedProgram, program_b: torch.export.ExportedProgram
) -> tuple[set[str], set[str]]:
    """Compare and get unique ops in two exported programs.

    Args:
        program_a: The first exported program.
        program_b: The second exported program.

    Returns:
        A tuple of two sets, where the first set contains the unique ops in the first program
        and the second set contains the unique ops in the second program.
    """
    ops_a = set(_count_fx_targets(program_a))
    ops_b = set(_count_fx_targets(program_b))
    return ops_a.difference(ops_b), ops_b.difference(ops_a)
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_capture_strategies.py ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Strategies for capturing ExportedPrograms."""
2
+
3
+ # mypy: allow-untyped-defs
4
+ from __future__ import annotations
5
+
6
+ import abc
7
+ import dataclasses
8
+ import datetime
9
+ import pathlib
10
+ from typing import Any, Callable, TYPE_CHECKING
11
+
12
+ import torch
13
+ from torch._export import converter as _torchscript_converter
14
+ from torch.utils import _pytree
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ import os
19
+
20
+
21
def _verbose_printer(verbose: bool | None) -> Callable[..., None]:
    """Return a print-like callable gated on ``verbose``.

    Only an explicit ``verbose=False`` silences output; both ``True`` and
    ``None`` print, prefixed with ``[torch.onnx]``.
    """

    def _silent(*_args, **_kwargs) -> None:
        return None

    def _prefixed_print(*args, **kwargs) -> None:
        print("[torch.onnx]", *args, **kwargs)

    return _silent if verbose is False else _prefixed_print
26
+
27
+
28
def _take_first_line(text: str) -> str:
    """Return the first line of *text*, appending ``[...]`` if more follows."""
    first_line, separator, _rest = text.partition("\n")
    # A non-empty separator means the text had at least one more line.
    return first_line + "[...]" if separator else first_line
35
+
36
+
37
@dataclasses.dataclass
class Result:
    """Outcome of applying a capture strategy to a model."""

    # The captured program, or None when the strategy failed.
    exported_program: torch.export.ExportedProgram | None
    # Name identifying the strategy that produced this result.
    strategy: str
    # The exception raised during capture, if any.
    exception: Exception | None = None

    @property
    def success(self) -> bool:
        """Whether the strategy produced an ExportedProgram."""
        return self.exported_program is not None
46
+
47
+
48
class CaptureStrategy(abc.ABC):
    """Strategy for capturing a module as ExportedProgram.

    To use a strategy, create an instance and call it with the model, args, kwargs, and dynamic_shapes.
    Example::

        strategy = TorchExportStrategy(verbose=True)
        result = strategy(model, args, kwargs, dynamic_shapes)
    """

    def __init__(
        self,
        *,
        verbose: bool = False,
        dump: bool = False,
        artifacts_dir: str | os.PathLike = ".",
        timestamp: str | None = None,
    ):
        """Initialize the strategy.

        Args:
            verbose: Whether to print verbose messages.
            dump: Whether to dump the intermediate artifacts to a file.
            artifacts_dir: Directory dumped artifacts are written to.
            timestamp: Timestamp embedded in dumped-artifact file names;
                defaults to the current time.
        """
        self._verbose_print = _verbose_printer(verbose)
        self._dump = dump
        self._artifacts_dir = pathlib.Path(artifacts_dir)
        self._timestamp = timestamp or datetime.datetime.now().strftime(
            "%Y-%m-%d_%H-%M-%S-%f"
        )

    def __call__(
        self,
        model: torch.nn.Module | torch.jit.ScriptFunction,
        args: tuple[Any, ...],
        kwargs: dict[str, Any] | None,
        dynamic_shapes,
    ) -> Result:
        """Run the strategy on the model and wrap the outcome in a Result.

        Exceptions raised by the concrete ``_capture`` are caught and
        returned in ``Result.exception`` rather than propagated.
        """
        self._enter(model)
        if kwargs is None:
            kwargs = {}
        try:
            exported_program = self._capture(model, args, kwargs, dynamic_shapes)
        except Exception as e:
            self._failure(model, e)
            return Result(
                exported_program=None,
                strategy=self.__class__.__name__,
                exception=e,
            )
        self._success(model)
        # Fix: was `self.__call__.__name__`, which labeled every successful
        # result "__call__". Use the class name so success and failure
        # results identify the strategy consistently.
        return Result(exported_program, strategy=self.__class__.__name__)

    @abc.abstractmethod
    def _capture(
        self, model, args, kwargs, dynamic_shapes
    ) -> torch.export.ExportedProgram:
        """Produce the ExportedProgram; implemented by each concrete strategy."""
        raise NotImplementedError

    def _enter(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
        """Hook invoked before capture begins; no-op by default."""
        return

    def _success(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
        """Hook invoked after a successful capture; no-op by default."""
        return

    def _failure(
        self, model: torch.nn.Module | torch.jit.ScriptFunction, e: Exception
    ) -> None:
        """Hook invoked after a failed capture; no-op by default."""
        return
117
+
118
+
119
class TorchExportStrategy(CaptureStrategy):
    """Capture via ``torch.export.export`` (strict mode)."""

    def _capture(
        self, model, args, kwargs, dynamic_shapes
    ) -> torch.export.ExportedProgram:
        try:
            return torch.export.export(
                model, args, kwargs=kwargs, dynamic_shapes=dynamic_shapes
            )
        except torch._dynamo.exc.UserError as user_error:
            # Export rejected the dynamic shapes; retry once with shapes
            # refined from the error's suggested fixes.
            try:
                refined_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
                    user_error.msg, dynamic_shapes
                )
            except Exception:
                # Refinement itself failed; surface the original export error.
                raise user_error from None
            return torch.export.export(
                model, args, kwargs=kwargs, dynamic_shapes=refined_shapes
            )

    def _enter(self, model) -> None:
        self._verbose_print(
            f"Obtain model graph for `{_take_first_line(repr(model))}` with `torch.export.export`..."
        )

    def _success(self, model) -> None:
        self._verbose_print(
            f"Obtain model graph for `{_take_first_line(repr(model))}` with `torch.export.export`... ✅"
        )

    def _failure(self, model, e) -> None:
        del e  # Unused
        self._verbose_print(
            f"Obtain model graph for `{_take_first_line(repr(model))}` with `torch.export.export`... ❌"
        )
158
+
159
+
160
class TorchExportNonStrictStrategy(CaptureStrategy):
    """Capture via ``torch.export.export(..., strict=False)``."""

    def _capture(
        self, model, args, kwargs, dynamic_shapes
    ) -> torch.export.ExportedProgram:
        try:
            return torch.export.export(
                model, args, kwargs=kwargs, dynamic_shapes=dynamic_shapes, strict=False
            )
        except torch._dynamo.exc.UserError as user_error:
            # Export rejected the dynamic shapes; retry once with shapes
            # refined from the error's suggested fixes.
            try:
                refined_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
                    user_error.msg, dynamic_shapes
                )
            except Exception:
                # Refinement itself failed; surface the original export error.
                raise user_error from None
            return torch.export.export(
                model, args, kwargs=kwargs, dynamic_shapes=refined_shapes, strict=False
            )

    def _enter(self, model) -> None:
        self._verbose_print(
            f"Obtain model graph for `{_take_first_line(repr(model))}` with `torch.export.export(..., strict=False)`..."
        )

    def _success(self, model) -> None:
        self._verbose_print(
            f"Obtain model graph for `{_take_first_line(repr(model))}` with `torch.export.export(..., strict=False)`... ✅"
        )

    def _failure(self, model, e) -> None:
        del e  # Unused
        self._verbose_print(
            f"Obtain model graph for `{_take_first_line(repr(model))}` with `torch.export.export(..., strict=False)`... ❌"
        )
199
+
200
+
201
class JitTraceConvertStrategy(CaptureStrategy):
    """Capture by tracing with ``torch.jit.trace`` and converting the
    resulting TorchScript module into an ExportedProgram."""

    def _capture(
        self, model, args, kwargs, dynamic_shapes
    ) -> torch.export.ExportedProgram:
        # Dynamic shapes are not used by this strategy.
        del dynamic_shapes  # Unused

        flattened_args, spec = _pytree.tree_flatten((args, kwargs))
        flattened_args = tuple(flattened_args)

        # Since torch.jit.trace only accepts Tensors as inputs, we filter
        # out non-Tensor arguments and reconstruct the arguments after entering
        # the WrappedModel.
        tensor_placeholder = object()  # sentinel marking a tensor's slot
        non_tensor_args = [
            arg if not isinstance(arg, torch.Tensor) else tensor_placeholder
            for arg in flattened_args
        ]
        tensor_args = tuple(
            arg for arg in flattened_args if isinstance(arg, torch.Tensor)
        )

        class WrappedModel(torch.nn.Module):
            """Wrap the model so that it takes flattened arguments."""

            def __init__(self, m):
                super().__init__()
                self.model = m

            def forward(self, *_args):
                # Take the non-Tensor arguments list as a starting point and
                # replace the tensor_placeholder with the actual tensor arguments
                # from _args.
                reconstructed_flattened_args = non_tensor_args.copy()
                _args_iter = iter(_args)
                for i, arg in enumerate(reconstructed_flattened_args):
                    if arg is tensor_placeholder:
                        reconstructed_flattened_args[i] = next(_args_iter)
                # Unflatten the arguments and kwargs to pass to the model.
                unflattened_args, unflattened_kwargs = _pytree.tree_unflatten(
                    reconstructed_flattened_args, spec
                )
                results = self.model(*unflattened_args, **unflattened_kwargs)
                if not isinstance(results, tuple):
                    results = (results,)
                # Flatten outputs too: a single result is returned as-is, more
                # than one as a tuple.
                flattened_results, _ = _pytree.tree_flatten(results)
                if len(flattened_results) == 1:
                    return flattened_results[0]
                return tuple(flattened_results)

        jit_model = torch.jit.trace(
            WrappedModel(model),
            example_inputs=tensor_args,
            check_trace=False,
            strict=False,
        )
        if self._dump:
            program_path = self._artifacts_dir / f"onnx_export_{self._timestamp}.pt"
            try:
                torch.jit.save(jit_model, program_path)
            except Exception as e:
                # Dumping is best-effort; a failed save must not abort capture.
                self._verbose_print(
                    f"Failed to save Torch Script model due to an error: {e}"
                )
            else:
                self._verbose_print(
                    f"Torch Script model has been saved to '{program_path}'."
                )
        return _torchscript_converter.TS2EPConverter(
            jit_model, flattened_args
        ).convert()

    def _enter(self, model) -> None:
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with Torch Script..."
        )

    def _success(self, model) -> None:
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with Torch Script... ✅"
        )

    def _failure(self, model, e) -> None:
        del e  # Unused
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with Torch Script... ❌"
        )
290
+
291
+
292
class LegacyDynamoStrategy(CaptureStrategy):
    """Strategy implemented by the ONNX team using internal dynamo APIs and custom fx passes."""

    def _capture(
        self, model, args, kwargs, dynamic_shapes
    ) -> torch.export.ExportedProgram:
        # NOTE: Import here to prevent circular dependency
        from torch.onnx._internal.fx import diagnostics, passes

        graph_module, _ = torch._dynamo.export(
            model,
            tracing_mode="symbolic",
            dynamic_shapes=dynamic_shapes,
        )(
            *args,
            **kwargs,
        )
        # Reset dynamo's global state after tracing.
        torch._dynamo.reset()

        diagnostic_context = diagnostics.DiagnosticContext(
            "torch.onnx.export",
            torch.__version__,
        )

        flattened_args, _ = _pytree.tree_flatten((args, kwargs))
        flattened_args = tuple(flattened_args)

        # ONNX does not support views and mutations.
        # Functionalize to get a semantically equivalent graph without mutations.
        graph_module = passes.Functionalize(
            diagnostic_context,
            graph_module,
            enable_dynamic_axes=bool(dynamic_shapes),
        ).run(*flattened_args)

        # Input mutations are detected and distilled after `Functionalize` pass.
        # Remove them since ONNX inference does not need them.
        graph_module = passes.RemoveInputMutation(diagnostic_context, graph_module).run(
            *flattened_args
        )

        # Use torch.export to recapture the GraphModule into an ExportedProgram.
        return torch.export.export(graph_module, flattened_args)

    def _enter(self, model) -> None:
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with internal Dynamo apis..."
        )

    def _success(self, model) -> None:
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with internal Dynamo apis... ✅"
        )

    def _failure(self, model, e) -> None:
        del e  # Unused
        model_repr = _take_first_line(repr(model))
        self._verbose_print(
            f"Obtain model graph for `{model_repr}` with internal Dynamo apis... ❌"
        )
354
+
355
+
356
# The available capture strategies.
# NOTE(review): presumably consumers try these in the listed order, falling
# back to the next on failure — confirm against the caller.
CAPTURE_STRATEGIES = (
    TorchExportStrategy,
    TorchExportNonStrictStrategy,
    JitTraceConvertStrategy,
    LegacyDynamoStrategy,
)
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_compat.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compatibility functions for the torch.onnx.export API."""
2
+
3
+ # mypy: allow-untyped-defs
4
+ # mypy: disable-error-code=attr-defined
5
+ from __future__ import annotations
6
+
7
+ import inspect
8
+ import logging
9
+ from typing import Any, Mapping, Sequence, TYPE_CHECKING
10
+
11
+ import torch
12
+ from torch.onnx._internal._lazy_import import onnxscript_apis, onnxscript_ir as ir
13
+ from torch.onnx._internal.exporter import _core, _onnx_program
14
+
15
+
16
+ if TYPE_CHECKING:
17
+ import os
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
+ def _signature(model) -> inspect.Signature:
23
+ should_be_callable = getattr(model, "forward", model)
24
+ if callable(should_be_callable):
25
+ return inspect.signature(should_be_callable)
26
+ raise ValueError("model has no forward method and is not callable")
27
+
28
+
29
def _from_dynamic_axes_to_dynamic_shapes(
    model,
    *,
    dynamic_axes: Mapping[str, Mapping[int, str]]
    | Mapping[str, Sequence[int]]
    | None = None,
    output_names: set[str],
    input_names: Sequence[str] | None = None,
) -> dict[str, Any] | None:
    """Convert legacy ``dynamic_axes`` to the ``dynamic_shapes`` format of torch.export.

    dynamic_axes examples:
    (1) dynamic_axes = {"x": {0: "my_custom_axis_name_1"}, "y": {1: "my_custom_axis_name_2"}}
    (2) dynamic_axes = {"x": [0], "y": [1]}

    these will be converted to dynamic_shapes respectively:
    (1) dynamic_shapes = {"x": {0: Dim("my_custom_axis_name_1")}, "y": {1: Dim("my_custom_axis_name_2")}}
    (2) dynamic_shapes = {"x": {0: Dim("x_dim_0")}, "y": {1: Dim("y_dim_1")}} # auto-generated dim names

    Args:
        model: The model whose forward signature determines the input order.
        dynamic_axes: Legacy per-input axis specification (dict or list form).
        output_names: Output names; entries of ``dynamic_axes`` naming an output
            are skipped since torch.export only takes input shapes.
        input_names: Optional user-supplied input names that alias the model's
            positional parameters, in order.

    Returns:
        A dict keyed by the model's *parameter* names suitable for
        ``torch.export.export(..., dynamic_shapes=...)``, or None when
        ``dynamic_axes`` is None.

    Raises:
        ValueError: If more input names than model parameters are given, or a
            dynamic axis names an unknown input.
        TypeError: If a ``dynamic_axes`` value is neither a dict nor a list.
    """
    # https://github.com/pytorch/pytorch/pull/128371
    # 1. The function does not need to provide dynamic_shapes to torch.export.export
    if dynamic_axes is None:
        return None

    if input_names is None:
        input_names = []

    sig = _signature(model)
    if len(input_names) > len(sig.parameters):
        raise ValueError(
            f"Number of input names ({len(input_names)}) should not be greater than "
            f"the number of model inputs ({len(sig.parameters)})"
        )
    # Alias the first len(input_names) parameters with the user-supplied names;
    # remaining parameters map to themselves.
    input_names_to_model_inputs = {}
    for idx, param_name in enumerate(sig.parameters):
        if idx < len(input_names):
            input_names_to_model_inputs[input_names[idx]] = param_name
        else:
            input_names_to_model_inputs[param_name] = param_name

    # NOTE: torch.export.export does not support input names assignment,
    # so we need to map input names to model inputs to create dynamic_shapes
    # for the exported program
    dynamic_shapes_to_exported_program = {}
    for input_name, axes in dynamic_axes.items():
        if input_name in output_names:
            # User specified an output name as a dynamic axis, so we skip it
            continue
        # input_name can be either from input_names or from the model inputs
        if input_name not in input_names_to_model_inputs:
            raise ValueError(
                f"dynamic axis: {input_name} is not found in the input names: {input_names}"
            )
        model_input_name = input_names_to_model_inputs[input_name]
        if isinstance(axes, dict):
            # Dict form: user supplied custom dim names per axis.
            dynamic_shapes_to_exported_program[model_input_name] = {
                k: torch.export.Dim(v) for k, v in axes.items()
            }
        elif isinstance(axes, list):
            # List form: auto-generate dim names as "<input>_dim_<axis>".
            dynamic_shapes_to_exported_program[model_input_name] = {
                k: torch.export.Dim(f"{model_input_name}_dim_{k}") for k in axes
            }
        else:
            raise TypeError(
                f"dynamic_axes value must be either a dict or a list, but got {type(axes)}"
            )
    # torch.export.export needs static dim to present in dynamic_shapes
    # for all input tensors, so we need to add them with None
    for input_name in sig.parameters:
        if input_name not in dynamic_shapes_to_exported_program:
            dynamic_shapes_to_exported_program[input_name] = None  # type: ignore[assignment]

    return dynamic_shapes_to_exported_program
101
+
102
+
103
+ def _get_torch_export_args(
104
+ args: tuple[Any, ...],
105
+ kwargs: dict[str, Any] | None,
106
+ ) -> tuple[tuple[Any, ...], dict[str, Any] | None]:
107
+ """Obtain the arguments for torch.onnx.export from the model and the input arguments."""
108
+ if not kwargs and args and isinstance(args[-1], dict):
109
+ kwargs = args[-1]
110
+ args = args[:-1]
111
+ return args, kwargs
112
+
113
+
114
def export_compat(
    model: torch.nn.Module
    | torch.export.ExportedProgram
    | torch.jit.ScriptModule
    | torch.jit.ScriptFunction,
    args: tuple[Any, ...],
    f: str | os.PathLike | None = None,
    *,
    kwargs: dict[str, Any] | None = None,
    export_params: bool = True,
    verbose: bool | None = None,
    input_names: Sequence[str] | None = None,
    output_names: Sequence[str] | None = None,
    opset_version: int | None = None,
    dynamic_axes: Mapping[str, Mapping[int, str]]
    | Mapping[str, Sequence[int]]
    | None = None,
    dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None,
    keep_initializers_as_inputs: bool = False,
    external_data: bool = True,
    report: bool = False,
    verify: bool = False,
    profile: bool = False,
    dump_exported_program: bool = False,
    artifacts_dir: str | os.PathLike = ".",
    fallback: bool = False,
    **_,
) -> _onnx_program.ONNXProgram:
    """Export ``model`` to ONNX with the torch.onnx.export-compatible surface.

    Tries the new ``_core.export`` path first; when ``fallback`` is True and
    that fails, re-exports with the legacy ``torch.onnx.utils.export`` at
    opset 17 and reloads the saved file. The resulting model is then converted
    to ``opset_version``, optimized, and (if ``f`` is given) saved to disk.

    Raises:
        TypeError: If the new exporter fails, ``fallback`` is True, and no
            output path ``f`` was provided (the legacy path must write a file).
    """
    if opset_version is None:
        # TODO(justinchuby): Change the hardcoded opset version for it to be flexible
        opset_version = 18

    if isinstance(model, torch.export.ExportedProgram):
        # We know the model is already exported program, so the args, kwargs, and dynamic_shapes
        # are not used
        dynamic_shapes = dynamic_shapes or {}
    else:
        args, kwargs = _get_torch_export_args(args, kwargs)
        if dynamic_shapes is None and dynamic_axes is not None:
            # Translate the legacy dynamic_axes spec into torch.export's format.
            dynamic_shapes = _from_dynamic_axes_to_dynamic_shapes(
                model,
                dynamic_axes=dynamic_axes,
                input_names=input_names,
                output_names=set(output_names or ()),
            )

    try:
        onnx_program = _core.export(
            model,
            args,
            kwargs,
            registry=None,
            dynamic_shapes=dynamic_shapes,
            input_names=input_names,
            output_names=output_names,
            profile=profile,
            report=report,
            verify=verify,
            dump_exported_program=dump_exported_program,
            artifacts_dir=artifacts_dir,
            verbose=verbose,
        )

    except Exception as e:
        if fallback:
            # Best-effort fallback to the TorchScript-based exporter.
            if verbose is not False:
                print(
                    "[torch.onnx] Falling back to legacy torch.onnx.export due "
                    f"to the following error: {e}",
                )
            if f is None:
                raise TypeError("f must be provided when fallback is enabled") from e
            torch.onnx.utils.export(
                model,  # type: ignore[arg-type]
                args,
                f,  # type: ignore[arg-type]
                kwargs=kwargs,
                export_params=export_params,
                input_names=input_names,
                output_names=output_names,
                opset_version=17,  # TODO(justinchuby): Hard coded to 17 for now
                dynamic_axes=dynamic_axes,
                keep_initializers_as_inputs=keep_initializers_as_inputs,
            )
            # Reload the file the legacy exporter wrote so we can keep
            # post-processing uniformly on an ONNXProgram.
            onnx_program = _onnx_program.ONNXProgram(ir.load(f), None)
        else:
            raise

    # Convert opset version and optimize
    onnx_program.model = onnxscript_apis.convert_version(
        onnx_program.model, opset_version
    )
    onnx_program.model = onnxscript_apis.optimize(onnx_program.model)

    if f is not None:
        onnx_program.save(
            f,
            include_initializers=export_params,
            keep_initializers_as_inputs=keep_initializers_as_inputs,
            external_data=external_data,
        )

    return onnx_program
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_core.py ADDED
@@ -0,0 +1,1341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # flake8: noqa: B950 We do not need flake8 as it complains line length
3
+ from __future__ import annotations
4
+
5
+ import ctypes
6
+ import datetime
7
+ import inspect
8
+ import itertools
9
+ import logging
10
+ import operator
11
+ import pathlib
12
+ import textwrap
13
+ import traceback
14
+ import typing
15
+ from typing import Any, Callable, Literal, Sequence
16
+
17
+ import onnxscript
18
+ import onnxscript.evaluator
19
+ from onnxscript import ir
20
+ from onnxscript.ir import convenience as ir_convenience
21
+
22
+ import torch
23
+ import torch.fx
24
+ from torch.export import graph_signature
25
+ from torch.onnx._internal._lazy_import import onnxscript_apis
26
+ from torch.onnx._internal.exporter import (
27
+ _analysis,
28
+ _building,
29
+ _capture_strategies,
30
+ _dispatching,
31
+ _errors,
32
+ _fx_passes,
33
+ _ir_passes,
34
+ _onnx_program,
35
+ _registration,
36
+ _reporting,
37
+ _tensors,
38
+ _verification,
39
+ )
40
+
41
+
42
+ if typing.TYPE_CHECKING:
43
+ import os
44
+
45
+ import numpy as np
46
+
47
+
48
# Define utilities to convert PyTorch data types so users do not need to specify manually
# Mapping from torch dtypes to ONNX IR data types, consumed by
# _torch_dtype_to_onnx_dtype below. Dtypes missing here raise KeyError there.
_TORCH_DTYPE_TO_ONNX: dict[torch.dtype, ir.DataType] = {
    torch.bfloat16: ir.DataType.BFLOAT16,
    torch.bool: ir.DataType.BOOL,
    torch.complex128: ir.DataType.COMPLEX128,
    torch.complex64: ir.DataType.COMPLEX64,
    torch.float16: ir.DataType.FLOAT16,
    torch.float32: ir.DataType.FLOAT,
    torch.float64: ir.DataType.DOUBLE,
    torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
    torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
    torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
    torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
    torch.int16: ir.DataType.INT16,
    torch.int32: ir.DataType.INT32,
    torch.int64: ir.DataType.INT64,
    torch.int8: ir.DataType.INT8,
    torch.uint8: ir.DataType.UINT8,
    torch.uint16: ir.DataType.UINT16,
    torch.uint32: ir.DataType.UINT32,
    torch.uint64: ir.DataType.UINT64,
}
# ANSI escape codes used to highlight parts of the error messages below.
_BLUE = "\033[96m"
_END = "\033[0m"

# Guidance shown when torch.export.export fails (step 1 of the two-step export).
# NOTE(review): "summit" should read "submit" in both messages below — these
# are user-facing runtime strings, so the typo is flagged here rather than
# silently edited.
_STEP_ONE_ERROR_MESSAGE = textwrap.dedent(
    f"""\
    Failed to export the model with torch.export. {_BLUE}This is step 1/2{_END} of exporting the model to ONNX. Next steps:
    - Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
    - Debug `torch.export.export` and summit a PR to PyTorch.
    - Create an issue in the PyTorch GitHub repository against the {_BLUE}*torch.export*{_END} component and attach the full error stack as well as reproduction scripts."""
)

# Guidance shown when ExportedProgram -> ONNX conversion fails (step 2).
_STEP_TWO_ERROR_MESSAGE = textwrap.dedent(
    f"""\
    Failed to convert the exported program to an ONNX model. {_BLUE}This is step 2/2{_END} of exporting the model to ONNX. Next steps:
    - If there is a missing ONNX function, implement it and register it to the registry.
    - If there is an internal error during ONNX conversion, debug the error and summit a PR to PyTorch.
    - Save the ExportedProgram as a pt2 file and create an error report with `export(..., report=True)`. Create an issue in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component. Attach the pt2 model and the error report."""
)

logger = logging.getLogger(__name__)
90
+
91
+
92
def _torch_dtype_to_onnx_dtype(dtype: torch.dtype) -> ir.DataType:
    """Map a torch.dtype to the corresponding ONNX IR DataType.

    Raises:
        KeyError: If ``dtype`` has no entry in ``_TORCH_DTYPE_TO_ONNX``.
    """
    return _TORCH_DTYPE_TO_ONNX[dtype]
94
+
95
+
96
class TorchTensor(ir.Tensor):
    """An ir.Tensor backed directly by a torch.Tensor (no NumPy copy at init)."""

    def __init__(self, tensor: torch.Tensor, name: str | None = None):
        # Pass the tensor as the raw data to ir.Tensor's constructor
        super().__init__(
            tensor, dtype=_torch_dtype_to_onnx_dtype(tensor.dtype), name=name
        )

    def numpy(self) -> np.ndarray:
        """Return the tensor as a NumPy array, bit-casting dtypes NumPy lacks."""
        self.raw: torch.Tensor
        if self.dtype == ir.DataType.BFLOAT16:
            # NumPy has no bfloat16; expose the raw bits as uint16.
            return self.raw.view(torch.uint16).numpy(force=True)
        if self.dtype in {
            ir.DataType.FLOAT8E4M3FN,
            ir.DataType.FLOAT8E4M3FNUZ,
            ir.DataType.FLOAT8E5M2,
            ir.DataType.FLOAT8E5M2FNUZ,
        }:
            # TODO: Use ml_dtypes
            # NumPy has no float8 either; expose the raw bits as uint8.
            return self.raw.view(torch.uint8).numpy(force=True)
        return self.raw.numpy(force=True)

    def __array__(self, dtype: Any = None, copy: bool | None = None) -> np.ndarray:
        """NumPy protocol hook so np.asarray(tensor) works."""
        del copy  # Unused, but needed for the signature
        if dtype is None:
            return self.numpy()
        return self.numpy().__array__(dtype)

    def tobytes(self) -> bytes:
        """Serialize the tensor's memory to bytes without a NumPy round-trip.

        Raises:
            TypeError: If the underlying tensor is a FakeTensor (no real data).
        """
        # Implement tobytes to support native PyTorch types so we can use types like bfloat16
        # Reading from memory directly is also more efficient because
        # it avoids copying to a NumPy array
        import torch._subclasses.fake_tensor

        if isinstance(self.raw, torch._subclasses.fake_tensor.FakeTensor):
            raise TypeError(
                f"Cannot take content out from the FakeTensor ('{self.name}'). Please replace the tensor "
                "with a tensor backed by real data using ONNXProgram.apply_weights() "
                "or save the model without initializers by setting include_initializers=False."
            )
        # Contiguous CPU copy so data_ptr() addresses a dense buffer.
        tensor = self.raw.detach().cpu().contiguous()
        return bytes(
            (ctypes.c_ubyte * tensor.element_size() * tensor.numel()).from_address(
                tensor.data_ptr()
            )
        )
141
+
142
+
143
+ # https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L56C1-L62C19
144
+ # class InputKind(Enum):
145
+ # USER_INPUT = auto()
146
+ # PARAMETER = auto()
147
+ # BUFFER = auto()
148
+ # CONSTANT_TENSOR = auto()
149
+ # CUSTOM_OBJ = auto()
150
+ # TOKEN = auto()
151
+
152
+ # https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L89C1-L96C19
153
+ # class OutputKind(Enum):
154
+ # USER_OUTPUT = auto()
155
+ # LOSS_OUTPUT = auto()
156
+ # BUFFER_MUTATION = auto()
157
+ # GRADIENT_TO_PARAMETER = auto()
158
+ # GRADIENT_TO_USER_INPUT = auto()
159
+ # USER_INPUT_MUTATION = auto()
160
+ # TOKEN = auto()
161
+
162
+
163
def _set_shape_types(
    values: Sequence[ir.Value],
    meta_vals: Sequence[torch.Tensor],
    complex_to_float: bool = True,
) -> None:
    """Set shape/dtype on each ir.Value from the matching FX meta value."""
    if not isinstance(meta_vals, Sequence):
        # Defensive: wrap a stray single meta value so the pairwise loop works.
        logger.warning(
            "Expected meta_vals to be a sequence, but got %s. There may be an internal error.",
            meta_vals,
        )
        meta_vals = (meta_vals,)
    for ir_value, meta in zip(values, meta_vals):
        _set_shape_type(ir_value, meta, complex_to_float=complex_to_float)
176
+
177
+
178
def _set_shape_type(
    value: ir.Value,
    meta_val: torch.Tensor
    | torch.SymBool
    | torch.SymInt
    | torch.SymFloat
    | tuple[torch.Tensor],
    complex_to_float: bool,
) -> None:
    """Set ``value.dtype`` and ``value.shape`` from an FX node's meta value.

    Tensors get their dtype/shape copied (symbolic dims become their string
    names); Python/Sym scalars become 0-d values; complex tensors are optionally
    rewritten as real tensors with a trailing dimension of 2. Unknown meta types
    leave ``value`` untouched.
    """
    # TODO: Consider using meta["tensor_meta"] for this? Would it be faster?
    if isinstance(meta_val, tuple):
        # Tuples are only warned about; the checks below will not match, so
        # the value is left without shape/type information.
        logger.warning("Setting shape and type of tensors is not supported yet")
    if isinstance(meta_val, torch.Tensor):
        # FIXME: Consider shape for complex values
        dims = []
        for dim in meta_val.shape:
            if isinstance(dim, int):
                dims.append(dim)
            else:
                # Symbolic dimension: record its symbol name as a string.
                dims.append(str(dim.node))
        value.dtype = _torch_dtype_to_onnx_dtype(meta_val.dtype)
        if complex_to_float:
            if meta_val.dtype == torch.complex64:
                value.dtype = ir.DataType.FLOAT
                # Add 2 as the last dimension if the tensor is complex to hold the real/imag parts
                dims.append(2)
            elif meta_val.dtype == torch.complex128:
                value.dtype = ir.DataType.DOUBLE
                # Add 2 as the last dimension if the tensor is complex to hold the real/imag parts
                dims.append(2)

        value.shape = ir.Shape(dims)
    elif isinstance(meta_val, (int, torch.SymInt)):
        # aten::sym_size output is a int, not a tensor, which stands
        # for the size of one dim. We treat it as a scalar.
        value.dtype = ir.DataType.INT64
        value.shape = ir.Shape([])
    elif isinstance(meta_val, (bool, torch.SymBool)):
        value.dtype = ir.DataType.BOOL
        value.shape = ir.Shape([])
    elif isinstance(meta_val, (float, torch.SymFloat)):
        value.dtype = ir.DataType.FLOAT
        value.shape = ir.Shape([])
    else:
        # Unrecognized meta value: intentionally leave shape/type unset.
        pass
223
+
224
+
225
+ def _get_qualified_module_name(cls: Any) -> str:
226
+ if isinstance(cls, str):
227
+ return cls
228
+ module = cls.__module__
229
+ if module is None or module == str.__class__.__module__:
230
+ return cls.__name__
231
+ return module + "." + cls.__name__
232
+
233
+
234
def _get_node_namespace(node: torch.fx.Node) -> tuple[str, list[str], list[str]]:
    """Get the namespace and scope of the node.

    Example::

        {
            'L__self__': ('', <class 'torchvision.models.resnet.ResNet'>),
            'L__self___avgpool': ('avgpool', <class 'torch.nn.modules.pooling.AdaptiveAvgPool2d'>)
        }

    Will yield

    namespace: ": torchvision.models.resnet.ResNet/avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d/node_name: node_target"
    class_hierarchy: ["torchvision.models.resnet.ResNet", "torch.nn.modules.pooling.AdaptiveAvgPool2d", <node_target>]
    name_scopes: ["", "avgpool", <node_name>]

    Args:
        node: The node to get the namespace and scope of.

    Returns:
        (namespace, class_hierarchy, name_scope)
    """
    nn_module_stack = node.meta.get("nn_module_stack")
    logger.debug("%s", nn_module_stack)
    if nn_module_stack is None:
        # Fall back to a single-level scope built from the node itself.
        logger.warning(
            "nn_module_stack not found for node '%s'. Skip adding metadata...",
            node.name,
        )
        return f"{node.name}: {node.target}", [str(node.target)], [node.name]
    namespaces = []
    class_hierarchy = []
    name_scopes = []
    # Walk the module stack from outermost to innermost scope.
    for name, nn_module in nn_module_stack.values():
        name_scopes.append(name)
        nn_module_name = _get_qualified_module_name(nn_module)
        class_hierarchy.append(nn_module_name)
        namespaces.append(f"{name}: {_get_qualified_module_name(nn_module)}")
    # The node itself is the innermost entry of every list.
    namespaces.append(f"{node.name}: {node.target}")
    class_hierarchy.append(str(node.target))
    name_scopes.append(node.name)

    return "/".join(namespaces), class_hierarchy, name_scopes
277
+
278
+
279
def _set_node_metadata(fx_node: torch.fx.Node, ir_node: ir.Node) -> None:
    """Adds namespace and other node metadata to the ONNX node.

    The values land in ``metadata_props`` under ``pkg.torch.onnx.*`` keys
    (plus ``namespace``) so they survive serialization of the ONNX model.
    """
    namespace, class_hierarchy, name_scopes = _get_node_namespace(fx_node)
    ir_node.metadata_props["namespace"] = namespace
    # repr() is used so the lists round-trip as readable Python literals.
    ir_node.metadata_props["pkg.torch.onnx.class_hierarchy"] = repr(class_hierarchy)
    ir_node.metadata_props["pkg.torch.onnx.name_scopes"] = repr(name_scopes)
    ir_node.metadata_props["pkg.torch.onnx.fx_node"] = str(fx_node.format_node())
    ir_node.metadata_props["pkg.torch.onnx.stack_trace"] = fx_node.meta.get(
        "stack_trace", ""
    )
289
+
290
+
291
def _handle_getitem_node(
    node: torch.fx.Node, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]]
) -> ir.Value:
    """Handle a getitem node.

    Add the input value it is getting to the mapping, then return the value.

    There are two cases for this node:
    1. The output is a Sequence (traced), we can simply get the value from the sequence
    2. The output is produced by a SplitToSequence node, we need to get the value from the sequence value
    This function only handles the first case
    """
    assert len(node.all_input_nodes) == 1
    source = node.all_input_nodes[0]
    source_outputs = node_name_to_values[source.name]
    assert isinstance(
        source_outputs, Sequence
    ), f"Expected {source.name} to output sequence, got {node_name_to_values[source.name]}"
    # The index being selected is always the second FX argument.
    index = typing.cast(int, node.args[1])
    value = source_outputs[index]
    # Save the getitem value to the values mapping in case
    # it is one of the graph outputs
    node_name_to_values[node.name] = value
    # Rename the value to match the getitem node's name.
    value.name = node.name
    return value
317
+
318
+
319
def _handle_call_function_node(
    graph: ir.Graph,
    node: torch.fx.Node,
    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
) -> None:
    """Handle a call_function node.

    Translates the FX node one-to-one into an ONNX node in the custom
    "pkg.torch.ops" domain (no lowering to standard ONNX ops).

    Args:
        graph: The ONNX graph at construction.
        node: The FX node to translate.
        node_name_to_values: A mapping of FX node names to their produced ir.Value.
    """
    if node.target == operator.getitem:
        # NOTE(review): unlike the lowering variant, this does not return after
        # handling getitem, so a "pkg.torch.ops" node is still appended below —
        # confirm whether that duplication is intentional for the no-lowering path.
        _handle_getitem_node(node, node_name_to_values)
    # Add op to the graph
    op = str(node.target)
    fx_inputs, attributes, input_names, output_names = _get_inputs_and_attributes(node)
    inputs: list[ir.Value | None] = []
    for i, input_ in enumerate(fx_inputs):
        if input_ is None:
            inputs.append(None)
        elif hasattr(input_, "name"):
            if isinstance(input_, torch.fx.Node) and input_.target == operator.getitem:
                actual_input = _handle_getitem_node(input_, node_name_to_values)
                inputs.append(actual_input)
            else:
                value = node_name_to_values[input_.name]
                assert not isinstance(value, Sequence)
                inputs.append(value)
        else:
            # Non-node arguments become attributes with positional names.
            attributes[f"arg_{i}"] = input_

    outputs = [ir.Value(name=name) for name in output_names]
    if len(outputs) > 1:
        _set_shape_types(outputs, node.meta["val"], complex_to_float=False)
        node_name_to_values[node.name] = outputs
    else:
        _set_shape_type(outputs[0], node.meta["val"], complex_to_float=False)
        node_name_to_values[node.name] = outputs[0]
    ir_node = ir.Node(
        "pkg.torch.ops",
        op,
        inputs,
        attributes=ir_convenience.convert_attributes(attributes),
        outputs=outputs,
        name=node.name,
    )
    ir_node.meta["node"] = node
    ir_node.metadata_props["pkg.torch.onnx.input_names"] = repr(input_names)
    # Record the nn.Module stack for the node
    _set_node_metadata(node, ir_node)

    graph.append(ir_node)
372
+
373
+
374
def _convert_fx_arg_to_onnx_arg(
    arg, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]]
) -> Any:
    """Convert an FX argument to an ONNX compatible argument.

    This function
    - Converts a torch dtype to an integer
    - Converts a torch device/memory_format/layout to a string
    - Converts a torch.fx.Node to an ir.Value
    - Converts a sequence of torch.fx.Node to a sequence of ir.Value
    """
    if arg is None:
        # None arguments are not modified because when the arg is an ONNX input
        # we need to preserve the None value; when the arg is an ONNX attribute,
        # we want to drop the value.
        # The actual dropping of a None attribute value is done by OpRecorder
        return None
    if hasattr(arg, "name"):
        if isinstance(arg, torch.fx.Node) and arg.target == operator.getitem:
            source = arg.all_input_nodes[0]
            source_outputs = node_name_to_values[source.name]
            if isinstance(source_outputs, Sequence):
                # If the node is getting an input from another node, get the actual value the node is retrieving
                return _handle_getitem_node(arg, node_name_to_values)
            else:
                # `source_outputs` is a sequence(tensor()) value and we need to
                # use SequenceAt to get the value. This is handled by torchlib
                pass
        # If the input is a node, get the value from the mapping
        return node_name_to_values[arg.name]
    if isinstance(arg, (list, tuple)):
        # Recurse elementwise; note tuples come back as lists.
        return [_convert_fx_arg_to_onnx_arg(elem, node_name_to_values) for elem in arg]
    if isinstance(arg, (torch.device, torch.memory_format, torch.layout)):
        return str(arg)
    if isinstance(arg, torch.dtype):
        return _torch_dtype_to_onnx_dtype(arg)
    # Maybe a Python value
    return arg
412
+
413
+
414
def _get_onnxscript_opset(opset_version: int) -> onnxscript.values.Opset:
    """Return the standard ("" domain) ONNX opset at the given version."""
    return onnxscript.values.Opset("", opset_version)
416
+
417
+
418
def _handle_call_function_node_with_lowering(
    model: ir.Model,
    node: torch.fx.Node,
    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
    constant_farm: dict[Any, ir.Value],
    registry: _registration.ONNXRegistry,
    opset: onnxscript.values.Opset,
) -> None:
    """Translate a call_function FX node by lowering it to standard ONNX ops.

    Dispatches the node to a registered ONNX function, records the resulting
    ONNX nodes via an OpRecorder tracer, sets output shapes/dtypes, and appends
    the traced nodes (and any functions they define) to ``model``.

    Raises:
        _errors.DispatchError: If no ONNX function is registered for the node.
        _errors.GraphConstructionError: If the dispatched function fails.
    """
    if node.target == operator.getitem:
        source = node.all_input_nodes[0]
        source_outputs = node_name_to_values[source.name]
        if isinstance(source_outputs, Sequence):
            _handle_getitem_node(node, node_name_to_values)
            return
        else:
            # `source_outputs` is a sequence(tensor()) value and we need to
            # use SequenceAt to get the value. This is handled by torchlib
            pass

    # Find the matching ONNX overload for the node
    # NOTE: Create different registries for different ONNX opset versions
    # TODO: Log the message here to expose false positives
    onnx_function, message = _dispatching.dispatch(node, registry)

    if onnx_function is None:
        # TODO(justinchuby): Fall back to ATen op or do something else?
        raise _errors.DispatchError(
            f"No ONNX function found for {node.target!r}. Failure message: {message}"
        )

    # Map FX inputs to ONNX inputs and fill optional inputs.
    # torch_args and torch_kwargs are for op-level validation
    fx_args = node.args
    fx_kwargs = node.kwargs

    # Replace the input FX nodes with ONNX values
    onnx_args = [
        _convert_fx_arg_to_onnx_arg(input_, node_name_to_values) for input_ in fx_args
    ]

    onnx_kwargs = {}
    for key, value in fx_kwargs.items():
        onnx_kwargs[key] = _convert_fx_arg_to_onnx_arg(value, node_name_to_values)
        if key == "dtype" and onnx_kwargs[key] is None:
            # Set dtype to -1 if it is None
            onnx_kwargs[key] = -1

    # Run the ONNX function under the recording evaluator so every op call is
    # captured as an ir.Node instead of being executed.
    with onnxscript.evaluator.default_as(
        tracer := _building.OpRecorder(opset, constant_farm)
    ):
        try:
            outputs = onnx_function(*onnx_args, **onnx_kwargs)
        except Exception as e:
            raise _errors.GraphConstructionError(
                f"Error when calling function '{onnx_function}' with args '{onnx_args}' and kwargs '{onnx_kwargs}'"
            ) from e

    # NOTE: Instead of using the output names from node.target._schema,
    # we always use the index if there are more than one outputs so the
    # names can be programmatically reconstructed. This is useful for
    # comparing values from the ONNX graph with those from the FX graph.
    #
    # When there are multiple outputs, the output names will be
    # node_name__0, node_name__1, etc.
    if isinstance(outputs, Sequence):
        _set_shape_types(outputs, node.meta["val"], complex_to_float=True)
        node_name_to_values[node.name] = outputs
        for i, output in enumerate(outputs):
            output.name = f"{node.name}__{i}"
    else:
        _set_shape_type(outputs, node.meta["val"], complex_to_float=True)
        node_name_to_values[node.name] = outputs
        outputs.name = node.name

    for ir_node in tracer.nodes:
        ir_node.meta["node"] = node
        # Record the nn.Module stack for the node
        _set_node_metadata(node, ir_node)

    # Add the traced nodes to the graph
    model.graph.extend(tracer.nodes)
    # Add the defined functions to the model
    for identifier, onnxscript_function in tracer.functions.items():
        if identifier in model.functions:
            continue
        # TODO: Get IR function directly when onnxscript is updated
        proto = onnxscript_function.to_function_proto()
        ir_function = ir.serde.deserialize_function(proto)
        model.functions[identifier] = ir_function
        if ir_function.domain not in model.opset_imports:
            # FIXME: Record the correct opset version of the function
            model.opset_imports[ir_function.domain] = 1
510
+
511
+
512
def _handle_placeholder_node(
    node: torch.fx.Node,
    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
    *,
    lower: Literal["at_conversion", "post_conversion", "none"],
    opset: onnxscript.values.Opset,
) -> None:
    """Record a placeholder (graph input) node as a SymbolicTensor.

    The created tensor is only stored in ``node_name_to_values``; it is added
    to the graph's inputs later by the caller.
    """
    # Placeholder nodes are user inputs
    # We need to create a new tensor for each user input
    # and add it to the graph's inputs
    name = node.name
    input_ = _tensors.SymbolicTensor(opset, name=name)
    input_.meta["node"] = node
    # Complex inputs are only rewritten to real tensors when lowering happens.
    _set_shape_type(input_, node.meta["val"], complex_to_float=lower != "none")
    node_name_to_values[name] = input_
    # The inputs will be added to the graph later
528
+
529
+
530
def _add_nodes(
    exported_program: torch.export.ExportedProgram,
    model: ir.Model,
    lower: Literal["at_conversion", "post_conversion", "none"],
    registry: _registration.ONNXRegistry,
) -> dict[str, ir.Value | Sequence[ir.Value]]:
    """Translate every node of the exported program into ``model``'s graph.

    Args:
        exported_program: The torch.export program whose FX graph is walked.
        model: The ONNX model being built; nodes are appended to its graph.
        lower: "at_conversion" lowers ATen ops to ONNX during translation;
            any other value keeps them as custom "pkg.torch.ops" nodes.
        registry: Op registry used for dispatch when lowering.

    Returns:
        Mapping from FX node name to the ir.Value(s) it produced.

    Raises:
        _errors.ConversionError: Wrapping any failure while translating a node.
    """
    node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] = {}
    # Cache of constant values so identical constants are emitted once.
    constant_farm: dict[Any, ir.Value] = {}
    opset = _get_onnxscript_opset(registry.opset_version)
    for node in exported_program.graph.nodes:
        logger.debug(
            "%s", (node.name, node.args, node.target, node.op, node.type, node.kwargs)
        )
        try:
            if node.op == "placeholder":
                _handle_placeholder_node(
                    node,
                    node_name_to_values,
                    lower=lower,
                    opset=opset,
                )
            elif node.op == "call_function":
                if lower == "at_conversion":
                    _handle_call_function_node_with_lowering(
                        model,
                        node,
                        node_name_to_values,
                        constant_farm,
                        registry=registry,
                        opset=opset,
                    )
                else:
                    # No lowering
                    _handle_call_function_node(model.graph, node, node_name_to_values)
        except Exception as e:
            raise _errors.ConversionError(
                f"Error when translating node {node.format_node()}. See the stack trace for more information."
            ) from e
    return node_name_to_values
569
+
570
+
571
+ def _torch_version_integer() -> int:
572
+ return int(torch.__version__.replace(".", "").split("dev")[0])
573
+
574
+
575
+ def _get_inputs_and_attributes(
576
+ node: torch.fx.Node,
577
+ ) -> tuple[list[torch.fx.Node | None], dict[str, Any], list[str], list[str]]:
578
+ """Find and Fill in the not provided kwargs with default values.
579
+
580
+ Returns:
581
+ (inputs, attributes, input_names, output_names)
582
+ """
583
+ if inspect.isbuiltin(node.target) or isinstance(node.target, str):
584
+ inputs = list(node.args)
585
+ return inputs, {}, [], [node.name] # type: ignore[return-value]
586
+
587
+ # The target should be an ATen operator now
588
+ assert hasattr(
589
+ node.target, "_schema"
590
+ ), f"The target should be an ATen operator now, but node target {node.target} has no schema"
591
+ node_schema: torch.FunctionSchema = node.target._schema
592
+
593
+ # This function assumes the order of arguments in FX op is the
594
+ # same as the order of arguments in TorchScript op.
595
+ inputs: list[Any] = [] # type: ignore[no-redef]
596
+ input_names: list[str] = []
597
+ attributes: dict[str, Any] = {}
598
+
599
+ if inspect.isbuiltin(node.target):
600
+ inputs = list(node.args)
601
+ else:
602
+ for arg, schema_arg in zip(node.args, node_schema.arguments):
603
+ if arg is None or isinstance(arg, torch.fx.Node):
604
+ inputs.append(arg)
605
+ input_names.append(schema_arg.name)
606
+ elif isinstance(arg, Sequence) and all(
607
+ elem is None or isinstance(elem, torch.fx.Node) for elem in arg
608
+ ):
609
+ inputs.extend(arg)
610
+ input_names.extend([schema_arg.name] * len(arg))
611
+ elif isinstance(arg, torch.device):
612
+ attributes[schema_arg.name] = str(arg)
613
+ elif isinstance(arg, torch.dtype):
614
+ attributes[schema_arg.name] = _torch_dtype_to_onnx_dtype(arg)
615
+ else:
616
+ attributes[schema_arg.name] = arg
617
+ for schema_arg in node_schema.arguments:
618
+ if schema_arg.name not in node.kwargs:
619
+ continue
620
+ kwarg = node.kwargs[schema_arg.name]
621
+ if schema_arg.name in {
622
+ "layout",
623
+ "device",
624
+ "requires_grad",
625
+ "memory_format",
626
+ "implicit",
627
+ } or isinstance(kwarg, torch.device):
628
+ attr = str(kwarg)
629
+ elif isinstance(kwarg, torch.dtype):
630
+ attr = _torch_dtype_to_onnx_dtype(kwarg) # type: ignore[assignment]
631
+ else:
632
+ attr = kwarg # type: ignore[assignment]
633
+
634
+ attributes[schema_arg.name] = attr
635
+
636
+ output_names = [f"{node.name}_{output.name}" for output in node_schema.returns]
637
+
638
+ return inputs, attributes, input_names, output_names # type: ignore[return-value]
639
+
640
+
641
+ def _maybe_start_profiler(should_profile: bool) -> Any:
642
+ if should_profile:
643
+ import pyinstrument # type: ignore[import-not-found]
644
+
645
+ profiler = pyinstrument.Profiler(async_mode="disabled")
646
+ profiler.start()
647
+ return profiler
648
+ return None
649
+
650
+
651
+ def _maybe_stop_profiler_and_get_result(profiler) -> str | None:
652
+ if profiler is None:
653
+ return None
654
+ profiler.stop()
655
+ return profiler.output_text(unicode=True)
656
+
657
+
658
+ def _format_exception(e: Exception) -> str:
659
+ """Format the full traceback as Python would show it."""
660
+ return "\n".join(traceback.format_exception(type(e), e, e.__traceback__))
661
+
662
+
663
+ def _summarize_exception_stack(e: BaseException) -> str:
664
+ """Format the exception stack by showing the text of each exception."""
665
+ causes = [e]
666
+ while e.__cause__ is not None:
667
+ causes.append(e.__cause__)
668
+ e = e.__cause__
669
+ return (
670
+ "\n\n## Exception summary\n\n"
671
+ + "⬆️\n".join([f"{type(e)}: {e}\n" for e in reversed(causes)])
672
+ + "\n(Refer to the full stack trace above for more information.)"
673
+ )
674
+
675
+
676
+ def _format_exceptions_for_all_strategies(
677
+ results: list[_capture_strategies.Result],
678
+ ) -> str:
679
+ """Format all the exceptions from the capture strategies."""
680
+ return "\n".join(
681
+ [
682
+ f"# ⚠️ Errors from strategy '{result.strategy}': -----------------------\n\n"
683
+ f"{_format_exception(result.exception)}\n"
684
+ for result in results
685
+ if result.exception is not None
686
+ ]
687
+ )
688
+
689
+
690
+ def exported_program_to_ir(
691
+ exported_program: torch.export.ExportedProgram,
692
+ *,
693
+ registry: _registration.ONNXRegistry | None = None,
694
+ lower: Literal["at_conversion", "post_conversion", "none"] = "at_conversion",
695
+ ) -> ir.Model:
696
+ """Convert an exported program to an ONNX IR model.
697
+
698
+ Reference:
699
+ - ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html
700
+
701
+ Args:
702
+ exported_program: The exported program to convert.
703
+ lower: Whether to lower the graph to core ONNX operators.
704
+ at_conversion: Lower whe translating the FX graph to ONNX IR.
705
+ post_conversion: Use an IR pass to lower the graph.
706
+ none: Do not lower the graph.
707
+ registry: The registry of all ONNX Script decomposition.
708
+ """
709
+ if registry is None:
710
+ registry = _registration.ONNXRegistry.from_torchlib()
711
+ if lower != "none":
712
+ exported_program = _prepare_exported_program_for_export(
713
+ exported_program, registry=registry
714
+ )
715
+ return _exported_program_to_onnx_program(
716
+ exported_program, registry=registry, lower=lower
717
+ ).model
718
+
719
+
720
+ def _prepare_exported_program_for_export(
721
+ exported_program: torch.export.ExportedProgram,
722
+ *,
723
+ registry: _registration.ONNXRegistry,
724
+ ) -> torch.export.ExportedProgram:
725
+ """Decompose and apply pre-export transformations to the exported program."""
726
+ # Decompose the graph given the implemented torch ops in ONNX
727
+ exported_program = _fx_passes.decompose_with_registry(exported_program, registry)
728
+
729
+ graph_module = exported_program.graph_module
730
+ # Include explicit type promotion nodes
731
+ graph_module = _fx_passes.insert_type_promotion_nodes(graph_module)
732
+ graph_module = _fx_passes.remove_assertion_nodes(graph_module)
733
+ # TODO(justinchuby): Reassigning the graph module to save some runtime.
734
+ # If this does not work, we need to retrace the module with torch.export
735
+ exported_program._graph_module = graph_module
736
+ return exported_program
737
+
738
+
739
+ def _exported_program_to_onnx_program(
740
+ exported_program: torch.export.ExportedProgram,
741
+ *,
742
+ registry: _registration.ONNXRegistry,
743
+ lower: Literal["at_conversion", "post_conversion", "none"] = "at_conversion",
744
+ ) -> _onnx_program.ONNXProgram:
745
+ """Convert an exported program to an ONNX Program.
746
+
747
+ The exported_program field in the returned ONNXProgram is one that is after
748
+ decompositions have been applied.
749
+
750
+ Reference:
751
+ - ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html
752
+
753
+ Args:
754
+ exported_program: The exported program to convert. The exported program
755
+ should be the one that is after decompositions have been applied.
756
+ lower: Whether to lower the graph to core ONNX operators.
757
+ at_conversion: Lower whe translating the FX graph to ONNX IR.
758
+ post_conversion: Use an IR pass to lower the graph.
759
+ none: Do not lower the graph.
760
+ registry: The registry of all ONNX Script decomposition.
761
+ """
762
+ model = ir.Model(
763
+ graph=ir.Graph(
764
+ [],
765
+ [],
766
+ nodes=[],
767
+ opset_imports={
768
+ "": registry.opset_version,
769
+ },
770
+ name="main_graph",
771
+ metadata_props={
772
+ "pkg.torch.export.ExportedProgram.graph_signature": str(
773
+ exported_program.graph_signature
774
+ ),
775
+ "pkg.torch.export.ExportedProgram.range_constraints": str(
776
+ exported_program.range_constraints
777
+ ),
778
+ },
779
+ ),
780
+ ir_version=9,
781
+ producer_name="pytorch",
782
+ producer_version=torch.__version__,
783
+ )
784
+
785
+ if lower == "none":
786
+ # Add the opset import for the torch ops
787
+ model.opset_imports["pkg.torch.ops"] = _torch_version_integer()
788
+ # NOTE: Function domains are added when translating nodes when lower="at_conversion"
789
+
790
+ # 1. Add all nodes to the graph and create a dictionary of values
791
+ values = _add_nodes(exported_program, model, lower=lower, registry=registry)
792
+
793
+ # 2. Add user inputs and all parameters/buffers to the graph.
794
+ # Since the node names and the tensor names are different, we need to rename
795
+ # the nodes to match the tensor names later. For now we will just use the node names.
796
+ user_inputs = [
797
+ spec
798
+ for spec in exported_program.graph_signature.input_specs
799
+ if spec.kind == graph_signature.InputKind.USER_INPUT
800
+ ]
801
+ non_user_inputs = [
802
+ spec
803
+ for spec in exported_program.graph_signature.input_specs
804
+ if spec.kind != graph_signature.InputKind.USER_INPUT
805
+ ]
806
+
807
+ for spec in itertools.chain(user_inputs, non_user_inputs):
808
+ # Put the user inputs first and then the parameters/buffers
809
+ if isinstance(spec.arg, graph_signature.ConstantArgument):
810
+ logger.debug("Skipping constant argument %s", spec.arg)
811
+ continue
812
+ value_name = spec.arg.name
813
+ input_kind = spec.kind
814
+ persistent = spec.persistent
815
+ value = values[value_name]
816
+
817
+ assert not isinstance(
818
+ value, Sequence
819
+ ), f"Input '{value_name}' should not be a sequence. This is unexpected."
820
+
821
+ value.metadata_props["pkg.torch.export.graph_signature.InputSpec.kind"] = (
822
+ input_kind.name
823
+ )
824
+ value.metadata_props[
825
+ "pkg.torch.export.graph_signature.InputSpec.persistent"
826
+ ] = str(persistent)
827
+
828
+ if input_kind == graph_signature.InputKind.USER_INPUT:
829
+ # Add only user inputs to the graph
830
+ # Subsequent passes can decide if they want to add initializers as inputs
831
+ model.graph.inputs.append(value)
832
+ else:
833
+ model.graph.initializers[value_name] = value
834
+
835
+ # 3. Add user outputs to the graph and assign metadata to all outputs
836
+ user_outputs = [
837
+ spec
838
+ for spec in exported_program.graph_signature.output_specs
839
+ if spec.kind == graph_signature.OutputKind.USER_OUTPUT
840
+ ]
841
+ non_user_outputs = [
842
+ spec
843
+ for spec in exported_program.graph_signature.output_specs
844
+ if spec.kind != graph_signature.OutputKind.USER_OUTPUT
845
+ ]
846
+ for spec in itertools.chain(user_outputs, non_user_outputs):
847
+ if isinstance(spec.arg, graph_signature.ConstantArgument):
848
+ logger.warning("Skipping constant argument %s", spec.arg)
849
+ continue
850
+ value_name = spec.arg.name
851
+ output_kind = spec.kind
852
+ value = values[value_name]
853
+
854
+ if not isinstance(value, (ir.Value, Sequence)):
855
+ raise TypeError(
856
+ f"Output '{value_name}' should be an ir.Value. Actual type is '{type(value)}': {value!r}. "
857
+ "This may be due to an incorrect implementation of the ONNX function that produced this output."
858
+ )
859
+
860
+ # The output value may be a sequence, meaning the operator has multiple outputs
861
+ _values = (value,) if not isinstance(value, Sequence) else value
862
+
863
+ if len(_values) > 1:
864
+ logger.warning(
865
+ "Model output '%s' has multiple values: %s (output spec: %s). Please make sure this is expected.",
866
+ value_name,
867
+ _values,
868
+ spec,
869
+ )
870
+
871
+ for value in _values:
872
+ value.metadata_props["pkg.torch.export.graph_signature.OutputSpec.kind"] = (
873
+ output_kind.name
874
+ )
875
+ if output_kind == graph_signature.OutputKind.USER_OUTPUT:
876
+ model.graph.outputs.append(value)
877
+
878
+ # 4. Rename the initializers to match the tensor names
879
+ for name, param_name in itertools.chain(
880
+ exported_program.graph_signature.inputs_to_parameters.items(),
881
+ exported_program.graph_signature.inputs_to_buffers.items(),
882
+ exported_program.graph_signature.inputs_to_lifted_tensor_constants.items(),
883
+ ):
884
+ initializer = model.graph.initializers.pop(name)
885
+ initializer.name = param_name
886
+ # Record the original name so users can search the metadata and correspond
887
+ # with the FX graph
888
+ initializer.metadata_props["pkg.torch.onnx.original_node_name"] = name
889
+ model.graph.initializers[param_name] = initializer
890
+
891
+ # 5. Add initializers to the graph
892
+ # ExportedProgram stores parameters and buffers in state_dict,
893
+ # but non_persistent_buffers and lifted_tensor_constants are not there
894
+ # so we need to get them from the name_* apis.
895
+ for name, torch_tensor in itertools.chain(
896
+ exported_program.named_parameters(),
897
+ exported_program.named_buffers(),
898
+ exported_program.constants.items(),
899
+ ):
900
+ initializer = model.graph.initializers.get(name) # type: ignore[assignment]
901
+ if initializer is None:
902
+ logger.warning("Tensor '%s' is not one of the initializers", name)
903
+ continue
904
+ if not isinstance(torch_tensor, torch.Tensor):
905
+ raise NotImplementedError(
906
+ f"Tensor '{name}' should be a torch.Tensor. Actual type is '{type(torch_tensor)}': {torch_tensor!r}. "
907
+ "This is unexpected and not yet supported."
908
+ )
909
+ ir_tensor = TorchTensor(torch_tensor, name=name)
910
+ initializer.const_value = ir_tensor
911
+ _set_shape_type(
912
+ initializer,
913
+ torch_tensor,
914
+ complex_to_float=lower != "none",
915
+ )
916
+
917
+ # TODO: Decide if we should keep mutated buffers as inputs/outputs
918
+
919
+ # TODO(justinchuby): Remove the hack
920
+ _ir_passes.add_torchlib_common_imports(model)
921
+
922
+ return _onnx_program.ONNXProgram(model, exported_program)
923
+
924
+
925
+ def _verbose_printer(verbose: bool | None) -> Callable[..., None]:
926
+ """Prints messages based on `verbose`."""
927
+ if verbose is False:
928
+ return lambda *_, **__: None
929
+ return lambda *args, **kwargs: print("[torch.onnx]", *args, **kwargs)
930
+
931
+
932
+ def export(
933
+ model: torch.nn.Module
934
+ | torch.export.ExportedProgram
935
+ | torch.fx.GraphModule
936
+ | torch.jit.ScriptModule
937
+ | torch.jit.ScriptFunction,
938
+ args: tuple[Any, ...] = (),
939
+ kwargs: dict[str, Any] | None = None,
940
+ *,
941
+ registry: _registration.ONNXRegistry | None = None,
942
+ dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None,
943
+ input_names: Sequence[str] | None = None,
944
+ output_names: Sequence[str] | None = None,
945
+ report: bool = False,
946
+ verify: bool = False,
947
+ profile: bool = False,
948
+ dump_exported_program: bool = False,
949
+ artifacts_dir: str | os.PathLike = ".",
950
+ verbose: bool | None = None,
951
+ ) -> _onnx_program.ONNXProgram:
952
+ """Export a PyTorch model to ONNXProgram.
953
+
954
+ Args:
955
+ model: The model to export. This can be a PyTorch nn.Module or an ExportedProgram.
956
+ args: The arguments to pass to the model.
957
+ kwargs: The keyword arguments to pass to the model.
958
+ registry: The registry of all ONNX decompositions.
959
+ dynamic_shapes: Dynamic shapes in the graph.
960
+ input_names: If provided, rename the inputs.
961
+ output_names: If provided, rename the outputs.
962
+ report: Whether to generate an error report if the export fails.
963
+ verify: Whether to verify the ONNX model after exporting.
964
+ profile: Whether to profile the export process. When report is True,
965
+ the profile result will be saved in the report. Otherwise, the profile
966
+ result will be printed.
967
+ dump_exported_program: Whether to save the exported program to a file.
968
+ artifacts_dir: The directory to save the exported program and error reports.
969
+ verbose: Whether to print verbose messages. If None (default), some messages will be printed.
970
+
971
+ Returns:
972
+ The ONNXProgram with the exported IR graph.
973
+
974
+ Raises:
975
+ TorchExportError: If the export process fails with torch.export.
976
+ ConversionError: If the ExportedProgram to ONNX translation fails.
977
+ """
978
+ # Set up the error reporting facilities
979
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
980
+ profiler = _maybe_start_profiler(profile)
981
+
982
+ # Create the artifacts directory if it does not exist
983
+ artifacts_dir = pathlib.Path(artifacts_dir)
984
+ if report or profile or dump_exported_program:
985
+ artifacts_dir.mkdir(parents=True, exist_ok=True)
986
+
987
+ verbose_print = _verbose_printer(verbose)
988
+ export_status = _reporting.ExportStatus()
989
+ failed_results: list[_capture_strategies.Result] = []
990
+
991
+ program: torch.export.ExportedProgram | None = None
992
+ # Step 1: Export the model with torch.export.export if the model is not already an ExportedProgram
993
+ if isinstance(model, torch.export.ExportedProgram):
994
+ # We know the model is already exported program, so the args, kwargs, and dynamic_shapes
995
+ # are not used.
996
+ program = model
997
+ export_status.torch_export = True
998
+ else:
999
+ # Convert an nn.Module to an ExportedProgram
1000
+ # Try everything 🐰 (all paths for getting an ExportedProgram)
1001
+ # When input is a JIT module, the last strategy will succeed so it is handled
1002
+ result: _capture_strategies.Result | None = None
1003
+ for strategy_class in _capture_strategies.CAPTURE_STRATEGIES:
1004
+ strategy = strategy_class( # type: ignore[abstract]
1005
+ verbose=verbose is not False, # Treat None as verbose
1006
+ dump=dump_exported_program,
1007
+ artifacts_dir=artifacts_dir,
1008
+ timestamp=timestamp,
1009
+ )
1010
+ result = strategy(model, args, kwargs, dynamic_shapes=dynamic_shapes)
1011
+
1012
+ # Record the status
1013
+ if strategy_class is _capture_strategies.TorchExportStrategy:
1014
+ export_status.torch_export = result.success
1015
+ elif strategy_class is _capture_strategies.TorchExportNonStrictStrategy:
1016
+ export_status.torch_export_non_strict = result.success
1017
+ elif strategy_class is _capture_strategies.JitTraceConvertStrategy:
1018
+ export_status.torch_jit = result.success
1019
+
1020
+ if result.exported_program is not None:
1021
+ program = result.exported_program
1022
+ break
1023
+ else:
1024
+ failed_results.append(result)
1025
+
1026
+ assert result is not None
1027
+ if result.exported_program is None:
1028
+ # If all strategies fail, produce an error report and raise the first error
1029
+ profile_result = _maybe_stop_profiler_and_get_result(profiler)
1030
+
1031
+ if report:
1032
+ report_path = artifacts_dir / _reporting.construct_report_file_name(
1033
+ timestamp, export_status
1034
+ )
1035
+
1036
+ try:
1037
+ _reporting.create_torch_export_error_report(
1038
+ report_path,
1039
+ _format_exceptions_for_all_strategies(failed_results),
1040
+ export_status=export_status,
1041
+ profile_result=profile_result,
1042
+ )
1043
+ except Exception as e_report:
1044
+ verbose_print(
1045
+ f"Failed to save error report due to an error: {e_report}"
1046
+ )
1047
+ else:
1048
+ report_path = None
1049
+
1050
+ first_error = failed_results[0].exception
1051
+ assert first_error is not None
1052
+
1053
+ # NOTE: We only throw the torch.export (first) exception because we want to
1054
+ # focus on the torch.export.export error. Errors from other strategies like
1055
+ # torch.jit.trace is due to the fallback and can be confusing to users.
1056
+ # We save all errors in the error report.
1057
+ raise _errors.TorchExportError(
1058
+ _STEP_ONE_ERROR_MESSAGE
1059
+ + (
1060
+ f"\nError report has been saved to '{report_path}'."
1061
+ if report
1062
+ else ""
1063
+ )
1064
+ + _summarize_exception_stack(first_error)
1065
+ ) from first_error
1066
+
1067
+ assert program is not None
1068
+
1069
+ if dump_exported_program:
1070
+ verbose_print("Dumping ExportedProgram because `dump_exported_program=True`...")
1071
+ program_path = artifacts_dir / f"onnx_export_{timestamp}.pt2"
1072
+ try:
1073
+ torch.export.save(program, program_path)
1074
+ except Exception as e:
1075
+ verbose_print(f"Failed to save ExportedProgram due to an error: {e}")
1076
+ else:
1077
+ verbose_print(f"ExportedProgram has been saved to '{program_path}'.")
1078
+
1079
+ # Step 2: Convert the exported program to an ONNX model
1080
+ verbose_print("Translate the graph into ONNX...")
1081
+
1082
+ # Step 2a: Decompose the exported program and insert type promotion nodes
1083
+ try:
1084
+ # Build the ONNX function registry
1085
+ if registry is None:
1086
+ registry = _registration.ONNXRegistry.from_torchlib()
1087
+
1088
+ # Process the exported program to run decompositions and type promotions etc.
1089
+ decomposed_program = _prepare_exported_program_for_export(
1090
+ program, registry=registry
1091
+ )
1092
+ except Exception as e:
1093
+ export_status.onnx_translation = False
1094
+ verbose_print("Translate the graph into ONNX... ❌")
1095
+ profile_result = _maybe_stop_profiler_and_get_result(profiler)
1096
+
1097
+ if report:
1098
+ report_path = artifacts_dir / _reporting.construct_report_file_name(
1099
+ timestamp, export_status
1100
+ )
1101
+
1102
+ # Run the analysis to get the error report
1103
+ try:
1104
+ _reporting.create_onnx_export_report(
1105
+ report_path,
1106
+ f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}",
1107
+ program,
1108
+ export_status=export_status,
1109
+ profile_result=profile_result,
1110
+ registry=registry,
1111
+ )
1112
+ except Exception:
1113
+ logger.exception("Failed to save report due to an error.")
1114
+ else:
1115
+ report_path = None
1116
+
1117
+ raise _errors.ConversionError(
1118
+ _STEP_TWO_ERROR_MESSAGE
1119
+ + (f"\nError report has been saved to '{report_path}'." if report else "")
1120
+ + _summarize_exception_stack(e)
1121
+ ) from e
1122
+
1123
+ # Step 2b: Translate the decomposed program to ONNX and produce ONNXProgram
1124
+ if report or profile:
1125
+ pre_decomp_unique_ops, post_decomp_unique_ops = _analysis.compare_ops(
1126
+ program, decomposed_program
1127
+ )
1128
+ else:
1129
+ pre_decomp_unique_ops = None
1130
+ post_decomp_unique_ops = None
1131
+
1132
+ try:
1133
+ # Convert the exported program to an ONNX model
1134
+ onnx_program = _exported_program_to_onnx_program(
1135
+ decomposed_program, registry=registry
1136
+ )
1137
+
1138
+ # Run the ONNX passes
1139
+ if input_names:
1140
+ _ir_passes.rename_inputs(onnx_program.model, input_names)
1141
+ if output_names:
1142
+ _ir_passes.rename_outputs(onnx_program.model, output_names)
1143
+
1144
+ # TODO(justinchuby): Remove the hack
1145
+ _ir_passes.add_torchlib_common_imports(onnx_program.model)
1146
+
1147
+ export_status.onnx_translation = True
1148
+ verbose_print("Translate the graph into ONNX... ✅")
1149
+ except Exception as e:
1150
+ export_status.onnx_translation = False
1151
+ verbose_print("Translate the graph into ONNX... ❌")
1152
+ profile_result = _maybe_stop_profiler_and_get_result(profiler)
1153
+
1154
+ if report:
1155
+ report_path = artifacts_dir / _reporting.construct_report_file_name(
1156
+ timestamp, export_status
1157
+ )
1158
+
1159
+ try:
1160
+ assert pre_decomp_unique_ops is not None
1161
+ assert post_decomp_unique_ops is not None
1162
+
1163
+ # Run the analysis to get the error report
1164
+ _reporting.create_onnx_export_report(
1165
+ report_path,
1166
+ f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}",
1167
+ program,
1168
+ decomp_comparison=_reporting.format_decomp_comparison(
1169
+ pre_decomp_unique_ops, post_decomp_unique_ops
1170
+ ),
1171
+ export_status=export_status,
1172
+ profile_result=profile_result,
1173
+ registry=registry,
1174
+ )
1175
+ verbose_print(f"Export report has been saved to '{report_path}'.")
1176
+ except Exception:
1177
+ logger.exception("Failed to save report due to an error.")
1178
+ else:
1179
+ report_path = None
1180
+
1181
+ raise _errors.ConversionError(
1182
+ _STEP_TWO_ERROR_MESSAGE
1183
+ + (f"\nError report has been saved to '{report_path}'." if report else "")
1184
+ + _summarize_exception_stack(e)
1185
+ ) from e
1186
+
1187
+ profile_result = _maybe_stop_profiler_and_get_result(profiler)
1188
+
1189
+ assert onnx_program.exported_program is not None
1190
+
1191
+ if not verify:
1192
+ # Return if verification is not requested
1193
+ if report:
1194
+ try:
1195
+ assert pre_decomp_unique_ops is not None
1196
+ assert post_decomp_unique_ops is not None
1197
+ report_path = artifacts_dir / _reporting.construct_report_file_name(
1198
+ timestamp, export_status
1199
+ )
1200
+ _reporting.create_onnx_export_report(
1201
+ report_path,
1202
+ "No errors"
1203
+ if not failed_results
1204
+ else _format_exceptions_for_all_strategies(failed_results),
1205
+ onnx_program.exported_program,
1206
+ decomp_comparison=_reporting.format_decomp_comparison(
1207
+ pre_decomp_unique_ops, post_decomp_unique_ops
1208
+ ),
1209
+ export_status=export_status,
1210
+ profile_result=profile_result,
1211
+ model=onnx_program.model,
1212
+ registry=registry,
1213
+ )
1214
+ verbose_print(f"Export report has been saved to '{report_path}'.")
1215
+ except Exception:
1216
+ logger.exception("Failed to save report due to an error.")
1217
+ elif profile and profile_result is not None:
1218
+ verbose_print("Profile result:")
1219
+ verbose_print(profile_result)
1220
+ return onnx_program
1221
+
1222
+ # Step 3: (verify=True) Check the ONNX model with ONNX checker
1223
+ try:
1224
+ verbose_print("Check the ONNX model...")
1225
+ onnxscript_apis.check_model(onnx_program.model)
1226
+ export_status.onnx_checker = True
1227
+ verbose_print("Check the ONNX model... ✅")
1228
+ except Exception as e:
1229
+ export_status.onnx_checker = False
1230
+ verbose_print("Check the ONNX model... ❌")
1231
+ if report:
1232
+ try:
1233
+ assert pre_decomp_unique_ops is not None
1234
+ assert post_decomp_unique_ops is not None
1235
+ report_path = artifacts_dir / _reporting.construct_report_file_name(
1236
+ timestamp, export_status
1237
+ )
1238
+ _reporting.create_onnx_export_report(
1239
+ report_path,
1240
+ f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}",
1241
+ onnx_program.exported_program,
1242
+ decomp_comparison=_reporting.format_decomp_comparison(
1243
+ pre_decomp_unique_ops, post_decomp_unique_ops
1244
+ ),
1245
+ export_status=export_status,
1246
+ profile_result=profile_result,
1247
+ model=onnx_program.model,
1248
+ registry=registry,
1249
+ )
1250
+ verbose_print(f"Export report has been saved to '{report_path}'.")
1251
+ except Exception:
1252
+ logger.exception("Failed to save report due to an error.")
1253
+ logger.warning(
1254
+ "Conversion successful but the ONNX model fails ONNX checker. " # noqa: G004
1255
+ "Please create an issue "
1256
+ f"in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component and "
1257
+ "attach the full error stack as well as reproduction scripts. ",
1258
+ exc_info=e,
1259
+ )
1260
+ return onnx_program
1261
+
1262
+ # Step 4: (verify=True) Execute the model with ONNX Runtime
1263
+ try:
1264
+ verbose_print("Execute the model with ONNX Runtime...")
1265
+ verification_results = _verification.verify_onnx_program(onnx_program)
1266
+ verbose_print("Execute the model with ONNX Runtime... ✅")
1267
+ export_status.onnx_runtime = True
1268
+ onnx_runtime_error_message = None
1269
+ except Exception as e:
1270
+ verbose_print("Execute the model with ONNX Runtime... ❌")
1271
+ export_status.onnx_runtime = False
1272
+ onnx_runtime_error_message = _format_exception(e)
1273
+ verification_message = None
1274
+
1275
+ else:
1276
+ # Step 5: (verify=True) Validate the output values
1277
+ verbose_print("Verify output accuracy...")
1278
+ export_status.output_accuracy = True
1279
+ for verification_result in verification_results:
1280
+ # TODO(justinchuby): The threshold is arbitrary right now
1281
+ if verification_result.max_abs_diff >= 5e-3:
1282
+ logger.warning(
1283
+ "Output '%s' has a large absolute difference of %f. ",
1284
+ verification_result.name,
1285
+ verification_result.max_abs_diff,
1286
+ )
1287
+ export_status.output_accuracy = False
1288
+ if verification_result.max_rel_diff >= 1e-1:
1289
+ logger.warning(
1290
+ "Output '%s' has a large relative difference of %f. ",
1291
+ verification_result.name,
1292
+ verification_result.max_rel_diff,
1293
+ )
1294
+ export_status.output_accuracy = False
1295
+ if export_status.output_accuracy:
1296
+ verbose_print("Verify output accuracy... ✅")
1297
+ else:
1298
+ verbose_print("Verify output accuracy... ❌")
1299
+ verification_message = _reporting.format_verification_infos(
1300
+ verification_results
1301
+ )
1302
+
1303
+ if report:
1304
+ try:
1305
+ assert pre_decomp_unique_ops is not None
1306
+ assert post_decomp_unique_ops is not None
1307
+
1308
+ traceback_lines = []
1309
+ if failed_results:
1310
+ traceback_lines.append(
1311
+ _format_exceptions_for_all_strategies(failed_results)
1312
+ )
1313
+ if onnx_runtime_error_message:
1314
+ traceback_lines.append("# ⚠️ ONNX Runtime error -----------------------")
1315
+ traceback_lines.append(onnx_runtime_error_message)
1316
+ if not traceback_lines:
1317
+ traceback_lines.append("No errors")
1318
+
1319
+ report_path = artifacts_dir / _reporting.construct_report_file_name(
1320
+ timestamp, export_status
1321
+ )
1322
+ _reporting.create_onnx_export_report(
1323
+ report_path,
1324
+ "\n\n".join(traceback_lines),
1325
+ onnx_program.exported_program,
1326
+ profile_result=profile_result,
1327
+ export_status=export_status,
1328
+ decomp_comparison=_reporting.format_decomp_comparison(
1329
+ pre_decomp_unique_ops, post_decomp_unique_ops
1330
+ ),
1331
+ model=onnx_program.model,
1332
+ registry=registry,
1333
+ verification_result=verification_message,
1334
+ )
1335
+ verbose_print(f"Export report has been saved to '{report_path}'.")
1336
+ except Exception:
1337
+ logger.exception("Failed to save report due to an error.")
1338
+
1339
+ # Release the inference session created during verification
1340
+ onnx_program.release()
1341
+ return onnx_program
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_decomp.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Build decomp table from PyTorch."""
2
+
3
+ # mypy: allow-untyped-defs
4
+ from __future__ import annotations
5
+
6
+ from typing import Callable, TYPE_CHECKING
7
+
8
+ import torch
9
+ import torch._ops
10
+
11
+
12
+ if TYPE_CHECKING:
13
+ from torch.onnx._internal.exporter import _registration
14
+
15
+
16
+ def get_onnx_implemented_overloads(
17
+ registry: _registration.ONNXRegistry,
18
+ ) -> list[torch._ops.OperatorBase]:
19
+ """
20
+ Creates a set of OperatorBase and Callable objects that represent ONNX-supported PyTorch operations.
21
+
22
+ Args:
23
+ registry: The ONNX registry for PyTorch.
24
+
25
+ Returns:
26
+ A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations.
27
+ """
28
+ registered_ops: list[torch._ops.OperatorBase] = []
29
+ for op_namespace in (torch.ops.aten, torch.ops.prims):
30
+ op_names = dir(op_namespace)
31
+ for op_name in op_names:
32
+ op_overload_packet = getattr(op_namespace, op_name)
33
+ if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket):
34
+ continue
35
+
36
+ for overload_name in op_overload_packet.overloads():
37
+ op_overload = getattr(op_overload_packet, overload_name)
38
+ if registry.is_registered(op_overload):
39
+ registered_ops.append(op_overload)
40
+ return registered_ops
41
+
42
+
43
+ def get_preserve_ops() -> set[torch._ops.OpOverload]:
44
+ """Return a set of CompositeImplicitAutograd ops that should be preserved."""
45
+ aten = torch.ops.aten
46
+ # NOTE: Keep this list sorted
47
+ # NOTE: Do _not_ retain aten.linear as its decomposition is addmm, which is Gemm and is preferable for accuracy
48
+ return {
49
+ aten._upsample_bilinear2d_aa.default,
50
+ aten._upsample_nearest_exact1d.vec,
51
+ aten._upsample_nearest_exact2d.vec,
52
+ aten._upsample_nearest_exact3d.vec,
53
+ aten.group_norm.default,
54
+ aten.instance_norm.default,
55
+ aten.upsample_bilinear2d.default,
56
+ aten.upsample_bilinear2d.vec,
57
+ aten.upsample_linear1d.default,
58
+ aten.upsample_linear1d.vec,
59
+ aten.upsample_nearest1d.default,
60
+ aten.upsample_nearest1d.vec,
61
+ aten.upsample_nearest2d.default,
62
+ aten.upsample_nearest2d.vec,
63
+ aten.upsample_nearest3d.default,
64
+ aten.upsample_nearest3d.vec,
65
+ aten.upsample_trilinear3d.default,
66
+ aten.upsample_trilinear3d.vec,
67
+ }
68
+
69
+
70
+ def create_onnx_friendly_decomposition_table(
71
+ onnx_registered_ops: set[torch._ops.OperatorBase],
72
+ ) -> dict[torch._ops.OperatorBase, Callable]:
73
+ """
74
+ This function creates a dictionary of op overloads and their decomposition functions
75
+ for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function,
76
+ its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's
77
+ built-in aten-to-aten decomposition.
78
+
79
+ Args:
80
+ onnx_registered_ops: All ops that have an ONNX decomposition implemented.
81
+
82
+ Returns:
83
+ Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding
84
+ decomposition functions.
85
+ """
86
+ decomposition_table: dict[torch._ops.OperatorBase, Callable] = {}
87
+
88
+ # NOTE: If we import torch._decomp, we will get RuntimeError: Only a single
89
+ # TORCH_LIBRARY can be used to register the namespace nvprims; please put all of your
90
+ # definitions in a single TORCH_LIBRARY block.
91
+ for op_overload, decomp_fn in torch._decomp.decomposition_table.items(): # type: ignore[attr-defined]
92
+ # Skip decomposition for op_overload as long as that op_overload has a corresponding ONNX
93
+ # symbolic function.
94
+ # NOTE: Do not skip torch._refs decomps. They are fine because otherwise the model is
95
+ # not exportable anyways.
96
+ if op_overload in onnx_registered_ops:
97
+ continue
98
+ decomposition_table[op_overload] = decomp_fn
99
+
100
+ return decomposition_table
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_dispatching.py ADDED
@@ -0,0 +1,362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+
4
+ import logging
5
+ from typing import Callable, Sequence
6
+
7
+ from onnxscript import ir
8
+
9
+ import torch
10
+ import torch.fx
11
+ from torch.onnx._internal.exporter import _registration, _schemas
12
+
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
# Define utilities to convert PyTorch data types so users do not need to specify manually
# Mapping from torch dtypes to the ONNX IR element type used during export.
# NOTE(review): complex dtypes map to the dtype of their real-valued components
# (complex64 -> FLOAT, complex128 -> DOUBLE) — presumably complex tensors are
# exported as real tensors; confirm against the conversion logic that uses this.
_TORCH_DTYPE_TO_ONNX_COMPATIBLE: dict[torch.dtype, ir.DataType] = {
    torch.bfloat16: ir.DataType.BFLOAT16,
    torch.bool: ir.DataType.BOOL,
    torch.complex128: ir.DataType.DOUBLE,  # real component dtype
    torch.complex64: ir.DataType.FLOAT,  # real component dtype
    torch.float16: ir.DataType.FLOAT16,
    torch.float32: ir.DataType.FLOAT,
    torch.float64: ir.DataType.DOUBLE,
    torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
    torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
    torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
    torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
    torch.int16: ir.DataType.INT16,
    torch.int32: ir.DataType.INT32,
    torch.int64: ir.DataType.INT64,
    torch.int8: ir.DataType.INT8,
    torch.uint8: ir.DataType.UINT8,
}
35
+
36
+
37
def _torch_dtype_to_onnx_compatible_dtype(dtype: torch.dtype) -> ir.DataType:
    """Look up the ONNX-compatible element type for *dtype* (KeyError if unmapped)."""
    return _TORCH_DTYPE_TO_ONNX_COMPATIBLE[dtype]
39
+
40
+
41
def _attribute_type_compatible_with_arg(
    attr: _schemas.AttributeParameter,
    value: ir.Value | int | float | bool | Sequence[int] | Sequence[float] | None,
) -> bool:
    """Check if the attribute type is compatible with the argument."""
    # NOTE: bool must be tested before int because bool is a subclass of int in Python.
    if isinstance(value, bool):
        return attr.type is ir.AttributeType.INT
    if isinstance(value, str):
        return attr.type is ir.AttributeType.STRING
    # A Python int is acceptable for FLOAT attributes too (implicit widening).
    if isinstance(value, int):
        return attr.type in {ir.AttributeType.INT, ir.AttributeType.FLOAT}
    if isinstance(value, float):
        return attr.type is ir.AttributeType.FLOAT
    # Complex values have no attribute representation.
    if isinstance(value, complex):
        return False
    if isinstance(value, Sequence):
        if attr.type is ir.AttributeType.INTS:
            return all(isinstance(i, int) for i in value)
        if attr.type is ir.AttributeType.FLOATS:
            # Ints are allowed inside a FLOATS attribute (implicit widening).
            return all(isinstance(i, (int, float)) for i in value)
        # A sequence with any other attribute type falls through to False below.
    # torch.dtype is encoded as its integer enum value.
    if isinstance(value, torch.dtype):
        return attr.type is ir.AttributeType.INT
    # Device / memory_format / layout are encoded as strings.
    if isinstance(value, (torch.device, torch.memory_format, torch.layout)):
        return attr.type is ir.AttributeType.STRING
    if value is None and not attr.required:
        # An optional attribute is not supplied
        return True
    return False
69
+
70
+
71
def _param_type_compatible_with_arg(
    param: _schemas.Parameter,
    value: ir.TypeProtocol
    | str
    | int
    | float
    | complex
    | Sequence[int]
    | Sequence[float]
    | None,
    assigned_types: dict[str, ir.TypeProtocol],
) -> bool:
    """Check whether *value* can bind to *param*'s type constraint.

    ``assigned_types`` records type-variable bindings made so far for the current
    overload attempt; this function MUTATES it when it binds a new type variable.
    """
    # Handle Python types first
    # NOTE: a bool that does not match a BOOL tensor type deliberately falls
    # through to the int branch below (bool is a subclass of int in Python).
    if isinstance(value, bool):  # noqa: SIM102
        if param.type_constraint.allowed_types & {ir.TensorType(ir.DataType.BOOL)}:
            return True
    if isinstance(value, int) and param.type_constraint.allowed_types & {
        ir.TensorType(ir.DataType.INT4),
        ir.TensorType(ir.DataType.INT8),
        ir.TensorType(ir.DataType.INT16),
        ir.TensorType(ir.DataType.INT32),
        ir.TensorType(ir.DataType.INT64),
        # Int inputs can be casted to a float too
        ir.TensorType(ir.DataType.FLOAT8E4M3FN),
        ir.TensorType(ir.DataType.FLOAT8E4M3FNUZ),
        ir.TensorType(ir.DataType.FLOAT8E5M2),
        ir.TensorType(ir.DataType.FLOAT8E5M2FNUZ),
        ir.TensorType(ir.DataType.FLOAT16),
        ir.TensorType(ir.DataType.FLOAT),
        ir.TensorType(ir.DataType.DOUBLE),
    }:
        return True
    if isinstance(value, float) and param.type_constraint.allowed_types & {
        ir.TensorType(ir.DataType.FLOAT8E4M3FN),
        ir.TensorType(ir.DataType.FLOAT8E4M3FNUZ),
        ir.TensorType(ir.DataType.FLOAT8E5M2),
        ir.TensorType(ir.DataType.FLOAT8E5M2FNUZ),
        ir.TensorType(ir.DataType.FLOAT16),
        ir.TensorType(ir.DataType.FLOAT),
        ir.TensorType(ir.DataType.DOUBLE),
    }:
        return True
    if isinstance(value, complex) and param.type_constraint.allowed_types & {
        ir.TensorType(ir.DataType.FLOAT),
        ir.TensorType(ir.DataType.DOUBLE),
        ir.TensorType(ir.DataType.COMPLEX64),
        ir.TensorType(ir.DataType.COMPLEX128),
    }:
        return True
    if isinstance(value, str):  # noqa: SIM102
        if param.type_constraint.allowed_types & {ir.TensorType(ir.DataType.STRING)}:
            return True
    if isinstance(value, (list, tuple)):
        if param.type_constraint.allowed_types & {
            ir.TensorType(ir.DataType.INT32),
            ir.TensorType(ir.DataType.INT64),
            ir.TensorType(ir.DataType.FLOAT),
            ir.TensorType(ir.DataType.DOUBLE),
            ir.SequenceType(ir.TensorType(ir.DataType.INT32)),
            ir.SequenceType(ir.TensorType(ir.DataType.INT64)),
            ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)),
            ir.SequenceType(ir.TensorType(ir.DataType.DOUBLE)),
        } and all(isinstance(i, (int)) for i in value):
            # We will just allow any fx node and trust that the overload handles it
            return True
        if param.type_constraint.allowed_types & {
            ir.TensorType(ir.DataType.FLOAT),
            ir.TensorType(ir.DataType.DOUBLE),
            ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)),
            ir.SequenceType(ir.TensorType(ir.DataType.DOUBLE)),
        } and all(isinstance(i, (int, float)) for i in value):
            # We will just allow any fx node and trust that the overload handles it
            return True
    if value is None and not param.required:
        # An optional parameter is not supplied
        return True

    # Anything that is not an IR type at this point cannot match a tensor parameter.
    if not isinstance(value, ir.TypeProtocol):
        return False

    # Then check tensor types
    if param.type_constraint.name in assigned_types:
        # If a typevar is already bound, check if the value has the same type
        assigned_type = assigned_types[param.type_constraint.name]
        return assigned_type == value
    # If the typevar is not bound, bind it to the value type
    if value in param.type_constraint.allowed_types:
        # TODO: Maybe just check dtype? Being more strict here for now
        assigned_types[param.type_constraint.name] = value
        return True
    return False
162
+
163
+
164
def _get_type_from_tensor(
    tensor: torch.Tensor
    | torch.SymBool
    | torch.SymInt
    | torch.SymFloat
    | Sequence[torch.Tensor],
) -> ir.TypeProtocol:
    """Derive the ONNX IR type for a tensor, a symbolic scalar, or a tensor sequence."""
    if isinstance(tensor, torch.Tensor):
        return ir.TensorType(_torch_dtype_to_onnx_compatible_dtype(tensor.dtype))

    # Symbolic scalars map to fixed ONNX element types.
    for sym_type, element_type in (
        (torch.SymBool, ir.DataType.BOOL),
        (torch.SymInt, ir.DataType.INT64),
        (torch.SymFloat, ir.DataType.FLOAT),
    ):
        if isinstance(tensor, sym_type):
            return ir.TensorType(element_type)

    # Otherwise treat the input as a sequence and type it after its first
    # non-None element; an all-None sequence gets an UNDEFINED element type.
    first = next((item for item in tensor if item is not None), None)
    if first is None:
        return ir.SequenceType(ir.TensorType(ir.DataType.UNDEFINED))
    return ir.SequenceType(
        ir.TensorType(_torch_dtype_to_onnx_compatible_dtype(first.dtype))
    )
187
+
188
+
189
+ def _get_first_tensor_in_node_list(
190
+ nodes: Sequence[torch.fx.Node | None],
191
+ ) -> torch.Tensor | None:
192
+ for node in nodes:
193
+ if (
194
+ node is not None
195
+ and "val" in node.meta
196
+ and isinstance(node.meta["val"], torch.Tensor)
197
+ ):
198
+ return node.meta["val"]
199
+ return None
200
+
201
+
202
+ def _get_named_fx_node_args(node: torch.fx.Node) -> dict[str, torch.fx.node.Argument]:
203
+ assert hasattr(node.target, "_schema")
204
+ torch_schema: torch.FunctionSchema = node.target._schema # type: ignore[union-attr]
205
+ node_args = {}
206
+ for arg, schema_arg in zip(node.args, torch_schema.arguments):
207
+ node_args[schema_arg.name] = arg
208
+
209
+ node_args.update(node.kwargs)
210
+ return node_args
211
+
212
+
213
def get_matching_overload(
    node: torch.fx.Node,
    overloads: Sequence[Callable],
) -> tuple[Callable | None, str]:
    """Get the overload that matches the node's arguments.

    Args:
        node: The node to match.
        overloads: The overloads to match against.

    Returns:
        A tuple containing the matched overload and a string describing the reason for failure or success.
    """
    if not hasattr(node.target, "_schema"):
        # FIXME(justinchuby): When the target is a builtin, we should instead
        # Match only the inputs positionally. Figure out how to do that as right
        # now we assume all inputs are named.
        return overloads[
            0
        ], "The node target does not have a schema. Return the first one."
    named_args = _get_named_fx_node_args(node)
    # FIXME: Handle when we don't know the names of the arguments
    schema_args: dict[str, torch.Argument] = {
        arg.name: arg
        for arg in node.target._schema.arguments  # type: ignore[union-attr]
    }
    failure_messages: list[str] = []
    # Try each overload in order; the first one whose every parameter accepts the
    # corresponding argument wins.
    for overload in overloads:
        # Type-variable bindings are tracked per overload attempt and mutated by
        # _param_type_compatible_with_arg as parameters are matched.
        assigned_types: dict[str, ir.TypeProtocol] = {}
        # Empty fail_reason after the param loop means every parameter matched.
        fail_reason = ""
        if not hasattr(overload, "signature"):
            # When an overload does not have a signature, we assume it is a custom op and should be matched
            return (
                overload,
                "The overload does not have a signature. Assuming it is a custom op and matching it.",
            )
        for param in overload.signature:
            if param.name not in schema_args and param.required:
                # We don't need to handle variadic inputs as there is none.
                # A required parameter is not supplied.
                fail_reason = "Required parameter not supplied"
                break

            # Get the argument: node args take precedence over the torch-schema
            # default, which takes precedence over the ONNX-op default.
            if param.name in named_args:
                # Provided in Node args
                arg = named_args[param.name]
            elif (
                param.name in schema_args
                and schema_args[param.name].has_default_value()
            ):
                # Provided in schema args
                arg = schema_args[param.name].default_value
            elif param.has_default():
                # Provided in the ONNX op definition
                arg = param.default
            else:
                fail_reason = "Parameter not provided"
                break

            if isinstance(param, _schemas.Parameter):
                # Normalize the argument into an IR type before compatibility checking.
                if isinstance(arg, torch.Tensor):
                    arg = _get_type_from_tensor(arg)  # type: ignore[assignment]
                if isinstance(arg, (list, tuple)) and any(
                    isinstance(t, torch.fx.Node) for t in arg
                ):
                    first_tensor = _get_first_tensor_in_node_list(arg)
                    assert first_tensor is not None
                    # FIXME: Handle symfloat here
                    arg = ir.SequenceType(_get_type_from_tensor(first_tensor))  # type: ignore[assignment]
                elif isinstance(arg, torch.fx.Node):
                    meta_val = arg.meta["val"]
                    arg = _get_type_from_tensor(meta_val)  # type: ignore[assignment]
                # TODO: Handle None attributes
                # FIXME: Handle symfloat etc.
                # Handle tensors and Python values
                if not _param_type_compatible_with_arg(param, arg, assigned_types):  # type: ignore[arg-type]
                    fail_reason = (
                        f"Parameter type not compatible with argument: param=`{param}`, "
                        f"assigned_types=`{assigned_types}`, arg=`{arg}`"
                    )
                    break
            elif isinstance(param, _schemas.AttributeParameter):
                if not _attribute_type_compatible_with_arg(param, arg):  # type: ignore[arg-type]
                    fail_reason = f"Attribute type not compatible with argument: param=`{param}`, arg=`{arg}`"
                    break
        if not fail_reason:
            return overload, "Successfully matched overload"
        else:
            # Remember why this overload failed so the final message can list
            # all rejection reasons for debugging.
            failure_messages.append(
                f"- Failed to match overload `{overload}`: {fail_reason}"
            )
    return (
        None,
        f"All overloads did not match the node `{node.format_node()}`.\n"
        + "\n".join(failure_messages),
    )
310
+
311
+
312
+ def _arg_has_complex_dtype(arg) -> bool:
313
+ """Check if the node has complex dtype recursively."""
314
+ if (
315
+ isinstance(arg, torch.fx.Node)
316
+ and "val" in arg.meta
317
+ and isinstance(arg.meta["val"], torch.Tensor)
318
+ and torch.is_complex(arg.meta["val"])
319
+ ):
320
+ return True
321
+ elif isinstance(arg, list):
322
+ return any(_arg_has_complex_dtype(item) for item in arg)
323
+ return False
324
+
325
+
326
def dispatch(
    node: torch.fx.Node, registry: _registration.ONNXRegistry
) -> tuple[Callable | None, str]:
    """Dispatch a node to an ONNX function based on the node's target and the ONNX registry.

    Args:
        node: The node to dispatch.
        registry: The ONNX registry to use for dispatching.

    Returns:
        A tuple containing the matched ONNX function and a string describing the reason for failure or success.
    """
    # TODO: Handle when node does not have a target
    candidates = registry.get_decomps(node.target)  # type: ignore[arg-type]
    # The node is complex-valued if any positional or keyword argument is.
    has_complex_input = any(
        _arg_has_complex_dtype(arg)
        for arg in (*node.args, *node.kwargs.values())
    )
    # Keep only the candidates whose complex-ness matches the inputs.
    if has_complex_input:
        candidates = [meta for meta in candidates if meta.is_complex]
        if not candidates:
            return None, "No decompositions registered for the complex-valued input"
    else:
        candidates = [meta for meta in candidates if not meta.is_complex]
        if not candidates:
            return None, "No decompositions registered for the real-valued input"

    if len(candidates) == 1:
        return (
            candidates[0].onnx_function,
            "Fast path: Only one decomposition is defined",
        )

    # Several candidates remain: pick by signature matching.
    return get_matching_overload(
        node, [meta.onnx_function for meta in candidates]
    )
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_errors.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Error classes for the ONNX exporter."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import torch.onnx.errors
6
+
7
+
8
class TorchExportError(torch.onnx.errors.OnnxExporterError):
    """Raised when graph capture via ``torch.export`` fails."""
10
+
11
+
12
class ConversionError(torch.onnx.errors.OnnxExporterError):
    """Raised when converting an ExportedProgram to ONNX fails."""
14
+
15
+
16
class DispatchError(ConversionError):
    """Raised when ONNX Function dispatching fails."""
18
+
19
+
20
class GraphConstructionError(ConversionError):
    """Raised when constructing the ONNX graph fails."""
janus/lib/python3.10/site-packages/torch/onnx/_internal/exporter/_ir_passes.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+
4
+ import logging
5
+ from typing import Sequence
6
+
7
+ from onnxscript import ir
8
+
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
def rename_inputs(model: ir.Model, new_names: Sequence[str]) -> None:
    """Rename graph inputs pairwise to *new_names*, stashing each old name.

    The previous name is recorded in the input's metadata under
    ``pkg.torch.onnx.original_node_name``. Extra names (or inputs) beyond the
    shorter of the two sequences are left untouched.
    """
    # TODO: Ensure the names do not have duplicates
    for graph_input, fresh_name in zip(model.graph.inputs, new_names):
        graph_input.metadata_props["pkg.torch.onnx.original_node_name"] = str(
            graph_input.name
        )
        graph_input.name = fresh_name
18
+
19
+
20
def rename_outputs(model: ir.Model, new_names: Sequence[str]) -> None:
    """Rename graph outputs pairwise to *new_names*, stashing each old name.

    The previous name is recorded in the output's metadata under
    ``pkg.torch.onnx.original_node_name``.
    """
    for graph_output, fresh_name in zip(model.graph.outputs, new_names):
        graph_output.metadata_props["pkg.torch.onnx.original_node_name"] = str(
            graph_output.name
        )
        graph_output.name = fresh_name
24
+
25
+
26
def add_torchlib_common_imports(model: ir.Model) -> None:
    """Hack to add torchlib common imports to the model.

    Registers the ``pkg.onnxscript.torch_lib.common`` opset and copies the
    ``Rank`` and ``IsScalar`` helper functions from onnxscript's torchlib into
    the model's function table so exported graphs can call them.
    """

    try:
        # TODO(justinchuby): Remove this hack and improved onnxscript
        # Imported lazily so the exporter still works when this torchlib
        # internal module is unavailable.
        from onnxscript.function_libs.torch_lib.ops import common as common_ops

        model.opset_imports["pkg.onnxscript.torch_lib.common"] = 1
        rank_func = ir.serde.deserialize_function(common_ops.Rank.to_function_proto())
        is_scalar_func = ir.serde.deserialize_function(
            common_ops.IsScalar.to_function_proto()
        )
        model.functions[rank_func.identifier()] = rank_func
        model.functions[is_scalar_func.identifier()] = is_scalar_func
    except Exception:
        # Deliberate best-effort: any failure is logged and swallowed so the
        # export itself is not aborted by this auxiliary step.
        logger.exception("Failed to add torchlib common imports to the model.")