ZTWHHH committed on
Commit e9e27a4 · verified · 1 Parent(s): 9fc5606

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__init__.py +7 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/ccl.py +177 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/config.py +33 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py +18 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc +0 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc +0 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py +30 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py +107 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py +5 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc +0 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc +0 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc +0 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc +0 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py +133 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py +124 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py +215 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py +166 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc +0 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py +26 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc +0 -0
  21. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc +0 -0
  22. infer_4_47_1/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc +0 -0
  23. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_exporter_states.py +12 -0
  24. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_flags.py +49 -0
  25. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py +211 -0
  26. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py +636 -0
  27. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__init__.py +34 -0
  28. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/_infra.py +285 -0
  29. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/context.py +404 -0
  30. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/decorator.py +153 -0
  31. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/formatter.py +106 -0
  32. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__init__.py +101 -0
  33. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_address.py +48 -0
  34. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_change.py +31 -0
  35. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_location.py +33 -0
  36. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_attachment.py +39 -0
  37. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_configuration_override.py +31 -0
  38. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge.py +31 -0
  39. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_references.py +86 -0
  40. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph_traversal.py +43 -0
  41. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location.py +50 -0
  42. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location_relationship.py +28 -0
  43. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_logical_location.py +39 -0
  44. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_message.py +33 -0
  45. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_multiformat_message_string.py +25 -0
  46. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_physical_location.py +40 -0
  47. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_rectangle.py +36 -0
  48. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_region.py +58 -0
  49. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_replacement.py +31 -0
  50. infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_configuration.py +33 -0
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__init__.py ADDED
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .utils import *
+from .comm import *
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/ccl.py ADDED
@@ -0,0 +1,177 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''
+Copyright 2021 The Microsoft DeepSpeed Team
+'''
+
+import torch
+from deepspeed.accelerator import get_accelerator
+from .reduce_op import ReduceOp
+from .torch import TorchBackend
+
+
+def build_ccl_op():
+    builder = get_accelerator().create_op_builder("CCLCommBuilder")
+    if builder is None:
+        return None
+    ccl_cpp_module = builder.load()
+    print(f'DeepSpeed {builder.absolute_name()} built successfully')
+    return ccl_cpp_module
+
+
+class CCLHandler():
+
+    def __init__(self, ccl_comm_op=None):
+        self.ccl_comm_op = ccl_comm_op
+
+    def wait(self):
+        # backend covered it
+        pass
+
+
+class CCLBackend(TorchBackend):
+
+    def __init__(self, name='ccl', rank=-1, world_size=-1, mpu=None, timeout=None, init_method=None):
+        self.ccl_comm_op = build_ccl_op()
+        if self.ccl_comm_op is None:
+            # set CCLBackend to uninitialized state if CCLCommBuilder cannot be loaded
+            self.initialized = False
+            return
+        super(CCLBackend, self).__init__(backend='ccl',
+                                         name='torch',
+                                         rank=rank,
+                                         world_size=world_size,
+                                         timeout=timeout,
+                                         init_method=init_method)
+        self.name = 'ccl'
+        size = self.get_world_size()
+        rank = self.get_rank()
+        main_kvs = self.ccl_comm_op.get_kvs_addr(rank)
+        main_kvs = torch.tensor(main_kvs).to(torch.uint8).to(get_accelerator().current_device_name())
+        super(CCLBackend, self).broadcast(main_kvs, 0)
+        self.ccl_comm_op.initialize(size, rank, main_kvs)
+        self.initialized = True
+        self.groups = [tuple(range(self.get_world_size()))]
+        self.available_coll = self.ccl_comm_op.get_available_coll()
+
+    def is_initialized(self):
+        return self.initialized
+
+    def run_collective(self, name, **kwargs):
+        if name in self.available_coll:
+            kwargs['group'] = self.get_all_ranks_from_group(kwargs['group'])
+            if 'dst' in kwargs:
+                kwargs['dst'] = kwargs['group'].index(kwargs['dst'])
+            if 'src' in kwargs:
+                kwargs['src'] = kwargs['group'].index(kwargs['src'])
+            func = "self.ccl_comm_op." + name
+            eval(func)(*(kwargs.values()))
+            return CCLHandler(self.ccl_comm_op)
+        else:
+            func = "super(CCLBackend, self)." + name
+            return eval(func)(*(kwargs.values()))
+
+    def all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False):
+        use_caching = False
+        if use_caching:
+            match_id = f"{tensor.size()}-{op}"
+            return self.run_collective(name="all_reduce_caching",
+                                       tensor=tensor,
+                                       op=op,
+                                       match_id=match_id,
+                                       group=group,
+                                       async_op=async_op)
+        else:
+            return self.run_collective(name="all_reduce", tensor=tensor, op=op, group=group, async_op=async_op)
+
+    def inference_all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False):
+        return self.run_collective(name="inference_all_reduce", tensor=tensor, op=op, group=group, async_op=async_op)
+
+    def broadcast(self, tensor, src, group=None, async_op=False):
+        return self.run_collective(name="broadcast", tensor=tensor, src=src, group=group, async_op=async_op)
+
+    def all_gather(self, tensor_list, tensor, group=None, async_op=False):
+        return self.run_collective(name="all_gather",
+                                   tensor_list=tensor_list,
+                                   tensor=tensor,
+                                   group=group,
+                                   async_op=async_op)
+
+    def reduce_scatter_tensor(self, output_tensor, input_tensor, op, group=None, async_op=False):
+        return self.run_collective(name="reduce_scatter_tensor",
+                                   output_tensor=output_tensor,
+                                   input_tensor=input_tensor,
+                                   op=op,
+                                   group=group)
+
+    def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
+        return self.run_collective(name="all_gather_into_tensor",
+                                   output_tensor=output_tensor,
+                                   input_tensor=input_tensor,
+                                   group=group)
+
+    def all_to_all_single(self, output, input, output_split_sizes, input_split_sizes, group=None, async_op=False):
+        return self.run_collective(name="all_to_all_single",
+                                   output=output,
+                                   input=input,
+                                   output_split_sizes=output_split_sizes,
+                                   input_split_sizes=input_split_sizes,
+                                   group=group)
+
+    def send(self, tensor, dst, group=None, async_op=False):
+        return self.run_collective(name="send", tensor=tensor, dst=dst, group=group, async_op=async_op)
+
+    def recv(self, tensor, src, group=None, async_op=False):
+        return self.run_collective(name="recv", tensor=tensor, src=src, group=group, async_op=async_op)
+
+    def gather(self, tensor, gather_list, dst, group=None, async_op=False):
+        return self.run_collective(name="gather", tensor=tensor, gather_list=gather_list, dst=dst, group=group)
+
+    def scatter(self, tensor, gather_list, dst, group=None, async_op=False):
+        return self.run_collective(name="scatter", tensor=tensor, gather_list=gather_list, dst=dst, group=group)
+
+    def barrier(self, group=None, async_op=False):
+        return self.run_collective(name="barrier", group=group, async_op=async_op)
+
+    def monitored_barrier(self, group=None, timeout=None, wait_all_ranks=False):
+        return self.run_collective(name="monitored_barrier", group=group)
+
+    def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
+        return self.run_collective(name="reduce_scatter",
+                                   output=output,
+                                   input_list=input_list,
+                                   op=op,
+                                   group=group,
+                                   async_op=async_op)
+
+    def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
+        return self.run_collective(name="reduce", tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
+
+    def new_group(self, ranks):
+        return super(CCLBackend, self).new_group(ranks)
+
+    def _new_group(self, ranks, group):
+        size = len(ranks)
+        rank = self.get_rank()
+        sub_main_kvs = self.ccl_comm_op.get_sub_kvs_addr(rank == ranks[0])
+        sub_main_kvs = torch.tensor(sub_main_kvs).to(torch.uint8).to(get_accelerator().current_device_name())
+        super(CCLBackend, self).broadcast(sub_main_kvs, ranks[0], group)
+        self.ccl_comm_op.initialize_sub_comm(size, ranks.index(rank), sub_main_kvs, ranks)
+        self.groups.append(tuple(ranks))
+
+    def get_all_ranks_from_group(self, group):
+        if group is None:
+            return list(range(self.get_world_size()))
+        rank = 0
+        results = []
+        try:
+            while True:
+                results.append(super(CCLBackend, self).get_global_rank(group, rank))
+                rank += 1
+        except ValueError:
+            pass
+        if tuple(results) not in self.groups:
+            self._new_group(results, group)
+        return results
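A note on run_collective above: it builds the call target as a string, dispatches with eval, and forwards kwargs positionally via kwargs.values(), so the keyword order must match the native signature. A self-contained sketch of the same name-based dispatch written with getattr instead (class and method names here are illustrative, not DeepSpeed API):

class _NativeOps:
    # stand-in for the loaded CCL extension module
    def all_reduce(self, tensor, op, group, async_op):
        print("native all_reduce on", tensor)

class _Dispatcher:
    def __init__(self):
        self.ops = _NativeOps()
        self.available = {"all_reduce"}

    def run(self, name, **kwargs):
        if name in self.available:
            # equivalent to eval("self.ops." + name)(*kwargs.values()),
            # but resolves the attribute directly and keeps keywords as keywords
            return getattr(self.ops, name)(**kwargs)
        raise NotImplementedError(name)

_Dispatcher().run("all_reduce", tensor=[1, 2], op=None, group=None, async_op=False)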
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/config.py ADDED
@@ -0,0 +1,33 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .constants import *
+from ..pydantic_v1 import BaseModel
+
+
+class CommsConfig(BaseModel):
+
+    class Config:
+        validate_all = True
+        validate_assignment = True
+        use_enum_values = True
+        extra = 'forbid'
+
+
+class CommsLoggerConfig(CommsConfig):
+    enabled: bool = COMMS_LOGGER_ENABLED_DEFAULT
+    prof_all: bool = COMMS_LOGGER_PROF_ALL_DEFAULT
+    prof_ops: list = COMMS_LOGGER_PROF_OPS_DEFAULT
+    verbose: bool = COMMS_LOGGER_VERBOSE_DEFAULT
+    debug: bool = COMMS_LOGGER_DEBUG_DEFAULT
+
+
+class DeepSpeedCommsConfig:
+
+    def __init__(self, ds_config):
+        self.comms_logger_enabled = 'comms_logger' in ds_config
+
+        if self.comms_logger_enabled:
+            self.comms_logger = CommsLoggerConfig(**ds_config['comms_logger'])
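For reference, a sketch of the config section DeepSpeedCommsConfig parses, using the classes above; field defaults come from .constants, and the prof_ops entries below are illustrative:

ds_config = {
    "comms_logger": {
        "enabled": True,
        "verbose": False,
        "prof_all": True,
        "prof_ops": ["all_reduce", "all_gather"],  # illustrative op names
        "debug": False,
    }
}
comms_config = DeepSpeedCommsConfig(ds_config)
assert comms_config.comms_logger_enabled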
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py ADDED
@@ -0,0 +1,18 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from enum import Enum
+
+
+class ReduceOp(Enum):
+    SUM = 0
+    PRODUCT = 1
+    MIN = 2
+    MAX = 3
+    BAND = 4
+    BOR = 5
+    BXOR = 6
+    AVG = 7
+    UNUSED = 8
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (247 Bytes).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc ADDED
Binary file (1.25 kB).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py ADDED
@@ -0,0 +1,30 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+
+
+class CheckpointEngine(object):
+
+    # init checkpoint engine for save/load
+    def __init__(self, config_params=None):
+        pass
+
+    def create(self, tag):
+        # create checkpoint on given tag for save/load.
+        pass
+
+    def makedirs(self, path, exist_ok=False):
+        os.makedirs(path, exist_ok=exist_ok)
+
+    def save(self, state_dict, path: str):
+        pass
+
+    def load(self, path: str, map_location=None):
+        pass
+
+    def commit(self, tag):
+        # tell checkpoint services that all files are ready.
+        pass
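CheckpointEngine above is a no-op interface; concrete engines override save/load/commit. A minimal sketch of a torch.save-based subclass (DeepSpeed ships a similar torch-based engine; this version is illustrative only):

import torch

class SimpleTorchCheckpointEngine(CheckpointEngine):

    def create(self, tag):
        pass  # nothing to prepare for plain local files

    def save(self, state_dict, path: str):
        torch.save(state_dict, path)

    def load(self, path: str, map_location=None):
        return torch.load(path, map_location=map_location)

    def commit(self, tag):
        return True  # local writes are complete once save() returns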
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py ADDED
@@ -0,0 +1,107 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+import torch
+import torch_nebula
+
+from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
+    CheckpointEngine
+from deepspeed.utils import logger, log_dist
+from deepspeed.nebula.constants import *
+
+
+def _get_tag_from_path(path):
+    return os.path.basename(os.path.dirname(path))
+
+
+class NebulaCheckpointEngine(CheckpointEngine):
+
+    def __init__(self, config_params=None):
+        super().__init__(config_params)
+        self.checkpoint = None
+        self.tag_flag = None
+        self.enable_nebula_load = config_params.enable_nebula_load
+        self.nebula_load_path = config_params.load_path
+        if self.nebula_load_path is None:
+            self.nebula_load_path = config_params.persistent_storage_path
+
+        nebula_config_params = {
+            NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path,
+            NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval,
+            NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention,
+        }
+        torch_nebula.init(**nebula_config_params)
+
+    def create(self, tag):
+        log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0])
+        # -2 means: customer needs to explicitly tell nebula
+        # current checkpoint is complete by commit method.
+        self.checkpoint = torch_nebula.Checkpoint(tag, -2)
+
+    def save(self, state_dict, path: str):
+        log_dist(f"[Nebula] Create dummy files for loading.")
+        torch.save("", path)
+
+        tag = _get_tag_from_path(path)
+        partition_name = os.path.basename(path)
+        logger.info(f"[Nebula] Saving {partition_name} under tag {tag}...")
+        self.checkpoint.save(partition_name, state_dict)
+        logger.info(f"[Nebula] Saved {partition_name} under tag {tag}.")
+        return None
+
+    def load(self, path: str, map_location=None):
+        tag = _get_tag_from_path(path)
+        first_load_flag = self.tag_flag is None or self.tag_flag == tag
+        if not self.enable_nebula_load and first_load_flag:
+            self.tag_flag = tag
+            logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...")
+            partition = torch.load(path, map_location=map_location)
+            logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .")
+            return partition
+
+        partition_name = os.path.basename(path)
+        logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...")
+
+        checkpoint = None
+        if tag in (None, 'latest', 'latest_universal'):
+            # In some cases the tag in the deepspeed metadata (latest file) is inconsistent
+            # with the nebula metadata, which makes loading with the deepspeed tag fail. We
+            # then try to load the latest valid checkpoint from nebula (tier3 > tier1). In
+            # summary, when loading fails for a given tag, the loading priority is:
+            # nebula tier3 latest > nebula tier1 latest.
+            checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
+        else:
+            checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path)
+
+        if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
+            logger.info(
+                f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!"
+            )
+            # nebula tier3 latest
+            checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
+            if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
+                logger.info(
+                    f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!"
+                )
+                # nebula tier1 latest
+                checkpoint = torch_nebula.get_latest_checkpoint()
+                logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.")
+                return None
+
+        tag = checkpoint.tag
+        self.tag_flag = -1
+        partition = checkpoint.load(partition_name, map_location=map_location)
+        logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.")
+        return partition
+
+    def commit(self, tag):
+        # nebula commit is called when all files under the given tag are ready to be persisted asynchronously.
+        logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting")
+        commit_rls = self.checkpoint.commit()
+        if not commit_rls:
+            logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.")
+            return False
+        return commit_rls
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''Copyright The Microsoft DeepSpeed Team'''
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (234 Bytes).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc ADDED
Binary file (3.64 kB).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc ADDED
Binary file (4.9 kB).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc ADDED
Binary file (4.39 kB).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py ADDED
@@ -0,0 +1,133 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+"""
+batched collective operations for overhead amortization and better
+bandwidth utilization
+"""
+
+import math
+from typing import List
+import torch
+from torch import Tensor
+from deepspeed import comm as dist
+# NOTE: Use torch.distributed's ProcessGroup class until we have our own.
+from torch.distributed import ProcessGroup, all_to_all_single
+from deepspeed.accelerator import get_accelerator
+from deepspeed.utils import instrument_w_nvtx
+from deepspeed.ops import op_builder
+
+
+def _torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group=None, async_op=False, prof=False):
+    return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, input_tensor, group=group, async_op=False)
+
+
+quantizer_module = None
+
+
+@instrument_w_nvtx
+@torch.no_grad()
+def all_to_all_quant_reduce(tensors: List[Tensor], groups: {}) -> List[Tensor]:
+    global quantizer_module
+    if quantizer_module is None:
+        quantizer_module = op_builder.QuantizerBuilder().load()
+    local_world_size = get_accelerator().device_count()
+    global_world_size = dist.get_world_size()
+    num_nodes = global_world_size // local_world_size
+    this_rank = dist.get_rank()
+    intra_idx = int(this_rank / local_world_size)
+    inter_idx = this_rank % local_world_size
+    output_lst: List[Tensor] = [None] * len(tensors)
+    for idx, tensor in enumerate(tensors):
+        if tensor.dim() == 1:
+            intra_quant_group = global_world_size
+            output_lst[idx] = reduce_scatter_coalesced([tensor])[0]
+            continue
+        else:
+            intra_quant_group = max(tensor.shape[0], tensor.shape[1], global_world_size)
+
+        inter_quant_group = intra_quant_group // local_world_size
+        intra_quant_int4, intra_q_scales = quantizer_module.swizzle_quant(tensor, intra_quant_group, 4,
+                                                                          quantizer_module.Symmetric, 1, num_nodes,
+                                                                          local_world_size)
+        local_output = torch.empty_like(intra_quant_int4)
+        scale_output = torch.empty_like(intra_q_scales)
+        all_to_all_single(local_output, intra_quant_int4, group=groups[f'local_{intra_idx}'])
+        all_to_all_single(scale_output, intra_q_scales, group=groups[f'local_{intra_idx}'])
+        global_input_tensor, global_scales = quantizer_module.quantized_reduction(
+            local_output, scale_output, intra_quant_group, inter_quant_group, 4, quantizer_module.Symmetric,
+            local_world_size)
+        global_output = torch.empty_like(global_input_tensor)
+        global_scale_output = torch.empty_like(global_scales)
+        all_to_all_single(global_output, global_input_tensor, group=groups[f'global_{inter_idx}'])
+        all_to_all_single(global_scale_output, global_scales, group=groups[f'global_{inter_idx}'])
+        final_output = quantizer_module.dequantize(global_output, global_scale_output, global_scale_output.numel(),
+                                                   4, quantizer_module.Symmetric)
+        output_lst[idx] = (sum(list(final_output.chunk(num_nodes))) / num_nodes).view(-1)
+    return output_lst
+
+
+@instrument_w_nvtx
+@torch.no_grad()
+def reduce_scatter_coalesced(
+    tensors: List[Tensor],
+    group: ProcessGroup = None,
+) -> List[Tensor]:
+    """simultaneously reduce-scatter a list of tensors - this can be done more
+    efficiently than individual reduce scatter calls
+    TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL
+    """
+    this_rank = dist.get_rank(group)
+    world_sz = dist.get_world_size(group)
+
+    partition_lst_for_each_tensor = [None] * len(tensors)
+    for tensor_idx, tensor in enumerate(tensors):
+        flattened_tensor = tensor.view(-1)
+        chunk_sz = math.ceil(tensor.numel() / world_sz)
+        partition_lst_for_each_tensor[tensor_idx] = [
+            flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz] for rank in range(0, world_sz)
+        ]
+
+    padded_partition_sz_for_each_tensor = tuple(math.ceil(t.numel() / world_sz) for t in tensors)
+
+    if len(tensors) == 1 and tensors[0].numel() % world_sz == 0:
+        # if there's only one tensor being reduced and we don't need to pad
+        # we have an opportunity to avoid a memory allocation
+        tensor_partition_flat_buffer = tensors[0].view(-1)
+    else:
+        # interleave tensor partitions such that the correct reduced partitions of each tensor
+        # end up at each rank
+        tensor_partitions_lst_with_padding = []
+        for rank in range(world_sz):
+            for tensor_idx in range(len(tensors)):
+                # add tensor content
+                tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank]
+                tensor_partitions_lst_with_padding.append(tensor_chunk)
+
+                # add padding if necessary
+                padding_sz = padded_partition_sz_for_each_tensor[tensor_idx] - tensor_chunk.numel()
+                if padding_sz > 0:
+                    tensor_partitions_lst_with_padding.append(
+                        torch.empty(padding_sz, dtype=tensor_chunk.dtype, device=tensor_chunk.device))
+
+        tensor_partition_flat_buffer = instrument_w_nvtx(torch.cat)(tensor_partitions_lst_with_padding)
+
+    tensor_partition_flat_buffer.div_(world_sz)  # pre-divide
+    tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(tensor_partition_flat_buffer, world_sz)
+
+    # batched reduce-scatter call
+    _torch_reduce_scatter_fn(tensor_partition_flat_buffer,
+                             tensor_partition_buffer_for_each_rank[this_rank],
+                             group=group)
+
+    # reverse procedure of the interleaving done previously, done on the
+    # result of the batched reduce-scatter
+    output_lst: List[Tensor] = [None] * len(tensors)
+    offset = 0
+    for tensor_idx in range(len(tensors)):
+        output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow(
+            0, offset, partition_lst_for_each_tensor[tensor_idx][this_rank].numel())
+
+        offset += padded_partition_sz_for_each_tensor[tensor_idx]
+    return output_lst
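The interleaving comments in reduce_scatter_coalesced are easier to see with concrete sizes. A small self-contained sketch (plain Python, illustrative sizes, no collectives involved) of the flat-buffer layout for world_sz = 2 and two tensors of 5 and 4 elements:

import math

world_sz = 2
sizes = [5, 4]                                    # numel of t0 and t1
chunk = [math.ceil(n / world_sz) for n in sizes]  # padded partition sizes [3, 2]

# flat buffer order: rank0's slice of t0, rank0's slice of t1,
# then rank1's slice of t0 (padded by 1), rank1's slice of t1
layout = []
for rank in range(world_sz):
    for t, n in enumerate(sizes):
        real = min(chunk[t], n - rank * chunk[t])              # elements actually present
        layout.append((rank, f"t{t}", real, chunk[t] - real))  # (rank, tensor, elems, pad)
print(layout)
# [(0, 't0', 3, 0), (0, 't1', 2, 0), (1, 't0', 2, 1), (1, 't1', 2, 0)]
# torch.chunk then splits the buffer into world_sz equal pieces of 5 elements, so
# after the batched reduce-scatter each rank holds its own slices of both tensors.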
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py ADDED
@@ -0,0 +1,124 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import numpy as np
+import torch
+import torch_npu
+import deepspeed.comm as dist
+
+
+class HcclBackend(object):
+
+    def __init__(self, mpu=None):
+        if mpu is None:
+            self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
+        else:
+            self.mpu = mpu
+            self.world_group = self.mpu.get_data_parallel_group()
+        self.size = dist.get_world_size(group=self.world_group)
+        self.rank = dist.get_rank(group=self.world_group)
+
+    def my_igather(self, rank, size, group, sendbuf, recvbuf, root):
+        req = []
+        if rank == root:
+            for idx in range(size):
+                if idx != rank:
+                    req.append(dist.irecv(recvbuf[idx], src=idx, group=group))
+                else:
+                    recvbuf[rank] = sendbuf
+        else:
+            req.append(dist.isend(sendbuf, group=group, dst=root))
+        return req
+
+    def my_gather(self, rank, size, group, sendbuf, recvbuf, root):
+        if rank == root:
+            for idx in range(size):
+                if idx != rank:
+                    dist.recv(recvbuf[idx], src=idx, group=group)
+                else:
+                    recvbuf[rank] = sendbuf
+        else:
+            dist.send(sendbuf, group=group, dst=root)
+
+    def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
+        original_shape = buffer_m.size()
+        if len(original_shape) > 1:
+            buffer_m = torch.flatten(buffer_m)
+
+        # align size of original_buffer and error
+        original_size = buffer_m.numel()
+        worker_error_size = worker_error.numel()
+        if original_size != worker_error_size:
+            empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
+            buffer_m = torch.cat([buffer_m, empty_tensor])
+
+        buffer_m.add_(worker_error)
+        worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(torch.numel(buffer_m))
+
+        worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+        sign_list_packed_tmp = torch_npu.npu_sign_bits_pack(buffer_m, self.size).type(torch.int8)
+
+        recvbuf_sign = torch.zeros([self.size, len(sign_list_packed_tmp[self.rank])],
+                                   dtype=sign_list_packed_tmp[0].dtype,
+                                   device=sign_list_packed_tmp.device)
+
+        sign_list_packed = [sign_list_packed_tmp[idx] for idx in range(self.size)]
+
+        recvbuf_scale = [
+            torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(local_rank)) for _ in range(self.size)
+        ]
+
+        # communication phase 1
+        # all to all for sign
+        dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group)
+        # all gather for scale
+        dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group)
+
+        flattened_recvbuf_sign = recvbuf_sign.type(torch.uint8).flatten()
+        compensated_server_m = torch_npu.npu_sign_bits_unpack(flattened_recvbuf_sign, self.size, torch.float32) \
+            .mul_(torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0)
+
+        compensated_server_m.add_(server_error)
+
+        server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
+
+        server_error.set_(compensated_server_m -
+                          server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+        server_sign_packed = torch_npu.npu_sign_bits_pack(compensated_server_m, 1).type(torch.int8)
+
+        # recvbuf_sign_server
+        recvbuf_sign_server_tmp = torch.zeros([self.size, len(server_sign_packed[0])],
+                                              dtype=recvbuf_sign.dtype,
+                                              device=server_sign_packed.device)
+
+        recvbuf_sign_server = [recvbuf_sign_server_tmp[idx] for idx in range(self.size)]
+
+        # recvbuf_scale_server
+        recvbuf_scale_server_tmp = torch.zeros([self.size, 1],
+                                               dtype=worker_scale.dtype,
+                                               device=server_sign_packed.device)
+
+        recvbuf_scale_server = [recvbuf_scale_server_tmp[idx] for idx in range(self.size)]
+
+        # communication Phase 2
+        dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group)
+        dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group)
+
+        recvbuf_sign_server = torch.stack(recvbuf_sign_server)
+
+        flattened_recvbuf_sign_server = recvbuf_sign_server.type(torch.uint8).flatten()
+
+        buffer_m.data.copy_(
+            torch_npu.npu_sign_bits_unpack(flattened_recvbuf_sign_server, self.size,
+                                           torch.float32).mul_(recvbuf_scale_server_tmp).flatten().data)
+
+        if original_size != worker_error_size:
+            buffer_m = buffer_m[0:original_size]
+        if len(original_shape) > 1:
+            buffer_m = buffer_m.reshape(original_shape)
+
+        return buffer_m
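HcclBackend.compressed_allreduce, and the MPI and NCCL variants below, all implement the same 1-bit compression with error feedback: a tensor is reduced to its signs plus one scale (the L2 norm over sqrt(n)), and the quantization residual is added back into the next call's input. A minimal single-process sketch of that quantize-and-compensate step (pure PyTorch, no communication, illustrative only):

import torch

def one_bit_step(x, error):
    compensated = x + error                                  # error feedback
    scale = torch.linalg.norm(compensated) / compensated.numel() ** 0.5
    quantized = scale * compensated.sign()                   # what actually gets communicated
    new_error = compensated - quantized                      # residual carried to the next step
    return quantized, new_error

grad = torch.randn(8)
error = torch.zeros(8)
q, error = one_bit_step(grad, error)
q, error = one_bit_step(grad, error)  # residuals accumulate, so little information is lost over time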
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py ADDED
@@ -0,0 +1,215 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+import cupy
+import time
+import numpy as np
+from mpi4py import MPI
+
+from deepspeed.runtime.compression.cupy import CupyBackend
+
+
+class MpiBackend(object):
+
+    def __init__(self, cuda_aware):
+        self.comm = MPI.COMM_WORLD
+        self.rank = self.comm.Get_rank()
+        self.size = self.comm.Get_size()
+        self.cuda_aware = cuda_aware
+        self.compression_backend = CupyBackend()
+
+    def my_igather(self, rank, size, comm, sendbuf, recbuf, root):
+        req = []
+        if rank == root:
+            for idx in range(size):
+                if idx != rank:
+                    req.append(comm.Irecv(recbuf[idx], source=idx))
+                else:
+                    recbuf[rank] = sendbuf
+        else:
+            req.append(comm.Isend(sendbuf, dest=root))
+        return req
+
+    def gather_cuda(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
+                    cupy_recvbuf_scale):
+        # We do in-place operations on cupy buffers so we do not return any buffers
+        requests = []
+        for idx in range(world_size):
+            req_sign = self.my_igather(rank, world_size, comm, cupy_sign_list_packed[idx], cupy_recvbuf_sign, root=idx)
+            requests += req_sign
+
+        for idx in range(world_size):
+            req_scale = self.my_igather(rank, world_size, comm, cupy_worker_scale, cupy_recvbuf_scale, root=idx)
+            requests += req_scale
+
+        MPI.Request.Waitall(requests)
+
+    def gather_host(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
+                    cupy_recvbuf_scale):
+
+        # In-place operations are not possible for newly created cupy arrays
+        # so we need to return the new buffers
+        numpy_recvbuf_sign = np.zeros([world_size, cupy_sign_list_packed[rank].size],
+                                      dtype=cupy_sign_list_packed[0].dtype)
+        numpy_recvbuf_scale = np.zeros([world_size, 1], dtype=cupy_worker_scale.dtype)
+
+        # 1. convert from cupy to numpy
+        numpy_sign_list_packed = cupy_sign_list_packed
+
+        for idx in range(world_size):
+            numpy_sign_list_packed[idx] = cupy.asnumpy(cupy_sign_list_packed[idx])
+
+        numpy_worker_scale = cupy.asnumpy(cupy_worker_scale)
+        numpy_recvbuf_scale = cupy.asnumpy(cupy_recvbuf_scale)
+
+        cupy.cuda.get_current_stream().synchronize()
+
+        # 2. use numpy buffers for communication
+        requests = []
+
+        for idx in range(world_size):
+            req_sign = self.my_igather(rank,
+                                       world_size,
+                                       comm,
+                                       numpy_sign_list_packed[idx],
+                                       numpy_recvbuf_sign,
+                                       root=idx)
+            requests += req_sign
+
+        for idx in range(world_size):
+            req_scale = self.my_igather(rank, world_size, comm, numpy_worker_scale, numpy_recvbuf_scale, root=idx)
+            requests += req_scale
+
+        MPI.Request.Waitall(requests)
+
+        # 3. Convert back from numpy to cupy
+        cupy_recvbuf_sign = cupy.asarray(numpy_recvbuf_sign)
+        for idx in range(world_size):
+            cupy_sign_list_packed[idx] = cupy.asarray(numpy_sign_list_packed[idx])
+
+        cupy_worker_scale = cupy.asarray(numpy_worker_scale)
+        cupy_recvbuf_scale = cupy.asarray(numpy_recvbuf_scale)
+        cupy.cuda.get_current_stream().synchronize()
+
+        return cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale
+
+    def allgather_cuda(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
+                       cupy_recvbuf_scale_server):
+        comm.Allgather(cupy_server_sign_packed, cupy_recvbuf_sign_server)
+        comm.Allgather(cupy_server_scale, cupy_recvbuf_scale_server)
+
+    def allgather_host(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
+                       cupy_recvbuf_scale_server):
+
+        # 1. Convert cupy to numpy
+        numpy_recvbuf_sign_server = np.zeros([comm.Get_size(), cupy_server_sign_packed.size],
+                                             dtype=cupy_server_sign_packed.dtype)
+        numpy_recvbuf_scale_server = np.zeros([comm.Get_size(), 1], dtype=cupy_server_scale.dtype)
+
+        numpy_server_sign_packed = cupy.asnumpy(cupy_server_sign_packed)
+        numpy_recvbuf_sign_server = cupy.asnumpy(cupy_recvbuf_sign_server)
+        numpy_server_scale = cupy.asnumpy(cupy_server_scale)
+        numpy_recvbuf_scale_server = cupy.asnumpy(cupy_recvbuf_scale_server)
+        cupy.cuda.get_current_stream().synchronize()
+
+        # 2. Communicate numpy buffers
+        comm.Allgather(numpy_server_sign_packed, numpy_recvbuf_sign_server)
+        comm.Allgather(numpy_server_scale, numpy_recvbuf_scale_server)
+        comm.Barrier()
+
+        # 3. Convert numpy back to cupy
+        cupy_server_sign_packed = cupy.asarray(numpy_server_sign_packed)
+        cupy_recvbuf_sign_server = cupy.asarray(numpy_recvbuf_sign_server)
+        cupy_server_scale = cupy.asarray(numpy_server_scale)
+        cupy_recvbuf_scale_server = cupy.asarray(numpy_recvbuf_scale_server)
+        cupy.cuda.get_current_stream().synchronize()
+
+        return cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server
+
+    def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
+
+        all_start_time = time.time()
+        original_shape = buffer_m.size()
+        if len(original_shape) > 1:
+            buffer_m = torch.flatten(buffer_m)
+        original_size = buffer_m.numel()
+        worker_error_size = worker_error.numel()
+        cupy.cuda.Device(local_rank).use()
+
+        if original_size != worker_error_size:
+            empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
+            buffer_m = torch.cat([buffer_m, empty_tensor])
+
+        buffer_m.add_(worker_error)
+        worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(torch.numel(buffer_m))
+        worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+        cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
+            self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
+        cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
+
+        cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
+                                       dtype=cupy_sign_list_packed[0].dtype)
+        cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
+
+        # Communication Phase 1
+        gather_start = time.time()
+        if self.cuda_aware:
+            self.gather_cuda(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign,
+                             cupy_worker_scale, cupy_recvbuf_scale)
+        else:
+            _, cupy_recvbuf_sign, _, cupy_recvbuf_scale = self.gather_host(self.rank, self.size, self.comm,
+                                                                           cupy_sign_list_packed, cupy_recvbuf_sign,
+                                                                           cupy_worker_scale, cupy_recvbuf_scale)
+        gather_end = time.time()
+
+        # cupy_sign_list_packed, cupy_worker_scale, worker_scale = None, None, None
+        cupy_sign_list_packed = None
+
+        compensated_server_m = self.compression_backend.cupy2torch(
+            (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                self.compression_backend.cupy2torch(cupy_recvbuf_scale).mul_(1 / self.size)).sum(0)
+        compensated_server_m.add_(server_error)
+        server_scale = torch.linalg.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
+        server_error.set_(compensated_server_m -
+                          server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+        cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
+
+        cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
+            self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
+        compensated_server_m = None
+
+        cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
+                                              dtype=cupy_recvbuf_sign.dtype)
+        cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_recvbuf_scale.dtype)
+        # cupy_recvbuf_sign, cupy_recvbuf_scale = None, None
+        cupy_recvbuf_sign = None
+
+        # Communication Phase 2
+        if self.cuda_aware:
+            self.allgather_cuda(self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
+                                cupy_recvbuf_scale_server)
+        else:
+            _, cupy_recvbuf_sign_server, _, cupy_recvbuf_scale_server = self.allgather_host(
+                self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
+                cupy_recvbuf_scale_server)
+
+        # cupy_server_sign_packed, cupy_server_scale, server_scale = None, None, None
+        cupy_server_sign_packed = None
+
+        buffer_m.data.copy_(
+            self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
+                self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                    self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
+        if original_size != worker_error_size:
+            buffer_m = buffer_m[0:original_size]
+        if len(original_shape) > 1:
+            buffer_m = buffer_m.reshape(original_shape)
+
+        # cupy_recvbuf_sign_server, cupy_recvbuf_scale_server = None, None
+
+        return buffer_m
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py ADDED
@@ -0,0 +1,166 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from deepspeed import comm as dist
+import cupy
+import numpy as np
+
+from deepspeed.runtime.compression.cupy import CupyBackend
+from deepspeed.runtime.utils import required_torch_version
+from deepspeed.accelerator import get_accelerator
+
+
+class NcclBackend(object):
+
+    def __init__(self, mpu=None):
+        if mpu is None:
+            self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
+        else:
+            self.mpu = mpu
+            self.world_group = self.mpu.get_data_parallel_group()
+        self.rank = dist.get_rank(group=self.world_group)
+        self.size = dist.get_world_size(group=self.world_group)
+        self.compression_backend = CupyBackend()
+        self.bool_not_supported = required_torch_version(min_version=1.10)
+
+    def my_igather(self, rank, size, group, sendbuf, recvbuf, root):
+        req = []
+        if rank == root:
+            for idx in range(size):
+                if idx != rank:
+                    req.append(dist.irecv(recvbuf[idx], src=idx, group=group))
+                else:
+                    recvbuf[rank] = sendbuf
+        else:
+            req.append(dist.isend(sendbuf, group=group, dst=root))
+        return req
+
+    def my_gather(self, rank, size, group, sendbuf, recvbuf, root):
+        if rank == root:
+            for idx in range(size):
+                if idx != rank:
+                    dist.recv(recvbuf[idx], src=idx, group=group)
+                else:
+                    recvbuf[rank] = sendbuf
+        else:
+            dist.send(sendbuf, group=group, dst=root)
+
+    def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
+
+        # all_start_time = time.time()
+        original_shape = buffer_m.size()
+        if len(original_shape) > 1:
+            buffer_m = torch.flatten(buffer_m)
+        original_size = buffer_m.numel()
+        worker_error_size = worker_error.numel()
+        cupy.cuda.Device(local_rank).use()
+
+        if original_size != worker_error_size:
+            empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
+            buffer_m = torch.cat([buffer_m, empty_tensor])
+
+        buffer_m.add_(worker_error)
+        worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(buffer_m.numel())
+        worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+        if self.bool_not_supported:
+            cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
+                self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool().to(dtype=torch.uint8)), self.size)
+        else:
+            cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
+                self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
+        cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
+
+        cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
+                                       dtype=cupy_sign_list_packed[0].dtype)
+        # cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
+
+        sign_list_packed = [
+            self.compression_backend.cupy2torch(cupy_sign_list_packed[idx]) for idx in range(self.size)
+        ]
+
+        # worker_scale = self.compression_backend.cupy2torch(cupy_worker_scale)
+        recvbuf_sign = self.compression_backend.cupy2torch(cupy_recvbuf_sign)
+        #recvbuf_scale = self.compression_backend.cupy2torch(cupy_recvbuf_scale)
+        recvbuf_scale = [
+            torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(get_accelerator().device_name(local_rank)))
+            for i in range(self.size)
+        ]
+
+        # communication phase 1
+        # gather_start = time.time()
+        # Alltoall for sign
+        dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group)
+        # Allgather for scale
+        dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group)
+
+        # gather_end = time.time()
+
+        # cupy_sign_list_packed, sign_list_packed, cupy_worker_scale, worker_scale = None, None, None, None
+        cupy_sign_list_packed = None
+
+        cupy_recvbuf_sign = self.compression_backend.torch2cupy(recvbuf_sign)
+        #cupy_recvbuf_scale = self.compression_backend.torch2cupy(torch.stack(recvbuf_scale))
+
+        compensated_server_m = self.compression_backend.cupy2torch(
+            (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0)
+        compensated_server_m.add_(server_error)
+        server_scale = torch.linalg.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
+        server_error.set_(compensated_server_m -
+                          server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+        # cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
+
+        if self.bool_not_supported:
+            cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
+                self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool().to(dtype=torch.uint8)),
+                1)
+        else:
+            cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
+                self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
+        compensated_server_m = None
+
+        cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
+                                              dtype=cupy_recvbuf_sign.dtype)
+        # cupy_recvbuf_sign, recvbuf_sign = None, None
+        cupy_recvbuf_sign = None
+
+        server_sign_packed = [self.compression_backend.cupy2torch(cupy_server_sign_packed[0])]
+        recvbuf_sign_server = [
+            self.compression_backend.cupy2torch(cupy_recvbuf_sign_server[idx]) for idx in range(self.size)
+        ]
+
+        # server_scale = self.compression_backend.cupy2torch(cupy_server_scale)
+        cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
+        # cupy_recvbuf_scale, recvbuf_scale = None, None
+
+        recvbuf_scale_server = [
+            self.compression_backend.cupy2torch(cupy_recvbuf_scale_server[idx]) for idx in range(self.size)
+        ]
+
+        # Communication Phase 2
+        dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group)
+        dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group)
+
+        cupy_server_sign_packed = None
+
+        # need to convert from a tensor list to a single tensor
+        # dist.all_gather only provides a tensor list as the recv/output buffer
+        recvbuf_sign_server = torch.stack(recvbuf_sign_server)
+
+        cupy_recvbuf_sign_server = self.compression_backend.torch2cupy(recvbuf_sign_server)
+
+        buffer_m.data.copy_(
+            self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
+                self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                    self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
+        if original_size != worker_error_size:
+            buffer_m = buffer_m[0:original_size]
+        if len(original_shape) > 1:
+            buffer_m = buffer_m.reshape(original_shape)
+
+        return buffer_m
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py ADDED
@@ -0,0 +1,26 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import cupy
+from torch.utils.dlpack import to_dlpack
+from torch.utils.dlpack import from_dlpack
+
+
+class CupyBackend(object):
+
+    def __init__(self):
+        pass
+
+    def torch2cupy(self, tensor):
+        return cupy.fromDlpack(to_dlpack(tensor))
+
+    def cupy2torch(self, cupy_tensor):
+        return from_dlpack(cupy_tensor.toDlpack())
+
+    def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
+        packed_sign = cupy.packbits(cupy_bool_tensor)
+        sign_list_packed = cupy.split(packed_sign, num_chunks)
+        cupy.cuda.get_current_stream().synchronize()
+        return sign_list_packed
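A sketch of how CupyBackend is used for the sign compression above: round-trip a bool tensor through DLPack and bit-pack it (assumes a CUDA device, since cupy arrays live on the GPU):

import torch

backend = CupyBackend()
flags = torch.tensor([True, False, True, True], device='cuda')
packed = backend.compress_by_chunk(backend.torch2cupy(flags), num_chunks=1)
print(packed[0])                          # one uint8 per 8 bools, here a single byte
restored = backend.cupy2torch(packed[0])  # zero-copy view back as a torch tensor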
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (243 Bytes).
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc ADDED
Binary file (4.83 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes).
 
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_exporter_states.py ADDED
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+
+class ExportTypes:
+    """Specifies how the ONNX model is stored."""
+
+    # TODO(justinchuby): Deprecate and remove this class.
+
+    PROTOBUF_FILE = "Saves model in the specified protobuf file."
+    ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)."
+    COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)."
+    DIRECTORY = "Saves model in the specified folder."
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_flags.py ADDED
@@ -0,0 +1,49 @@
+"""Internal feature flags for torch.onnx.
+
+NOTE: These flags are experimental only. Any flag here can be removed at any
+time without notice.
+"""
+
+import logging
+import os
+
+
+logger = logging.getLogger(__name__)
+
+
+def _load_boolean_flag(
+    name: str,
+    *,
+    this_will: str,
+    deprecated: bool = False,
+    default: bool = False,
+) -> bool:
+    """Load a boolean flag from environment variable.
+
+    Args:
+        name: The name of the environment variable.
+        this_will: A string that describes what this flag will do.
+        deprecated: Whether this flag is deprecated.
+        default: The default value if envvar not defined.
+    """
+    undefined = os.getenv(name) is None
+    state = os.getenv(name) == "1"
+    if state:
+        if deprecated:
+            logger.error(
+                "Experimental flag %s is deprecated. Please remove it from your environment.",
+                name,
+            )
+        else:
+            logger.warning(
+                "Experimental flag %s is enabled. This will %s.", name, this_will
+            )
+    if undefined:
+        state = default
+    return state
+
+
+USE_EXPERIMENTAL_LOGIC: bool = _load_boolean_flag(
+    "TORCH_ONNX_USE_EXPERIMENTAL_LOGIC",
+    this_will="use ExportedProgram and the new torch.onnx export logic",
+)
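Since the flag is evaluated once at import time, the environment variable has to be set before torch.onnx is first imported in the process; a sketch:

import os
os.environ["TORCH_ONNX_USE_EXPERIMENTAL_LOGIC"] = "1"  # must happen before the import

from torch.onnx import _flags  # logs the "Experimental flag ... is enabled" warning
print(_flags.USE_EXPERIMENTAL_LOGIC)  # True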
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py ADDED
@@ -0,0 +1,211 @@
+# mypy: allow-untyped-defs
+"""Diagnostic components for TorchScript based ONNX export, i.e. `torch.onnx.export`."""
+
+from __future__ import annotations
+
+import contextlib
+import gzip
+from typing import TYPE_CHECKING
+
+import torch
+from torch.onnx._internal.diagnostics import infra
+from torch.onnx._internal.diagnostics.infra import formatter, sarif
+from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version
+from torch.utils import cpp_backtrace
+
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
+
+def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack:
+    """Returns the current C++ call stack.
+
+    This function utilizes `torch.utils.cpp_backtrace` to get the current C++ call stack.
+    The returned C++ call stack is a concatenated string of the C++ call stack frames.
+    Each frame is separated by a newline character, in the same format of
+    r"frame #[0-9]+: (?P<frame_info>.*)". More info at `c10/util/Backtrace.cpp`.
+
+    """
+    frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split("\n")
+    frame_messages = []
+    for frame in frames:
+        segments = frame.split(":", 1)
+        if len(segments) == 2:
+            frame_messages.append(segments[1].strip())
+        else:
+            frame_messages.append("<unknown frame>")
+    return infra.Stack(
+        frames=[
+            infra.StackFrame(location=infra.Location(message=message))
+            for message in frame_messages
+        ]
+    )
+
+
+class TorchScriptOnnxExportDiagnostic(infra.Diagnostic):
+    """Base class for all export diagnostics.
+
+    This class is used to represent all export diagnostics. It is a subclass of
+    infra.Diagnostic, and adds additional methods to add more information to the
+    diagnostic.
+    """
+
+    python_call_stack: infra.Stack | None = None
+    cpp_call_stack: infra.Stack | None = None
+
+    def __init__(
+        self,
+        *args,
+        frames_to_skip: int = 1,
+        cpp_stack: bool = False,
+        **kwargs,
+    ) -> None:
+        super().__init__(*args, **kwargs)
+        self.python_call_stack = self.record_python_call_stack(
+            frames_to_skip=frames_to_skip
+        )
+        if cpp_stack:
+            self.cpp_call_stack = self.record_cpp_call_stack(
+                frames_to_skip=frames_to_skip
+            )
+
+    def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack:
+        """Records the current C++ call stack in the diagnostic."""
+        stack = _cpp_call_stack(frames_to_skip=frames_to_skip)
+        stack.message = "C++ call stack"
+        self.with_stack(stack)
+        return stack
+
+
+class ExportDiagnosticEngine:
+    """PyTorch ONNX Export diagnostic engine.
+
+    The only purpose of creating this class instead of using `DiagnosticContext` directly
+    is to provide a background context for `diagnose` calls inside exporter.
+
+    By design, one `torch.onnx.export` call should initialize one diagnostic context.
+    All `diagnose` calls inside exporter should be made in the context of that export.
+    However, since diagnostic context is currently being accessed via a global variable,
+    there is no guarantee that the context is properly initialized. Therefore, we need
+    to provide a default background context to fallback to, otherwise any invocation of
+    exporter internals, e.g. unit tests, will fail due to missing diagnostic context.
+    This can be removed once the pipeline for context to flow through the exporter is
+    established.
+    """
+
+    contexts: list[infra.DiagnosticContext]
+    _background_context: infra.DiagnosticContext
+
+    def __init__(self) -> None:
+        self.contexts = []
+        self._background_context = infra.DiagnosticContext(
+            name="torch.onnx",
+            version=torch.__version__,
+        )
+
+    @property
+    def background_context(self) -> infra.DiagnosticContext:
+        return self._background_context
+
+    def create_diagnostic_context(
+        self,
+        name: str,
+        version: str,
+        options: infra.DiagnosticOptions | None = None,
+    ) -> infra.DiagnosticContext:
+        """Creates a new diagnostic context.
+
+        Args:
+            name: The subject name for the diagnostic context.
+            version: The subject version for the diagnostic context.
+            options: The options for the diagnostic context.
+
+        Returns:
+            A new diagnostic context.
+        """
+        if options is None:
+            options = infra.DiagnosticOptions()
+        context: infra.DiagnosticContext[infra.Diagnostic] = infra.DiagnosticContext(
+            name, version, options
+        )
+        self.contexts.append(context)
+        return context
+
+    def clear(self):
+        """Clears all diagnostic contexts."""
+        self.contexts.clear()
+        self._background_context.diagnostics.clear()
+
+    def to_json(self) -> str:
+        return formatter.sarif_to_json(self.sarif_log())
+
+    def dump(self, file_path: str, compress: bool = False) -> None:
+        """Dumps the SARIF log to a file."""
+        if compress:
+            with gzip.open(file_path, "wt") as f:
+                f.write(self.to_json())
+        else:
+            with open(file_path, "w") as f:
+                f.write(self.to_json())
+
+    def sarif_log(self):
+        log = sarif.SarifLog(
+            version=sarif_version.SARIF_VERSION,
+            schema_uri=sarif_version.SARIF_SCHEMA_LINK,
+            runs=[context.sarif() for context in self.contexts],
+        )
+
159
+ log.runs.append(self._background_context.sarif())
160
+ return log
161
+
162
+
163
+ engine = ExportDiagnosticEngine()
164
+ _context = engine.background_context
165
+
166
+
167
+ @contextlib.contextmanager
168
+ def create_export_diagnostic_context() -> (
169
+ Generator[infra.DiagnosticContext, None, None]
170
+ ):
171
+ """Create a diagnostic context for export.
172
+
173
+ This is a workaround for code robustness since diagnostic context is accessed by
174
+ export internals via global variable. See `ExportDiagnosticEngine` for more details.
175
+ """
176
+ global _context
177
+ assert (
178
+ _context == engine.background_context
179
+ ), "Export context is already set. Nested export is not supported."
180
+ _context = engine.create_diagnostic_context(
181
+ "torch.onnx.export",
182
+ torch.__version__,
183
+ )
184
+ try:
185
+ yield _context
186
+ finally:
187
+ _context = engine.background_context
188
+
189
+
190
+ def diagnose(
191
+ rule: infra.Rule,
192
+ level: infra.Level,
193
+ message: str | None = None,
194
+ frames_to_skip: int = 2,
195
+ **kwargs,
196
+ ) -> TorchScriptOnnxExportDiagnostic:
197
+ """Creates a diagnostic and record it in the global diagnostic context.
198
+
199
+ This is a wrapper around `context.log` that uses the global diagnostic
200
+ context.
201
+ """
202
+ diagnostic = TorchScriptOnnxExportDiagnostic(
203
+ rule, level, message, frames_to_skip=frames_to_skip, **kwargs
204
+ )
205
+ export_context().log(diagnostic)
206
+ return diagnostic
207
+
208
+
209
+ def export_context() -> infra.DiagnosticContext:
210
+ global _context
211
+ return _context
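
A minimal sketch (not part of the diff) of how exporter internals are expected to drive this module, using the names defined in this hunk plus `_rules.rules` from the next file; it also assumes `infra.Diagnostic`'s stack-recording helpers from `infra/context.py`, and the rule and message are purely illustrative.

```python
# Sketch: record one diagnostic inside a fresh export context.
from torch.onnx._internal.diagnostics import _diagnostic, _rules, infra

with _diagnostic.create_export_diagnostic_context() as context:
    diagnostic = _diagnostic.diagnose(
        _rules.rules.node_missing_onnx_shape_inference,
        infra.Level.WARNING,
        message="The shape inference of custom::op type is missing.",
    )
    assert diagnostic in context.diagnostics

# Contexts created by the engine are retained, so the run can be serialized.
print(_diagnostic.engine.to_json()[:80])
```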
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py ADDED
@@ -0,0 +1,636 @@
+ # mypy: allow-untyped-defs
+ """
+ GENERATED CODE - DO NOT EDIT DIRECTLY
+ This file is generated by gen_diagnostics.py.
+ See tools/onnx/gen_diagnostics.py for more information.
+
+ Diagnostic rules for PyTorch ONNX export.
+ """
+
+ import dataclasses
+ from typing import Tuple
+
+ # flake8: noqa
+ from torch.onnx._internal.diagnostics import infra
+
+
+ """
+ GENERATED CODE - DO NOT EDIT DIRECTLY
+ The purpose of generating a class for each rule is to override the `format_message`
+ method to provide more details in the signature about the format arguments.
+ """
+
+
+ class _NodeMissingOnnxShapeInference(infra.Rule):
+     """Node is missing ONNX shape inference."""
+
+     def format_message(self, op_name) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.'
+         """
+         return self.message_default_template.format(op_name=op_name)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, op_name
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.'
+         """
+         return self, level, self.format_message(op_name=op_name)
+
+
+ class _MissingCustomSymbolicFunction(infra.Rule):
+     """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."""
+
+     def format_message(self, op_name) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'
+         """
+         return self.message_default_template.format(op_name=op_name)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, op_name
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'
+         """
+         return self, level, self.format_message(op_name=op_name)
+
+
+ class _MissingStandardSymbolicFunction(infra.Rule):
+     """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."""
+
+     def format_message(  # type: ignore[override]
+         self, op_name, opset_version, issue_url
+     ) -> str:
+         """Returns the formatted default message of this Rule.
+
+         Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
+         """
+         return self.message_default_template.format(
+             op_name=op_name, opset_version=opset_version, issue_url=issue_url
+         )
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, op_name, opset_version, issue_url
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
+         """
+         return (
+             self,
+             level,
+             self.format_message(
+                 op_name=op_name, opset_version=opset_version, issue_url=issue_url
+             ),
+         )
+
+
+ class _OperatorSupportedInNewerOpsetVersion(infra.Rule):
+     """Operator is supported in newer opset version."""
+
+     def format_message(  # type: ignore[override]
+         self, op_name, opset_version, supported_opset_version
+     ) -> str:
+         """Returns the formatted default message of this Rule.
+
+         Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
+         """
+         return self.message_default_template.format(
+             op_name=op_name,
+             opset_version=opset_version,
+             supported_opset_version=supported_opset_version,
+         )
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, op_name, opset_version, supported_opset_version
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
+         """
+         return (
+             self,
+             level,
+             self.format_message(
+                 op_name=op_name,
+                 opset_version=opset_version,
+                 supported_opset_version=supported_opset_version,
+             ),
+         )
+
+
+ class _FxGraphToOnnx(infra.Rule):
+     """Transforms graph from FX IR to ONNX IR."""
+
+     def format_message(self, graph_name) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'Transforming FX graph {graph_name} to ONNX graph.'
+         """
+         return self.message_default_template.format(graph_name=graph_name)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, graph_name
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'Transforming FX graph {graph_name} to ONNX graph.'
+         """
+         return self, level, self.format_message(graph_name=graph_name)
+
+
+ class _FxNodeToOnnx(infra.Rule):
+     """Transforms an FX node to an ONNX node."""
+
+     def format_message(self, node_repr) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'Transforming FX node {node_repr} to ONNX node.'
+         """
+         return self.message_default_template.format(node_repr=node_repr)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, node_repr
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'Transforming FX node {node_repr} to ONNX node.'
+         """
+         return self, level, self.format_message(node_repr=node_repr)
+
+
+ class _FxPass(infra.Rule):
+     """FX graph transformation during ONNX export before converting from FX IR to ONNX IR."""
+
+     def format_message(self, pass_name) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'Running {pass_name} pass.'
+         """
+         return self.message_default_template.format(pass_name=pass_name)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, pass_name
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'Running {pass_name} pass.'
+         """
+         return self, level, self.format_message(pass_name=pass_name)
+
+
+ class _NoSymbolicFunctionForCallFunction(infra.Rule):
+     """Cannot find symbolic function to convert the "call_function" FX node to ONNX."""
+
+     def format_message(self, target) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. '
+         """
+         return self.message_default_template.format(target=target)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, target
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. '
+         """
+         return self, level, self.format_message(target=target)
+
+
+ class _UnsupportedFxNodeAnalysis(infra.Rule):
+     """Result from FX graph analysis to reveal unsupported FX nodes."""
+
+     def format_message(  # type: ignore[override]
+         self, node_op_to_target_mapping
+     ) -> str:
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. '
+         """
+         return self.message_default_template.format(
+             node_op_to_target_mapping=node_op_to_target_mapping
+         )
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, node_op_to_target_mapping
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. '
+         """
+         return (
+             self,
+             level,
+             self.format_message(node_op_to_target_mapping=node_op_to_target_mapping),
+         )
+
+
+ class _OpLevelDebugging(infra.Rule):
+     """Report any op level validation failure in warnings."""
+
+     def format_message(self, node, symbolic_fn) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.'
+         """
+         return self.message_default_template.format(node=node, symbolic_fn=symbolic_fn)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, node, symbolic_fn
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.'
+         """
+         return self, level, self.format_message(node=node, symbolic_fn=symbolic_fn)
+
+
+ class _FindOpschemaMatchedSymbolicFunction(infra.Rule):
+     """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."""
+
+     def format_message(self, symbolic_fn, node) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.'
+         """
+         return self.message_default_template.format(symbolic_fn=symbolic_fn, node=node)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, symbolic_fn, node
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.'
+         """
+         return self, level, self.format_message(symbolic_fn=symbolic_fn, node=node)
+
+
+ class _FxNodeInsertTypePromotion(infra.Rule):
+     """Determine if type promotion is required for the FX node. Insert cast nodes if needed."""
+
+     def format_message(self, target) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'Performing explicit type promotion for node {target}. '
+         """
+         return self.message_default_template.format(target=target)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, target
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'Performing explicit type promotion for node {target}. '
+         """
+         return self, level, self.format_message(target=target)
+
+
+ class _FindOperatorOverloadsInOnnxRegistry(infra.Rule):
+     """Find the list of OnnxFunction of the PyTorch operator in onnx registry."""
+
+     def format_message(self, node) -> str:  # type: ignore[override]
+         """Returns the formatted default message of this Rule.
+
+         Message template: 'Checking if the FX node: {node} is supported in onnx registry.'
+         """
+         return self.message_default_template.format(node=node)
+
+     def format(  # type: ignore[override]
+         self, level: infra.Level, node
+     ) -> Tuple[infra.Rule, infra.Level, str]:
+         """Returns a tuple of (Rule, Level, message) for this Rule.
+
+         Message template: 'Checking if the FX node: {node} is supported in onnx registry.'
+         """
+         return self, level, self.format_message(node=node)
+
+
+ @dataclasses.dataclass
+ class _POERules(infra.RuleCollection):
+     node_missing_onnx_shape_inference: _NodeMissingOnnxShapeInference = dataclasses.field(
+         default=_NodeMissingOnnxShapeInference.from_sarif(
+             **{
+                 "id": "POE0001",
+                 "name": "node-missing-onnx-shape-inference",
+                 "short_description": {"text": "Node is missing ONNX shape inference."},
+                 "full_description": {
+                     "text": "Node is missing ONNX shape inference. This usually happens when the node is not valid under standard ONNX operator spec.",
+                     "markdown": "Node is missing ONNX shape inference.\nThis usually happens when the node is not valid under standard ONNX operator spec.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Node is missing ONNX shape inference."""
+
+     missing_custom_symbolic_function: _MissingCustomSymbolicFunction = dataclasses.field(
+         default=_MissingCustomSymbolicFunction.from_sarif(
+             **{
+                 "id": "POE0002",
+                 "name": "missing-custom-symbolic-function",
+                 "short_description": {
+                     "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."
+                 },
+                 "full_description": {
+                     "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.",
+                     "markdown": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."""
+
+     missing_standard_symbolic_function: _MissingStandardSymbolicFunction = dataclasses.field(
+         default=_MissingStandardSymbolicFunction.from_sarif(
+             **{
+                 "id": "POE0003",
+                 "name": "missing-standard-symbolic-function",
+                 "short_description": {
+                     "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."
+                 },
+                 "full_description": {
+                     "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.",
+                     "markdown": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."""
+
+     operator_supported_in_newer_opset_version: _OperatorSupportedInNewerOpsetVersion = dataclasses.field(
+         default=_OperatorSupportedInNewerOpsetVersion.from_sarif(
+             **{
+                 "id": "POE0004",
+                 "name": "operator-supported-in-newer-opset-version",
+                 "short_description": {
+                     "text": "Operator is supported in newer opset version."
+                 },
+                 "full_description": {
+                     "text": "Operator is supported in newer opset version.",
+                     "markdown": "Operator is supported in newer opset version.\n\nExample:\n```python\ntorch.onnx.export(model, args, ..., opset_version=9)\n```\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Operator is supported in newer opset version."""
+
+     fx_graph_to_onnx: _FxGraphToOnnx = dataclasses.field(
+         default=_FxGraphToOnnx.from_sarif(
+             **{
+                 "id": "FXE0007",
+                 "name": "fx-graph-to-onnx",
+                 "short_description": {
+                     "text": "Transforms graph from FX IR to ONNX IR."
+                 },
+                 "full_description": {
+                     "text": "Transforms graph from FX IR to ONNX IR.",
+                     "markdown": "This diagnostic tracks the transformation process from an FX Graph (in FX IR) to an ONNX Graph (in ONNX IR).\n\n## Key Representations:\n\n- **FX Graph**: The graph in FX IR produced by dynamo or symbolic tracing.\n- **ONNX Graph**: The graph in ONNX IR and [operators](https://onnx.ai/onnx/operators/).\n\n## Additional Notes:\n\n- Prior to this transformation step, the FX graph undergoes preprocessing through multiple FX passes.\n To gain insight into these transformations, refer to diagnostic `FXE0010`.\n- To enable a detailed view of the graph transformation in progress within this diagnostic, switch to the DEBUG mode.\n\n - Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n - Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\n- For specific information related to node-level FX to ONNX transformations, explore the diagnostic `FXE0008`.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Transforming FX graph {graph_name} to ONNX graph."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Transforms graph from FX IR to ONNX IR."""
+
+     fx_node_to_onnx: _FxNodeToOnnx = dataclasses.field(
+         default=_FxNodeToOnnx.from_sarif(
+             **{
+                 "id": "FXE0008",
+                 "name": "fx-node-to-onnx",
+                 "short_description": {"text": "Transforms an FX node to an ONNX node."},
+                 "full_description": {
+                     "text": "Transforms an FX node to an ONNX node.",
+                     "markdown": "This diagnostic tracks the transformation process from an FX Node to ONNX [Operators](https://onnx.ai/onnx/operators/).\n\nThe process of converting FX Node to ONNX Node involves dealing with six distinct node types:\n 1. `placeholder`: Represents a module input, maps to an ONNX graph input.\n 2. `call_module`: Symbolizes a call to a submodule, maps to an ONNX\n 3. `call_method`: Symbolizes a method call. Not yet implemented.\n 4. `call_function`: Symbolizes a function call. [Core ATen](https://pytorch.org/docs/stable/ir.html#core-aten-ir) is expected\n as the function call target. The mapping from ATen to ONNX is implemented by [ONNXScript torchlib](https://github.com/microsoft/onnxscript/tree/main/onnxscript/function_libs/torch_lib/ops).\n This [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) shows how to write and register a custom symbolic function for call_function FX node.\n 5. `get_attr`: Indicates an attribute access within the current module. Maps to an ONNX graph initializer.\n 6. `output`: Represents the module's output. Maps to an ONNX graph output.\n\nFor a granular understanding of how each node type is transformed, refer to the implementation details in `FxOnnxInterpreter`.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Transforming FX node {node_repr} to ONNX node."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Transforms an FX node to an ONNX node."""
+
+     fx_pass: _FxPass = dataclasses.field(
+         default=_FxPass.from_sarif(
+             **{
+                 "id": "FXE0010",
+                 "name": "fx-pass",
+                 "short_description": {
+                     "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR."
+                 },
+                 "full_description": {
+                     "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR.",
+                     "markdown": "This diagnostic tracks the FX passes executed during the ONNX export process prior\nto converting from FX IR (Intermediate Representation) to ONNX IR.\n\nUnder the scope of ONNX export, an FX pass refers to a specific transformation applied to the FX GraphModule.\nThe primary aim of these passes is to streamline the graph into a format that aligns more with the ONNX IR.\nMoreover, these passes work to substitute unsupported FX IR features with those recognized and endorsed by\nONNX IR. Common transformations include, but aren't limited to, decomposition, functionalization and\ntype promotion.\n\nFor those who are interested in a comprehensive log detailing the modifications made during these passes,\nthere are a couple of options:\n\n- Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n- Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\nHowever, it's noteworthy that by default, such detailed logging is turned off. The primary reason being\nits considerable impact on performance.\n\nFor an in-depth understanding of each specific pass, please refer to the directory: torch/onnx/_internal/fx/passes.\n",
+                 },
+                 "message_strings": {"default": {"text": "Running {pass_name} pass."}},
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """FX graph transformation during ONNX export before converting from FX IR to ONNX IR."""
+
+     no_symbolic_function_for_call_function: _NoSymbolicFunctionForCallFunction = dataclasses.field(
+         default=_NoSymbolicFunctionForCallFunction.from_sarif(
+             **{
+                 "id": "FXE0011",
+                 "name": "no-symbolic-function-for-call-function",
+                 "short_description": {
+                     "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX.'
+                 },
+                 "full_description": {
+                     "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX. ',
+                     "markdown": 'This error occurs when the ONNX converter is unable to find a corresponding symbolic function\nto convert a "call_function" node in the input graph to its equivalence in ONNX. The "call_function"\nnode represents a normalized function call in PyTorch, such as "torch.aten.ops.add".\n\nTo resolve this error, you can try one of the following:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/tutorials/beginner/onnx/onnx_registry_tutorial.html#overview) to write and\n register a custom symbolic function for the unsupported call_function FX node.\n',
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": 'No symbolic function to convert the "call_function" node {target} to ONNX. '
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Cannot find symbolic function to convert the "call_function" FX node to ONNX."""
+
+     unsupported_fx_node_analysis: _UnsupportedFxNodeAnalysis = dataclasses.field(
+         default=_UnsupportedFxNodeAnalysis.from_sarif(
+             **{
+                 "id": "FXE0012",
+                 "name": "unsupported-fx-node-analysis",
+                 "short_description": {
+                     "text": "Result from FX graph analysis to reveal unsupported FX nodes."
+                 },
+                 "full_description": {
+                     "text": "Result from FX graph analysis to reveal unsupported FX nodes.",
+                     "markdown": "This error indicates that an FX graph contains one or more unsupported nodes. The error message\nis typically accompanied by a list of the unsupported nodes found during analysis.\n\nTo resolve this error, you can try resolving each individual unsupported node error by following\nthe suggestions by its diagnostic. Typically, options include:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) to write and\n register a custom symbolic function for the unsupported call_function FX node.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Unsupported FX nodes: {node_op_to_target_mapping}. "
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Result from FX graph analysis to reveal unsupported FX nodes."""
+
+     op_level_debugging: _OpLevelDebugging = dataclasses.field(
+         default=_OpLevelDebugging.from_sarif(
+             **{
+                 "id": "FXE0013",
+                 "name": "op-level-debugging",
+                 "short_description": {
+                     "text": "Report any op level validation failure in warnings."
+                 },
+                 "full_description": {
+                     "text": "Report any op level validation failure in warnings.",
+                     "markdown": "This warning message indicates that during op level debugging, certain symbolic functions\nhave failed to match the results of torch ops when using real tensors generated from fake\ntensors. It is important to note that the symbolic functions may not necessarily be\nincorrect, as the validation process is non-deterministic and should only be used as a\nreference.\n\nThere are two categories of warnings that can be triggered:\n\n1. Non-validated operators:\n If the warnings are caused by the following errors, they can be disregarded by users,\n as these errors occur due to the non-deterministic nature of the validation. However,\n it is important to be aware that the operators have not been validated.\n\n - IndexError: Unsupported input arguments of randomized dimensions/indices(INT64).\n - RuntimeError: Unsupported input arguments for torch ops are generated.\n - ValueError: Arguments/keyword arguments do not match the signature of the symbolic function.\n\n2. Potentially wrong torchlib operators:\n If the warnings are triggered by the following error, users should be aware that the symbolic functions\n may be incorrect in dispatching or implementation. In such cases, it is recommended to report\n the issue to the PyTorch-ONNX team, or create/register a custom symbolic function to replace the default one.\n\n - AssertionError: The symbolic function is potentially wrong as the results do not match the results of torch ops.\n - TypeError: The symbolic function is potentially wrong as the opschema doesn't match inputs.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Report any op level validation failure in warnings."""
+
+     find_opschema_matched_symbolic_function: _FindOpschemaMatchedSymbolicFunction = dataclasses.field(
+         default=_FindOpschemaMatchedSymbolicFunction.from_sarif(
+             **{
+                 "id": "FXE0014",
+                 "name": "find-opschema-matched-symbolic-function",
+                 "short_description": {
+                     "text": "Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."
+                 },
+                 "full_description": {
+                     "text": "Find the OnnxFunction that matches the input dtypes by comparing them with their opschemas. A warning will be issued if the matched OnnxFunction is not an exact match.",
+                     "markdown": "When an ATen/Custom operator is registered and needs to be dispatched to an OnnxFunction, the input/attribute\ndtypes of the ATen/Custom operator are compared with the input/attribute dtypes of the OnnxFunction opschemas\nto find a match. However, if a perfect/exact match is not found, the dispatcher will attempt to find\nthe nearest match with the highest number of input/attribute dtypes matching the OnnxFunction opschemas, while\nissuing a warning.\n\nThere are two types of level that can be triggered in this rule:\n\n1. NOTE: A perfect match is found, and no warning is issued.\n2. WARNING: The matched OnnxFunction is not a perfect/exact match.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning,\n as the definition of OnnxFunction schema is usually more stringent.\n2. If there are errors or mismatches in the results, it is recommended to:\n (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n (b) Report the issue to the PyTorch-ONNX team.\n (c) Create/register a custom symbolic function to replace the default one.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."""
+
+     fx_node_insert_type_promotion: _FxNodeInsertTypePromotion = dataclasses.field(
+         default=_FxNodeInsertTypePromotion.from_sarif(
+             **{
+                 "id": "FXE0015",
+                 "name": "fx-node-insert-type-promotion",
+                 "short_description": {
+                     "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed."
+                 },
+                 "full_description": {
+                     "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed.",
+                     "markdown": "This diagnostic monitors the node-level type promotion insertion process. In PyTorch, there is an automatic process called implicit type promotion,\nwhere the input types of an operator are promoted to a common type. The determination of the common type is based on the type promotion rule specific to each operator.\nTo learn more about PyTorch's type promotion rules, refer to the [elementwise_dtypes doc](https://github.com/pytorch/pytorch/blob/f044613f78df713fb57f70c608483c9f10ad332e/torch/_prims_common/__init__.py#L1252-L1335)\nand [torch._refs ops](https://github.com/pytorch/pytorch/blob/a475ea4542dfe961c9d097e33ab5041f61c8c17f/torch/_refs/__init__.py#L484).\n\nHowever, implicit type promotion is not supported in ONNX. Therefore, to replicate the PyTorch behavior, we need to explicitly insert cast nodes.\nThis diagnostic tracks the process of node-level type promotion insertion.\n\nThe type promotion rules used by this process can be found in `torch/onnx/_internal/fx/passes/type_promotion.py.`\nTo update or add new type promotion rules, please refer to the [Note: Update type promotion rule] section.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Performing explicit type promotion for node {target}. "
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Determine if type promotion is required for the FX node. Insert cast nodes if needed."""
+
+     find_operator_overloads_in_onnx_registry: _FindOperatorOverloadsInOnnxRegistry = dataclasses.field(
+         default=_FindOperatorOverloadsInOnnxRegistry.from_sarif(
+             **{
+                 "id": "FXE0016",
+                 "name": "find-operator-overloads-in-onnx-registry",
+                 "short_description": {
+                     "text": "Find the list of OnnxFunction of the PyTorch operator in onnx registry."
+                 },
+                 "full_description": {
+                     "text": "This rule involves finding the list of OnnxFunction for the PyTorch operator overload in the ONNX registry. If the operator overload is not supported but its default overload is, a warning will be issued. If both the operator overload and its default overload are not supported, an error will be issued.",
+                     "markdown": "The operator overload name serves the purpose of verifying whether a PyTorch operator is registered in the ONNX registry.\nIf it's not found, the dispatcher takes a fallback approach and tries to locate the default overload of the PyTorch\noperator in the registry. If even the default overload is absent, it signifies that the operator is officially unsupported.\n\nThere are three types of level that can be triggered in this rule:\n\n1. NOTE: The op overload is supported.\n2. WARNING: The op overload is not supported, but it's default overload is supported.\n3. ERROR: The op overload is not supported, and it's default overload is also not supported.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning.\n2. If there are errors or mismatches in the results, it is recommended to:\n (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n (b) Report the unsupported overload to the PyTorch-ONNX team.\n (c) Create/register a custom symbolic function to replace the default one.\n\nHere are some suggestions based on the ERROR situation:\n\n1. Report the unsupported operator to the PyTorch-ONNX team.\n2. Create/register a custom symbolic function to replace the default one.\n",
+                 },
+                 "message_strings": {
+                     "default": {
+                         "text": "Checking if the FX node: {node} is supported in onnx registry."
+                     }
+                 },
+                 "help_uri": None,
+                 "properties": {"deprecated": False, "tags": []},
+             }
+         ),
+         init=False,
+     )
+     """Find the list of OnnxFunction of the PyTorch operator in onnx registry."""
+
+
+ rules = _POERules()
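
Since the file is generated, each per-rule class only adds a typed signature over `infra.Rule.format`; a small sketch (not part of the diff) using the `fx-pass` rule defined above:

```python
from torch.onnx._internal.diagnostics import _rules, infra

# `format` returns a (Rule, Level, message) triple built from the rule's template.
rule, level, message = _rules.rules.fx_pass.format(
    infra.Level.NOTE, pass_name="Functionalize"
)
assert message == "Running Functionalize pass."
```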
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__init__.py ADDED
@@ -0,0 +1,34 @@
+ from ._infra import (
+     DiagnosticOptions,
+     Graph,
+     Invocation,
+     Level,
+     levels,
+     Location,
+     Rule,
+     RuleCollection,
+     Stack,
+     StackFrame,
+     Tag,
+     ThreadFlowLocation,
+ )
+ from .context import Diagnostic, DiagnosticContext, RuntimeErrorWithDiagnostic
+
+
+ __all__ = [
+     "Diagnostic",
+     "DiagnosticContext",
+     "DiagnosticOptions",
+     "Graph",
+     "Invocation",
+     "Level",
+     "levels",
+     "Location",
+     "Rule",
+     "RuleCollection",
+     "RuntimeErrorWithDiagnostic",
+     "Stack",
+     "StackFrame",
+     "Tag",
+     "ThreadFlowLocation",
+ ]
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/_infra.py ADDED
@@ -0,0 +1,285 @@
+ # mypy: allow-untyped-defs
+ """This file defines an additional layer of abstraction on top of the SARIF OM."""
+
+ from __future__ import annotations
+
+ import dataclasses
+ import enum
+ import logging
+ from typing import Mapping, Sequence
+
+ from torch.onnx._internal.diagnostics.infra import formatter, sarif
+
+
+ class Level(enum.IntEnum):
+     """The level of a diagnostic.
+
+     This class is used to represent the level of a diagnostic. The levels are defined
+     by the SARIF specification, and are not modifiable. For alternative categories,
+     please use infra.Tag instead. When selecting a level, please consider the following
+     guidelines:
+
+     - NONE: Informational result that does not indicate the presence of a problem.
+     - NOTE: An opportunity for improvement was found.
+     - WARNING: A potential problem was found.
+     - ERROR: A serious problem was found.
+
+     This level is a subclass of enum.IntEnum, and can be used as an integer. Its integer
+     value maps to the logging levels in Python's logging module. The mapping is as
+     follows:
+
+     Level.NONE = logging.DEBUG = 10
+     Level.NOTE = logging.INFO = 20
+     Level.WARNING = logging.WARNING = 30
+     Level.ERROR = logging.ERROR = 40
+     """
+
+     NONE = 10
+     NOTE = 20
+     WARNING = 30
+     ERROR = 40
+
+
+ levels = Level
+
+
+ class Tag(enum.Enum):
+     """The tag of a diagnostic. This class can be inherited to define custom tags."""
+
+
+ class PatchedPropertyBag(sarif.PropertyBag):
+     """Key/value pairs that provide additional information about the object.
+
+     The definition of PropertyBag via SARIF spec is "A property bag is an object (section 3.6)
+     containing an unordered set of properties with arbitrary names." However it is not
+     reflected in the json file, and therefore not captured by the python representation.
+     This patch adds additional **kwargs to the `__init__` method to allow recording
+     arbitrary key/value pairs.
+     """
+
+     def __init__(self, tags: list[str] | None = None, **kwargs):
+         super().__init__(tags=tags)
+         self.__dict__.update(kwargs)
+
+
+ @dataclasses.dataclass(frozen=True)
+ class Rule:
+     id: str
+     name: str
+     message_default_template: str
+     short_description: str | None = None
+     full_description: str | None = None
+     full_description_markdown: str | None = None
+     help_uri: str | None = None
+
+     @classmethod
+     def from_sarif(cls, **kwargs):
+         """Returns a rule from the SARIF reporting descriptor."""
+         short_description = kwargs.get("short_description", {}).get("text")
+         full_description = kwargs.get("full_description", {}).get("text")
+         full_description_markdown = kwargs.get("full_description", {}).get("markdown")
+         help_uri = kwargs.get("help_uri")
+
+         rule = cls(
+             id=kwargs["id"],
+             name=kwargs["name"],
+             message_default_template=kwargs["message_strings"]["default"]["text"],
+             short_description=short_description,
+             full_description=full_description,
+             full_description_markdown=full_description_markdown,
+             help_uri=help_uri,
+         )
+         return rule
+
+     def sarif(self) -> sarif.ReportingDescriptor:
+         """Returns a SARIF reporting descriptor of this Rule."""
+         short_description = (
+             sarif.MultiformatMessageString(text=self.short_description)
+             if self.short_description is not None
+             else None
+         )
+         full_description = (
+             sarif.MultiformatMessageString(
+                 text=self.full_description, markdown=self.full_description_markdown
+             )
+             if self.full_description is not None
+             else None
+         )
+         return sarif.ReportingDescriptor(
+             id=self.id,
+             name=self.name,
+             short_description=short_description,
+             full_description=full_description,
+             help_uri=self.help_uri,
+         )
+
+     def format(self, level: Level, *args, **kwargs) -> tuple[Rule, Level, str]:
+         """Returns a tuple of (rule, level, message) for a diagnostic.
+
+         This method is used to format the message of a diagnostic. The message is
+         formatted using the default template of this rule, and the arguments passed in
+         as `*args` and `**kwargs`. The level is used to override the default level of
+         this rule.
+         """
+         return (self, level, self.format_message(*args, **kwargs))
+
+     def format_message(self, *args, **kwargs) -> str:
+         """Returns the formatted default message of this Rule.
+
+         This method should be overridden (with code generation) by subclasses to reflect
+         the exact arguments needed by the message template. This is a helper method to
+         create the default message for a diagnostic.
+         """
+         return self.message_default_template.format(*args, **kwargs)
+
+
+ @dataclasses.dataclass
+ class Location:
+     uri: str | None = None
+     line: int | None = None
+     message: str | None = None
+     start_column: int | None = None
+     end_column: int | None = None
+     snippet: str | None = None
+     function: str | None = None
+
+     def sarif(self) -> sarif.Location:
+         """Returns the SARIF representation of this location."""
+         return sarif.Location(
+             physical_location=sarif.PhysicalLocation(
+                 artifact_location=sarif.ArtifactLocation(uri=self.uri),
+                 region=sarif.Region(
+                     start_line=self.line,
+                     start_column=self.start_column,
+                     end_column=self.end_column,
+                     snippet=sarif.ArtifactContent(text=self.snippet),
+                 ),
+             ),
+             message=sarif.Message(text=self.message)
+             if self.message is not None
+             else None,
+         )
+
+
+ @dataclasses.dataclass
+ class StackFrame:
+     location: Location
+
+     def sarif(self) -> sarif.StackFrame:
+         """Returns the SARIF representation of this stack frame."""
+         return sarif.StackFrame(location=self.location.sarif())
+
+
+ @dataclasses.dataclass
+ class Stack:
+     """Records a stack trace. The frames are in order from newest to oldest stack frame."""
+
+     frames: list[StackFrame] = dataclasses.field(default_factory=list)
+     message: str | None = None
+
+     def sarif(self) -> sarif.Stack:
+         """Returns the SARIF representation of this stack."""
+         return sarif.Stack(
+             frames=[frame.sarif() for frame in self.frames],
+             message=sarif.Message(text=self.message)
+             if self.message is not None
+             else None,
+         )
+
+
+ @dataclasses.dataclass
+ class ThreadFlowLocation:
+     """Records code location and the initial state."""
+
+     location: Location
+     state: Mapping[str, str]
+     index: int
+     stack: Stack | None = None
+
+     def sarif(self) -> sarif.ThreadFlowLocation:
+         """Returns the SARIF representation of this thread flow location."""
+         return sarif.ThreadFlowLocation(
+             location=self.location.sarif(),
+             state=self.state,
+             stack=self.stack.sarif() if self.stack is not None else None,
+         )
+
+
+ @dataclasses.dataclass
+ class Graph:
+     """A graph of diagnostics.
+
+     This class stores the string representation of a model graph.
+     The `nodes` and `edges` fields are unused in the current implementation.
+     """
+
+     graph: str
+     name: str
+     description: str | None = None
+
+     def sarif(self) -> sarif.Graph:
+         """Returns the SARIF representation of this graph."""
+         return sarif.Graph(
+             description=sarif.Message(text=self.graph),
+             properties=PatchedPropertyBag(name=self.name, description=self.description),
+         )
+
+
+ @dataclasses.dataclass
+ class RuleCollection:
+     _rule_id_name_set: frozenset[tuple[str, str]] = dataclasses.field(init=False)
+
+     def __post_init__(self) -> None:
+         self._rule_id_name_set = frozenset(
+             {
+                 (field.default.id, field.default.name)
+                 for field in dataclasses.fields(self)
+                 if isinstance(field.default, Rule)
+             }
+         )
+
+     def __contains__(self, rule: Rule) -> bool:
+         """Checks if the rule is in the collection."""
+         return (rule.id, rule.name) in self._rule_id_name_set
+
+     @classmethod
+     def custom_collection_from_list(
+         cls, new_collection_class_name: str, rules: Sequence[Rule]
+     ) -> RuleCollection:
+         """Creates a custom class inherited from RuleCollection with the list of rules."""
+         return dataclasses.make_dataclass(
+             new_collection_class_name,
+             [
+                 (
+                     formatter.kebab_case_to_snake_case(rule.name),
+                     type(rule),
+                     dataclasses.field(default=rule),
+                 )
+                 for rule in rules
+             ],
+             bases=(cls,),
+         )()
+
+
+ class Invocation:
+     # TODO: Implement this.
+     # Tracks top level call arguments and diagnostic options.
+     def __init__(self) -> None:
+         raise NotImplementedError
+
+
+ @dataclasses.dataclass
+ class DiagnosticOptions:
+     """Options for diagnostic context.
+
+     Attributes:
+         verbosity_level: Set the amount of information logged for each diagnostics,
+             equivalent to the 'level' in Python logging module.
+         warnings_as_errors: When True, warning diagnostics are treated as error diagnostics.
+     """
+
+     verbosity_level: int = dataclasses.field(default=logging.INFO)
+     """Set the amount of information logged for each diagnostics, equivalent to the 'level' in Python logging module."""
+
+     warnings_as_errors: bool = dataclasses.field(default=False)
+     """If True, warning diagnostics are treated as error diagnostics."""
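
A short sketch (not part of the diff) composing the abstractions above; the rule id, message template, and collection name are made up for illustration.

```python
from torch.onnx._internal.diagnostics import infra

rule = infra.Rule(
    id="EX0001",
    name="example-rule",
    message_default_template="Found {count} unsupported nodes.",
)
assert rule.format_message(count=3) == "Found 3 unsupported nodes."

# custom_collection_from_list builds a RuleCollection subclass on the fly;
# membership is keyed on (id, name) pairs via __contains__.
collection = infra.RuleCollection.custom_collection_from_list("ExampleRules", [rule])
assert rule in collection
```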
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/context.py ADDED
@@ -0,0 +1,404 @@
+ # mypy: allow-untyped-defs
+ """A diagnostic context based on SARIF."""
+
+ from __future__ import annotations
+
+ import contextlib
+ import dataclasses
+ import gzip
+ import logging
+ from typing import Callable, Generator, Generic, Literal, Mapping, TypeVar
+ from typing_extensions import Self
+
+ from torch.onnx._internal.diagnostics import infra
+ from torch.onnx._internal.diagnostics.infra import formatter, sarif, utils
+ from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version
+
+
+ # This is a workaround for mypy not supporting Self from typing_extensions.
+ _Diagnostic = TypeVar("_Diagnostic", bound="Diagnostic")
+ diagnostic_logger: logging.Logger = logging.getLogger(__name__)
+
+
+ @dataclasses.dataclass
+ class Diagnostic:
+     rule: infra.Rule
+     level: infra.Level
+     message: str | None = None
+     locations: list[infra.Location] = dataclasses.field(default_factory=list)
+     stacks: list[infra.Stack] = dataclasses.field(default_factory=list)
+     graphs: list[infra.Graph] = dataclasses.field(default_factory=list)
+     thread_flow_locations: list[infra.ThreadFlowLocation] = dataclasses.field(
+         default_factory=list
+     )
+     additional_messages: list[str] = dataclasses.field(default_factory=list)
+     tags: list[infra.Tag] = dataclasses.field(default_factory=list)
+     source_exception: Exception | None = None
+     """The exception that caused this diagnostic to be created."""
+     logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger)
+     """The logger for this diagnostic. Defaults to 'diagnostic_logger', which has the same
+     log level setting as `DiagnosticOptions.verbosity_level`."""
+     _current_log_section_depth: int = 0
+
+     def __post_init__(self) -> None:
+         pass
+
+     def sarif(self) -> sarif.Result:
+         """Returns the SARIF Result representation of this diagnostic."""
+         message = self.message or self.rule.message_default_template
+         if self.additional_messages:
+             additional_message = "\n".join(self.additional_messages)
+             message_markdown = (
+                 f"{message}\n\n## Additional Message:\n\n{additional_message}"
+             )
+         else:
+             message_markdown = message
+
+         kind: Literal["informational", "fail"] = (
+             "informational" if self.level == infra.Level.NONE else "fail"
+         )
+
+         sarif_result = sarif.Result(
+             message=sarif.Message(text=message, markdown=message_markdown),
+             level=self.level.name.lower(),  # type: ignore[arg-type]
+             rule_id=self.rule.id,
+             kind=kind,
+         )
+         sarif_result.locations = [location.sarif() for location in self.locations]
+         sarif_result.stacks = [stack.sarif() for stack in self.stacks]
+         sarif_result.graphs = [graph.sarif() for graph in self.graphs]
+         sarif_result.code_flows = [
+             sarif.CodeFlow(
+                 thread_flows=[
+                     sarif.ThreadFlow(
+                         locations=[loc.sarif() for loc in self.thread_flow_locations]
+                     )
+                 ]
+             )
+         ]
+         sarif_result.properties = sarif.PropertyBag(
+             tags=[tag.value for tag in self.tags]
+         )
+         return sarif_result
+
+     def with_location(self: Self, location: infra.Location) -> Self:
+         """Adds a location to the diagnostic."""
+         self.locations.append(location)
+         return self
+
+     def with_thread_flow_location(
+         self: Self, location: infra.ThreadFlowLocation
+     ) -> Self:
+         """Adds a thread flow location to the diagnostic."""
+         self.thread_flow_locations.append(location)
+         return self
+
+     def with_stack(self: Self, stack: infra.Stack) -> Self:
+         """Adds a stack to the diagnostic."""
+         self.stacks.append(stack)
+         return self
+
+     def with_graph(self: Self, graph: infra.Graph) -> Self:
+         """Adds a graph to the diagnostic."""
+         self.graphs.append(graph)
+         return self
+
+     @contextlib.contextmanager
+     def log_section(
+         self, level: int, message: str, *args, **kwargs
+     ) -> Generator[None, None, None]:
+         """
+         Context manager for a section of log messages, denoted by a title message and increased indentation.
+
+         Same API as `logging.Logger.log`.
+
+         This context manager logs the given title at the specified log level, increases the current
+         section depth for subsequent log messages, and ensures that the section depth is decreased
+         again when exiting the context.
+
+         Args:
+             level: The log level.
+             message: The title message to log.
+             *args: The arguments to the message. Use `LazyString` to defer the
+                 expensive evaluation of the arguments until the message is actually logged.
+             **kwargs: The keyword arguments for `logging.Logger.log`.
+
+         Yields:
+             None: This context manager does not yield any value.
+
+         Example:
+             >>> with DiagnosticContext("DummyContext", "1.0"):
+             ...     rule = infra.Rule("RuleID", "DummyRule", "Rule message")
+             ...     diagnostic = Diagnostic(rule, infra.Level.WARNING)
+             ...     with diagnostic.log_section(logging.INFO, "My Section"):
+             ...         diagnostic.log(logging.INFO, "My Message")
+             ...         with diagnostic.log_section(logging.INFO, "My Subsection"):
+             ...             diagnostic.log(logging.INFO, "My Submessage")
+             ...     diagnostic.additional_messages
+             ['## My Section', 'My Message', '### My Subsection', 'My Submessage']
+         """
+         if self.logger.isEnabledFor(level):
+             indented_format_message = (
+                 f"##{'#' * self._current_log_section_depth } {message}"
+             )
+             self.log(
+                 level,
+                 indented_format_message,
+                 *args,
+                 **kwargs,
+             )
+         self._current_log_section_depth += 1
+         try:
+             yield
+         finally:
+             self._current_log_section_depth -= 1
+
+     def log(self, level: int, message: str, *args, **kwargs) -> None:
+         """Logs a message within the diagnostic. Same API as `logging.Logger.log`.
+
+         If the logger is not enabled for the given level, the message will not be logged.
+         Otherwise, the message is logged and also added to the diagnostic's additional_messages.
+
+         The default setting for `DiagnosticOptions.verbosity_level` is `logging.INFO`. Based on this default,
+         the log level recommendations are as follows. If you've set a different default verbosity level in your
+         application, please adjust accordingly:
+
+         - logging.ERROR: Log any events leading to application failure.
+         - logging.WARNING: Log events that might result in application issues or failures, although not guaranteed.
+         - logging.INFO: Log general useful information, ensuring minimal performance overhead.
+         - logging.DEBUG: Log detailed debug information, which might affect performance when logged.
+
+         Args:
+             level: The log level.
+             message: The message to log.
+             *args: The arguments to the message. Use `LazyString` to defer the
+                 expensive evaluation of the arguments until the message is actually logged.
+             **kwargs: The keyword arguments for `logging.Logger.log`.
+         """
+         if self.logger.isEnabledFor(level):
+             formatted_message = message % args
+             self.logger.log(level, formatted_message, **kwargs)
+             self.additional_messages.append(formatted_message)
+
+     def debug(self, message: str, *args, **kwargs) -> None:
+         """Logs a debug message within the diagnostic. Same API as logging.Logger.debug.
+
+         See `log` for more details.
+         """
+         self.log(logging.DEBUG, message, *args, **kwargs)
+
+     def info(self, message: str, *args, **kwargs) -> None:
+         """Logs an info message within the diagnostic. Same API as logging.Logger.info.
+
+         See `log` for more details.
+         """
+         self.log(logging.INFO, message, *args, **kwargs)
+
+     def warning(self, message: str, *args, **kwargs) -> None:
+         """Logs a warning message within the diagnostic. Same API as logging.Logger.warning.
+
+         See `log` for more details.
+         """
+         self.log(logging.WARNING, message, *args, **kwargs)
+
+     def error(self, message: str, *args, **kwargs) -> None:
+         """Logs an error message within the diagnostic. Same API as logging.Logger.error.
+
+         See `log` for more details.
+         """
+         self.log(logging.ERROR, message, *args, **kwargs)
+
+     def log_source_exception(self, level: int, exception: Exception) -> None:
+         """Logs a source exception within the diagnostic.
+
+         Invokes `log_section` and `log` to log the exception in markdown section format.
+         """
+         self.source_exception = exception
+         with self.log_section(level, "Exception log"):
+             self.log(level, "%s", formatter.lazy_format_exception(exception))
+
+     def record_python_call_stack(self, frames_to_skip: int) -> infra.Stack:
+         """Records the current Python call stack."""
+         frames_to_skip += 1  # Skip this function.
+         stack = utils.python_call_stack(frames_to_skip=frames_to_skip)
+         self.with_stack(stack)
+         if len(stack.frames) > 0:
+             self.with_location(stack.frames[0].location)
+         return stack
+
+     def record_python_call(
+         self,
+         fn: Callable,
+         state: Mapping[str, str],
+         message: str | None = None,
+         frames_to_skip: int = 0,
+     ) -> infra.ThreadFlowLocation:
+         """Records a python call as one thread flow step."""
+         frames_to_skip += 1  # Skip this function.
+         stack = utils.python_call_stack(frames_to_skip=frames_to_skip, frames_to_log=5)
+         location = utils.function_location(fn)
+         location.message = message
+         # Add function location to the top of the stack.
+         stack.frames.insert(0, infra.StackFrame(location=location))
+         thread_flow_location = infra.ThreadFlowLocation(
+             location=location,
+             state=state,
+             index=len(self.thread_flow_locations),
+             stack=stack,
+         )
+         self.with_thread_flow_location(thread_flow_location)
+         return thread_flow_location
+
+
+ class RuntimeErrorWithDiagnostic(RuntimeError):
+     """Runtime error with enclosed diagnostic information."""
+
+     def __init__(self, diagnostic: Diagnostic):
+         super().__init__(diagnostic.message)
+         self.diagnostic = diagnostic
+
+
+ @dataclasses.dataclass
+ class DiagnosticContext(Generic[_Diagnostic]):
+     name: str
+     version: str
+     options: infra.DiagnosticOptions = dataclasses.field(
+         default_factory=infra.DiagnosticOptions
+     )
+     diagnostics: list[_Diagnostic] = dataclasses.field(init=False, default_factory=list)
+     # TODO(bowbao): Implement this.
+     # _invocation: infra.Invocation = dataclasses.field(init=False)
+     _inflight_diagnostics: list[_Diagnostic] = dataclasses.field(
+         init=False, default_factory=list
+     )
+     _previous_log_level: int = dataclasses.field(init=False, default=logging.WARNING)
+     logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger)
+     _bound_diagnostic_type: type = dataclasses.field(init=False, default=Diagnostic)
+
+     def __enter__(self):
+         self._previous_log_level = self.logger.level
+         self.logger.setLevel(self.options.verbosity_level)
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.logger.setLevel(self._previous_log_level)
+         return None
+
+     def sarif(self) -> sarif.Run:
+         """Returns the SARIF Run object."""
+         unique_rules = {diagnostic.rule for diagnostic in self.diagnostics}
+         return sarif.Run(
+             sarif.Tool(
+                 driver=sarif.ToolComponent(
+                     name=self.name,
+                     version=self.version,
+                     rules=[rule.sarif() for rule in unique_rules],
+                 )
+             ),
+             results=[diagnostic.sarif() for diagnostic in self.diagnostics],
+         )
+
+     def sarif_log(self) -> sarif.SarifLog:  # type: ignore[name-defined]
+         """Returns the SARIF Log object."""
+         return sarif.SarifLog(
+             version=sarif_version.SARIF_VERSION,
+             schema_uri=sarif_version.SARIF_SCHEMA_LINK,
+             runs=[self.sarif()],
+         )
+
+     def to_json(self) -> str:
+         return formatter.sarif_to_json(self.sarif_log())
+
+     def dump(self, file_path: str, compress: bool = False) -> None:
+         """Dumps the SARIF log to a file."""
+         if compress:
+             with gzip.open(file_path, "wt") as f:
+                 f.write(self.to_json())
+         else:
+             with open(file_path, "w") as f:
+                 f.write(self.to_json())
+
+     def log(self, diagnostic: _Diagnostic) -> None:
+         """Logs a diagnostic.
+
+         This method should be used only after all the necessary information for the diagnostic
+         has been collected.
+
+         Args:
+             diagnostic: The diagnostic to add.
+         """
+         if not isinstance(diagnostic, self._bound_diagnostic_type):
+             raise TypeError(
+                 f"Expected diagnostic of type {self._bound_diagnostic_type}, got {type(diagnostic)}"
+             )
+         if self.options.warnings_as_errors and diagnostic.level == infra.Level.WARNING:  # type: ignore[attr-defined]
+             diagnostic.level = infra.Level.ERROR  # type: ignore[attr-defined]
+         self.diagnostics.append(diagnostic)  # type: ignore[arg-type]
+
+     def log_and_raise_if_error(self, diagnostic: _Diagnostic) -> None:
+         """Logs a diagnostic and raises an exception if it is an error.
+
+         Use this method for logging non-inflight diagnostics whose level is not known in
+         advance or is lower than ERROR. If the call site is always expected to raise, use
+         `log` plus an explicit `raise` instead; otherwise there is no way to convey to
+         Python intellisense and type-checking tools that it always raises.
+
+         This method should be used only after all the necessary information for the diagnostic
+         has been collected.
+
+         Args:
+             diagnostic: The diagnostic to add.
+         """
+         self.log(diagnostic)
+         if diagnostic.level == infra.Level.ERROR:
+             if diagnostic.source_exception is not None:
+                 raise diagnostic.source_exception
+             raise RuntimeErrorWithDiagnostic(diagnostic)
+
+     @contextlib.contextmanager
+     def add_inflight_diagnostic(
+         self, diagnostic: _Diagnostic
+     ) -> Generator[_Diagnostic, None, None]:
+         """Adds a diagnostic to the context.
+
+         Use this method to add diagnostics that are not created by the context.
+         Args:
+             diagnostic: The diagnostic to add.
+         """
+         self._inflight_diagnostics.append(diagnostic)
+         try:
+             yield diagnostic
+         finally:
+             self._inflight_diagnostics.pop()
+
+     def push_inflight_diagnostic(self, diagnostic: _Diagnostic) -> None:
+         """Pushes a diagnostic to the inflight diagnostics stack.
+
+         Args:
+             diagnostic: The diagnostic to push.
+
+         Raises:
+             ValueError: If the rule is not supported by the tool.
+         """
+         self._inflight_diagnostics.append(diagnostic)
+
+     def pop_inflight_diagnostic(self) -> _Diagnostic:
+         """Pops the last diagnostic from the inflight diagnostics stack.
+
+         Returns:
+             The popped diagnostic.
+         """
+         return self._inflight_diagnostics.pop()
+
+     def inflight_diagnostic(self, rule: infra.Rule | None = None) -> _Diagnostic:
+         if rule is None:
+             # TODO(bowbao): Create builtin-rules and create diagnostic using that.
+             if len(self._inflight_diagnostics) <= 0:
+                 raise AssertionError("No inflight diagnostics")
+
+             return self._inflight_diagnostics[-1]
+         else:
+             for diagnostic in reversed(self._inflight_diagnostics):
+                 if diagnostic.rule == rule:
+                     return diagnostic
+             raise AssertionError(f"No inflight diagnostic for rule {rule.name}")
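
Taken together, `Diagnostic` and `DiagnosticContext` above form the write path for SARIF output: diagnostics accumulate messages and locations, and the context collects diagnostics and serializes them via `sarif_log()`/`to_json()`. A minimal usage sketch (the rule id, name, and output filename here are hypothetical, for illustration only):

    from torch.onnx._internal.diagnostics import infra
    from torch.onnx._internal.diagnostics.infra.context import (
        Diagnostic,
        DiagnosticContext,
    )

    # Hypothetical rule; real rules are generated elsewhere in this package.
    rule = infra.Rule("EX01", "example-rule", "Example message")

    with DiagnosticContext("my-tool", "0.1.0") as context:
        diagnostic = Diagnostic(rule, infra.Level.WARNING)
        diagnostic.info("processed %d items", 3)  # also stored in additional_messages
        context.log(diagnostic)  # warnings_as_errors may upgrade WARNING to ERROR

    context.dump("diagnostics.sarif")  # writes the SARIF JSON log to disk
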
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/decorator.py ADDED
@@ -0,0 +1,153 @@
+ # mypy: allow-untyped-defs
+ from __future__ import annotations
+
+ import functools
+ import logging
+ import traceback
+ from typing import Any, Callable, Dict, Tuple
+
+ from torch.onnx._internal.diagnostics import infra
+ from torch.onnx._internal.diagnostics.infra import formatter, utils
+
+
+ MessageFormatterType = Callable[..., str]
+
+
+ def format_message_in_text(fn: Callable, *args: Any, **kwargs: Any) -> str:
+     return f"{formatter.display_name(fn)}. "
+
+
+ def format_exception_in_markdown(exception: Exception) -> str:
+     msg_list = ["### Exception log", "```"]
+     msg_list.extend(
+         traceback.format_exception(type(exception), exception, exception.__traceback__)
+     )
+     msg_list.append("```")
+     return "\n".join(msg_list)
+
+
+ def format_function_signature_in_markdown(
+     fn: Callable,
+     args: tuple[Any, ...],
+     kwargs: dict[str, Any],
+     format_argument: Callable[[Any], str] = formatter.format_argument,
+ ) -> str:
+     msg_list = [f"### Function Signature {formatter.display_name(fn)}"]
+
+     state = utils.function_state(fn, args, kwargs)
+
+     for k, v in state.items():
+         msg_list.append(f"- {k}: {format_argument(v)}")
+
+     return "\n".join(msg_list)
+
+
+ def format_return_values_in_markdown(
+     return_values: Any,
+     format_argument: Callable[[Any], str] = formatter.format_argument,
+ ) -> str:
+     return f"{format_argument(return_values)}"
+
+
+ ModifierCallableType = Callable[
+     [infra.Diagnostic, Callable, Tuple[Any, ...], Dict[str, Any], Any], None
+ ]
+
+
+ def diagnose_call(
+     rule: infra.Rule,
+     *,
+     level: infra.Level = infra.Level.NONE,
+     diagnostic_type: type[infra.Diagnostic] = infra.Diagnostic,
+     format_argument: Callable[[Any], str] = formatter.format_argument,
+     diagnostic_message_formatter: MessageFormatterType = format_message_in_text,
+ ) -> Callable:
+     def decorator(fn):
+         @functools.wraps(fn)
+         def wrapper(*args, **kwargs):
+             common_error_message = "diagnose_call can only be applied to callables"
+             if not callable(fn):
+                 raise AssertionError(
+                     f"{common_error_message}. Got {type(fn)} instead of callable."
+                 )
+             arg0 = args[0] if len(args) > 0 else None
+             if isinstance(ctx := arg0, infra.DiagnosticContext):
+                 pass
+             elif isinstance(
+                 ctx := getattr(arg0, "diagnostic_context", None),
+                 infra.DiagnosticContext,
+             ):
+                 pass
+             else:
+                 # NOTE: At decoration time, we can't tell whether a callable is a
+                 # function or a method. Technically both are regarded as functions
+                 # at that point.
+                 raise AssertionError(
+                     f"{common_error_message}. For {fn}, "
+                     f"If it is a function, a DiagnosticContext instance must be present as "
+                     f"the first argument. "
+                     f"If it is a method, a DiagnosticContext instance must be present as "
+                     f"the attribute 'diagnostic_context' of the 'self' argument."
+                 )
+
+             diag = diagnostic_type(
+                 rule,
+                 level,
+                 diagnostic_message_formatter(fn, *args, **kwargs),
+             )
+
+             # pop the decorator frame
+             # TODO(bowbao): by default diagnostic doesn't have stack.
+             # So need to check before doing this. Make the code cleaner.
+             # Option: do not capture stack by default in diagnostic initialization.
+             stack: infra.Stack | None = None
+             if len(diag.stacks) > 0:
+                 stack = diag.stacks[0]
+                 stack.frames.pop(0)
+
+             # set function location
+             fn_location = utils.function_location(fn)
+             diag.locations.insert(0, fn_location)
+             # Add function location to the top of the stack.
+             if stack is not None:
+                 stack.frames.insert(0, infra.StackFrame(location=fn_location))
+
+             with diag.log_section(logging.INFO, "Function Signature"):
+                 diag.log(
+                     logging.INFO,
+                     "%s",
+                     formatter.LazyString(
+                         format_function_signature_in_markdown,
+                         fn,
+                         args,
+                         kwargs,
+                         format_argument,
+                     ),
+                 )
+
+             return_values: Any = None
+             with ctx.add_inflight_diagnostic(diag) as diag:
+                 try:
+                     return_values = fn(*args, **kwargs)
+                     with diag.log_section(logging.INFO, "Return values"):
+                         diag.log(
+                             logging.INFO,
+                             "%s",
+                             formatter.LazyString(
+                                 format_return_values_in_markdown,
+                                 return_values,
+                                 format_argument,
+                             ),
+                         )
+                     return return_values
+                 except Exception as e:
+                     diag.log_source_exception(logging.ERROR, e)
+                     diag.level = infra.Level.ERROR
+                 finally:
+                     ctx.log_and_raise_if_error(diag)
+
+         return wrapper
+
+     return decorator
+
+
+ # TODO(bowbao): decorator to report only when failed.
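
The `diagnose_call` decorator above turns each invocation of the wrapped function into one diagnostic: it finds a `DiagnosticContext` either as the first positional argument or as the `diagnostic_context` attribute of `self`, logs the signature and return values as markdown sections, and records a raised exception as an ERROR-level diagnostic before re-raising it via `log_and_raise_if_error`. A sketch of decorating a free function (the rule and function below are hypothetical):

    from torch.onnx._internal.diagnostics import infra
    from torch.onnx._internal.diagnostics.infra.decorator import diagnose_call

    # Hypothetical rule for illustration only.
    _RULE = infra.Rule("EX02", "traced-call", "Tracing a call")

    @diagnose_call(_RULE)
    def double(context: infra.DiagnosticContext, x: float) -> float:
        # `context` satisfies the decorator's first-argument requirement.
        return 2 * x

Calling `double(context, 3.0)` then records the function signature and the return value `6.0` in the diagnostic attached to `context`.
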
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/formatter.py ADDED
@@ -0,0 +1,106 @@
+ from __future__ import annotations
+
+ import dataclasses
+ import json
+ import re
+ import traceback
+ from typing import Any, Callable, Union
+
+ from torch._logging import LazyString
+ from torch.onnx._internal.diagnostics.infra import sarif
+
+
+ # A list of types in the SARIF module to support pretty printing.
+ # This is solely for type annotation for the functions below.
+ _SarifClass = Union[
+     sarif.SarifLog,
+     sarif.Run,
+     sarif.ReportingDescriptor,
+     sarif.Result,
+ ]
+
+
+ def lazy_format_exception(exception: Exception) -> LazyString:
+     return LazyString(
+         lambda: "\n".join(
+             (
+                 "```",
+                 *traceback.format_exception(
+                     type(exception), exception, exception.__traceback__
+                 ),
+                 "```",
+             )
+         ),
+     )
+
+
+ def snake_case_to_camel_case(s: str) -> str:
+     splits = s.split("_")
+     if len(splits) <= 1:
+         return s
+     return "".join([splits[0], *map(str.capitalize, splits[1:])])
+
+
+ def camel_case_to_snake_case(s: str) -> str:
+     return re.sub(r"([A-Z])", r"_\1", s).lower()
+
+
+ def kebab_case_to_snake_case(s: str) -> str:
+     return s.replace("-", "_")
+
+
+ def _convert_key(
+     object: dict[str, Any] | Any, convert: Callable[[str], str]
+ ) -> dict[str, Any] | Any:
+     """Convert and update keys in a dictionary with "convert".
+
+     Any value that is a dictionary will be recursively updated.
+     Any value that is a list will be recursively searched.
+
+     Args:
+         object: The object to update.
+         convert: The function to convert the keys, e.g. `kebab_case_to_snake_case`.
+
+     Returns:
+         The updated object.
+     """
+     if not isinstance(object, dict):
+         return object
+     new_dict = {}
+     for k, v in object.items():
+         new_k = convert(k)
+         if isinstance(v, dict):
+             new_v = _convert_key(v, convert)
+         elif isinstance(v, list):
+             new_v = [_convert_key(elem, convert) for elem in v]
+         else:
+             new_v = v
+         if new_v is None:
+             # Skip None values; otherwise the sarif log is unnecessarily bloated with "null"s.
+             continue
+         if new_v == -1:
+             # Workaround: -1 as a default value shouldn't be logged into sarif.
+             continue
+
+         new_dict[new_k] = new_v
+
+     return new_dict
+
+
+ def sarif_to_json(attr_cls_obj: _SarifClass, indent: str | None = " ") -> str:
+     dict = dataclasses.asdict(attr_cls_obj)
+     dict = _convert_key(dict, snake_case_to_camel_case)
+     return json.dumps(dict, indent=indent, separators=(",", ":"))
+
+
+ def format_argument(obj: Any) -> str:
+     return f"{type(obj)}"
+
+
+ def display_name(fn: Callable) -> str:
+     if hasattr(fn, "__qualname__"):
+         return fn.__qualname__
+     elif hasattr(fn, "__name__"):
+         return fn.__name__
+     else:
+         return str(fn)
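
`sarif_to_json` above is the glue between the generated dataclasses and the on-disk log: `dataclasses.asdict` flattens the object tree, then `_convert_key` renames keys to the schema's camelCase and drops `None` and `-1` placeholder defaults. A small sketch of the conversion helpers (the rule id is hypothetical; return values are shown in comments):

    from torch.onnx._internal.diagnostics.infra import formatter, sarif

    formatter.snake_case_to_camel_case("rule_id")   # "ruleId"
    formatter.camel_case_to_snake_case("ruleId")    # "rule_id"
    formatter.kebab_case_to_snake_case("rule-id")   # "rule_id"

    result = sarif.Result(message=sarif.Message(text="hello"), rule_id="EX01")
    # None-valued fields and -1 index defaults are omitted from the JSON:
    print(formatter.sarif_to_json(result))
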
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__init__.py ADDED
@@ -0,0 +1,101 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from torch.onnx._internal.diagnostics.infra.sarif._address import Address
+ from torch.onnx._internal.diagnostics.infra.sarif._artifact import Artifact
+ from torch.onnx._internal.diagnostics.infra.sarif._artifact_change import ArtifactChange
+ from torch.onnx._internal.diagnostics.infra.sarif._artifact_content import (
+     ArtifactContent,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._artifact_location import (
+     ArtifactLocation,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._attachment import Attachment
+ from torch.onnx._internal.diagnostics.infra.sarif._code_flow import CodeFlow
+ from torch.onnx._internal.diagnostics.infra.sarif._configuration_override import (
+     ConfigurationOverride,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._conversion import Conversion
+ from torch.onnx._internal.diagnostics.infra.sarif._edge import Edge
+ from torch.onnx._internal.diagnostics.infra.sarif._edge_traversal import EdgeTraversal
+ from torch.onnx._internal.diagnostics.infra.sarif._exception import Exception
+ from torch.onnx._internal.diagnostics.infra.sarif._external_properties import (
+     ExternalProperties,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._external_property_file_reference import (
+     ExternalPropertyFileReference,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._external_property_file_references import (
+     ExternalPropertyFileReferences,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._fix import Fix
+ from torch.onnx._internal.diagnostics.infra.sarif._graph import Graph
+ from torch.onnx._internal.diagnostics.infra.sarif._graph_traversal import GraphTraversal
+ from torch.onnx._internal.diagnostics.infra.sarif._invocation import Invocation
+ from torch.onnx._internal.diagnostics.infra.sarif._location import Location
+ from torch.onnx._internal.diagnostics.infra.sarif._location_relationship import (
+     LocationRelationship,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._logical_location import (
+     LogicalLocation,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._message import Message
+ from torch.onnx._internal.diagnostics.infra.sarif._multiformat_message_string import (
+     MultiformatMessageString,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._node import Node
+ from torch.onnx._internal.diagnostics.infra.sarif._notification import Notification
+ from torch.onnx._internal.diagnostics.infra.sarif._physical_location import (
+     PhysicalLocation,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._property_bag import PropertyBag
+ from torch.onnx._internal.diagnostics.infra.sarif._rectangle import Rectangle
+ from torch.onnx._internal.diagnostics.infra.sarif._region import Region
+ from torch.onnx._internal.diagnostics.infra.sarif._replacement import Replacement
+ from torch.onnx._internal.diagnostics.infra.sarif._reporting_configuration import (
+     ReportingConfiguration,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor import (
+     ReportingDescriptor,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor_reference import (
+     ReportingDescriptorReference,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor_relationship import (
+     ReportingDescriptorRelationship,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._result import Result
+ from torch.onnx._internal.diagnostics.infra.sarif._result_provenance import (
+     ResultProvenance,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._run import Run
+ from torch.onnx._internal.diagnostics.infra.sarif._run_automation_details import (
+     RunAutomationDetails,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._sarif_log import SarifLog
+ from torch.onnx._internal.diagnostics.infra.sarif._special_locations import (
+     SpecialLocations,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._stack import Stack
+ from torch.onnx._internal.diagnostics.infra.sarif._stack_frame import StackFrame
+ from torch.onnx._internal.diagnostics.infra.sarif._suppression import Suppression
+ from torch.onnx._internal.diagnostics.infra.sarif._thread_flow import ThreadFlow
+ from torch.onnx._internal.diagnostics.infra.sarif._thread_flow_location import (
+     ThreadFlowLocation,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._tool import Tool
+ from torch.onnx._internal.diagnostics.infra.sarif._tool_component import ToolComponent
+ from torch.onnx._internal.diagnostics.infra.sarif._tool_component_reference import (
+     ToolComponentReference,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._translation_metadata import (
+     TranslationMetadata,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._version_control_details import (
+     VersionControlDetails,
+ )
+ from torch.onnx._internal.diagnostics.infra.sarif._web_request import WebRequest
+ from torch.onnx._internal.diagnostics.infra.sarif._web_response import WebResponse
+
+
+ # flake8: noqa
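
The package root above re-exports the full generated SARIF 2.1.0 object model, so a log can also be assembled by hand, mirroring what `DiagnosticContext.sarif_log()` does. A sketch under the same field names used there (the tool name and version are hypothetical):

    from torch.onnx._internal.diagnostics.infra import sarif
    from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version

    log = sarif.SarifLog(
        version=sarif_version.SARIF_VERSION,
        schema_uri=sarif_version.SARIF_SCHEMA_LINK,
        runs=[
            sarif.Run(
                tool=sarif.Tool(
                    driver=sarif.ToolComponent(name="my-tool", version="0.1.0")
                ),
                results=[],  # one sarif.Result per diagnostic
            )
        ],
    )
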
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_address.py ADDED
@@ -0,0 +1,48 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
+
+
+ @dataclasses.dataclass
+ class Address(object):
+     """A physical or virtual address, or a range of addresses, in an 'addressable region' (memory or a binary file)."""
+
+     absolute_address: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "absoluteAddress"}
+     )
+     fully_qualified_name: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "fullyQualifiedName"}
+     )
+     index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "index"}
+     )
+     kind: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "kind"}
+     )
+     length: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "length"}
+     )
+     name: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "name"}
+     )
+     offset_from_parent: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "offsetFromParent"}
+     )
+     parent_index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "parentIndex"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     relative_address: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "relativeAddress"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_change.py ADDED
@@ -0,0 +1,31 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _artifact_location,
+     _property_bag,
+     _replacement,
+ )
+
+
+ @dataclasses.dataclass
+ class ArtifactChange(object):
+     """A change to a single artifact."""
+
+     artifact_location: _artifact_location.ArtifactLocation = dataclasses.field(
+         metadata={"schema_property_name": "artifactLocation"}
+     )
+     replacements: List[_replacement.Replacement] = dataclasses.field(
+         metadata={"schema_property_name": "replacements"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_location.py ADDED
@@ -0,0 +1,33 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
+
+
+ @dataclasses.dataclass
+ class ArtifactLocation(object):
+     """Specifies the location of an artifact."""
+
+     description: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "description"}
+     )
+     index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "index"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     uri: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "uri"}
+     )
+     uri_base_id: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "uriBaseId"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_attachment.py ADDED
@@ -0,0 +1,39 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _artifact_location,
+     _message,
+     _property_bag,
+     _rectangle,
+     _region,
+ )
+
+
+ @dataclasses.dataclass
+ class Attachment(object):
+     """An artifact relevant to a result."""
+
+     artifact_location: _artifact_location.ArtifactLocation = dataclasses.field(
+         metadata={"schema_property_name": "artifactLocation"}
+     )
+     description: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "description"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     rectangles: Optional[List[_rectangle.Rectangle]] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "rectangles"}
+     )
+     regions: Optional[List[_region.Region]] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "regions"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_configuration_override.py ADDED
@@ -0,0 +1,31 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _property_bag,
+     _reporting_configuration,
+     _reporting_descriptor_reference,
+ )
+
+
+ @dataclasses.dataclass
+ class ConfigurationOverride(object):
+     """Information about how a specific rule or notification was reconfigured at runtime."""
+
+     configuration: _reporting_configuration.ReportingConfiguration = dataclasses.field(
+         metadata={"schema_property_name": "configuration"}
+     )
+     descriptor: _reporting_descriptor_reference.ReportingDescriptorReference = (
+         dataclasses.field(metadata={"schema_property_name": "descriptor"})
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge.py ADDED
@@ -0,0 +1,31 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
+
+
+ @dataclasses.dataclass
+ class Edge(object):
+     """Represents a directed edge in a graph."""
+
+     id: str = dataclasses.field(metadata={"schema_property_name": "id"})
+     source_node_id: str = dataclasses.field(
+         metadata={"schema_property_name": "sourceNodeId"}
+     )
+     target_node_id: str = dataclasses.field(
+         metadata={"schema_property_name": "targetNodeId"}
+     )
+     label: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "label"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_references.py ADDED
@@ -0,0 +1,86 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _external_property_file_reference,
+     _property_bag,
+ )
+
+
+ @dataclasses.dataclass
+ class ExternalPropertyFileReferences(object):
+     """References to external property files that should be inlined with the content of a root log file."""
+
+     addresses: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "addresses"})
+     artifacts: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "artifacts"})
+     conversion: Optional[
+         _external_property_file_reference.ExternalPropertyFileReference
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "conversion"})
+     driver: Optional[
+         _external_property_file_reference.ExternalPropertyFileReference
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "driver"})
+     extensions: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "extensions"})
+     externalized_properties: Optional[
+         _external_property_file_reference.ExternalPropertyFileReference
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "externalizedProperties"}
+     )
+     graphs: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "graphs"})
+     invocations: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "invocations"}
+     )
+     logical_locations: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "logicalLocations"}
+     )
+     policies: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "policies"})
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     results: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "results"})
+     taxonomies: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(default=None, metadata={"schema_property_name": "taxonomies"})
+     thread_flow_locations: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "threadFlowLocations"}
+     )
+     translations: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "translations"}
+     )
+     web_requests: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "webRequests"}
+     )
+     web_responses: Optional[
+         List[_external_property_file_reference.ExternalPropertyFileReference]
+     ] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "webResponses"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph_traversal.py ADDED
@@ -0,0 +1,43 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Any, List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _edge_traversal,
+     _message,
+     _property_bag,
+ )
+
+
+ @dataclasses.dataclass
+ class GraphTraversal(object):
+     """Represents a path through a graph."""
+
+     description: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "description"}
+     )
+     edge_traversals: Optional[List[_edge_traversal.EdgeTraversal]] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "edgeTraversals"}
+     )
+     immutable_state: Any = dataclasses.field(
+         default=None, metadata={"schema_property_name": "immutableState"}
+     )
+     initial_state: Any = dataclasses.field(
+         default=None, metadata={"schema_property_name": "initialState"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     result_graph_index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "resultGraphIndex"}
+     )
+     run_graph_index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "runGraphIndex"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location.py ADDED
@@ -0,0 +1,50 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _location_relationship,
+     _logical_location,
+     _message,
+     _physical_location,
+     _property_bag,
+     _region,
+ )
+
+
+ @dataclasses.dataclass
+ class Location(object):
+     """A location within a programming artifact."""
+
+     annotations: Optional[List[_region.Region]] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "annotations"}
+     )
+     id: int = dataclasses.field(default=-1, metadata={"schema_property_name": "id"})
+     logical_locations: Optional[List[_logical_location.LogicalLocation]] = (
+         dataclasses.field(
+             default=None, metadata={"schema_property_name": "logicalLocations"}
+         )
+     )
+     message: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "message"}
+     )
+     physical_location: Optional[_physical_location.PhysicalLocation] = (
+         dataclasses.field(
+             default=None, metadata={"schema_property_name": "physicalLocation"}
+         )
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     relationships: Optional[List[_location_relationship.LocationRelationship]] = (
+         dataclasses.field(
+             default=None, metadata={"schema_property_name": "relationships"}
+         )
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location_relationship.py ADDED
@@ -0,0 +1,28 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
+
+
+ @dataclasses.dataclass
+ class LocationRelationship(object):
+     """Information about the relation of one location to another."""
+
+     target: int = dataclasses.field(metadata={"schema_property_name": "target"})
+     description: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "description"}
+     )
+     kinds: List[str] = dataclasses.field(
+         default_factory=lambda: ["relevant"], metadata={"schema_property_name": "kinds"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_logical_location.py ADDED
@@ -0,0 +1,39 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
+
+
+ @dataclasses.dataclass
+ class LogicalLocation(object):
+     """A logical location of a construct that produced a result."""
+
+     decorated_name: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "decoratedName"}
+     )
+     fully_qualified_name: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "fullyQualifiedName"}
+     )
+     index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "index"}
+     )
+     kind: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "kind"}
+     )
+     name: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "name"}
+     )
+     parent_index: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "parentIndex"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_message.py ADDED
@@ -0,0 +1,33 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import List, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
+
+
+ @dataclasses.dataclass
+ class Message(object):
+     """Encapsulates a message intended to be read by the end user."""
+
+     arguments: Optional[List[str]] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "arguments"}
+     )
+     id: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "id"}
+     )
+     markdown: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "markdown"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     text: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "text"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_multiformat_message_string.py ADDED
@@ -0,0 +1,25 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
+
+
+ @dataclasses.dataclass
+ class MultiformatMessageString(object):
+     """A message string or message format string rendered in multiple formats."""
+
+     text: str = dataclasses.field(metadata={"schema_property_name": "text"})
+     markdown: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "markdown"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_physical_location.py ADDED
@@ -0,0 +1,40 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _address,
+     _artifact_location,
+     _property_bag,
+     _region,
+ )
+
+
+ @dataclasses.dataclass
+ class PhysicalLocation(object):
+     """A physical location relevant to a result. Specifies a reference to a programming artifact together with a range of bytes or characters within that artifact."""
+
+     address: Optional[_address.Address] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "address"}
+     )
+     artifact_location: Optional[_artifact_location.ArtifactLocation] = (
+         dataclasses.field(
+             default=None, metadata={"schema_property_name": "artifactLocation"}
+         )
+     )
+     context_region: Optional[_region.Region] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "contextRegion"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     region: Optional[_region.Region] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "region"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_rectangle.py ADDED
@@ -0,0 +1,36 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
+
+
+ @dataclasses.dataclass
+ class Rectangle(object):
+     """An area within an image."""
+
+     bottom: Optional[float] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "bottom"}
+     )
+     left: Optional[float] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "left"}
+     )
+     message: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "message"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     right: Optional[float] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "right"}
+     )
+     top: Optional[float] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "top"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_region.py ADDED
@@ -0,0 +1,58 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _artifact_content,
+     _message,
+     _property_bag,
+ )
+
+
+ @dataclasses.dataclass
+ class Region(object):
+     """A region within an artifact where a result was detected."""
+
+     byte_length: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "byteLength"}
+     )
+     byte_offset: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "byteOffset"}
+     )
+     char_length: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "charLength"}
+     )
+     char_offset: int = dataclasses.field(
+         default=-1, metadata={"schema_property_name": "charOffset"}
+     )
+     end_column: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "endColumn"}
+     )
+     end_line: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "endLine"}
+     )
+     message: Optional[_message.Message] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "message"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     snippet: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "snippet"}
+     )
+     source_language: Optional[str] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "sourceLanguage"}
+     )
+     start_column: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "startColumn"}
+     )
+     start_line: Optional[int] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "startLine"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_replacement.py ADDED
@@ -0,0 +1,31 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import (
+     _artifact_content,
+     _property_bag,
+     _region,
+ )
+
+
+ @dataclasses.dataclass
+ class Replacement(object):
+     """The replacement of a single region of an artifact."""
+
+     deleted_region: _region.Region = dataclasses.field(
+         metadata={"schema_property_name": "deletedRegion"}
+     )
+     inserted_content: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "insertedContent"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+
+
+ # flake8: noqa
infer_4_47_1/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_configuration.py ADDED
@@ -0,0 +1,33 @@
+ # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
+ # with extension for dataclasses and type annotation.
+
+ from __future__ import annotations
+
+ import dataclasses
+ from typing import Literal, Optional
+
+ from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
+
+
+ @dataclasses.dataclass
+ class ReportingConfiguration(object):
+     """Information about a rule or notification that can be configured at runtime."""
+
+     enabled: bool = dataclasses.field(
+         default=True, metadata={"schema_property_name": "enabled"}
+     )
+     level: Literal["none", "note", "warning", "error"] = dataclasses.field(
+         default="warning", metadata={"schema_property_name": "level"}
+     )
+     parameters: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "parameters"}
+     )
+     properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
+         default=None, metadata={"schema_property_name": "properties"}
+     )
+     rank: float = dataclasses.field(
+         default=-1.0, metadata={"schema_property_name": "rank"}
+     )
+
+
+ # flake8: noqa