ZTWHHH commited on
Commit
313b239
·
verified ·
1 Parent(s): fd6ec6c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +6 -0
  2. pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc +3 -0
  3. pllava/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc +3 -0
  4. pllava/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc +3 -0
  5. pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc +3 -0
  6. pllava/lib/python3.10/site-packages/torch/_decomp/__init__.py +484 -0
  7. pllava/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc +0 -0
  8. pllava/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc +0 -0
  9. pllava/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc +0 -0
  10. pllava/lib/python3.10/site-packages/torch/_decomp/decompositions.py +0 -0
  11. pllava/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py +335 -0
  12. pllava/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py +266 -0
  13. pllava/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py +1503 -0
  14. pllava/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py +533 -0
  15. pllava/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py +1277 -0
  16. pllava/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py +824 -0
  17. pllava/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py +150 -0
  18. pllava/lib/python3.10/site-packages/torch/_dynamo/testing.py +409 -0
  19. pllava/lib/python3.10/site-packages/torch/bin/protoc +3 -0
  20. pllava/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 +3 -0
  21. pllava/lib/python3.10/site-packages/torch/contrib/__init__.py +0 -0
  22. pllava/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc +0 -0
  23. pllava/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc +0 -0
  24. pllava/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py +143 -0
  25. pllava/lib/python3.10/site-packages/torch/nested/__init__.py +465 -0
  26. pllava/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc +0 -0
  27. pllava/lib/python3.10/site-packages/torch/nested/_internal/__init__.py +0 -0
  28. pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
  29. pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc +0 -0
  30. pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc +0 -0
  31. pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc +0 -0
  32. pllava/lib/python3.10/site-packages/torch/nested/_internal/nested_tensor.py +564 -0
  33. pllava/lib/python3.10/site-packages/torch/nested/_internal/ops.py +1675 -0
  34. pllava/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py +871 -0
  35. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  36. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/_adafactor.cpython-310.pyc +0 -0
  37. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc +0 -0
  38. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc +0 -0
  39. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc +0 -0
  40. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc +0 -0
  41. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc +0 -0
  42. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc +0 -0
  43. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc +0 -0
  44. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc +0 -0
  45. pllava/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc +0 -0
  46. pllava/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py +30 -0
  47. pllava/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  48. pllava/lib/python3.10/site-packages/torch/quantization/__init__.py +86 -0
  49. pllava/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  50. pllava/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -301,3 +301,9 @@ pllava/lib/python3.10/site-packages/sympy/polys/__pycache__/polytools.cpython-31
301
  pllava/lib/python3.10/site-packages/sympy/polys/tests/__pycache__/test_polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
302
  pllava/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
303
  pllava/lib/python3.10/site-packages/torchvision.libs/libz.5f199d92.so.1 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
301
  pllava/lib/python3.10/site-packages/sympy/polys/tests/__pycache__/test_polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
302
  pllava/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
303
  pllava/lib/python3.10/site-packages/torchvision.libs/libz.5f199d92.so.1 filter=lfs diff=lfs merge=lfs -text
304
+ pllava/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
305
+ pllava/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
306
+ pllava/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
307
+ pllava/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
308
+ pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
309
+ pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7cf9312cc2afcf4de2195697258e9ef91099ba7b943df93a72daa7d5cd03595
3
+ size 111974
pllava/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6305c15d74945f19cbbcd72353dc4671ba99cd0078a558954517df22715db8c8
3
+ size 121439
pllava/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5795a3fec8384b3c476a5488ca2875999793ea03c972e24109bbc859bd73950
3
+ size 137706
pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c55a009039c579ec04b5a5d980a86f6a437a6d0fc6e4fbe3aae6414307957f3
3
+ size 113415
pllava/lib/python3.10/site-packages/torch/_decomp/__init__.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import inspect
3
+ from collections import defaultdict
4
+ from functools import wraps
5
+ from itertools import chain
6
+ from typing import Callable, Dict, List, Sequence, TypeVar, Union
7
+ from typing_extensions import ParamSpec
8
+
9
+ import torch
10
+ import torch.library
11
+ from torch._ops import HigherOrderOperator, OpOverload, OpOverloadPacket
12
+ from torch._prims_common import CustomOutParamAnnotation
13
+ from torch.utils import _pytree as pytree
14
+
15
+
16
+ __all__ = [
17
+ "decomposition_table",
18
+ "pre_autograd_decomposition_table",
19
+ "meta_table",
20
+ "register_decomposition",
21
+ "get_decompositions",
22
+ "core_aten_decompositions",
23
+ ]
24
+
25
+ _T = TypeVar("_T")
26
+ _P = ParamSpec("_P")
27
+
28
+ # TODO: relax key type here; torch registrations should be possible to; but
29
+ # right now this type is accurate
30
+ global_decomposition_table: Dict[
31
+ str, Dict[torch._ops.OperatorBase, Callable]
32
+ ] = defaultdict(dict)
33
+
34
+ decomposition_table = global_decomposition_table["post_autograd"]
35
+ pre_autograd_decomposition_table = global_decomposition_table["pre_autograd"]
36
+ meta_table = global_decomposition_table["meta"]
37
+
38
+
39
+ def _add_op_to_registry(registry, op, fn):
40
+ """
41
+ This is an internal API for adding an op to the decomposition table.
42
+
43
+ If op is OpOverload, it will be added to the registry directly.
44
+ If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.
45
+ """
46
+ overloads: List[Union[torch._ops.OperatorBase]] = []
47
+ if isinstance(op, HigherOrderOperator):
48
+ # There's no concept of overloads for HigherOrderOperator
49
+ registry[op] = fn
50
+ return
51
+ elif isinstance(op, OpOverload):
52
+ overloads.append(op)
53
+ else:
54
+ assert isinstance(op, OpOverloadPacket)
55
+ for ol in op.overloads():
56
+ overloads.append(getattr(op, ol))
57
+
58
+ for op_overload in overloads:
59
+ if op_overload in registry:
60
+ raise RuntimeError(f"duplicate registrations for {op_overload}")
61
+ # TorchScript dumps a bunch of extra nonsense overloads
62
+ # which don't have corresponding dispatcher entries, we need
63
+ # to filter those out, e.g aten.add.float_int
64
+ if torch._C._dispatch_has_kernel(op_overload.name()):
65
+ registry[op_overload] = fn
66
+
67
+
68
+ def _convert_out_params(f):
69
+ out_annotation = f.__annotations__.get("out")
70
+
71
+ # If there are no out params, do not wrap the function.
72
+ if not out_annotation:
73
+ return f
74
+
75
+ # Hack to detect when out is a Tuple. There seems to be no pretty way of doing this
76
+ if getattr(out_annotation, "__origin__", None) is tuple:
77
+ sig = inspect.signature(f)
78
+ out_names = sig.return_annotation._fields
79
+ # If out is a tuple, we need to register a function that unpacks all the out
80
+ # elements as this is what native_functions.yaml expects
81
+
82
+ @wraps(f)
83
+ def _fn(*args, **kwargs):
84
+ out_kwargs = tuple(kwargs.pop(o, None) for o in out_names)
85
+ # Either all of the out kwargs are set or none of them
86
+ is_none = out_kwargs[0] is None
87
+ assert all((o is None) == is_none for o in out_kwargs)
88
+ return f(*args, **kwargs, out=None if is_none else out_kwargs)
89
+
90
+ out_params = [
91
+ inspect.Parameter(
92
+ o,
93
+ kind=inspect.Parameter.KEYWORD_ONLY,
94
+ default=None,
95
+ annotation=t,
96
+ )
97
+ for o, t in zip(out_names, out_annotation.__args__)
98
+ ]
99
+ # Drop the out parameter and concatenate the new kwargs in the signature
100
+ params = chain((v for k, v in sig.parameters.items() if k != "out"), out_params)
101
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
102
+ parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
103
+ )
104
+ # Drop the out parameter and concatenate the new kwargs in the annotations
105
+ _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
106
+ for o in out_params:
107
+ _fn.__annotations__[o.name] = o.annotation
108
+
109
+ # Propagate that this function is wrapped by `out_wrapper`
110
+ _fn._torch_decompositions_out_wrapper = f._torch_decompositions_out_wrapper # type: ignore[attr-defined]
111
+
112
+ return _fn
113
+
114
+ # Alternatively, there may be a single tensor out parameter with a name
115
+ # other than "out". This will need special treatment and is indicated by an
116
+ # annotation, which we will remove here so it is not exposed after wrapping.
117
+ custom_out_param_name = f.__annotations__.pop(CustomOutParamAnnotation, None)
118
+ if custom_out_param_name:
119
+
120
+ @wraps(f)
121
+ def _fn(*args, **kwargs):
122
+ out_kwarg = kwargs.pop(custom_out_param_name, None)
123
+ return f(*args, **kwargs, out=out_kwarg)
124
+
125
+ out_param = inspect.Parameter(
126
+ custom_out_param_name,
127
+ kind=inspect.Parameter.KEYWORD_ONLY,
128
+ default=None,
129
+ annotation=out_annotation,
130
+ )
131
+
132
+ # Drop the out parameter and concatenate the new kwarg in the signature
133
+ sig = inspect.signature(f)
134
+ params = chain(
135
+ (v for k, v in sig.parameters.items() if k != "out"), (out_param,)
136
+ )
137
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
138
+ parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
139
+ )
140
+
141
+ # Drop the out parameter and concatenate the new kwargs in the annotations
142
+ _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
143
+ _fn.__annotations__[out_param.name] = out_param.annotation
144
+
145
+ return _fn
146
+
147
+ return f
148
+
149
+
150
+ def register_decomposition(
151
+ aten_op, registry=None, *, type="post_autograd", unsafe=False
152
+ ) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
153
+ """
154
+ A decorator to register a function as a decomposition to the Python
155
+ decomposition table. Use it like this::
156
+
157
+ @register_decomposition(torch.ops.aten.clamp_min)
158
+ def clamp_min(x):
159
+ return torch.clamp(self, min=min)
160
+
161
+ If you are writing a new decomposition, consider contributing it
162
+ directly to PyTorch in torch._decomp.decompositions.
163
+
164
+ This API is experimental; we are almost certainly going to extend
165
+ the API when we make decompositions eligible for use in transforms (e.g.,
166
+ autograd) and not just backend tracing, where we then need to know if a
167
+ decomposition can be used to simulate a transform.
168
+
169
+ By default, we also will register it to the Meta key of dispatcher,
170
+ and replace the c++ Meta implementation if there is already one.
171
+
172
+ unsafe kwarg is for reuse of this function for registering non-function
173
+ things
174
+ """
175
+
176
+ assert type in {"post_autograd", "pre_autograd", "meta"}
177
+
178
+ def decomposition_decorator(fn: Callable[_P, _T]) -> Callable[_P, _T]:
179
+ orig_fn = fn
180
+ if not unsafe:
181
+ fn = _convert_out_params(fn)
182
+
183
+ nonlocal registry
184
+ if registry is None:
185
+ registry = global_decomposition_table[type]
186
+
187
+ def register(op):
188
+ _add_op_to_registry(registry, op, fn)
189
+
190
+ # To handle allowing multiple aten_ops at once
191
+ pytree.tree_map_(register, aten_op)
192
+ return orig_fn
193
+
194
+ return decomposition_decorator
195
+
196
+
197
+ def get_decompositions(
198
+ aten_ops: Sequence[Union[torch._ops.OperatorBase, OpOverloadPacket]],
199
+ type: str = "post_autograd",
200
+ ) -> Dict[torch._ops.OperatorBase, Callable]:
201
+ """
202
+ Retrieve a dictionary of decompositions corresponding to the list of
203
+ operator overloads and overload packets passed as input. Overload
204
+ packets will include all decomposed overloads in the packet. If there is
205
+ no decomposition for a requested operator, it is silently ignored.
206
+
207
+ This API is experimental; we are almost certainly going to give an alternate,
208
+ more recommended formulation, where a user provides the set of operators
209
+ they know how to implement, and we provide decompositions for everything
210
+ not in this set.
211
+ """
212
+ assert type in {"post_autograd", "pre_autograd", "meta"}
213
+
214
+ registry = global_decomposition_table[type]
215
+ packets_to_overloads = defaultdict(list)
216
+ for opo in registry:
217
+ if isinstance(opo, (OpOverload, OpOverloadPacket)):
218
+ packets_to_overloads[opo.overloadpacket].append(opo)
219
+ decompositions: Dict[torch._ops.OperatorBase, Callable] = {}
220
+ for op in aten_ops:
221
+ if isinstance(op, OpOverloadPacket) and op in packets_to_overloads:
222
+ for op_overload in packets_to_overloads[op]:
223
+ decompositions[op_overload] = registry[op_overload]
224
+ elif isinstance(op, (torch._ops.OperatorBase)) and op in registry:
225
+ decompositions[op] = registry[op]
226
+ return decompositions
227
+
228
+
229
+ def remove_decompositions(
230
+ decompositions: Dict[torch._ops.OperatorBase, Callable],
231
+ aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]],
232
+ ) -> None:
233
+ """
234
+ Given a dictionary of decompositions obtained from get_decompositions(), removes
235
+ operators associated with a list of operator overloads and overload packets passed
236
+ as input. If the decomposition dictionary does not contain a decomposition that is
237
+ specified to be removed, it is silently ignored.
238
+ """
239
+ for op in aten_ops:
240
+ if isinstance(op, OpOverloadPacket):
241
+ for overload_name in op.overloads():
242
+ opo = getattr(op, overload_name)
243
+ decompositions.pop(opo, None)
244
+ elif isinstance(op, OpOverload):
245
+ decompositions.pop(op, None)
246
+
247
+
248
+ # populate the table
249
+ import torch._decomp.decompositions
250
+ import torch._refs
251
+
252
+
253
+ # See NOTE [Core ATen Ops]
254
+ #
255
+ # list was copied from torch/_inductor/decomposition.py
256
+ # excluding decompositions that results in prim ops
257
+ # Resulting opset of decomposition is core aten ops
258
+ def core_aten_decompositions() -> Dict[torch._ops.OperatorBase, Callable]:
259
+ aten = torch.ops.aten
260
+ return get_decompositions(
261
+ [
262
+ aten.addcdiv,
263
+ aten.addcdiv_,
264
+ aten.addcmul,
265
+ aten.addcmul_,
266
+ aten.addr,
267
+ aten.affine_grid_generator,
268
+ aten.alias_copy,
269
+ aten.all,
270
+ aten.aminmax,
271
+ aten.arange.default,
272
+ aten.arange.start,
273
+ aten.avg_pool2d_backward,
274
+ aten.baddbmm,
275
+ aten.binary_cross_entropy,
276
+ aten.binary_cross_entropy_backward,
277
+ aten.binary_cross_entropy_with_logits,
278
+ aten.block_diag,
279
+ aten.celu,
280
+ aten.celu_,
281
+ aten.channel_shuffle,
282
+ aten.clamp_max,
283
+ aten.clamp_min,
284
+ aten.col2im,
285
+ aten.count_nonzero,
286
+ aten.linalg_cross,
287
+ aten.cudnn_batch_norm,
288
+ aten.cudnn_batch_norm_backward,
289
+ aten.miopen_batch_norm_backward,
290
+ aten.deg2rad,
291
+ aten.deg2rad_,
292
+ aten.detach,
293
+ aten.diag_embed,
294
+ aten.diagonal_backward,
295
+ aten.dot,
296
+ aten.vdot,
297
+ aten.elu,
298
+ aten.elu_,
299
+ aten.elu_backward,
300
+ aten._embedding_bag,
301
+ aten.embedding_dense_backward,
302
+ aten.empty_like,
303
+ aten._euclidean_dist.default,
304
+ aten.expand_as,
305
+ aten.expand_copy,
306
+ aten.eye,
307
+ aten.fill,
308
+ aten.fill_,
309
+ aten.floor_divide,
310
+ aten.frac,
311
+ aten.frac_,
312
+ aten._fused_moving_avg_obs_fq_helper,
313
+ aten.gelu_,
314
+ aten.gelu_backward,
315
+ aten.glu,
316
+ aten.glu_backward,
317
+ aten.hardshrink,
318
+ aten.hardsigmoid,
319
+ aten.hardsigmoid_,
320
+ aten.hardsigmoid_backward,
321
+ aten.hardswish,
322
+ aten.hardswish_,
323
+ aten.hardswish_backward,
324
+ aten.hardtanh_,
325
+ aten.hardtanh_backward,
326
+ aten.heaviside,
327
+ aten.heaviside_,
328
+ aten.huber_loss,
329
+ aten.huber_loss_backward,
330
+ aten.im2col,
331
+ aten.index_add,
332
+ aten.index_add_,
333
+ aten.index_copy,
334
+ aten.index_copy_,
335
+ aten.index_fill,
336
+ aten.index_fill_,
337
+ aten.isin,
338
+ aten.isneginf,
339
+ aten.isposinf,
340
+ aten.l1_loss,
341
+ aten._lazy_clone,
342
+ aten._test_parallel_materialize,
343
+ aten.leaky_relu_,
344
+ aten.leaky_relu_backward,
345
+ aten.lerp,
346
+ aten.lerp_,
347
+ aten.linspace,
348
+ aten.logaddexp,
349
+ aten.logaddexp2,
350
+ aten.logit,
351
+ aten.logit_,
352
+ aten.logit_backward,
353
+ aten.log_sigmoid_backward,
354
+ aten.log_sigmoid_forward,
355
+ aten._log_softmax_backward_data,
356
+ aten.logspace,
357
+ aten.logsumexp.default,
358
+ aten.masked_fill,
359
+ aten.masked_fill_,
360
+ aten.mish,
361
+ aten.mish_,
362
+ aten.mse_loss,
363
+ aten.mse_loss_backward,
364
+ aten.multi_margin_loss,
365
+ aten.multilabel_margin_loss_forward,
366
+ aten.mv,
367
+ aten.mvlgamma,
368
+ aten.mvlgamma_,
369
+ aten.nansum,
370
+ aten.nan_to_num,
371
+ aten.nan_to_num_,
372
+ aten.narrow,
373
+ aten.native_batch_norm_backward,
374
+ aten.native_dropout_backward,
375
+ aten.native_group_norm_backward,
376
+ aten.native_layer_norm_backward,
377
+ aten.new_empty,
378
+ aten.new_full,
379
+ aten.new_ones,
380
+ aten.new_zeros,
381
+ aten.nll_loss2d_forward,
382
+ aten.nll_loss2d_backward,
383
+ aten.nll_loss_backward,
384
+ aten.nll_loss_forward,
385
+ aten.norm,
386
+ aten.ones,
387
+ aten.ones_like,
388
+ aten.pixel_shuffle,
389
+ aten.pixel_unshuffle,
390
+ aten._prelu_kernel,
391
+ aten._prelu_kernel_backward,
392
+ aten._reshape_alias,
393
+ aten.rad2deg,
394
+ aten.rad2deg_,
395
+ aten.reflection_pad1d,
396
+ aten.reflection_pad1d_backward,
397
+ aten.reflection_pad2d,
398
+ aten.reflection_pad2d_backward,
399
+ aten.reflection_pad3d,
400
+ aten.reflection_pad3d_backward,
401
+ aten.replication_pad1d,
402
+ aten.replication_pad2d,
403
+ aten.replication_pad3d,
404
+ aten.renorm,
405
+ aten.renorm_,
406
+ aten.replication_pad2d,
407
+ aten.resize_as,
408
+ aten.roll,
409
+ aten.rot90,
410
+ aten.rrelu_with_noise,
411
+ aten.rrelu_with_noise_,
412
+ aten.rsub,
413
+ aten._safe_softmax,
414
+ aten._scaled_dot_product_flash_attention_for_cpu.default,
415
+ aten.select_backward,
416
+ aten.select_scatter,
417
+ aten.sgn,
418
+ aten.sgn_,
419
+ aten.sigmoid_backward,
420
+ aten.silu,
421
+ aten.silu_,
422
+ aten.silu_backward,
423
+ aten.sinc,
424
+ aten.sinc_,
425
+ aten.slice_backward,
426
+ aten.smooth_l1_loss,
427
+ aten.smooth_l1_loss_backward,
428
+ aten.soft_margin_loss,
429
+ aten.soft_margin_loss_backward,
430
+ aten._softmax_backward_data,
431
+ aten.softplus,
432
+ aten.softplus_backward,
433
+ aten.softshrink,
434
+ aten.special_entr,
435
+ aten.special_log_ndtr,
436
+ aten.special_xlog1py,
437
+ aten.split.Tensor,
438
+ aten.split_with_sizes_copy,
439
+ aten.squeeze.default,
440
+ aten.squeeze.dim,
441
+ aten.std,
442
+ aten.std_mean,
443
+ aten.stack,
444
+ aten.sum.default,
445
+ aten.sum.out,
446
+ aten.t,
447
+ aten.t_copy,
448
+ aten.take,
449
+ aten.tanh_backward,
450
+ aten.threshold,
451
+ aten.threshold_,
452
+ aten.threshold_backward,
453
+ aten.trace,
454
+ aten.transpose.int,
455
+ aten.tril,
456
+ aten.tril_,
457
+ aten.triu,
458
+ aten.triu_,
459
+ aten.unbind,
460
+ aten.unfold_backward,
461
+ aten.unfold_copy,
462
+ aten._unsafe_index,
463
+ aten._unsafe_index_put,
464
+ aten._unsafe_masked_index,
465
+ aten._unsafe_masked_index_put_accumulate,
466
+ aten.unsafe_split.Tensor,
467
+ aten.unsafe_split_with_sizes,
468
+ aten.unsqueeze_copy,
469
+ aten._unsafe_view,
470
+ aten.upsample_linear1d,
471
+ aten.upsample_bilinear2d,
472
+ aten.upsample_trilinear3d,
473
+ aten.upsample_nearest2d_backward,
474
+ aten.view_as_complex,
475
+ aten.xlogy,
476
+ aten.xlogy_,
477
+ aten.zero,
478
+ aten.zero_,
479
+ aten.zeros,
480
+ aten.zeros_like,
481
+ aten._chunk_cat,
482
+ aten._weight_norm_interface,
483
+ ]
484
+ )
pllava/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
pllava/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc ADDED
Binary file (6.73 kB). View file
 
pllava/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
pllava/lib/python3.10/site-packages/torch/_decomp/decompositions.py ADDED
The diff for this file is too large to render. See raw diff
 
pllava/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py ADDED
@@ -0,0 +1,335 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import inspect
4
+ from typing import Callable, Dict, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch._decomp
8
+ from torch import Tensor
9
+ from torch._prims_common.wrappers import _maybe_remove_out_wrapper
10
+
11
+
12
+ decomposition_table = torch._decomp.decomposition_table
13
+ decomposition_table_for_jvp: Dict[torch._ops.OperatorBase, Callable] = {}
14
+ register_decomposition = torch._decomp.register_decomposition
15
+ aten = torch.ops.aten
16
+
17
+ # NOTE: [forward-mode AD decompositions mechanism]
18
+ #
19
+ # The mechanism is in VariableType,
20
+ # IF any inputs have forward grad
21
+ # AND there is no forward AD formula implemented
22
+ # AND the functions is actually differentiable
23
+ # run the decomposition
24
+ # See run_jit_decomposition_with_args_for_jvp
25
+ # We currently use python decompositions that we torchscript.
26
+ #
27
+ # Note that we would be building the backward graph at the decomposed level
28
+ # too, but that is OK, because we would've errored out otherwise anyway.
29
+ #
30
+ # TODO: The mechanism we are using to register decompositions doesn't
31
+ # seem to be exclusively used for jvp. So open question here is whether
32
+ # torch/csrc/jit/runtime/decomposition_registry.cpp is being used for other things.
33
+ # If that is the case, we may go down the decomposition path unexpectedly
34
+ # (and possibly produce an unintelligible error) vs erroring out earlier and
35
+ # printing that the forward AD formula is not implemented.
36
+ #
37
+ # The solution to this may be to have a explicitly white list control when
38
+ # to enable the decomposition.
39
+
40
+
41
+ def maybe_register_decomposition(op):
42
+ def decorator(f):
43
+ try:
44
+ return register_decomposition(op)(f)
45
+ except Exception:
46
+ return f
47
+
48
+ return decorator
49
+
50
+
51
+ # Functions where we need a special decomposition for jvp but there's another version that
52
+ # should be used more generally (ex. for jvp we need to recompute the mean and variance for
53
+ # the backwards of a normalization function. Without jvp, it should use the saved value)
54
+ decomposition_table_for_jvp = {}
55
+
56
+
57
+ def register_decomposition_for_jvp(fn):
58
+ return register_decomposition(fn, registry=decomposition_table_for_jvp)
59
+
60
+
61
+ def _register_jit_decomposition_for_jvp(decomp, use_python=False):
62
+ if decomp in decomposition_table_for_jvp:
63
+ decomposition_table_used = decomposition_table_for_jvp
64
+ elif decomp in decomposition_table:
65
+ decomposition_table_used = decomposition_table
66
+ else:
67
+ raise RuntimeError(f"could not find decomposition for {decomp}")
68
+ decomp_fn = decomposition_table_used[decomp]
69
+
70
+ # `out_wrapper` extends a decompositions signature with
71
+ # an `out` parameter. However jit will use the unwrapped function's
72
+ # signature instead so we need to unwrap here to prevent an error
73
+ decomp_fn = _maybe_remove_out_wrapper(decomp_fn)
74
+
75
+ if use_python:
76
+ decomp_fn = torch.jit.ignore(decomp_fn)
77
+ sig = inspect.signature(decomp_fn)
78
+
79
+ # Create a string wrapping the function from the signature
80
+ # example output:
81
+ # def wrapped_decomp(x: torch.Tensor, y: int, z: int):
82
+ # return decomp_fn(x, y, z)
83
+ # Thanks copilot!
84
+ def get_function_def(sig):
85
+ param_def = [f"{param_str}" for param_str in sig.parameters.values()]
86
+ param_use = [f"{param_str}" for param_str in sig.parameters.keys()]
87
+
88
+ return f"def wrapped_decomp({', '.join(param_def)}):\n return decomp_fn({', '.join(param_use)})\n"
89
+
90
+ f_str = get_function_def(sig)
91
+ graph = torch.jit.CompilationUnit(f_str).wrapped_decomp.graph
92
+ else:
93
+ graph = torch.jit.script(decomp_fn).graph
94
+ torch.jit._register_decomposition(decomp, graph)
95
+
96
+
97
+ # The only decompositions here are temporary or hacks for the purposes of jvp
98
+
99
+
100
+ # TODO: do these also belong here?
101
+ @maybe_register_decomposition(aten.trace.default)
102
+ def trace(self: Tensor) -> Tensor:
103
+ return torch.sum(torch.diag(self))
104
+
105
+
106
+ @maybe_register_decomposition(aten.log_sigmoid_forward.default)
107
+ def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
108
+ min = torch.minimum(self.new_zeros(()), self)
109
+ z = torch.exp(-torch.abs(self))
110
+ if self.is_cuda:
111
+ buffer = self.new_zeros((0,))
112
+ else:
113
+ buffer = z
114
+ return min - torch.log1p(z), buffer
115
+
116
+
117
+ def recompute_mean_var(
118
+ input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
119
+ ):
120
+ # for most norm decompositions, it will be the same as the core version except for here.
121
+ # We recompute the mean and variance so that they track gradients through input
122
+
123
+ mean = torch.mean(input, dim=inner_dim_indices, keepdim=keepdim)
124
+ var = torch.var(input, dim=inner_dim_indices, unbiased=False, keepdim=keepdim)
125
+ eps = torch.pow(1 / rstd, 2) - var # this makes me so sad inside
126
+ eps = eps.detach()
127
+ rstd = 1 / torch.sqrt(var + eps)
128
+ return mean, rstd
129
+
130
+
131
@register_decomposition_for_jvp(aten.native_layer_norm_backward)
def native_layer_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    normalized_shape: List[int],
    mean: Tensor,
    rstd: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    output_mask: List[bool],
) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
    """
    Layer-norm backward decomposition for jvp: recomputes mean/rstd from
    `input` (via recompute_mean_var) so the statistics are differentiable.
    `output_mask` selects which of (d_input, d_weight, d_bias) are needed;
    unselected grads are returned as zeros rather than None (vjp limitation,
    see inline comments).
    """
    input_shape = input.shape
    input_ndim = input.dim()

    # Dims at/after `axis` are the normalized (inner) dims; dims before it
    # are batch (outer) dims.
    axis = input_ndim - len(normalized_shape)
    inner_dims = input_shape[axis:]
    outer_dims = input_shape[:axis]
    inner_dim_indices = list(range(axis, input_ndim))
    outer_dim_indices = list(range(0, axis))

    # N = number of normalized elements, M = number of batch rows.
    N = 1
    for i in inner_dims:
        N *= i
    M = 1
    for i in outer_dims:
        M *= i
    # Degenerate (zero-sized) input: all-zero grads of the right shapes.
    if M <= 0 or N <= 0:
        return (
            input.new_zeros(input_shape),
            input.new_zeros(input_shape[axis:]),
            input.new_zeros(input_shape[axis:]),
        )

    # Differentiable statistics (the saved `mean`/`rstd` are detached).
    mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True)

    x_hat = (input - mean_) * rstd_
    if weight is not None:
        grad_x_hat = grad_out * weight
    else:
        grad_x_hat = grad_out
    # d_input = rstd/N * (N*g - sum(g) - x_hat * sum(g * x_hat))
    a = grad_x_hat * N
    b = torch.sum(grad_x_hat, inner_dim_indices, True)
    c1 = torch.mul(grad_x_hat, x_hat)
    c2 = torch.sum(c1, inner_dim_indices, True)
    c3 = torch.mul(x_hat, c2)
    inner = a - b - c3

    if output_mask[0]:
        d_input: Optional[Tensor] = (rstd_ / N) * inner
    else:
        d_input = torch.zeros_like(input)  # should be None but doesn't work with vjp

    if output_mask[1] and weight is not None:
        if len(outer_dim_indices) > 0:
            d_weight: Optional[Tensor] = torch.sum(
                grad_out * x_hat, outer_dim_indices, False
            )
        else:
            # No batch dims to reduce over.
            d_weight = grad_out * x_hat
    elif weight is not None:
        d_weight = torch.zeros_like(weight)  # should be None but doesn't work with vjp
    else:
        d_weight = torch.zeros(())  # should be None but doesn't work with vjp

    if output_mask[2] and bias is not None:
        if len(outer_dim_indices) > 0:
            d_bias: Optional[Tensor] = torch.sum(grad_out, outer_dim_indices, False)
        else:
            d_bias = grad_out.clone()
    elif bias is not None:
        d_bias = torch.zeros_like(bias)  # should be None but doesn't work with vjp
    else:
        d_bias = torch.zeros(())  # should be None but doesn't work with vjp

    return (d_input, d_weight, d_bias)
206
+
207
+
208
def prod(x: List[int]):
    """Product of a list of ints; returns 1 for an empty list (matches the
    previous hand-rolled loop, including the empty-input identity)."""
    # Local import keeps the module's import surface unchanged.
    import math

    return math.prod(x)
213
+
214
+
215
@register_decomposition_for_jvp(aten.native_batch_norm_backward)
def native_batch_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    weight: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_invstd: Optional[Tensor],
    train: bool,
    eps: float,
    output_mask: List[bool],
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    """
    Batch-norm backward decomposition for jvp. In training mode the saved
    statistics are recomputed from `input` (recompute_mean_var) so they are
    differentiable; in eval mode the running statistics are used.
    `output_mask[1]`/`[2]` gate the weight/bias grads; unselected grads are
    returned as zeros rather than None (vjp limitation, see inline comments).
    """
    input_shape = input.shape
    input_rank = input.dim()
    assert input_rank >= 2, "rank of the input must be at least 2"

    # Batch norm normalizes over all dims except the channel dim (dim 1).
    axis = 1
    num_features = prod(input_shape) / input_shape[axis]  # type: ignore[arg-type]
    mean = save_mean
    invstd = save_invstd
    if train:
        assert (
            save_mean is not None and save_invstd is not None
        ), "when train=True, save_mean and save_invstd are required"

        reduciton_dims = [0] + list(range(2, input.dim()))
        assert invstd is not None  # for typing
        # Differentiable statistics (the saved ones are detached from input).
        mean, invstd = recompute_mean_var(input, invstd, reduciton_dims, keepdim=False)
    else:
        assert running_mean is not None and running_var is not None
        mean = running_mean
        invstd = torch.rsqrt(running_var + eps)

    assert invstd is not None and mean is not None

    # Shape [1, C, 1, ...] used to broadcast per-channel stats over input.
    broadcast_mask = [1] * input_rank
    broadcast_mask[axis] = input_shape[axis]

    reduction_axes: List[int] = []
    for i in range(input_rank):
        if i != axis:
            reduction_axes.append(i)

    mean = torch.reshape(mean, broadcast_mask)
    norm = 1.0 / num_features
    grad_output_sum = torch.sum(grad_out, reduction_axes)
    dot_p = torch.sum(grad_out * (input - mean), reduction_axes)

    grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask)
    # Projection scale = (sum(g * (x - mean)) / n) * invstd^2, per channel.
    proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask)

    if weight is None:
        grad_scale = torch.reshape(invstd, broadcast_mask) * 1.0
    else:
        grad_scale = torch.reshape(invstd * weight, broadcast_mask)

    if train:
        proj = (input - mean) * proj_scale
        grad_input = ((grad_out - proj) - grad_mean) * grad_scale
    else:
        # Eval mode: statistics are constants, so only the direct term remains.
        grad_input = grad_out * grad_scale

    if output_mask[1]:
        grad_weight = dot_p * invstd
    elif weight is not None:
        grad_weight = torch.zeros_like(
            weight
        )  # should be None but doesn't work with vjp
    else:
        grad_weight = torch.zeros(())  # should be None but doesn't work with vjp

    if output_mask[2]:
        grad_bias = grad_output_sum
    else:
        grad_bias = torch.zeros_like(
            grad_output_sum
        )  # should be None but doesn't work with vjp

    return (grad_input, grad_weight, grad_bias)
295
+
296
+
297
@register_decomposition_for_jvp(aten.batch_norm_backward)
def batch_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    weight: Tensor,
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_var: Optional[Tensor],
    update: bool,
    eps: float,
    output_mask: List[bool],
    reserve: Tensor,
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    # Thin adapter: aten.batch_norm_backward shares its math with
    # native_batch_norm_backward (`update` maps to `train`). `reserve` is
    # part of the op's schema but unused by this decomposition.
    return native_batch_norm_backward(
        grad_out,
        input,
        weight,
        running_mean,
        running_var,
        save_mean,
        save_var,
        update,
        eps,
        output_mask,
    )
323
+
324
+
325
+ _register_jit_decomposition_for_jvp(torch.ops.aten.trace.default, use_python=True)
326
+ _register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss_backward.default)
327
+ _register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss2d_backward.default)
328
+ _register_jit_decomposition_for_jvp(torch.ops.aten._log_softmax_backward_data.default)
329
+ _register_jit_decomposition_for_jvp(torch.ops.aten._softmax_backward_data.default)
330
+ _register_jit_decomposition_for_jvp(torch.ops.aten.log_sigmoid_forward.default)
331
+ _register_jit_decomposition_for_jvp(torch.ops.aten.native_layer_norm_backward.default)
332
+ _register_jit_decomposition_for_jvp(torch.ops.aten.native_batch_norm_backward.default)
333
+ _register_jit_decomposition_for_jvp(torch.ops.aten.cudnn_batch_norm_backward.default)
334
+ _register_jit_decomposition_for_jvp(torch.ops.aten.batch_norm_backward.default)
335
+ _register_jit_decomposition_for_jvp(torch.ops.aten.miopen_batch_norm_backward.default)
pllava/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import functools
4
+ from collections import defaultdict
5
+ from typing import Callable, Dict
6
+
7
+ import torch
8
+ import torch._decomp as decomp
9
+ from torch._decomp import get_decompositions
10
+ from torch._ops import OpOverload
11
+
12
+
13
+ aten = torch.ops.aten
14
+
15
+ rng_decompositions: Dict[str, Dict[OpOverload, Callable]] = defaultdict(dict)
16
+
17
+
18
def register_rng_decomposition(aten_op):
    # Decorator: register a decomposition for `aten_op` in the RNG-specific
    # registry (rng_decompositions) instead of the global decomposition table.
    return decomp.register_decomposition(aten_op, rng_decompositions)
20
+
21
+
22
def throw_on_non_cuda(device):
    """Always raises: RNG functionalization is only supported for CUDA's
    Philox/counter-based RNG, so any other device type is rejected."""
    raise RuntimeError(
        f"You are trying to functionalize a {device.type} RNG operator but {device.type} does not "
        f"use Philox/counter-based RNG. Therefore, functionalizing a {device.type} RNG operator is "
        "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU."
    )
28
+
29
+
30
# TODO - We have to register many more distributions here, and also higher level
# ops like dropout which have fused implementation and can hide the rand inside.
@register_rng_decomposition(aten.rand)
def rand(shape, dtype=None, layout=torch.strided, device=None, pin_memory=False):
    # Functional aten.rand: draw from the tracked Philox (seed, offset) state
    # via the philox_rand prim instead of mutating global CUDA RNG state.
    if device and device.type != "cuda":
        throw_on_non_cuda(device)
    seed, offset = PhiloxStateTracker.get_state_as_tuple()
    dtype = dtype or torch.float32
    out, offset_jump = torch.ops.rngprims.philox_rand(
        shape, seed, offset, None, device, dtype
    )
    # Record how much Philox offset this call consumed.
    PhiloxStateTracker.advance_offset(offset_jump)
    return out
43
+
44
+
45
@register_rng_decomposition(aten.rand_like)
def rand_like(
    x: torch.Tensor,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=False,
    memory_format=torch.preserve_format,
):
    """Functional aten.rand_like: samples via the philox_rand prim using the
    tracked Philox state, defaulting device/dtype to those of `x`."""
    target_device = device or x.device
    if target_device.type != "cuda":
        throw_on_non_cuda(target_device)
    target_dtype = dtype or x.dtype
    seed, offset = PhiloxStateTracker.get_state_as_tuple()
    result, consumed_offset = torch.ops.rngprims.philox_rand(
        x.shape, seed, offset, None, target_device, target_dtype
    )
    # Record how much Philox offset this call consumed.
    PhiloxStateTracker.advance_offset(consumed_offset)
    return result
64
+
65
+
66
class PhiloxState:
    """
    Represents a PhiloxRngState - (seed, offset) with
    offset = base_offset + relative_offset. `seed` and `base_offset` capture
    the rng state just before tracing starts; `relative_offset` accumulates
    the offset consumed during tracing.
    """

    def __init__(self) -> None:
        self.reset()

    def reset(self):
        # Empty tensors mark "state not yet recorded".
        self.seed = torch.tensor(())
        self.base_offset = torch.tensor(())
        self.relative_offset = 0
        self.offset_advanced_alteast_once = False

    def validate_state(self):
        assert self.seed.numel() != 0 and self.base_offset.numel() != 0

    def advance_offset(self, consumed_offset):
        self.offset_advanced_alteast_once = True
        self.relative_offset += consumed_offset

    def set_state(self, seed, base_offset, relative_offset=0):
        self.seed, self.base_offset = seed, base_offset
        self.relative_offset = relative_offset

    def get_state_as_tuple(self):
        self.validate_state()
        total_offset = self.base_offset + self.relative_offset
        return (self.seed, total_offset)

    def get_state_as_tensor(self):
        # Only needed because we override get_rng_state.
        self.validate_state()
        total_offset = self.base_offset + self.relative_offset
        return torch.stack([self.seed, total_offset])

    def set_state_from_tensor(self, state):
        # Only needed because we override set_rng_state.
        self.seed, self.base_offset = torch.unbind(state)
        self.relative_offset = 0
108
+
109
+
110
class PhiloxStateTracker:
    """
    Singleton class to track the philox rng state during AOT Autograd tracing.
    For each aot tracing instance, AOT Autograd resets this tracker and keeps
    track of both forward and backward offsets. At runtime, we only care about
    the total consumed forward and backward offsets. For dynamic shapes, these
    offsets are a function of input shapes. Therefore, the AOT generated graphs
    have additional outputs that compute total consumed forward and backward
    offsets.
    """

    # `running_state` aliases either fwd_state or bwd_state depending on which
    # phase is currently being traced (see mark_beginning_of_* below).
    running_state: PhiloxState
    fwd_state: PhiloxState
    bwd_state: PhiloxState

    def __enter__(self):
        PhiloxStateTracker.reset()
        return self

    def __exit__(self, exc_type, exc_cal, exc_tb):
        PhiloxStateTracker.reset()

    @classmethod
    def reset(cls):
        cls.running_state = PhiloxState()
        cls.fwd_state = PhiloxState()
        cls.bwd_state = PhiloxState()

    @classmethod
    def mark_beginning_of_forward(cls):
        # Tells the tracker to use fwd_state as the running state
        cls.running_state = cls.fwd_state

    @classmethod
    def mark_beginning_of_backward(cls):
        # Tells the tracker to use bwd_state as the running state
        cls.running_state = cls.bwd_state

    @classmethod
    def record_state(cls, seed, offset, mode):
        # Records the seed and offset tensors. These tensors are used to invoke
        # the philox_rand functional primitives.
        if mode == "forward":
            cls.fwd_state.set_state(seed, offset)
            cls.mark_beginning_of_forward()
        else:
            assert mode == "backward"
            cls.bwd_state.set_state(seed, offset)

    @classmethod
    def get_state_as_tensor(cls):
        # The only reason this exists is because we override get_rng_state and
        # set_rng_state during tracing. get_rng_state expects a tensor output,
        # and returning a (seed, offset) tuple would upset other parts of the
        # program, like ctx.saved_tensors.

        # A bad consequence is that if user saves and restores rng state, we
        # have little bit of ugliness in the generated code, where we first
        # concat the (seed, offset) to create a tensor for get_rng_state, and
        # then split it back to get (seed, offset) tuple in set_rng_state.

        # TODO: Investigate if there is a better way to wrap the tuple in a
        # false Tensor object, and then desugar it later on.
        return cls.running_state.get_state_as_tensor()

    @classmethod
    def get_state_as_tuple(cls):
        return cls.running_state.get_state_as_tuple()

    @classmethod
    def set_state_from_tensor(cls, x):
        # This is only needed because we override set_rng_state. Look at the
        # comment in the get_state_as_tensor method.
        cls.running_state.set_state_from_tensor(x)

    @classmethod
    def advance_offset(cls, consumed_offset):
        cls.running_state.advance_offset(consumed_offset)

    @classmethod
    def get_current_relative_offset(cls):
        return cls.running_state.relative_offset

    @staticmethod
    def multiple_of_4(offset):
        # torch cuda rng state offset must be a multiple of 4. For inductor, as
        # we sum up all the numel, the result might not be a multiple of 4. This
        # method achieves that.
        return (offset + 3) // 4 * 4

    @classmethod
    def get_updated_fwd_offset(cls):
        # Short circuit if no rand ops were observed
        if not cls.fwd_state.offset_advanced_alteast_once:
            return cls.fwd_state.base_offset
        return cls.multiple_of_4(
            cls.fwd_state.base_offset + cls.fwd_state.relative_offset
        )

    @classmethod
    def get_updated_bwd_offset(cls):
        # Short circuit if no rand ops were observed
        if not cls.bwd_state.offset_advanced_alteast_once:
            return cls.bwd_state.base_offset
        return cls.multiple_of_4(
            cls.bwd_state.base_offset + cls.bwd_state.relative_offset
        )
217
+
218
+
219
# Additional decompositions which eventually use rand_like internally.
# Registering them in rng_decompositions ensures that the rand_like calls
# inside these decomps are functionalized too. The list is copied from the
# inductor codebase, which uses it for a similar purpose.
#
# Caution - These decomps do not have the same accuracy as eager. However,
# we can't just disable them with a config flag like fallback_random, because
# functionalization of rng ops requires these ops to be decomposed.
extra_random_decomps = get_decompositions(
    [
        aten.cauchy,
        aten.cauchy_,
        aten.exponential,
        aten.exponential_,
        aten.geometric,
        aten.geometric_,
        aten.native_dropout,
        aten.normal,
        aten.normal_,
        aten.normal_functional,
        aten.log_normal,
        aten.log_normal_,
        aten.rrelu_with_noise,
        aten.rrelu_with_noise_,
        aten.uniform_,
    ]
)
# Decorator that registers directly into extra_random_decomps.
register_extra_random_decomp = functools.partial(
    decomp.register_decomposition, registry=extra_random_decomps
)
249
+
250
+
251
@register_extra_random_decomp([aten.bernoulli_])
def bernoulli_(self, p=0.5):
    """In-place bernoulli via functionalized rand_like; CPU falls back to the
    eager kernel (NotImplemented)."""
    if torch.device("cpu") == self.device:
        return NotImplemented
    draws = torch.rand_like(self, dtype=torch.float32)
    return self.copy_(draws < p)
256
+
257
+
258
@register_extra_random_decomp([aten.bernoulli.p])
def bernoulli_p(self, p=0.5, *, generator=None):
    """Out-of-place bernoulli via functionalized rand_like; CPU falls back to
    the eager kernel (NotImplemented). Explicit generators are unsupported."""
    if torch.device("cpu") == self.device:
        return NotImplemented
    assert generator is None
    draws = torch.rand_like(self, dtype=torch.float32)
    return draws < p
264
+
265
+
266
+ rng_decompositions.update(extra_random_decomps) # type: ignore[arg-type]
pllava/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py ADDED
@@ -0,0 +1,1503 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import copy
3
+ import dataclasses
4
+ import dis
5
+ import itertools
6
+ import sys
7
+ import types
8
+ from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Tuple, Union
9
+
10
+ from .bytecode_analysis import (
11
+ get_indexof,
12
+ propagate_line_nums,
13
+ remove_extra_line_nums,
14
+ stacksize_analysis,
15
+ )
16
+
17
+
18
@dataclasses.dataclass
class InstructionExnTabEntry:
    """One exception-table entry: the protected range [start, end], the
    handler `target`, the stack `depth` to restore, and whether the offset
    of the raising instruction (`lasti`) is pushed."""

    start: "Instruction"
    end: "Instruction"
    target: "Instruction"
    depth: int
    lasti: bool

    def __repr__(self) -> str:
        fields = [
            f"start={self.start.short_inst_repr()}",
            f"end={self.end.short_inst_repr()}",
            f"target={self.target.short_inst_repr()}",
            f"depth={self.depth}",
            f"lasti={self.lasti}",
        ]
        return f"InstructionExnTabEntry({', '.join(fields)})"

    def __eq__(self, o) -> bool:
        # Instructions compare by identity; depth/lasti compare by value.
        if self.start is not o.start or self.end is not o.end:
            return False
        if self.target is not o.target:
            return False
        return self.depth == o.depth and self.lasti == o.lasti
42
+
43
+
44
@dataclasses.dataclass
class Instruction:
    """A mutable version of dis.Instruction"""

    opcode: int
    opname: str
    arg: Optional[int]
    argval: Any
    offset: Optional[int] = None
    starts_line: Optional[int] = None
    is_jump_target: bool = False
    positions: Optional["dis.Positions"] = None
    # extra fields to make modification easier:
    target: Optional["Instruction"] = None
    exn_tab_entry: Optional["InstructionExnTabEntry"] = None

    def __hash__(self) -> int:
        # Identity semantics: each Instruction object is distinct even if all
        # fields match, since instances are mutated in place during transforms.
        return id(self)

    def __eq__(self, other) -> bool:
        return self is other

    def short_inst_repr(self) -> str:
        return "Instruction(opname={}, offset={})".format(self.opname, self.offset)
68
+
69
+
70
def convert_instruction(i: dis.Instruction) -> Instruction:
    """Copy an immutable dis.Instruction into our mutable Instruction."""
    # 3.13 renamed starts_line to line_number.
    line = i.line_number if sys.version_info >= (3, 13) else i.starts_line
    return Instruction(
        i.opcode,
        i.opname,
        i.arg,
        i.argval,
        i.offset,
        line,
        i.is_jump_target,
        # `positions` only exists on 3.11+.
        getattr(i, "positions", None),
    )
85
+
86
+
87
+ class _NotProvided:
88
+ def __repr__(self) -> str:
89
+ return "_NotProvided"
90
+
91
+
92
def inst_has_op_bits(name):
    """True if this opcode's oparg carries extra flag bits on the running
    Python version (LOAD_GLOBAL on 3.11+; LOAD_ATTR/LOAD_SUPER_ATTR on 3.12+)."""
    if sys.version_info >= (3, 12) and name in ("LOAD_ATTR", "LOAD_SUPER_ATTR"):
        return True
    return sys.version_info >= (3, 11) and name == "LOAD_GLOBAL"
96
+
97
+
98
def create_instruction(
    name, *, arg=None, argval=_NotProvided, target=None
) -> Instruction:
    """
    Build an Instruction by opcode name.

    At most one of `arg`, `argval`, and `target` may be supplied, to avoid
    ambiguity: e.g. does create_instruction("LOAD_CONST", 5) mean the
    constant at co_consts[5], or the constant 5? A missing `arg` is computed
    during assembly from `argval` or `target`.

    Exception: instructions whose low arg bits are flags (LOAD_GLOBAL on
    3.11+, LOAD_ATTR/LOAD_SUPER_ATTR on 3.12+) accept both `arg` and
    `argval`. There `arg` holds only the flag bits (defaulting to 0) and
    the true arg is filled in at assembly time.
    """
    if inst_has_op_bits(name):
        # op-bits instruction: `arg` is just the flag bits.
        if target is not None:
            raise RuntimeError("target cannot be specified for instruction")
        if arg is None:
            arg = 0
    else:
        provided = (
            (arg is not None) + (argval is not _NotProvided) + (target is not None)
        )
        if provided > 1:
            raise RuntimeError(
                "only one of arg, argval, and target can be not None/_NotProvided"
            )
        if arg is not None and not isinstance(arg, int):
            raise RuntimeError("instruction arg must be int or None")
    return Instruction(
        opcode=dis.opmap[name], opname=name, arg=arg, argval=argval, target=target
    )
134
+
135
+
136
# Python 3.11 remaps
def create_jump_absolute(target) -> Instruction:
    # JUMP_ABSOLUTE was removed in 3.11; unconditional jumps are forward jumps.
    if sys.version_info >= (3, 11):
        return create_instruction("JUMP_FORWARD", target=target)
    return create_instruction("JUMP_ABSOLUTE", target=target)
140
+
141
+
142
def create_dup_top() -> Instruction:
    # DUP_TOP was removed in 3.11; COPY 1 is its replacement.
    if sys.version_info < (3, 11):
        return create_instruction("DUP_TOP")
    return create_instruction("COPY", arg=1)
146
+
147
+
148
def create_rot_n(n) -> List[Instruction]:
    """
    Returns a "simple" instruction sequence that rotates TOS to the n-th
    position in the stack. On 3.11+ any rotation is a chain of SWAPs. On
    older versions a single ROT_* instruction is used; if no such
    instruction exists an AttributeError is raised and the caller is
    expected to generate an equivalent sequence itself.
    """
    if n <= 1:
        return []  # nothing to rotate

    if sys.version_info >= (3, 11):
        # rotate n == swap n, swap n-1, ..., swap 2
        swaps = []
        for depth in range(n, 1, -1):
            swaps.append(create_instruction("SWAP", arg=depth))
        return swaps

    # ensure the required ROT_* instruction exists on this version
    if n >= 4 and sys.version_info < (3, 8):
        raise AttributeError(f"rotate {n} not supported for Python < 3.8")
    if n >= 5 and sys.version_info < (3, 10):
        raise AttributeError(f"rotate {n} not supported for Python < 3.10")

    if n > 4:
        return [create_instruction("ROT_N", arg=n)]
    opname = {2: "ROT_TWO", 3: "ROT_THREE", 4: "ROT_FOUR"}[n]
    return [create_instruction(opname)]
175
+
176
+
177
def add_push_null(
    inst_or_insts: Union[Instruction, List[Instruction]],
) -> List[Instruction]:
    """
    Appends or prepends a PUSH_NULL instruction to `inst_or_insts`,
    depending on Python version. Used when you know that
    `inst_or_insts` generates a callable that will be called.

    NOTE: Assumes `inst_or_insts` is a single instruction or sequence of
    instructions that pushes exactly 1 object to the stack that is to
    be called. It is important that you include ALL instructions that
    construct the callable - not just the first instruction/a prefix.

    Will attempt to use the NULL push bit for instructions
    with such bits (LOAD_GLOBAL 3.11+, LOAD_ATTR 3.12+, LOAD_SUPER_ATTR).
    In this case, instructions WILL be modified.
    """
    if isinstance(inst_or_insts, Instruction):
        insts = [inst_or_insts]
    else:
        insts = inst_or_insts

    # Helpers over the low (push-null) bit of an instruction's arg.
    def inst_has_bit_set(idx):
        assert insts[idx].arg is not None
        return insts[idx].arg & 1 == 1

    def set_inst_bit(idx):
        assert insts[idx].arg is not None
        insts[idx].arg |= 1

    if sys.version_info >= (3, 13):
        # In 3.13, NULL follows the callable
        if inst_has_op_bits(insts[-1].opname) and not inst_has_bit_set(-1):
            # All insts with op bits have the push_null bit as the last one.
            # Only set the bit if it hasn't been set - otherwise, we need
            # to add another PUSH_NULL.
            set_inst_bit(-1)
        else:
            insts = insts + [create_instruction("PUSH_NULL")]
    elif sys.version_info >= (3, 12):
        # LOAD_ATTR/LOAD_SUPER_ATTR at the end
        # We assume that `insts` will only load 1 object, so
        # LOAD_GLOBAL at the end doesn't need to be checked
        if inst_has_op_bits(insts[-1].opname) and not inst_has_bit_set(-1):
            set_inst_bit(-1)
        elif insts[0].opname == "LOAD_GLOBAL" and not inst_has_bit_set(0):
            set_inst_bit(0)
        else:
            insts = [create_instruction("PUSH_NULL")] + insts
    elif sys.version_info >= (3, 11):
        # 3.11 introduced NULL preceding callable
        if inst_has_op_bits(insts[0].opname) and not inst_has_bit_set(0):
            set_inst_bit(0)
        else:
            insts = [create_instruction("PUSH_NULL")] + insts
    return insts
233
+
234
+
235
def add_push_null_call_function_ex(
    inst_or_insts: Union[Instruction, List[Instruction]],
) -> List[Instruction]:
    """Like add_push_null, but the low bit of LOAD_ATTR/LOAD_SUPER_ATTR
    is not set, due to an expected CALL_FUNCTION_EX instruction.
    """
    if isinstance(inst_or_insts, Instruction):
        insts = [inst_or_insts]
    else:
        insts = inst_or_insts

    if sys.version_info < (3, 11):
        # pre-3.11 calling convention has no NULL; nothing to do
        return insts

    # NULL sits after the callable on 3.13+, before it on 3.11/3.12.
    idx = -1 if sys.version_info >= (3, 13) else 0
    if insts[idx].opname == "LOAD_GLOBAL":
        assert insts[idx].arg is not None
        if insts[idx].arg & 1 == 0:  # type: ignore[operator]
            insts[idx].arg |= 1  # type: ignore[operator]
            return insts

    if sys.version_info >= (3, 13):
        insts = insts + [create_instruction("PUSH_NULL")]
    else:
        insts = [create_instruction("PUSH_NULL")] + insts

    return insts
262
+
263
+
264
def create_call_function(nargs, push_null) -> List[Instruction]:
    """
    Creates a sequence of instructions that makes a function call.

    `push_null` is used in Python 3.11+ only. It is used in codegen when
    a function call is intended to be made with the NULL + fn convention,
    and we know that the NULL has not been pushed yet. We will push a
    NULL and rotate it to the correct position immediately before making
    the function call.

    `push_null` should be True if no NULL is pushed for the callable.
    Conversely, `push_null` should be False if a NULL was pushed for the callable.
    Prefer using `push_null=False` when possible since we will not need to rotate
    NULL to the right place, which is less efficient.

    Generally, you should codegen a function by using `add_push_null` then
    `create_call_function` with `push_null=False`.

    Example of when to set push_null False:

    insts = [
        create_instruction("LOAD_GLOBAL", argval="torch"),
        create_instruction("LOAD_ATTR", argval="nn"),
        create_instruction("LOAD_ATTR", argval="functional"),
        create_instruction("LOAD_ATTR", argval="relu"),
    ]
    insts = add_push_null(insts)
    insts.append(create_instruction("LOAD_FAST", argval="x"))
    insts.extend(create_call_function(1, False))

    Example of when to set push_null True:

    insts = [create_instruction("LOAD_FAST", x)]
    for should_wrap, wrapper_name in wrappers:
        if should_wrap:
            insts.extend([
                create_instruction("LOAD_GLOBAL", argval="wrapper1"),
                create_instruction("SWAP", arg=2),
                *create_call_function(1, True),
            )
    """
    if sys.version_info >= (3, 11):
        output = []
        if push_null:
            output.append(create_instruction("PUSH_NULL"))
            # 3.13 swapped NULL and callable
            rots = nargs + 1 if sys.version_info >= (3, 13) else nargs + 2
            output.extend(create_rot_n(rots))
        if sys.version_info < (3, 12):
            # 3.11 requires PRECALL before CALL; 3.12 removed it.
            output.append(create_instruction("PRECALL", arg=nargs))
        output.append(create_instruction("CALL", arg=nargs))
        return output
    return [create_instruction("CALL_FUNCTION", arg=nargs)]
317
+
318
+
319
def create_call_method(nargs) -> List[Instruction]:
    """Instruction sequence that calls a method loaded via create_load_method."""
    if sys.version_info >= (3, 12):
        # 3.12 removed PRECALL.
        return [create_instruction("CALL", arg=nargs)]
    if sys.version_info >= (3, 11):
        precall = create_instruction("PRECALL", arg=nargs)
        call = create_instruction("CALL", arg=nargs)
        return [precall, call]
    return [create_instruction("CALL_METHOD", arg=nargs)]
328
+
329
+
330
def create_load_method(name) -> Instruction:
    if sys.version_info < (3, 12):
        return create_instruction("LOAD_METHOD", argval=name)
    # 3.12 folded LOAD_METHOD into LOAD_ATTR with the low bit set.
    return create_instruction("LOAD_ATTR", arg=1, argval=name)
335
+
336
+
337
def create_setup_with(target) -> Instruction:
    # SETUP_WITH was replaced by BEFORE_WITH in 3.11.
    if sys.version_info >= (3, 11):
        return create_instruction("BEFORE_WITH", target=target)
    return create_instruction("SETUP_WITH", target=target)
340
+
341
+
342
def create_swap(n) -> List[Instruction]:
    # Swap TOS with the item n-1 entries below it.
    if sys.version_info >= (3, 11):
        return [create_instruction("SWAP", arg=n)]
    # in Python < 3.11, SWAP is a macro that expands to multiple instructions
    if n == 1:
        return []
    """
    e.g. swap "a" and "b" in this stack:
    0 a 1 2 3 b
    0 a [1 2 3 b]
    0 a [1 2 3 b] [1 2 3 b]
    0 a [1 2 3 b] [1 2 3 b] -1
    0 a [1 2 3 b] b
    0 b a [1 2 3 b]
    0 b a [1 2 3 b] [1 2 3 b]
    0 b [1 2 3 b] a [1 2 3 b]
    0 b [1 2 3 b] a [1 2 3 b] -1
    0 b [1 2 3 a]
    0 b [1 2 3 a] [1 2 3 a]
    0 b [1 2 3 a] [1 2 3 a] reverse
    0 b [a 3 2 1] None
    0 b [a 3 2 1]
    0 b 1 2 3 a
    """
    # Emulate SWAP n by packing n-1 entries into a list, splicing the two
    # values, reversing, and unpacking (see the stack trace above).
    return [
        create_instruction("BUILD_LIST", arg=n - 1),
        create_instruction("DUP_TOP"),
        create_instruction("LOAD_CONST", argval=-1),
        create_instruction("BINARY_SUBSCR"),
        create_instruction("ROT_THREE"),
        create_instruction("DUP_TOP"),
        create_instruction("ROT_THREE"),
        create_instruction("LOAD_CONST", argval=-1),
        create_instruction("STORE_SUBSCR"),
        create_instruction("DUP_TOP"),
        create_load_method("reverse"),
        *create_call_method(0),
        create_instruction("POP_TOP"),
        create_instruction("UNPACK_SEQUENCE", arg=n - 1),
    ]
382
+
383
+
384
+ def lnotab_writer(
385
+ lineno: int, byteno: int = 0
386
+ ) -> Tuple[List[int], Callable[[int, int], None]]:
387
+ """
388
+ Used to create typing.CodeType.co_lnotab
389
+ See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt
390
+ This is the internal format of the line number table if Python < 3.10
391
+ """
392
+ assert sys.version_info < (3, 10)
393
+ lnotab: List[int] = []
394
+
395
+ def update(lineno_new, byteno_new):
396
+ nonlocal byteno, lineno
397
+ while byteno_new != byteno or lineno_new != lineno:
398
+ byte_offset = max(0, min(byteno_new - byteno, 255))
399
+ line_offset = max(-128, min(lineno_new - lineno, 127))
400
+ assert byte_offset != 0 or line_offset != 0
401
+ byteno += byte_offset
402
+ lineno += line_offset
403
+ lnotab.extend((byte_offset, line_offset & 0xFF))
404
+
405
+ return lnotab, update
406
+
407
+
408
+ def linetable_310_writer(first_lineno):
409
+ """
410
+ Used to create typing.CodeType.co_linetable
411
+ See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt
412
+ This is the internal format of the line number table for Python 3.10
413
+ """
414
+ assert sys.version_info >= (3, 10) and sys.version_info < (3, 11)
415
+ linetable: List[int] = []
416
+ lineno = first_lineno
417
+ lineno_delta = 0
418
+ byteno = 0
419
+
420
+ def _update(byteno_delta, lineno_delta):
421
+ while byteno_delta != 0 or lineno_delta != 0:
422
+ byte_offset = max(0, min(byteno_delta, 254))
423
+ line_offset = max(-127, min(lineno_delta, 127))
424
+ assert byte_offset != 0 or line_offset != 0
425
+ byteno_delta -= byte_offset
426
+ lineno_delta -= line_offset
427
+ linetable.extend((byte_offset, line_offset & 0xFF))
428
+
429
+ def update(lineno_new, byteno_new):
430
+ nonlocal lineno, lineno_delta, byteno
431
+ byteno_delta = byteno_new - byteno
432
+ byteno = byteno_new
433
+ _update(byteno_delta, lineno_delta)
434
+ lineno_delta = lineno_new - lineno
435
+ lineno = lineno_new
436
+
437
+ def end(total_bytes):
438
+ _update(total_bytes - byteno, lineno_delta)
439
+
440
+ return linetable, update, end
441
+
442
+
443
+ def encode_varint(n: int) -> List[int]:
444
+ """
445
+ 6-bit chunk encoding of an unsigned integer
446
+ See https://github.com/python/cpython/blob/3.11/Objects/locations.md
447
+ """
448
+ assert n >= 0
449
+ b = [n & 63]
450
+ n >>= 6
451
+ while n > 0:
452
+ b[-1] |= 64
453
+ b.append(n & 63)
454
+ n >>= 6
455
+ return b
456
+
457
+
458
+ def linetable_311_writer(first_lineno: int):
459
+ """
460
+ Used to create typing.CodeType.co_linetable
461
+ See https://github.com/python/cpython/blob/3.11/Objects/locations.md
462
+ This is the internal format of the line number table for Python 3.11
463
+ """
464
+ assert sys.version_info >= (3, 11)
465
+ linetable = []
466
+ lineno = first_lineno
467
+
468
+ def update(positions: "dis.Positions", inst_size):
469
+ nonlocal lineno
470
+ lineno_new = positions.lineno if positions else None
471
+
472
+ def _update(delta, size):
473
+ assert 0 < size <= 8
474
+ # first byte - use 13 (no column info) is positions is
475
+ # malformed, otherwise use 14 (long form)
476
+ other_varints: Tuple[int, ...] = ()
477
+ if (
478
+ positions
479
+ and positions.lineno is not None
480
+ and positions.end_lineno is not None
481
+ and positions.col_offset is not None
482
+ and positions.end_col_offset is not None
483
+ ):
484
+ linetable.append(0b1_1110_000 + size - 1)
485
+ # for whatever reason, column offset needs `+ 1`
486
+ # https://github.com/python/cpython/blob/1931c2a438c50e6250725c84dff94fc760b9b951/Python/compile.c#L7603
487
+ other_varints = (
488
+ positions.end_lineno - positions.lineno,
489
+ positions.col_offset + 1,
490
+ positions.end_col_offset + 1,
491
+ )
492
+ else:
493
+ linetable.append(0b1_1101_000 + size - 1)
494
+ # encode signed int
495
+ if delta < 0:
496
+ delta = ((-delta) << 1) | 1
497
+ else:
498
+ delta <<= 1
499
+ # encode unsigned int
500
+ linetable.extend(encode_varint(delta))
501
+ for n in other_varints:
502
+ linetable.extend(encode_varint(n))
503
+
504
+ if lineno_new is None:
505
+ lineno_delta = 0
506
+ else:
507
+ lineno_delta = lineno_new - lineno
508
+ lineno = lineno_new
509
+ while inst_size > 8:
510
+ _update(lineno_delta, 8)
511
+ inst_size -= 8
512
+ _update(lineno_delta, inst_size)
513
+
514
+ return linetable, update
515
+
516
+
517
+ @dataclasses.dataclass
518
+ class ExceptionTableEntry:
519
+ start: int
520
+ end: int
521
+ target: int
522
+ depth: int
523
+ lasti: bool
524
+
525
+
526
+ def encode_exception_table_varint(n: int) -> List[int]:
527
+ """
528
+ Similar to `encode_varint`, but the 6-bit chunks are ordered in reverse.
529
+ """
530
+ assert n >= 0
531
+ b = [n & 63]
532
+ n >>= 6
533
+ while n > 0:
534
+ b.append(n & 63)
535
+ n >>= 6
536
+ b.reverse()
537
+ for i in range(len(b) - 1):
538
+ b[i] |= 64
539
+ return b
540
+
541
+
542
+ def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int:
543
+ """
544
+ Inverse of `encode_exception_table_varint`.
545
+ """
546
+ b = next(bytes_iter)
547
+ val = b & 63
548
+ while b & 64:
549
+ val <<= 6
550
+ b = next(bytes_iter)
551
+ val |= b & 63
552
+ return val
553
+
554
+
555
+ def check_exception_table(tab: List[ExceptionTableEntry]) -> None:
556
+ """
557
+ Verifies that a list of ExceptionTableEntries will make a well-formed
558
+ jump table: entries are non-empty, sorted, and do not overlap.
559
+ """
560
+ for i in range(len(tab) - 1):
561
+ assert (
562
+ tab[i].start <= tab[i].end
563
+ and tab[i].end < tab[i + 1].start
564
+ and tab[i + 1].start <= tab[i + 1].end
565
+ )
566
+
567
+
568
+ def parse_exception_table(exntab: bytes) -> List[ExceptionTableEntry]:
569
+ """
570
+ Parse the exception table according to
571
+ https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt
572
+ """
573
+ exntab_iter = iter(exntab)
574
+ tab = []
575
+ try:
576
+ while True:
577
+ start = decode_exception_table_varint(exntab_iter) * 2
578
+ length = decode_exception_table_varint(exntab_iter) * 2
579
+ end = start + length - 2
580
+ target = decode_exception_table_varint(exntab_iter) * 2
581
+ dl = decode_exception_table_varint(exntab_iter)
582
+ depth = dl >> 1
583
+ lasti = bool(dl & 1)
584
+ tab.append(ExceptionTableEntry(start, end, target, depth, lasti))
585
+ except StopIteration:
586
+ check_exception_table(tab)
587
+ return tab
588
+
589
+
590
+ def assemble_exception_table(tab: List[ExceptionTableEntry]) -> bytes:
591
+ """
592
+ Inverse of parse_exception_table - encodes list of exception
593
+ table entries into bytes.
594
+ """
595
+ b = []
596
+ for entry in tab:
597
+ first_entry = encode_exception_table_varint(entry.start // 2)
598
+ first_entry[0] |= 1 << 7
599
+ b.extend(first_entry)
600
+ length = entry.end - entry.start + 2
601
+ b.extend(encode_exception_table_varint(length // 2))
602
+ b.extend(encode_exception_table_varint(entry.target // 2))
603
+ dl = (entry.depth << 1) + entry.lasti
604
+ b.extend(encode_exception_table_varint(dl))
605
+ return bytes(b)
606
+
607
+
608
+ def assemble(instructions: List[Instruction], firstlineno: int) -> Tuple[bytes, bytes]:
609
+ """Do the opposite of dis.get_instructions()"""
610
+ code: List[int] = []
611
+ if sys.version_info >= (3, 11):
612
+ lnotab, update_lineno = linetable_311_writer(firstlineno)
613
+ num_ext = 0
614
+ for i, inst in enumerate(instructions):
615
+ if inst.opname == "EXTENDED_ARG":
616
+ inst_size = 1
617
+ num_ext += 1
618
+ # copy positions from the actual instruction
619
+ for j in (1, 2, 3):
620
+ if instructions[i + j].opname != "EXTENDED_ARG":
621
+ inst.positions = instructions[i + j].positions
622
+ break
623
+ else:
624
+ inst_size = instruction_size(inst) // 2 + num_ext
625
+ num_ext = 0
626
+ update_lineno(inst.positions, inst_size)
627
+ num_ext = 0
628
+ arg = inst.arg or 0
629
+ code.extend((inst.opcode, arg & 0xFF))
630
+ for _ in range(instruction_size(inst) // 2 - 1):
631
+ code.extend((0, 0))
632
+ else:
633
+ if sys.version_info < (3, 10):
634
+ lnotab, update_lineno = lnotab_writer(firstlineno)
635
+ else:
636
+ lnotab, update_lineno, end = linetable_310_writer(firstlineno)
637
+
638
+ for inst in instructions:
639
+ if inst.starts_line is not None:
640
+ update_lineno(inst.starts_line, len(code))
641
+ arg = inst.arg or 0
642
+ code.extend((inst.opcode, arg & 0xFF))
643
+
644
+ if sys.version_info >= (3, 10):
645
+ end(len(code))
646
+
647
+ return bytes(code), bytes(lnotab)
648
+
649
+
650
+ def _get_instruction_by_offset(offset_to_inst: Dict[int, Instruction], offset: int):
651
+ """
652
+ Get the instruction located at a given offset, accounting for EXTENDED_ARGs
653
+ """
654
+ for n in (0, 2, 4, 6):
655
+ if offset_to_inst[offset + n].opcode != dis.EXTENDED_ARG:
656
+ return offset_to_inst[offset + n]
657
+ return None
658
+
659
+
660
+ def virtualize_jumps(instructions) -> None:
661
+ """Replace jump targets with pointers to make editing easier"""
662
+ jump_targets = {inst.offset: inst for inst in instructions}
663
+
664
+ for inst in instructions:
665
+ if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel:
666
+ inst.target = _get_instruction_by_offset(jump_targets, inst.argval)
667
+
668
+
669
+ _REL_JUMPS = set(dis.hasjrel)
670
+
671
+
672
+ def flip_jump_direction(instruction: Instruction) -> None:
673
+ if sys.version_info < (3, 11):
674
+ raise RuntimeError("Cannot flip jump direction in Python < 3.11")
675
+ if "FORWARD" in instruction.opname:
676
+ instruction.opname = instruction.opname.replace("FORWARD", "BACKWARD")
677
+ elif "BACKWARD" in instruction.opname:
678
+ instruction.opname = instruction.opname.replace("BACKWARD", "FORWARD")
679
+ else:
680
+ raise AttributeError("Instruction is not a forward or backward jump")
681
+ instruction.opcode = dis.opmap[instruction.opname]
682
+ assert instruction.opcode in _REL_JUMPS
683
+
684
+
685
+ def _get_instruction_front(instructions: List[Instruction], idx: int):
686
+ """
687
+ i.e. get the first EXTENDED_ARG instruction (if any) when targeting
688
+ instructions[idx] with a jump.
689
+ """
690
+ target = instructions[idx]
691
+ for offset in (1, 2, 3):
692
+ if idx >= offset and instructions[idx - offset].opcode == dis.EXTENDED_ARG:
693
+ target = instructions[idx - offset]
694
+ else:
695
+ break
696
+ return target
697
+
698
+
699
+ def devirtualize_jumps(instructions):
700
+ """Fill in args for virtualized jump target after instructions may have moved"""
701
+ jumps = set(dis.hasjabs).union(set(dis.hasjrel))
702
+
703
+ # check for negative jump args and fix them
704
+ for inst in instructions:
705
+ if inst.opcode in jumps:
706
+ if inst.opcode not in dis.hasjabs:
707
+ if inst.target.offset < inst.offset:
708
+ if sys.version_info < (3, 11):
709
+ raise RuntimeError("Got negative jump offset for Python < 3.11")
710
+ # forward jumps become backward
711
+ if "FORWARD" in inst.opname:
712
+ flip_jump_direction(inst)
713
+ else:
714
+ # backward jumps become forward
715
+ if sys.version_info >= (3, 11) and "BACKWARD" in inst.opname:
716
+ flip_jump_direction(inst)
717
+
718
+ # jump instruction size may have changed due to flips
719
+ update_offsets(instructions)
720
+ indexof = get_indexof(instructions)
721
+
722
+ # compute jump instruction arg
723
+ for inst in instructions:
724
+ if inst.opcode in jumps:
725
+ target = _get_instruction_front(instructions, indexof[inst.target])
726
+ if inst.opcode in dis.hasjabs:
727
+ if sys.version_info < (3, 10):
728
+ inst.arg = target.offset
729
+ elif sys.version_info < (3, 11):
730
+ # `arg` is expected to be bytecode offset, whereas `offset` is byte offset.
731
+ # Divide since bytecode is 2 bytes large.
732
+ inst.arg = int(target.offset / 2)
733
+ else:
734
+ raise RuntimeError("Python 3.11+ should not have absolute jumps")
735
+ else: # relative jump
736
+ # byte offset between target and next instruction
737
+ inst.arg = abs(
738
+ int(target.offset - inst.offset - instruction_size(inst))
739
+ )
740
+ if sys.version_info >= (3, 10):
741
+ # see bytecode size comment in the absolute jump case above
742
+ inst.arg //= 2
743
+ inst.argval = target.offset
744
+ inst.argrepr = f"to {target.offset}"
745
+
746
+
747
+ def virtualize_exception_table(exn_tab_bytes: bytes, instructions: List[Instruction]):
748
+ """Replace exception table entries with pointers to make editing easier"""
749
+ exn_tab = parse_exception_table(exn_tab_bytes)
750
+ offset_to_inst = {cast(int, inst.offset): inst for inst in instructions}
751
+ offsets = sorted(offset_to_inst.keys())
752
+ end_offset_idx = 0
753
+ exn_tab_iter = iter(exn_tab)
754
+ try:
755
+
756
+ def step():
757
+ nonlocal end_offset_idx
758
+ entry = next(exn_tab_iter)
759
+ # find rightmost offset <= entry.end, since entry.end may not be
760
+ # an actual instruction, e.g. if the end instruction is LOAD_GLOBAL,
761
+ # which takes more than 2 bytes, then entry.end points to the end
762
+ # of the LOAD_GLOBAL instruction, not the beginning.
763
+ while (
764
+ end_offset_idx < len(offsets) and offsets[end_offset_idx] <= entry.end
765
+ ):
766
+ end_offset_idx += 1
767
+ assert end_offset_idx > 0
768
+ end_offset = offsets[end_offset_idx - 1]
769
+ inst_entry = InstructionExnTabEntry(
770
+ _get_instruction_by_offset(offset_to_inst, entry.start),
771
+ _get_instruction_by_offset(offset_to_inst, end_offset),
772
+ _get_instruction_by_offset(offset_to_inst, entry.target),
773
+ entry.depth,
774
+ entry.lasti,
775
+ )
776
+ return entry, inst_entry
777
+
778
+ entry, inst_entry = step()
779
+ for inst in instructions:
780
+ while inst.offset > entry.end:
781
+ entry, inst_entry = step()
782
+ if inst.offset >= entry.start:
783
+ inst.exn_tab_entry = copy.copy(inst_entry)
784
+ except StopIteration:
785
+ pass
786
+
787
+
788
+ def compute_exception_table(
789
+ instructions: List[Instruction],
790
+ ) -> List[ExceptionTableEntry]:
791
+ """Compute exception table in list format from instructions with exn_tab_entries"""
792
+ exn_dict: Dict[Tuple[int, int], Tuple[int, int, bool]] = {}
793
+ indexof = get_indexof(instructions)
794
+
795
+ for inst in instructions:
796
+ if inst.exn_tab_entry:
797
+ # account for prefixed EXTENDED_ARGS
798
+ start = _get_instruction_front(
799
+ instructions, indexof[inst.exn_tab_entry.start]
800
+ ).offset
801
+ # point to the last 2 bytes of the end instruction
802
+ end = (
803
+ cast(int, inst.exn_tab_entry.end.offset)
804
+ + instruction_size(inst.exn_tab_entry.end)
805
+ - 2
806
+ )
807
+ target = _get_instruction_front(
808
+ instructions, indexof[inst.exn_tab_entry.target]
809
+ ).offset
810
+ key = (start, end)
811
+ val = (target, inst.exn_tab_entry.depth, inst.exn_tab_entry.lasti)
812
+ if key in exn_dict:
813
+ assert exn_dict[key] == val
814
+ exn_dict[key] = val
815
+
816
+ # Dynamo may construct nested exception table entries for convenience,
817
+ # but Python expects exception table entries to not overlap.
818
+ # NOTE: below, "keys" refer to old instruction entries' starts and ends,
819
+ # and "entries" refer to the generated exception table entries.
820
+
821
+ # Sort keys by increasing start, then decreasing end
822
+ keys_sorted = sorted(exn_dict.keys(), key=lambda t: (t[0], -t[1]))
823
+ # smallest byte that the next exception table entry can start at
824
+ nexti = 0
825
+ # stack of current nested keys
826
+ key_stack: List[Tuple[int, int]] = []
827
+ exn_tab: List[ExceptionTableEntry] = []
828
+
829
+ def pop():
830
+ """
831
+ Pop the key_stack and append an exception table entry if possible.
832
+ """
833
+ nonlocal nexti
834
+ if key_stack:
835
+ key = key_stack.pop()
836
+ if nexti <= key[1]:
837
+ exn_tab.append(
838
+ ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key])
839
+ )
840
+ nexti = key[1] + 2
841
+
842
+ for key in keys_sorted:
843
+ # pop keys that are no longer nested over the current key
844
+ while key_stack and key_stack[-1][1] < key[0]:
845
+ pop()
846
+ if key_stack:
847
+ # create an entry covering to the current key, if possible
848
+ assert key_stack[-1][0] <= key[0] <= key[1] <= key_stack[-1][1]
849
+ left = max(nexti, key_stack[-1][0])
850
+ if left < key[0]:
851
+ exn_tab.append(
852
+ ExceptionTableEntry(left, key[0] - 2, *exn_dict[key_stack[-1]])
853
+ )
854
+ nexti = key[0]
855
+ key_stack.append(key)
856
+ while key_stack:
857
+ pop()
858
+ check_exception_table(exn_tab)
859
+ return exn_tab
860
+
861
+
862
+ def check_inst_exn_tab_entries_nested(
863
+ tab: List[InstructionExnTabEntry], indexof
864
+ ) -> None:
865
+ """
866
+ Checks `tab` is a properly sorted list of nested InstructionExnTabEntry's,
867
+ i.e. no entries partially overlap.
868
+ "Properly sorted" means entries are sorted by increasing starts, then
869
+ decreasing ends.
870
+ """
871
+ entry_stack: List[Tuple[int, int]] = []
872
+ for entry in tab:
873
+ key = (indexof[entry.start], indexof[entry.end])
874
+ while entry_stack and entry_stack[-1][1] < key[0]:
875
+ entry_stack.pop()
876
+ if entry_stack:
877
+ assert entry_stack[-1][0] <= key[0] <= key[1] <= entry_stack[-1][1]
878
+ entry_stack.append(key)
879
+
880
+
881
+ def propagate_inst_exn_table_entries(instructions: List[Instruction]) -> None:
882
+ """
883
+ Copies exception table entries to all instructions in an entry's range.
884
+ Supports nested exception table entries.
885
+ """
886
+ indexof = get_indexof(instructions)
887
+ entries: Dict[Tuple[int, int], InstructionExnTabEntry] = {}
888
+ for inst in instructions:
889
+ if inst.exn_tab_entry:
890
+ key = (
891
+ indexof[inst.exn_tab_entry.start],
892
+ indexof[inst.exn_tab_entry.end],
893
+ )
894
+ if key in entries:
895
+ assert inst.exn_tab_entry == entries[key]
896
+ entries[key] = inst.exn_tab_entry
897
+ sorted_entries = [
898
+ entries[key] for key in sorted(entries.keys(), key=lambda t: (t[0], -t[1]))
899
+ ]
900
+ check_inst_exn_tab_entries_nested(sorted_entries, indexof)
901
+ # Propagation of nested entries works since nested entries come later
902
+ # in sorted order.
903
+ for entry in sorted_entries:
904
+ for i in range(indexof[entry.start], indexof[entry.end] + 1):
905
+ instructions[i].exn_tab_entry = copy.copy(entry)
906
+
907
+
908
+ def check_inst_exn_tab_entries_valid(instructions: List[Instruction]):
909
+ """
910
+ Checks that exn_tab_entries of instructions are valid.
911
+ An entry's start, end, and target must be in instructions.
912
+ Instructions with an exn_tab_entry are located within
913
+ the entry's start and end instructions.
914
+ Instructions do not share exn_tab_entries.
915
+
916
+ Implicitly checks for no duplicate instructions.
917
+ """
918
+ indexof = get_indexof(instructions)
919
+ exn_tab_entry_set = set()
920
+ for i, inst in enumerate(instructions):
921
+ if inst.exn_tab_entry:
922
+ assert sys.version_info >= (3, 11)
923
+ assert id(inst.exn_tab_entry) not in exn_tab_entry_set
924
+ exn_tab_entry_set.add(id(inst.exn_tab_entry))
925
+ entry = inst.exn_tab_entry
926
+ assert entry.start in indexof
927
+ assert entry.end in indexof
928
+ assert entry.target in indexof
929
+ assert indexof[entry.start] <= i <= indexof[entry.end]
930
+
931
+
932
+ def strip_extended_args(instructions: List[Instruction]) -> None:
933
+ instructions[:] = [i for i in instructions if i.opcode != dis.EXTENDED_ARG]
934
+
935
+
936
+ def remove_load_call_method(instructions: List[Instruction]) -> List[Instruction]:
937
+ """LOAD_METHOD puts a NULL on the stack which causes issues, so remove it"""
938
+ assert sys.version_info < (3, 11)
939
+ rewrites = {"LOAD_METHOD": "LOAD_ATTR", "CALL_METHOD": "CALL_FUNCTION"}
940
+ for inst in instructions:
941
+ if inst.opname in rewrites:
942
+ inst.opname = rewrites[inst.opname]
943
+ inst.opcode = dis.opmap[inst.opname]
944
+ return instructions
945
+
946
+
947
+ def remove_jump_if_none(instructions: List[Instruction]) -> None:
948
+ new_insts = []
949
+ for inst in instructions:
950
+ new_insts.append(inst)
951
+ if "_NONE" in inst.opname:
952
+ is_op = create_instruction("IS_OP", arg=int("NOT" in inst.opname))
953
+ is_op.argval = is_op.arg
954
+ is_op.positions = inst.positions
955
+ if sys.version_info < (3, 12):
956
+ jump_op = create_instruction(
957
+ "POP_JUMP_FORWARD_IF_TRUE"
958
+ if "FORWARD" in inst.opname
959
+ else "POP_JUMP_BACKWARD_IF_TRUE",
960
+ target=inst.target,
961
+ )
962
+ else:
963
+ jump_op = create_instruction("POP_JUMP_IF_TRUE", target=inst.target)
964
+ jump_op.positions = inst.positions
965
+ # update inst.exn_tab_entry.end if necessary
966
+ if inst.exn_tab_entry and inst.exn_tab_entry.end is inst:
967
+ inst.exn_tab_entry.end = jump_op
968
+ # preserve exception table entries
969
+ is_op.exn_tab_entry = copy.copy(inst.exn_tab_entry)
970
+ jump_op.exn_tab_entry = copy.copy(inst.exn_tab_entry)
971
+ # modify inst in-place to preserve jump target
972
+ inst.opcode = dis.opmap["LOAD_CONST"]
973
+ inst.opname = "LOAD_CONST"
974
+ inst.arg = None
975
+ inst.argval = None
976
+ new_insts.extend([is_op, jump_op])
977
+ instructions[:] = new_insts
978
+
979
+
980
+ def remove_binary_store_slice(instructions: List[Instruction]) -> None:
981
+ new_insts = []
982
+ for inst in instructions:
983
+ new_insts.append(inst)
984
+ if inst.opname in ("BINARY_SLICE", "STORE_SLICE"):
985
+ # new instruction
986
+ subscr_inst = create_instruction(inst.opname.replace("SLICE", "SUBSCR"))
987
+ if inst.exn_tab_entry and inst.exn_tab_entry.end is inst:
988
+ inst.exn_tab_entry.end = subscr_inst
989
+ subscr_inst.exn_tab_entry = copy.copy(inst.exn_tab_entry)
990
+ subscr_inst.positions = inst.positions
991
+ # modify inst in-place to preserve jump target
992
+ inst.opcode = dis.opmap["BUILD_SLICE"]
993
+ inst.opname = "BUILD_SLICE"
994
+ inst.arg = 2
995
+ inst.argval = 2
996
+ new_insts.append(subscr_inst)
997
+ instructions[:] = new_insts
998
+
999
+
1000
+ FUSED_INSTS = {
1001
+ "LOAD_FAST_LOAD_FAST": ("LOAD_FAST", "LOAD_FAST"),
1002
+ "STORE_FAST_STORE_FAST": ("STORE_FAST", "STORE_FAST"),
1003
+ "STORE_FAST_LOAD_FAST": ("STORE_FAST", "LOAD_FAST"),
1004
+ }
1005
+
1006
+
1007
+ def remove_fused_load_store(instructions: List[Instruction]) -> None:
1008
+ new_insts = []
1009
+ for inst in instructions:
1010
+ new_insts.append(inst)
1011
+ if inst.opname in FUSED_INSTS:
1012
+ inst0, inst1 = FUSED_INSTS[inst.opname]
1013
+ argval0, argval1 = inst.argval
1014
+
1015
+ # modify inst in-place to preserve jump target
1016
+ inst.opcode = dis.opmap[inst0]
1017
+ inst.opname = inst0
1018
+ inst.argval = argval0
1019
+
1020
+ new_inst = create_instruction(inst1, argval=argval1)
1021
+ # update inst.exn_tab_entry.end if necessary
1022
+ if inst.exn_tab_entry and inst.exn_tab_entry.end is inst:
1023
+ inst.exn_tab_entry.end = new_inst
1024
+ # preserve exception table entries
1025
+ new_inst.exn_tab_entry = copy.copy(inst.exn_tab_entry)
1026
+
1027
+ new_insts.append(new_inst)
1028
+ instructions[:] = new_insts
1029
+
1030
+
1031
+ def explicit_super(code: types.CodeType, instructions: List[Instruction]) -> None:
1032
+ """convert super() with no args into explicit arg form"""
1033
+ cell_and_free = (code.co_cellvars or ()) + (code.co_freevars or ())
1034
+ if not len(code.co_varnames):
1035
+ # A function with no argument cannot contain a valid "super()" call
1036
+ return
1037
+ output = []
1038
+ for idx, inst in enumerate(instructions):
1039
+ output.append(inst)
1040
+ if inst.opname == "LOAD_GLOBAL" and inst.argval == "super":
1041
+ nexti = instructions[idx + 1]
1042
+ if nexti.arg == 0 and (
1043
+ (sys.version_info >= (3, 12) and nexti.opname == "CALL")
1044
+ or (
1045
+ sys.version_info >= (3, 11)
1046
+ and sys.version_info < (3, 12)
1047
+ and nexti.opname == "PRECALL"
1048
+ )
1049
+ or (sys.version_info < (3, 11) and nexti.opname == "CALL_FUNCTION")
1050
+ ):
1051
+ assert "__class__" in cell_and_free
1052
+ output.append(create_instruction("LOAD_DEREF", argval="__class__"))
1053
+ first_var = code.co_varnames[0]
1054
+ if first_var in cell_and_free:
1055
+ output.append(create_instruction("LOAD_DEREF", argval=first_var))
1056
+ else:
1057
+ output.append(create_instruction("LOAD_FAST", argval=first_var))
1058
+ nexti.arg = 2
1059
+ nexti.argval = 2
1060
+ if nexti.opname == "PRECALL":
1061
+ # also update the following CALL instruction
1062
+ call_inst = instructions[idx + 2]
1063
+ call_inst.arg = 2
1064
+ call_inst.argval = 2
1065
+
1066
+ instructions[:] = output
1067
+
1068
+
1069
+ def fix_extended_args(instructions: List[Instruction]) -> int:
1070
+ """Fill in correct argvals for EXTENDED_ARG ops"""
1071
+ output: List[Instruction] = []
1072
+
1073
+ def maybe_pop_n(n):
1074
+ for _ in range(n):
1075
+ if output and output[-1].opcode == dis.EXTENDED_ARG:
1076
+ output.pop()
1077
+
1078
+ for inst in instructions:
1079
+ if inst.opcode == dis.EXTENDED_ARG:
1080
+ # Leave this instruction alone for now so we never shrink code
1081
+ inst.arg = 0
1082
+ elif inst.arg and inst.arg > 0xFFFFFF:
1083
+ maybe_pop_n(3)
1084
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 24))
1085
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16))
1086
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
1087
+ elif inst.arg and inst.arg > 0xFFFF:
1088
+ maybe_pop_n(2)
1089
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16))
1090
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
1091
+ elif inst.arg and inst.arg > 0xFF:
1092
+ maybe_pop_n(1)
1093
+ output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
1094
+ output.append(inst)
1095
+
1096
+ added = len(output) - len(instructions)
1097
+ assert added >= 0
1098
+ instructions[:] = output
1099
+ return added
1100
+
1101
+
1102
+ def instruction_size(inst) -> int:
1103
+ import torch
1104
+
1105
+ if sys.version_info >= (3, 11):
1106
+ return 2 * (torch._C._dynamo.eval_frame.py_opcode_caches[inst.opcode] + 1)
1107
+ return 2
1108
+
1109
+
1110
+ def check_offsets(instructions) -> None:
1111
+ offset = 0
1112
+ for inst in instructions:
1113
+ assert inst.offset == offset
1114
+ offset += instruction_size(inst)
1115
+
1116
+
1117
+ def update_offsets(instructions) -> None:
1118
+ offset = 0
1119
+ for inst in instructions:
1120
+ inst.offset = offset
1121
+ offset += instruction_size(inst)
1122
+
1123
+
1124
+ def debug_bytes(*args) -> str:
1125
+ index = range(max(map(len, args)))
1126
+ result = []
1127
+ for arg in (
1128
+ [index] + list(args) + [[int(a != b) for a, b in zip(args[-1], args[-2])]]
1129
+ ):
1130
+ result.append(" ".join(f"{x:03}" for x in arg))
1131
+
1132
+ return "bytes mismatch\n" + "\n".join(result)
1133
+
1134
+
1135
+ def debug_checks(code):
1136
+ """Make sure our assembler produces same bytes as we start with"""
1137
+ dode = transform_code_object(code, lambda x, y: None, safe=True)
1138
+ assert code.co_code == dode.co_code, debug_bytes(code.co_code, dode.co_code)
1139
+ assert code.co_lnotab == dode.co_lnotab, debug_bytes(code.co_lnotab, dode.co_lnotab)
1140
+
1141
+
1142
+ HAS_LOCAL = set(dis.haslocal)
1143
+ HAS_NAME = set(dis.hasname)
1144
+ HAS_FREE = set(dis.hasfree)
1145
+ HAS_CONST = set(dis.hasconst)
1146
+
1147
+
1148
+ def get_const_index(code_options, val) -> int:
1149
+ for i, v in enumerate(code_options["co_consts"]):
1150
+ # NOTE: stronger comparison is required, since we have
1151
+ # examples where two values compare equal but have
1152
+ # different semantic meaning in some cases, e.g.
1153
+ # 0.0 == -0.0 but have different effects in torch.copysign.
1154
+ if val is v:
1155
+ return i
1156
+ code_options["co_consts"] += (val,)
1157
+ return len(code_options["co_consts"]) - 1
1158
+
1159
+
1160
+ def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=None):
1161
+ # compute instruction arg from argval if arg is not provided
1162
+ names = {name: idx for idx, name in enumerate(code_options["co_names"])}
1163
+
1164
+ def get_name_index(name) -> int:
1165
+ try:
1166
+ idx = names[name]
1167
+ except KeyError:
1168
+ # Add a missing item to co_names
1169
+ idx = names[name] = len(names)
1170
+ code_options["co_names"] = (*code_options["co_names"], name)
1171
+ assert len(code_options["co_names"]) == len(names)
1172
+ return idx
1173
+
1174
+ if sys.version_info < (3, 11):
1175
+ assert varname_from_oparg is None
1176
+ varnames = {name: idx for idx, name in enumerate(code_options["co_varnames"])}
1177
+ freenames = {
1178
+ name: idx
1179
+ for idx, name in enumerate(
1180
+ code_options["co_cellvars"] + code_options["co_freevars"]
1181
+ )
1182
+ }
1183
+ else:
1184
+ assert callable(varname_from_oparg)
1185
+ allnames = {}
1186
+ for idx in itertools.count():
1187
+ try:
1188
+ name = varname_from_oparg(idx)
1189
+ allnames[name] = idx
1190
+ except IndexError:
1191
+ break
1192
+ varnames = {name: allnames[name] for name in code_options["co_varnames"]}
1193
+ freenames = {
1194
+ name: allnames[name]
1195
+ for name in code_options["co_cellvars"] + code_options["co_freevars"]
1196
+ }
1197
+ for i in range(len(instructions)):
1198
+
1199
+ def should_compute_arg():
1200
+ # argval is prioritized over arg
1201
+ return instructions[i].argval is not _NotProvided
1202
+
1203
+ if instructions[i].opname == "LOAD_GLOBAL":
1204
+ # 3.11 LOAD_GLOBAL requires both arg and argval - see create_instruction
1205
+ assert instructions[i].argval is not _NotProvided
1206
+ if sys.version_info >= (3, 11):
1207
+ assert instructions[i].arg is not None
1208
+ instructions[i].arg = (get_name_index(instructions[i].argval) << 1) + (
1209
+ cast(int, instructions[i].arg) % 2
1210
+ )
1211
+ else:
1212
+ instructions[i].arg = get_name_index(instructions[i].argval)
1213
+ elif instructions[i].opname == "LOAD_ATTR":
1214
+ # 3.12 LOAD_ATTR requires both arg and argval, like LOAD_GLOBAL
1215
+ assert instructions[i].argval is not _NotProvided
1216
+ if sys.version_info >= (3, 12):
1217
+ assert instructions[i].arg is not None
1218
+ instructions[i].arg = (get_name_index(instructions[i].argval) << 1) + (
1219
+ cast(int, instructions[i].arg) % 2
1220
+ )
1221
+ else:
1222
+ instructions[i].arg = get_name_index(instructions[i].argval)
1223
+ elif instructions[i].opname == "LOAD_SUPER_ATTR":
1224
+ assert instructions[i].arg is not None
1225
+ assert instructions[i].argval is not _NotProvided
1226
+ # Copy low bit, force second bit on for explicit super (the "+ 2")
1227
+ instructions[i].arg = (
1228
+ (get_name_index(instructions[i].argval) << 2)
1229
+ + (cast(int, instructions[i].arg) % 2)
1230
+ + 2
1231
+ )
1232
+ elif instructions[i].opcode in HAS_LOCAL:
1233
+ if should_compute_arg():
1234
+ if (
1235
+ sys.version_info >= (3, 13)
1236
+ and instructions[i].argval not in varnames
1237
+ ):
1238
+ # instructions like LOAD_FAST used for both local and free vars
1239
+ instructions[i].arg = freenames[instructions[i].argval]
1240
+ else:
1241
+ instructions[i].arg = varnames[instructions[i].argval]
1242
+ elif instructions[i].opcode in HAS_NAME:
1243
+ if should_compute_arg():
1244
+ instructions[i].arg = get_name_index(instructions[i].argval)
1245
+ elif instructions[i].opcode in HAS_FREE:
1246
+ if should_compute_arg():
1247
+ instructions[i].arg = freenames[instructions[i].argval]
1248
+ elif instructions[i].opcode in HAS_CONST:
1249
+ # NOTE: only update argval if arg is not provided. This assumes
1250
+ # that any additions to co_consts are appended.
1251
+ if instructions[i].arg is None:
1252
+ # cannot use a dictionary since consts may not be hashable
1253
+ idx = get_const_index(code_options, instructions[i].argval)
1254
+ assert idx >= 0
1255
+ instructions[i].arg = idx
1256
+
1257
+
1258
def clear_instruction_args(instructions):
    """Null out ``arg`` on instructions whose ``argval`` is authoritative.

    Useful when splicing dis'd bytecode into generated bytecode: for
    local/name/free/const instructions, ``fix_vars`` later recomputes ``arg``
    from ``argval`` against the new code object's tables.  LOAD_GLOBAL,
    LOAD_ATTR, and LOAD_SUPER_ATTR are excluded because their ``arg`` also
    carries low flag bits beyond the name index (see fix_vars).
    """
    for inst in instructions:
        if inst.argval is _NotProvided:
            continue
        if inst.opname in ("LOAD_GLOBAL", "LOAD_ATTR", "LOAD_SUPER_ATTR"):
            continue
        arg_is_table_index = (
            inst.opcode in HAS_LOCAL
            or inst.opcode in HAS_NAME
            or inst.opcode in HAS_FREE
            or inst.opcode in HAS_CONST
        )
        if arg_is_table_index:
            inst.arg = None
1273
+
1274
+
1275
def get_code_keys() -> List[str]:
    """Return the CodeType constructor argument names, in order, for the
    running Python version.

    Python 3.11 changes to code keys are not fully documented.
    See https://github.com/python/cpython/blob/3.11/Objects/clinic/codeobject.c.h#L24
    for the new format.
    """
    keys = [
        "co_argcount",
        "co_posonlyargcount",
        "co_kwonlyargcount",
        "co_nlocals",
        "co_stacksize",
        "co_flags",
        "co_code",
        "co_consts",
        "co_names",
        "co_varnames",
        "co_filename",
        "co_name",
    ]
    if sys.version_info >= (3, 11):
        keys.append("co_qualname")
    keys.append("co_firstlineno")
    # 3.10 replaced co_lnotab with co_linetable
    keys.append("co_linetable" if sys.version_info >= (3, 10) else "co_lnotab")
    if sys.version_info >= (3, 11):
        # not documented, but introduced in https://github.com/python/cpython/issues/84403
        keys.append("co_exceptiontable")
    keys.extend(["co_freevars", "co_cellvars"])
    return keys
1312
+
1313
+
1314
def transform_code_object(code, transformations, safe=False) -> types.CodeType:
    """Decompile ``code``, run ``transformations`` over it, and reassemble.

    ``transformations`` is called as ``transformations(instructions,
    code_options)`` and mutates both in place; the reassembled
    ``types.CodeType`` is returned.
    """
    keys = get_code_keys()
    code_options = {key: getattr(code, key) for key in keys}
    # Sanity check before editing: nlocals must agree with co_varnames.
    assert len(code_options["co_varnames"]) == code_options["co_nlocals"]

    instructions = cleaned_instructions(code, safe)
    propagate_line_nums(instructions)
    transformations(instructions, code_options)

    _, new_code = clean_and_assemble_instructions(instructions, keys, code_options)
    return new_code
1324
+
1325
+
1326
def clean_and_assemble_instructions(
    instructions: List[Instruction], keys: List[str], code_options: Dict[str, Any]
) -> Tuple[List[Instruction], types.CodeType]:
    """Fix up, reassemble, and package ``instructions`` into a new code object.

    Recomputes instruction args from argvals (fix_vars), iterates
    offset/jump/EXTENDED_ARG fixups to a fixed point, assembles bytecode and
    line tables, and finally constructs the CodeType from ``code_options``
    in the order given by ``keys``.  Returns (instructions, code object).
    """
    # also implicitly checks for no duplicate instructions
    check_inst_exn_tab_entries_valid(instructions)

    code_options["co_nlocals"] = len(code_options["co_varnames"])
    varname_from_oparg = None
    if sys.version_info >= (3, 11):
        # temporary code object with updated names; 3.11+ needs its
        # _varname_from_oparg helper to resolve local/free var opargs
        tmp_code = types.CodeType(*[code_options[k] for k in keys])
        varname_from_oparg = tmp_code._varname_from_oparg  # type: ignore[attr-defined]
    fix_vars(instructions, code_options, varname_from_oparg=varname_from_oparg)

    # Inserting EXTENDED_ARG can grow the bytecode and shift offsets, which
    # can require more EXTENDED_ARGs — loop until stable.
    dirty = True
    while dirty:
        update_offsets(instructions)
        devirtualize_jumps(instructions)
        # this pass might change offsets, if so we need to try again
        dirty = bool(fix_extended_args(instructions))

    remove_extra_line_nums(instructions)
    bytecode, lnotab = assemble(instructions, code_options["co_firstlineno"])
    if sys.version_info < (3, 10):
        code_options["co_lnotab"] = lnotab
    else:
        code_options["co_linetable"] = lnotab

    code_options["co_code"] = bytecode
    code_options["co_stacksize"] = stacksize_analysis(instructions)
    assert set(keys) - {"co_posonlyargcount"} == set(code_options.keys()) - {
        "co_posonlyargcount"
    }
    if sys.version_info >= (3, 11):
        code_options["co_exceptiontable"] = assemble_exception_table(
            compute_exception_table(instructions)
        )

    return instructions, types.CodeType(*[code_options[k] for k in keys])
1365
+
1366
+
1367
def populate_kw_names_argval(instructions, consts):
    """Resolve ``argval`` for KW_NAMES instructions from the consts tuple.

    KW_NAMES stores only a co_consts index in ``arg``; fill in the actual
    constant so later passes can work with the names tuple directly.
    (Only called for Python >= 3.11 — see cleaned_instructions.)
    """
    for inst in instructions:
        if inst.opname != "KW_NAMES":
            continue
        inst.argval = consts[inst.arg]
1371
+
1372
+
1373
def cleaned_instructions(code, safe=False) -> List[Instruction]:
    """Disassemble ``code`` into mutable Instruction objects, normalized for
    dynamo's bytecode passes.

    Jumps (and on 3.11+ the exception table) are "virtualized" into direct
    references between Instruction objects, and EXTENDED_ARGs are stripped.
    With ``safe=False`` (the default), additional version-specific
    normalization passes rewrite instruction sequences into forms dynamo's
    later passes expect.
    """
    instructions = list(map(convert_instruction, dis.get_instructions(code)))
    check_offsets(instructions)
    if sys.version_info >= (3, 11):
        populate_kw_names_argval(instructions, code.co_consts)
        virtualize_exception_table(code.co_exceptiontable, instructions)
    virtualize_jumps(instructions)
    strip_extended_args(instructions)
    if not safe:
        if sys.version_info < (3, 11):
            remove_load_call_method(instructions)
        if sys.version_info < (3, 12):
            explicit_super(code, instructions)
    if sys.version_info >= (3, 11):
        remove_jump_if_none(instructions)
        if sys.version_info >= (3, 12):
            remove_binary_store_slice(instructions)
        if sys.version_info >= (3, 13):
            remove_fused_load_store(instructions)
        # the normalization passes above can add/remove instructions, so
        # recompute offsets and re-materialize jump targets
        update_offsets(instructions)
        devirtualize_jumps(instructions)
    return instructions
1395
+
1396
+
1397
# Process-wide counter backing unique_id().
_unique_id_counter = itertools.count()


def unique_id(name) -> str:
    """Return ``name`` suffixed with a process-unique monotonic integer."""
    suffix = next(_unique_id_counter)
    return f"{name}_{suffix}"
1402
+
1403
+
1404
def is_generator(code: types.CodeType) -> bool:
    """Return True if ``code`` has the CO_GENERATOR flag (0x20) set."""
    co_generator = 0x20
    return bool(code.co_flags & co_generator)
1407
+
1408
+
1409
def bytecode_from_template(fn, varname_map=None, noreturn=True, noprefix=True):
    """Generates bytecode from a template function `fn` for use in
    dynamo bytecode generation.

    For example, we can generate Python-version-independent bytecode
    for looping through a dictionary and copying the values to a new dictionary.

    def template(d1, d2):
        for k, v in d1.items():
            d2[k] = v


    or a try block:

    def template():
        try:
            dummy1
        except:
            dummy2
            raise
        dummy3

    Args:
        fn: a function template to generate bytecode from
        varname_map: a mapping of `fn`'s varnames to new names. This
            map will be applied to the generated bytecode's varnames.
            For example, local variables in `fn` can be replaced with
            new names that are generated by `OutputGraph.new_var`.
        noreturn: remove all RETURN_* bytecodes and replace them with a jump
            to the end of the bytecode.
        noprefix: remove prefix bytecodes (all bytecode before the first RESUME, inclusive).
    """
    insts = cleaned_instructions(fn.__code__)
    clear_instruction_args(insts)

    if noprefix:
        for i, inst in enumerate(insts):
            if inst.opname == "RESUME":
                insts = insts[i + 1 :]
                break

    for inst in insts:
        # If we don't reset starts_line, then the generated
        # bytecode's line number will be based on fn's.
        inst.starts_line = None
        if varname_map and inst.argval in varname_map:
            inst.argval = varname_map[inst.argval]

    if noreturn:
        if sys.version_info >= (3, 12):
            # replace RETURN_CONST with LOAD_CONST RETURN_VALUE
            # so the uniform RETURN_VALUE handling below applies
            new_insts = []
            for inst in insts:
                if inst.opname == "RETURN_CONST":
                    inst.opcode = dis.opmap["LOAD_CONST"]
                    inst.opname = "LOAD_CONST"
                    new_insts.append(inst)
                    # no need to propagate target/exn table
                    new_insts.append(create_instruction("RETURN_VALUE"))
                else:
                    new_insts.append(inst)
            insts = new_insts

        returns = []
        for inst in insts:
            if inst.opname == "RETURN_VALUE":
                returns.append(inst)

        if len(returns) == 1 and returns[0] is insts[-1]:
            # only 1 return at the end - just pop it
            insts.pop(-1)
        elif len(returns) > 0:
            # create jump target - if the last inst is a return,
            # we can replace it with a NOP and make that the jump target.
            if insts[-1] is returns[-1]:
                insts[-1].opname = "NOP"
                insts[-1].opcode = dis.opmap["NOP"]
                insts[-1].arg = None
                insts[-1].argval = _NotProvided
                returns.pop(-1)
            else:
                insts.append(create_instruction("NOP"))

            # replace returns with jumps
            for inst in returns:
                # don't replace inst with new instruction
                # due to targetting/exn table/etc.
                jump_inst = create_jump_absolute(insts[-1])
                inst.opname = jump_inst.opname
                inst.opcode = jump_inst.opcode
                inst.arg = jump_inst.arg
                inst.argval = jump_inst.argval
                inst.target = jump_inst.target

    return insts
pllava/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py ADDED
@@ -0,0 +1,533 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import functools
4
+ from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
5
+
6
+ import torch
7
+ from torch._dynamo.external_utils import (
8
+ call_backward,
9
+ call_hook,
10
+ FakeCompiledAutogradEngine,
11
+ )
12
+ from torch._dynamo.source import GetItemSource, LocalSource
13
+ from torch._dynamo.utils import counters, lazy_format_graph_code, set_locals_to_steal
14
+ from torch._logging import getArtifactLogger, trace_structured
15
+ from torch._prims_common import clone_preserve_strides
16
+ from torch._subclasses import FakeTensorMode
17
+ from torch.fx import GraphModule
18
+ from torch.fx.experimental._backward_state import BackwardState
19
+ from torch.fx.experimental.proxy_tensor import (
20
+ decompose,
21
+ disable_autocast_cache,
22
+ disable_proxy_modes_tracing,
23
+ fetch_object_proxy,
24
+ ProxyTorchDispatchMode,
25
+ PythonKeyTracer,
26
+ track_tensor_tree,
27
+ )
28
+ from torch.fx.experimental.symbolic_shapes import DimDynamic, ShapeEnv
29
+ from torch.fx.traceback import preserve_node_meta, set_stack_trace
30
+ from torch.utils._traceback import CapturedTraceback
31
+
32
+
33
+ if TYPE_CHECKING:
34
+ from torch.fx.proxy import Proxy
35
+
36
+
37
# Artifact loggers: "compiled_autograd" receives the captured graph;
# "compiled_autograd_verbose" additionally receives C++ engine messages
# forwarded through cpp_verbose_log_fn (see enable()).
compiled_autograd_log = getArtifactLogger(__name__, "compiled_autograd")
verbose_log = getArtifactLogger(__name__, "compiled_autograd_verbose")
39
+
40
+
41
+ def snapshot_verbose_logging_enabled():
42
+ return torch._logging._internal.log_state.is_artifact_enabled(
43
+ "compiled_autograd_verbose"
44
+ )
45
+
46
+
47
def cpp_verbose_log_fn(msg: str) -> None:
    """Callback registered with the C++ engine via set_verbose_logger (see
    enable()); forwards its messages to the Python verbose artifact log."""
    verbose_log.debug(msg)
49
+
50
+
51
def snapshot_cudagraph_enabled():
    """Snapshot whether inductor's triton cudagraphs option is currently on."""
    return torch._inductor.config.triton.cudagraphs
53
+
54
+
55
def maybe_clone(x):
    """Stride-preserving clone of ``x``; ``None`` passes through unchanged."""
    if x is None:
        return None
    return clone_preserve_strides(x)
59
+
60
+
61
class AutogradCompilerInstance:
    """Traces one backward pass of the autograd engine into an FX graph.

    One instance is created per capture (``enable()`` registers a factory
    with the C++ engine).  The engine drives the protocol: ``begin_capture``
    sets up fake-tensor/proxy tracing modes and the graph placeholders, the
    ``proxy_*``/``*_hook`` methods record each node/hook call, and
    ``end_capture`` finalizes the graph and hands it to ``compiler_fn``,
    returning a runtime wrapper plus the compiled function.
    """

    def __init__(self, compiler_fn) -> None:
        self.compiler_fn = compiler_fn
        self.stack = contextlib.ExitStack()
        self.close = self.stack.close
        self.shape_env = ShapeEnv()
        self.fake_tensor_mode = FakeTensorMode(
            allow_fallback_kernels=True,
            allow_non_fake_inputs=True,
            shape_env=self.shape_env,
        )
        self.fx_tracer = PythonKeyTracer()
        self.proxy_mode = ProxyTorchDispatchMode(self.fx_tracer, "symbolic")
        # proxy for the "hooks" placeholder; set in begin_capture
        self.hooks_proxy: Optional[Proxy] = None
        # fixed placeholder order for the captured graph
        self.graph_placeholders = ["inputs", "sizes", "scalars", "hooks"]

    def wrap_fake(self, x, source):
        """Convert a real tensor into a fake tensor guarded by ``source``."""
        assert isinstance(x, torch.Tensor)
        return self.fake_tensor_mode.from_tensor(x, source=source)

    @staticmethod
    def source(name, idx) -> GetItemSource:
        """Guard source for graph input ``<name>[<idx>]``."""
        return GetItemSource(LocalSource(name), idx)

    def begin_capture(
        self,
        inputs: List[torch.Tensor],
        sizes: List[int],
        scalars: List[Union[int, float]],
    ):
        """Start a capture: create the graph, its placeholders, and fake/symbolic
        versions of ``inputs``/``sizes``/``scalars`` bound to proxies.

        Returns the (fakeified inputs, symint sizes, symbolic scalars) the
        engine should use while tracing.
        """
        counters["compiled_autograd"]["captures"] += 1
        self.aot_graph_cls_name: Optional[str] = None
        self.aot_graph_infos: Dict[int, Dict[str, Any]] = {}
        self.fx_tracer.root = torch.nn.Module()
        self.fx_tracer.graph = torch.fx.Graph(tracer_cls=PythonKeyTracer)
        self.fx_tracer.tensor_attrs = {}
        args_proxy, sizes_proxy, scalars_proxy, self.hooks_proxy = (
            self.fx_tracer.create_proxy("placeholder", name, (), {})
            for name in self.graph_placeholders
        )

        # tensor inputs to fake tensors
        inputs = [
            self.wrap_fake(x, self.source("inputs", idx))
            for idx, x in enumerate(inputs)
        ]
        self.bind_tensors_to_proxies(inputs, args_proxy)

        # size inputs to symints
        sizes = [
            self.shape_env.create_unspecified_symint_and_symbol(
                val,
                self.source("sizes", idx),
                DimDynamic.DYNAMIC,
            )
            for idx, val in enumerate(sizes)
        ]
        self.bind_tensors_to_proxies(sizes, sizes_proxy)

        # scalar inputs: ints become symints, floats become symfloats
        for idx, val in enumerate(scalars):
            source = self.source("scalars", idx)
            if isinstance(val, int):
                scalars[idx] = self.shape_env.create_unspecified_symint_and_symbol(
                    val,
                    source,
                    DimDynamic.DYNAMIC,
                )
            elif isinstance(val, float):
                scalars[idx] = self.shape_env.create_symfloatnode(
                    self.shape_env.create_unspecified_symbol(
                        val,
                        source=source,
                        dynamic_dim=DimDynamic.DYNAMIC,
                    ),
                    hint=val,
                    source=source,
                )
            else:
                raise AssertionError("Unexpected scalar type: ", type(val))
        self.bind_tensors_to_proxies(scalars, scalars_proxy)

        # TODO(jansel): are all these modes needed?
        self.stack.enter_context(decompose({}))
        self.stack.enter_context(self.fake_tensor_mode)
        self.stack.enter_context(self.proxy_mode)
        self.stack.enter_context(disable_autocast_cache())
        self.stack.enter_context(preserve_node_meta())
        return inputs, sizes, scalars

    def proxy_call_backward(
        self,
        inputs,
        output_metadatas,
        saved_tensors,
        backward_idx: int,
    ):
        """Trace a call to a user-defined autograd.Function backward.

        Emits a ``call_backward`` node referencing hook slot ``backward_idx``
        and returns fake grad-input tensors (built from ``output_metadatas``)
        bound to the node's outputs.
        """
        assert self.hooks_proxy is not None
        backward_c_function = self.hooks_proxy[backward_idx]  # type: ignore[index]
        proxies = self.fx_tracer.create_proxy(
            kind="call_function",
            target=call_backward,
            args=(
                backward_c_function,
                self.to_proxy(saved_tensors),
                *self.to_proxy(inputs),
            ),
            kwargs={},
        )

        with disable_proxy_modes_tracing():
            # create fake Tensors
            grad_ins: List[Optional[torch.Tensor]] = []
            for output_metadata in output_metadatas:
                if output_metadata is None:
                    grad_ins.append(None)
                    continue

                layout, device, dtype, size = output_metadata
                grad_ins.append(
                    torch.empty(size=size, dtype=dtype, layout=layout, device=device)
                )
            self.bind_tensors_to_proxies(grad_ins, proxies)
        return tuple(grad_ins)

    def proxy_call_hook(self, hook, *args, **kwargs):
        """Emit a ``call_hook(hook, *args, **kwargs)`` node and return its proxy."""
        return self.fx_tracer.create_proxy(
            "call_function",
            call_hook,
            (
                hook,
                *[self.to_proxy(x) for x in args],
            ),
            kwargs,
        )

    def tensor_pre_hook(self, inputs, hook_id, i: int):
        """Trace a tensor pre-hook on ``inputs[i]``; rebinds a clone of that
        input to the hook's output proxy and returns the updated list."""
        assert self.hooks_proxy is not None
        hook = self.hooks_proxy[hook_id]  # type: ignore[index]
        proxy = self.proxy_call_hook(
            hook,
            inputs[i],
            hook_type="tensor_pre_hook",
        )
        with disable_proxy_modes_tracing():
            inputs[i] = maybe_clone(inputs[i])
            self.bind_tensors_to_proxies([inputs[i]], [proxy])
        return inputs

    def pre_hook(self, inputs, hook_id):
        """Trace a node pre-hook over all ``inputs``; returns clones bound to
        the hook's output proxies."""
        assert self.hooks_proxy is not None
        hook = self.hooks_proxy[hook_id]  # type: ignore[index]
        proxies = self.proxy_call_hook(
            hook,
            inputs,
            hook_type="pre_hook",
        )
        with disable_proxy_modes_tracing():
            inputs = [maybe_clone(x) for x in inputs]
            self.bind_tensors_to_proxies(inputs, proxies)
        return inputs

    def post_hook(self, outputs, inputs, hook_id):
        """Trace a node post-hook; returns cloned ``outputs`` bound to the
        hook's output proxies."""
        assert self.hooks_proxy is not None
        hook = self.hooks_proxy[hook_id]  # type: ignore[index]
        proxies = self.proxy_call_hook(
            hook,
            outputs,
            inputs,
            hook_type="post_hook",
        )
        with disable_proxy_modes_tracing():
            outputs = [maybe_clone(x) for x in outputs]
            self.bind_tensors_to_proxies(outputs, proxies)
        return outputs

    def post_acc_grad_hook(self, input, hook_id):
        """Trace a post-accumulate-grad hook on a single tensor; returns a
        one-element list containing the cloned tensor."""
        assert isinstance(input, torch.Tensor)
        assert self.hooks_proxy is not None
        hook = self.hooks_proxy[hook_id]  # type: ignore[index]
        proxy = self.proxy_call_hook(
            hook,
            input,
            hook_type="post_acc_grad_hook",
        )
        with disable_proxy_modes_tracing():
            input = [maybe_clone(input)]
            self.bind_tensors_to_proxies(input, [proxy])
        return input

    # Note: [Compiled autograd and cudagraphs]
    # Eager autograd backward implements scalars as 0-dim tensors, see DivBackward0::other_.
    # When compiled autograd traces those nodes, it lifts the scalar tensors, resulting in a graph
    # with some cpu 0-dim tensor inputs. To prevent the entire graph from skipping cudagraph, we move the
    # scalars tensors to cuda. This works because ATen/prims ops will accept cuda 0-dim tensors too.
    def move_graph_nodes_to_cuda(self, graph) -> List[int]:
        """Retag movable cpu 0-dim tensor inputs as cuda (see note above) and
        return the runtime input indices that must be moved; empty list if the
        graph has no cuda inputs."""
        to_move: Dict[int, torch.fx.Node] = {}
        has_cuda_inputs = False
        nodes = list(graph.nodes)
        assert nodes[0].target == "inputs"
        inputs = nodes[0]
        inputs_users = list(inputs.users.keys())
        # input access nodes should immediately follow placeholder nodes
        first_getitem_idx = len(self.graph_placeholders)
        assert nodes[first_getitem_idx] == inputs_users[0]
        last_getitem_idx = first_getitem_idx + len(inputs_users) - 1
        assert nodes[last_getitem_idx] == inputs_users[-1]
        for i, node in enumerate(inputs_users):
            if not has_cuda_inputs and node.meta["val"].device.type == "cuda":
                has_cuda_inputs = True
                continue

            is_cpu = node.meta["val"].device.type == "cpu"
            is_scalar = len(node.meta["val"].size()) == 0
            if is_cpu and is_scalar:
                node_users = list(node.users.keys())
                if all(
                    isinstance(user.target, torch._ops.OpOverload)
                    and user.target.namespace in ("prims", "aten")
                    for user in node_users
                ):
                    # all users are prims/aten, can move safely
                    to_move[i] = node

        # only move cpu scalars to cuda if there were cuda activations in this graph,
        # this is to handle the case where cudagraphs is enabled on a cpu-only graph
        if has_cuda_inputs:
            for node in to_move.values():
                node.meta["val"] = node.meta["val"].cuda()

            # return runtime indices we need to move to cuda
            return list(to_move.keys())

        return []

    def end_capture(self, outputs):
        """Finish the capture: close tracing modes, finalize the graph, run
        post-processing passes, compile it with ``compiler_fn``, and return
        ``(runtime_wrapper, compiled_fn)``."""
        self.fx_tracer.create_proxy(
            "call_function",
            FakeCompiledAutogradEngine._exec_final_callbacks_stub,
            (),
            {},
        )
        self.stack.close()
        self.fx_tracer.create_node(
            "output",
            "output",
            (self.fx_tracer.create_arg(self.to_proxy(outputs)),),
            {},
        )
        self.rename_aot_dispatcher_nodes()
        self.reorder_accumulate_grad_nodes()
        runtime_inputs_to_move: List[int] = []
        if snapshot_cudagraph_enabled():
            runtime_inputs_to_move = self.move_graph_nodes_to_cuda(self.fx_tracer.graph)

        graph = GraphModule(
            self.fx_tracer.root, self.fx_tracer.graph, "CompiledAutograd"
        )
        # allow the caller to steal (free) the inputs list after use
        set_locals_to_steal(graph, ["inputs"])
        lazy_graph_code = lazy_format_graph_code(
            "Compiled autograd graph",
            graph,
            include_device=True,
            include_stride=True,
            colored=True,
        )
        compiled_autograd_log.info("%s", lazy_graph_code)
        verbose_log.debug("%s", lazy_graph_code)
        trace_structured(
            "compiled_autograd_graph",
            payload_fn=lambda: graph.print_readable(print_output=False),
        )

        def runtime_wrapper(compiled_fn, inputs, sizes, scalars, hooks):
            # marks the region so downstream compilation knows the graphs
            # originate from compiled autograd
            global in_compiled_autograd_region
            try:
                in_compiled_autograd_region = True
                for i in runtime_inputs_to_move:
                    inputs[i] = inputs[i].pin_memory().cuda(non_blocking=True)

                return compiled_fn(inputs, sizes, scalars, hooks)
            finally:
                in_compiled_autograd_region = False

        return runtime_wrapper, self.compiler_fn(graph)

    def rename_aot_dispatcher_nodes(self):
        """
        Renames nodes as they appear in the AOTDispatcher backward graphs, prefixed by AOT id
        e.g. AOTDispatcher backward graph X's `sin_Y` -> `aotX_sin_Y`
        """
        if self.aot_graph_cls_name is None:
            return

        def is_similar(a: torch.fx.node.Node, b: torch.fx.node.Node):
            # nodes match if target (or target __name__), op, type, and
            # input arity agree
            target_match = a.target == b.target
            if not target_match:
                target_match = (
                    hasattr(a.target, "__name__")
                    and hasattr(b.target, "__name__")
                    and a.target.__name__ == b.target.__name__
                )
            return (
                target_match
                and a.op == b.op
                and a.type == b.type
                and len(a.all_input_nodes) == len(b.all_input_nodes)
            )

        for nodecall_index, info in self.aot_graph_infos.items():
            ca_node_start_idx = info["ca_node_start_idx"]
            aot_id = info["aot_id"]
            aot_graph = info["aot_gm"].graph

            # 1. Find the first op from user code in the AOT graph
            aot_it = iter(aot_graph.nodes)
            aot_node = next(aot_it)
            assert aot_node is not None
            try:
                while aot_node.op != "call_function":
                    aot_node = next(aot_it)
            except StopIteration:
                continue

            try:
                # 2. Find the first op in the compiled autograd graph segment
                ca_it = iter(self.fx_tracer.graph.nodes)
                for _ in range(ca_node_start_idx):
                    next(ca_it)
                ca_node = next(ca_it)

                # Graphs should all end with output node
                while ca_node.op != "output" and not is_similar(ca_node, aot_node):
                    # The compiled autograd graph may contain lazily inserted ops
                    # We skip those when aligning nodes
                    ca_node = next(ca_it)

                # 3. Keep aligned and rename nodes
                while aot_node.op != "output" and ca_node.op != "output":
                    if not ca_node.users:
                        # TODO: DCE for compiled autograd graph
                        ca_node = next(ca_it)
                        continue

                    if not is_similar(aot_node, ca_node):
                        # There should be no lazily inserted ops in the middle of a match
                        # So any deviation is an error
                        raise StopIteration

                    ca_node.name = f"aot{aot_id}_{aot_node.name}"
                    for i, inp in enumerate(aot_node.all_input_nodes):
                        ca_node.all_input_nodes[i].name = f"aot{aot_id}_{inp.name}"

                    aot_node = next(aot_it)
                    ca_node = next(ca_it)
            except StopIteration:
                verbose_log.debug(
                    "Failed to match %s%s (NodeCall %s) nodes with AOT backward graph %s nodes",
                    self.aot_graph_cls_name,
                    aot_id,
                    nodecall_index,
                    aot_id,
                )

    def reorder_accumulate_grad_nodes(self):
        """
        Usage of AOTAutograd causes all the accumulate_grad_ nodes to get pushed to the end of
        the graph. This differs from eager mode, which schedules them as soon as possible. This
        pass attempts to reorder the graph to mimic eager behavior.
        """
        for node in self.fx_tracer.graph.find_nodes(
            op="call_function", target=torch.ops.inductor.accumulate_grad_.default
        ):
            arg = max(node.args)  # last arg
            if arg is not node.prev and arg.op != "placeholder":
                # move the accumulate right after its last producer
                arg.append(node)

    def to_proxy(self, t):
        """Recursively map tensors (and lists/tuples of them) to the proxies
        the tracer associated with them; None passes through."""
        if t is None:
            return None
        if isinstance(t, list):
            return [self.to_proxy(x) for x in t]
        if isinstance(t, tuple):
            return tuple(self.to_proxy(x) for x in t)
        # can it be torch.SymInt as the code used to imply?
        assert isinstance(t, torch.Tensor)
        proxy_tensor = fetch_object_proxy(self.fx_tracer, t)
        assert isinstance(proxy_tensor, torch.fx.experimental.proxy_tensor._ProxyTensor)
        return proxy_tensor.proxy

    def bind_tensors_to_proxies(self, tensors, proxies):
        """Associate each tensor with its proxy in the tracer; a single Proxy
        is expanded by indexing into per-element proxies."""
        if isinstance(proxies, torch.fx.Proxy):
            proxies = [proxies[i] for i in range(len(tensors))]  # type: ignore[index]
        assert len(tensors) == len(proxies)
        track_tensor_tree(tensors, proxies, constant=None, tracer=self.fx_tracer)

    def bind_backward_state(self, index: int):
        """Create a BackwardState bound to hook slot ``index`` in the graph."""
        assert self.hooks_proxy is not None
        proxy = self.hooks_proxy[index]  # type: ignore[index]
        bw_state = BackwardState()
        track_tensor_tree(bw_state, proxy, constant=None, tracer=self.fx_tracer)
        return bw_state

    def set_node_origin(
        self,
        node_name: str,
        nodecall_index: int,
        pyobj: Optional[torch.autograd.Function],
    ):
        """Record provenance for the autograd node about to be traced: stash
        AOT backward graph info (if any) and rewrite the fx stack trace to
        name the node call."""
        maybe_aot_id = ""
        if pyobj is not None:
            forward_cls = pyobj._forward_cls  # type: ignore[attr-defined]
            if hasattr(forward_cls, "_aot_id"):
                # backward was created by AOT Dispatcher
                self.aot_graph_cls_name = node_name
                maybe_aot_id = forward_cls._aot_id
                self.aot_graph_infos[nodecall_index] = {
                    "ca_node_start_idx": len(self.fx_tracer.graph.nodes),
                    "aot_id": maybe_aot_id,
                    "aot_gm": forward_cls._lazy_backward_info.bw_module,
                }

        new_code = f"{node_name}{maybe_aot_id} (NodeCall {nodecall_index})"
        # splice the node-call description into the captured stack trace line
        raw_stack_trace = CapturedTraceback.extract().format()[-1]
        new_stack_trace = raw_stack_trace.replace(
            "raw_stack_trace = CapturedTraceback.extract().format()[-1]", new_code
        )
        set_stack_trace(new_stack_trace)
488
+
489
+
490
# state of the autograd engine dispatch, kept in sync by enable/disable context managers
compiled_autograd_enabled = False

# global flag to check if we are processing graphs produced from a compiled autograd graph
# (set/cleared by the runtime_wrapper returned from end_capture)
in_compiled_autograd_region = False
495
+
496
+
497
@contextlib.contextmanager
def enable(compiler_fn):
    """Context manager that turns on compiled autograd, using ``compiler_fn``
    to compile each captured backward graph.

    Registers an AutogradCompilerInstance factory with the C++ engine,
    disables autograd multithreading for the duration, and restores the
    previously registered compiler (and the enabled flag) on exit.
    """
    prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(
        functools.partial(AutogradCompilerInstance, compiler_fn)
    )
    if snapshot_verbose_logging_enabled():
        torch._C._dynamo.compiled_autograd.set_verbose_logger(cpp_verbose_log_fn)
    global compiled_autograd_enabled
    compiled_autograd_enabled = True
    try:
        with torch.autograd.set_multithreading_enabled(False):
            yield
    finally:
        # only clear the flag if no compiler was registered before us
        if not prior:
            compiled_autograd_enabled = False
        torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
513
+
514
+
515
@contextlib.contextmanager
def disable():
    """Context manager that temporarily turns off compiled autograd,
    restoring the previously registered compiler (and the enabled flag)
    on exit."""
    prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(None)
    global compiled_autograd_enabled
    compiled_autograd_enabled = False
    try:
        yield
    finally:
        # re-enable only if a compiler was registered before us
        if prior:
            compiled_autograd_enabled = True
        torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
526
+
527
+
528
# return to starting state of a new process
def reset() -> None:
    """Reset compiled autograd global state, as in a freshly started process.

    Clears the module-level enabled flag and unregisters both the compiler
    factory and the verbose logger from the C++ engine.  Must not be called
    from inside a compiled autograd region.
    """
    # Bug fix: the original wrote `compiled_autograd_enable = False`, which
    # created a misspelled *local* variable and never reset the module-level
    # `compiled_autograd_enabled` flag.
    global compiled_autograd_enabled
    compiled_autograd_enabled = False
    assert not in_compiled_autograd_region
    torch._C._dynamo.compiled_autograd.set_autograd_compiler(None)
    torch._C._dynamo.compiled_autograd.set_verbose_logger(None)
pllava/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py ADDED
@@ -0,0 +1,1277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ from __future__ import annotations
3
+
4
+ import collections
5
+ import contextlib
6
+ import cProfile
7
+ import dis
8
+ import functools
9
+ import itertools
10
+ import logging
11
+ import os
12
+ import pstats
13
+ import random
14
+ import subprocess
15
+ import sys
16
+ import threading
17
+ import time
18
+ import traceback
19
+ import typing
20
+ import weakref
21
+ from pathlib import Path
22
+ from types import CodeType, FrameType, FunctionType, ModuleType
23
+ from typing import Any, Callable, Dict, List, Optional, Set, TypeVar, Union
24
+ from typing_extensions import ParamSpec
25
+ from weakref import ReferenceType
26
+
27
+ import torch
28
+ import torch._logging
29
+ from torch._C._dynamo.guards import GlobalStateGuard
30
+ from torch._dynamo.distributed import get_compile_pg
31
+ from torch._dynamo.utils import CompileTimeInstructionCounter
32
+ from torch._guards import compile_context, CompileContext, CompileId, tracing
33
+ from torch._logging import structured
34
+ from torch._utils_internal import (
35
+ compile_time_strobelight_meta,
36
+ justknobs_check,
37
+ maybe_upload_prof_stats_to_manifold,
38
+ signpost_event,
39
+ )
40
+ from torch.fx._lazy_graph_module import _use_lazy_graph_module
41
+ from torch.fx.experimental.symbolic_shapes import (
42
+ ConstraintViolationError,
43
+ GuardOnDataDependentSymNode,
44
+ )
45
+ from torch.fx.graph_module import _forward_from_src as original_forward_from_src
46
+ from torch.nn.parallel.distributed import DistributedDataParallel
47
+ from torch.utils._python_dispatch import (
48
+ _disable_current_modes,
49
+ is_in_torch_dispatch_mode,
50
+ )
51
+ from torch.utils._traceback import CapturedTraceback, format_traceback_short
52
+
53
+ from . import config, exc, trace_rules
54
+ from .bytecode_analysis import remove_dead_code, remove_pointless_jumps
55
+ from .bytecode_transformation import (
56
+ check_inst_exn_tab_entries_valid,
57
+ Instruction,
58
+ is_generator,
59
+ propagate_inst_exn_table_entries,
60
+ transform_code_object,
61
+ )
62
+ from .cache_size import (
63
+ CacheSizeRelevantForFrame,
64
+ compute_cache_size,
65
+ exceeds_cache_size_limit,
66
+ is_recompilation,
67
+ )
68
+ from .eval_frame import always_optimize_code_objects, skip_code, TorchPatcher
69
+ from .exc import (
70
+ augment_exc_message,
71
+ BackendCompilerFailed,
72
+ CacheLimitExceeded,
73
+ format_error_msg,
74
+ InternalTorchDynamoError,
75
+ SkipCodeRecursiveException,
76
+ TorchRuntimeError,
77
+ UncapturedHigherOrderOpError,
78
+ unimplemented,
79
+ Unsupported,
80
+ )
81
+ from .guards import (
82
+ CheckFunctionManager,
83
+ get_and_maybe_log_recompilation_reason,
84
+ GuardedCode,
85
+ )
86
+ from .hooks import Hooks
87
+ from .replay_record import ExecutionRecord
88
+ from .symbolic_convert import (
89
+ DistributedState,
90
+ InstructionTranslator,
91
+ LocalState,
92
+ SpeculationLog,
93
+ )
94
+ from .trace_rules import is_numpy
95
+ from .utils import (
96
+ CleanupManager,
97
+ CompilationMetrics,
98
+ counters,
99
+ dynamo_timed,
100
+ format_bytecode,
101
+ frame_phase_timing,
102
+ gen_record_file_name,
103
+ get_chromium_event_logger,
104
+ increment_frame,
105
+ is_namedtuple,
106
+ istype,
107
+ LazyString,
108
+ orig_code_map,
109
+ record_compilation_metrics,
110
+ reset_graph_break_dup_checker,
111
+ setup_compile_debug,
112
+ troubleshooting_url,
113
+ write_record_to_file,
114
+ )
115
+
116
+
117
+ np: Optional[ModuleType]
118
+ try:
119
+ import numpy as np
120
+ except ModuleNotFoundError:
121
+ np = None
122
+
123
+
124
+ if typing.TYPE_CHECKING:
125
+ from .backends.registry import CompilerFn
126
+ from .repro.after_dynamo import WrapBackendDebug
127
+ from .types import BytecodeHook, CacheEntry
128
+ from .variables.builder import FrameStateSizeEntry
129
+
130
+
131
+ log = logging.getLogger(__name__)
132
+ bytecode_log = torch._logging.getArtifactLogger(__name__, "bytecode")
133
+ graph_break_log = torch._logging.getArtifactLogger(__name__, "graph_breaks")
134
+
135
+
136
+ compile_lock = threading.RLock()
137
+
138
+ _T = TypeVar("_T")
139
+ _P = ParamSpec("_P")
140
+
141
+
142
+ class TODO_UNKNOWN:
143
+ pass
144
+
145
+
146
+ class Tracker:
147
+ def __init__(self) -> None:
148
+ self.seen: List[ReferenceType[CodeType]] = []
149
+ self.seen_ids: Set[int] = set()
150
+
151
+ def add(self, strong_obj: CodeType) -> None:
152
+ idx = id(strong_obj)
153
+ if idx not in self.seen_ids:
154
+ obj = weakref.ref(strong_obj, lambda _: self.seen_ids.remove(idx))
155
+ self.seen.append(obj)
156
+ self.seen_ids.add(idx)
157
+
158
+ def __contains__(self, item: CodeType) -> bool:
159
+ return id(item) in self.seen_ids
160
+
161
+ def clear(self) -> None:
162
+ self.seen.clear()
163
+ self.seen_ids.clear()
164
+
165
+
166
+ input_codes = Tracker()
167
+ output_codes = Tracker()
168
+
169
+ initial_global_state: Optional[GlobalStateGuard] = None
170
+
171
+
172
+ @functools.wraps(original_forward_from_src)
173
+ def fx_forward_from_src_skip_result(
174
+ src: str, globals: Dict[str, Any], co_fields: Optional[Dict[str, str]] = None
175
+ ) -> FunctionType:
176
+ # we monkey patch FX to prevent infinite loop of trying to convert
177
+ # our generated code
178
+ result = original_forward_from_src(src, globals, co_fields)
179
+ skip_code(result.__code__)
180
+ return result
181
+
182
+
183
+ def preserve_global_state(fn: Callable[_P, _T]) -> Callable[_P, _T]:
184
+ """
185
+ Context manager to:
186
+ 1) Save/restore torch.is_grad_enabled() state
187
+ 2) Save/restore python random state
188
+ 3) Save/restore torch random state
189
+ 4) Monkey patch torch.fx.graph_module._forward_from_src
190
+ """
191
+
192
+ @functools.wraps(fn)
193
+ def _fn(*args: _P.args, **kwargs: _P.kwargs) -> _T:
194
+ guards = GlobalStateGuard()
195
+ prior_grad_mode = torch.is_grad_enabled()
196
+ # Just in case we get left in a bad dispatch state we want to restore
197
+ # it. This can happen because the dispatch bits aren't a true
198
+ # stack/counter - so we can't just increment/decrement them as we enter
199
+ # and leave.
200
+ with torch._C._PreserveDispatchKeyGuard():
201
+ prior_inference_mode = torch.is_inference_mode_enabled()
202
+ prior_deterministic = torch.are_deterministic_algorithms_enabled()
203
+ prior_warn_only = torch.is_deterministic_algorithms_warn_only_enabled()
204
+ py_rng_state = random.getstate()
205
+ torch_rng_state = torch.random.get_rng_state()
206
+ cuda_rng_state = None
207
+ if torch.cuda.is_available():
208
+ cuda_rng_state = torch.cuda.get_rng_state()
209
+ allow_tf32 = torch._C._get_cublas_allow_tf32()
210
+ prior_fwd_from_src = torch.fx.graph_module._forward_from_src
211
+ torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result
212
+ cleanup = setup_compile_debug()
213
+
214
+ exit_stack = contextlib.ExitStack()
215
+ exit_stack.enter_context(
216
+ torch.fx._symbolic_trace._maybe_revert_all_patches()
217
+ )
218
+ try:
219
+ return fn(*args, **kwargs)
220
+ finally:
221
+ cleanup.close()
222
+ exit_stack.close()
223
+ torch._C._set_grad_enabled(prior_grad_mode)
224
+ torch.autograd.grad_mode._enter_inference_mode(prior_inference_mode)
225
+ torch.use_deterministic_algorithms(
226
+ prior_deterministic, warn_only=prior_warn_only
227
+ )
228
+ random.setstate(py_rng_state)
229
+ torch.random.set_rng_state(torch_rng_state)
230
+ if cuda_rng_state is not None:
231
+ torch.cuda.set_rng_state(cuda_rng_state)
232
+ torch._C._set_cublas_allow_tf32(allow_tf32)
233
+ torch.fx.graph_module._forward_from_src = prior_fwd_from_src
234
+ assert (
235
+ guards.check()
236
+ ), f"Global {guards.reason()}state changed while dynamo tracing, please report a bug"
237
+
238
+ _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
239
+ return _fn
240
+
241
+
242
+ @TorchPatcher.suppress_torch_distributed_warnings
243
+ def has_tensor_in_frame(frame: FrameType) -> bool:
244
+ """Check if the frame has torch.* related bits"""
245
+ # Check if the function was decorated using torch._dynamo.optimize
246
+ if frame.f_code in always_optimize_code_objects:
247
+ return True
248
+
249
+ # Check if there is global import of torch.*
250
+ for co_name in frame.f_code.co_names:
251
+ if co_name in frame.f_globals:
252
+ obj = frame.f_globals[co_name]
253
+ if isinstance(obj, ModuleType) and (
254
+ obj.__name__.startswith("torch.") or obj is torch
255
+ ):
256
+ return True
257
+ # ... or a global import of numpy.*
258
+ if np and config.trace_numpy and (obj is np or is_numpy(obj)):
259
+ return True
260
+
261
+ seen_ids: Dict[int, bool] = {}
262
+
263
+ def has_tensor(obj: object) -> bool:
264
+ """Recursively check if the obj has a tensor"""
265
+ obj_id = id(obj)
266
+ if obj_id in seen_ids:
267
+ return seen_ids[obj_id]
268
+ seen_ids[obj_id] = False
269
+
270
+ if isinstance(obj, (torch.Tensor, torch.nn.Module)) or (
271
+ istype(obj, type) and issubclass(obj, torch.nn.Module)
272
+ ):
273
+ seen_ids[obj_id] = True
274
+ return seen_ids[obj_id]
275
+ elif (
276
+ config.trace_numpy
277
+ and np
278
+ and (istype(obj, np.ndarray) or isinstance(obj, np.generic))
279
+ ):
280
+ seen_ids[obj_id] = True
281
+ return seen_ids[obj_id]
282
+ elif istype(obj, (list, tuple)):
283
+ seen_ids[obj_id] = any(has_tensor(v) for v in obj)
284
+ return seen_ids[obj_id]
285
+ elif istype(obj, dict):
286
+ # Some packages like pytest can be updated during runtime. So, make a
287
+ # copy of values to avoid issues like "RuntimeError: dictionary
288
+ # changed size during iteration"
289
+ values = list(obj.values())
290
+ seen_ids[obj_id] = any(has_tensor(v) for v in values)
291
+ return seen_ids[obj_id]
292
+ elif istype(obj, (str, int, float, type(None), bool)):
293
+ seen_ids[obj_id] = False
294
+ return seen_ids[obj_id]
295
+ elif is_namedtuple(obj) and hasattr(obj, "_fields"):
296
+ seen_ids[obj_id] = any(has_tensor(getattr(obj, v)) for v in obj._fields)
297
+ return seen_ids[obj_id]
298
+ else:
299
+ # if config.debug:
300
+ # print(
301
+ # f"Assuming that object of type {type(obj)} does not have a tensor"
302
+ # )
303
+ return False
304
+
305
+ # Check if the passed arguments are of type Tensor
306
+ for value in frame.f_locals.values():
307
+ if has_tensor(value):
308
+ return True
309
+
310
+ log.debug(
311
+ "skipping because no torch.* %s \
312
+ %s %s",
313
+ frame.f_code.co_name,
314
+ frame.f_code.co_filename,
315
+ frame.f_code.co_firstlineno,
316
+ )
317
+
318
+ return False
319
+
320
+
321
+ def exception_handler(
322
+ e: Exception,
323
+ code: CodeType,
324
+ frame: Optional[FrameType] = None,
325
+ export: bool = False,
326
+ ) -> None:
327
+ record_filename = None
328
+ if hasattr(e, "exec_record"):
329
+ record_filename = gen_record_file_name(e, code)
330
+ write_record_to_file(record_filename, e.exec_record)
331
+ e.record_filename = record_filename # type: ignore[attr-defined]
332
+
333
+ augment_exc_message(e, export=export)
334
+
335
+
336
+ FRAME_COUNTER = 0
337
+ FRAME_COMPILE_COUNTER: typing.Counter[
338
+ Union[int, FrameStateSizeEntry]
339
+ ] = collections.Counter()
340
+
341
+
342
+ def maybe_cprofile(func: Callable[_P, _T]) -> Callable[_P, _T]:
343
+ if config.cprofile:
344
+ return cprofile_wrapper(func)
345
+ return func
346
+
347
+
348
+ def cprofile_wrapper(func: Callable[_P, _T]) -> Callable[_P, _T]:
349
+ @functools.wraps(func)
350
+ def profile_wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T:
351
+ trace_id = CompileContext.current_trace_id()
352
+ assert trace_id, "Trace id is None"
353
+ profile_path = Path(
354
+ f"/tmp/{func.__name__}_{str(trace_id).replace('/', '_')}.profile"
355
+ )
356
+ prof = cProfile.Profile()
357
+ prof.enable()
358
+ start_ts = time.time()
359
+ retval = prof.runcall(func, *args, **kwargs)
360
+ profile_latency = time.time() - start_ts
361
+ prof.disable()
362
+ log.warning(
363
+ "### Cprofile for %s trace id [%s] took %.3f seconds ###",
364
+ func.__name__,
365
+ trace_id,
366
+ profile_latency,
367
+ )
368
+ ps = pstats.Stats(prof)
369
+ try:
370
+ prof.dump_stats(profile_path)
371
+ except PermissionError:
372
+ log.exception("Cannot write to %s", profile_path)
373
+ log.warning("Raw profile at %s", profile_path)
374
+ svg_path = profile_path.with_suffix(".svg")
375
+ try:
376
+ gprof2dot_process = subprocess.Popen(
377
+ [
378
+ "gprof2dot",
379
+ "-f",
380
+ "pstats",
381
+ "--node-label=total-time-percentage",
382
+ "--node-label=self-time-percentage",
383
+ "--node-label=total-time",
384
+ str(profile_path),
385
+ ],
386
+ stdout=subprocess.PIPE,
387
+ )
388
+ subprocess.check_call(
389
+ ["dot", "-Tsvg", "-o", str(svg_path)],
390
+ stdin=gprof2dot_process.stdout,
391
+ )
392
+ log.warning("Generated SVG from profile at %s", svg_path)
393
+ except FileNotFoundError:
394
+ log.warning(
395
+ "Failed to generate SVG from profile -- dumping stats instead."
396
+ "Try installing gprof2dot and dot for a better visualization"
397
+ )
398
+ ps.sort_stats(pstats.SortKey.TIME).print_stats(20)
399
+ ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20)
400
+
401
+ if manifold_link := maybe_upload_prof_stats_to_manifold(
402
+ str(profile_path)
403
+ ): # fb-only
404
+ torch._logging.trace_structured(
405
+ "link",
406
+ lambda: {"name": "cprofile_manifold_url", "url": manifold_link},
407
+ )
408
+ return retval
409
+
410
+ return profile_wrapper
411
+
412
+
413
+ class ConvertFrameAssert:
414
+ def __init__(
415
+ self,
416
+ compiler_fn: CompilerFn,
417
+ one_graph: bool = True,
418
+ export: bool = False,
419
+ export_constraints: Optional[typing.Never] = None,
420
+ ) -> None:
421
+ # assert export_constraints is None
422
+ reset_graph_break_dup_checker()
423
+ self._torchdynamo_orig_callable = compiler_fn
424
+ self._one_graph = one_graph
425
+ self._export = export
426
+ self._export_constraints = export_constraints
427
+
428
+ @property
429
+ def _clone_with_backend(self) -> Callable[[CompilerFn], ConvertFrameAssert]:
430
+ return lambda backend: convert_frame_assert(
431
+ backend, self._one_graph, self._export, self._export_constraints
432
+ )
433
+
434
+ def __call__(
435
+ self,
436
+ frame: FrameType,
437
+ cache_entry: Optional[CacheEntry],
438
+ hooks: Hooks,
439
+ frame_state: Dict[str, Union[int, FrameStateSizeEntry]],
440
+ *,
441
+ skip: int = 0,
442
+ ) -> Optional[GuardedCode]:
443
+ increment_frame()
444
+
445
+ code = frame.f_code
446
+
447
+ cache_size = compute_cache_size(frame, cache_entry)
448
+ input_codes.add(code)
449
+ if code in output_codes:
450
+ return None
451
+ if (
452
+ os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION")
453
+ and os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") != code.co_name
454
+ ):
455
+ return None
456
+ if code.co_name == "<genexpr>" and code.co_filename.endswith(
457
+ (
458
+ "transformers/file_utils.py",
459
+ "transformers/utils/generic.py",
460
+ "diffusers/utils/outputs.py",
461
+ )
462
+ ):
463
+ # not needed, but cleans up torchbench error stats
464
+ return None
465
+ if code.co_name == "__setattr__":
466
+ # setattr could be tricky to handle generally,
467
+ # but also not likely useful to compile- skip the whole frame
468
+ return None
469
+ if code.co_name == "__init__" and code.co_filename.startswith(
470
+ os.path.dirname(torch.optim.__file__)
471
+ ):
472
+ # optimizer support is still incomplete see
473
+ # test_state_dict in test/dynamo/test_optimizers.py
474
+ return None
475
+
476
+ # Check if the frame is generated by an exec builtin call
477
+ # TODO - Running exec generated frame seems propagates f_globals to the
478
+ # next frames.
479
+ if code.co_name == "<module>" and code.co_filename == "<string>":
480
+ return None
481
+
482
+ if (
483
+ code.co_name == "<lambda>"
484
+ and code.co_filename == "<string>"
485
+ and not bool(frame.f_builtins)
486
+ ):
487
+ # namedtuple subclass constructor. Empty builtins cause issue with
488
+ # len keyword in LIST_LEN guard.
489
+ return None
490
+
491
+ if is_generator(code):
492
+ unimplemented("generator")
493
+
494
+ if not has_tensor_in_frame(frame):
495
+ return None
496
+
497
+ global initial_global_state
498
+ initial_global_state = GlobalStateGuard()
499
+
500
+ global FRAME_COUNTER
501
+ if "_id" not in frame_state:
502
+ frame_state["_id"] = FRAME_COUNTER
503
+ FRAME_COUNTER += 1
504
+ frame_id = frame_state["_id"]
505
+ assert isinstance(frame_id, int)
506
+
507
+ frame_compile_id = FRAME_COMPILE_COUNTER[frame_id]
508
+ FRAME_COMPILE_COUNTER[frame_id] += 1
509
+
510
+ compile_id = CompileId(frame_id, frame_compile_id)
511
+
512
+ signpost_event(
513
+ "dynamo",
514
+ "_convert_frame_assert._compile",
515
+ {
516
+ "co_name": code.co_name,
517
+ "frame_id": frame_id,
518
+ "compile_id": str(compile_id),
519
+ "co_filename": code.co_filename,
520
+ "co_firstlineno": code.co_firstlineno,
521
+ "cache_size": cache_size.num_cache_entries_with_same_id_matched_objs,
522
+ "accumulated_cache_size": cache_size.num_cache_entries,
523
+ },
524
+ )
525
+
526
+ return _compile(
527
+ frame.f_code,
528
+ frame.f_globals,
529
+ frame.f_locals,
530
+ frame.f_builtins,
531
+ self._torchdynamo_orig_callable,
532
+ self._one_graph,
533
+ self._export,
534
+ self._export_constraints,
535
+ hooks,
536
+ cache_entry,
537
+ cache_size,
538
+ frame,
539
+ frame_state=frame_state,
540
+ compile_id=compile_id,
541
+ skip=skip + 1,
542
+ )
543
+
544
+
545
+ def convert_frame_assert(
546
+ compiler_fn: CompilerFn,
547
+ one_graph: bool = True,
548
+ export: bool = False,
549
+ export_constraints: Optional[typing.Never] = None,
550
+ ) -> ConvertFrameAssert:
551
+ """Fully convert a frame into an FX graph"""
552
+ return ConvertFrameAssert(compiler_fn, one_graph, export, export_constraints)
553
+
554
+
555
+ from collections import OrderedDict
556
+
557
+ from torch.utils.hooks import RemovableHandle
558
+
559
+
560
+ if typing.TYPE_CHECKING:
561
+ from .output_graph import OutputGraph
562
+
563
+ # we have to use `OrderedDict` to make `RemovableHandle` work.
564
+ _bytecode_hooks: Dict[int, BytecodeHook] = OrderedDict()
565
+
566
+
567
+ def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle:
568
+ """Register hooks for bytecode generated by Dynamo. The hook can do some
569
+ logging, as well as return a new code object to be used. Please refer
570
+ to `BytecodeHook` for the hook signature.
571
+ """
572
+ handle = RemovableHandle(_bytecode_hooks)
573
+ _bytecode_hooks[handle.id] = hook
574
+ return handle
575
+
576
+
577
+ def _compile(
578
+ code: CodeType,
579
+ globals: Dict[str, object],
580
+ locals: Dict[str, object],
581
+ builtins: Dict[str, object],
582
+ compiler_fn: CompilerFn,
583
+ one_graph: bool,
584
+ export: bool,
585
+ export_constraints: Optional[typing.Never],
586
+ hooks: Hooks,
587
+ cache_entry: Optional[CacheEntry],
588
+ cache_size: CacheSizeRelevantForFrame,
589
+ frame: Optional[FrameType] = None,
590
+ frame_state: Optional[Dict[str, Union[int, FrameStateSizeEntry]]] = None,
591
+ *,
592
+ compile_id: CompileId,
593
+ skip: int = 0,
594
+ ) -> Optional[GuardedCode]:
595
+ from torch.fx.experimental.validator import (
596
+ bisect,
597
+ BisectValidationException,
598
+ translation_validation_enabled,
599
+ ValidationException,
600
+ )
601
+
602
+ # Only nonlocal defs here please!
603
+ # Time spent compiling this frame before restarting or failing analysis
604
+ dynamo_time_before_restart: float = 0.0
605
+ output: Optional[OutputGraph] = None
606
+ tracer: Optional[InstructionTranslator] = None
607
+
608
+ @preserve_global_state
609
+ def transform(
610
+ instructions: List[Instruction], code_options: Dict[str, object]
611
+ ) -> None:
612
+ nonlocal output
613
+ nonlocal tracer
614
+ speculation_log.restart()
615
+ tracer = InstructionTranslator(
616
+ instructions,
617
+ code,
618
+ locals,
619
+ globals,
620
+ builtins,
621
+ code_options,
622
+ compiler_fn,
623
+ one_graph,
624
+ export,
625
+ export_constraints,
626
+ mutated_closure_cell_contents,
627
+ frame_state=frame_state,
628
+ speculation_log=speculation_log,
629
+ distributed_state=distributed_state,
630
+ )
631
+
632
+ try:
633
+ with tracing(tracer.output.tracing_context), tracer.set_current_tx():
634
+ tracer.run()
635
+ except exc.UnspecializeRestartAnalysis:
636
+ speculation_log.clear()
637
+ raise
638
+ except (exc.SpeculationRestartAnalysis, exc.SkipFrame):
639
+ raise
640
+ except Exception:
641
+ if translation_validation_enabled():
642
+ bisect(tracer.output.shape_env)
643
+ raise
644
+ finally:
645
+ tracer.output.call_cleanup_hooks()
646
+
647
+ output = tracer.output
648
+ assert output is not None
649
+ assert output.output_instructions
650
+ instructions[:] = output.output_instructions
651
+ code_options.update(output.code_options)
652
+
653
+ if config.dead_code_elimination:
654
+ propagate_inst_exn_table_entries(instructions)
655
+ check_inst_exn_tab_entries_valid(instructions)
656
+ instructions[:] = remove_pointless_jumps(remove_dead_code(instructions))
657
+
658
+ def compile_inner(
659
+ code: CodeType,
660
+ one_graph: bool,
661
+ hooks: Hooks,
662
+ transform: Callable[[List[Instruction], Dict[str, Any]], Any],
663
+ ) -> Optional[GuardedCode]:
664
+ with dynamo_timed("_compile.compile_inner", phase_name="entire_frame_compile"):
665
+ with CompileTimeInstructionCounter.record():
666
+ return _compile_inner(code, one_graph, hooks, transform)
667
+
668
+ @compile_time_strobelight_meta(phase_name="compile_inner")
669
+ @maybe_cprofile
670
+ def _compile_inner(
671
+ code: CodeType,
672
+ one_graph: bool,
673
+ hooks: Hooks,
674
+ transform: Callable[[List[Instruction], Dict[str, Any]], Any],
675
+ ) -> Optional[GuardedCode]:
676
+ nonlocal dynamo_time_before_restart
677
+ last_attempt_start_time = start_time = time.time()
678
+
679
+ def log_bytecode(
680
+ prefix: str, name: str, filename: str, line_no: int, code: CodeType
681
+ ) -> None:
682
+ if bytecode_log.isEnabledFor(logging.DEBUG):
683
+ bytecode_log.debug(
684
+ format_bytecode(prefix, name, filename, line_no, code)
685
+ )
686
+
687
+ log_bytecode(
688
+ "ORIGINAL BYTECODE",
689
+ code.co_name,
690
+ code.co_filename,
691
+ code.co_firstlineno,
692
+ code,
693
+ )
694
+
695
+ out_code = None
696
+ for attempt in itertools.count():
697
+ CompileContext.get().attempt = attempt
698
+ try:
699
+ out_code = transform_code_object(code, transform)
700
+ break
701
+ except exc.RestartAnalysis as e:
702
+ log.info(
703
+ "Restarting analysis due to %s",
704
+ LazyString(format_traceback_short, e.__traceback__),
705
+ )
706
+ # If restart reason is None just log the type of the exception
707
+ restart_reasons.add(e.restart_reason or str(type(e)))
708
+ # We now have a new "last attempt", reset the clock
709
+ last_attempt_start_time = time.time()
710
+ if attempt > 100:
711
+ unimplemented("100+ RestartAnalysis() calls")
712
+ except exc.SkipFrame as e:
713
+ log.debug(
714
+ "Skipping frame %s %s \
715
+ %s %s",
716
+ e,
717
+ code.co_name,
718
+ code.co_filename,
719
+ code.co_firstlineno,
720
+ )
721
+ if one_graph:
722
+ log.debug("No graph captured with one_graph=True")
723
+ return None
724
+
725
+ assert (
726
+ distributed_state is None or distributed_state.all_states is not None
727
+ ), "compiler collective wasn't run before compilation completed"
728
+
729
+ assert out_code is not None
730
+ log_bytecode(
731
+ "MODIFIED BYTECODE",
732
+ code.co_name,
733
+ code.co_filename,
734
+ code.co_firstlineno,
735
+ out_code,
736
+ )
737
+
738
+ for hook in _bytecode_hooks.values():
739
+ hook_output = hook(code, out_code)
740
+ if hook_output is not None:
741
+ out_code = hook_output
742
+
743
+ orig_code_map[out_code] = code
744
+ output_codes.add(out_code)
745
+ dynamo_time_before_restart = last_attempt_start_time - start_time
746
+ assert output is not None
747
+
748
+ # Tests for new code objects.
749
+ # The rationale for these tests can be found in torch/csrc/dynamo/eval_frame.c
750
+ # Only test once the code object is created.
751
+ # They are not tested during runtime.
752
+
753
+ def count_args(code: CodeType) -> int:
754
+ import inspect
755
+
756
+ return (
757
+ code.co_argcount
758
+ + code.co_kwonlyargcount
759
+ + bool(code.co_flags & inspect.CO_VARARGS)
760
+ + bool(code.co_flags & inspect.CO_VARKEYWORDS)
761
+ )
762
+
763
+ assert out_code is not None
764
+
765
+ total_argcount_old = count_args(code)
766
+ total_argcount_new = count_args(out_code)
767
+ msg = "arg mismatch: "
768
+ msg += f"old code object has args {code.co_varnames[:total_argcount_old]}, "
769
+ msg += f"new code object has args {out_code.co_varnames[:total_argcount_new]}"
770
+ assert (
771
+ code.co_varnames[:total_argcount_old]
772
+ == out_code.co_varnames[:total_argcount_new]
773
+ ), msg
774
+
775
+ msg = "free var mismatch: "
776
+ msg += f"old code object has free var {code.co_freevars}, "
777
+ msg += f"new code object has free var {out_code.co_freevars}"
778
+ assert code.co_freevars == out_code.co_freevars, msg
779
+
780
+ msg = "cell var mismatch: "
781
+ msg += f"old code object has cell var {code.co_cellvars}, "
782
+ msg += f"new code object has cell var {out_code.co_cellvars}"
783
+ assert code.co_cellvars == out_code.co_cellvars, msg
784
+
785
+ # Skipping Dynamo on a frame without any extracted graph.
786
+ # This does not affect eager functionality. But this is necessary
787
+ # for export for cases where Dynamo-reconstructed bytecode can create
788
+ # new function frames, confusing export in thinking that there
789
+ # are extra graphs now.
790
+
791
+ if output.export and output.is_empty_graph():
792
+ return None
793
+
794
+ assert output.guards is not None
795
+ CleanupManager.instance[out_code] = output.cleanups
796
+ check_fn = CheckFunctionManager(
797
+ output,
798
+ hooks.guard_fail_fn if hooks else None,
799
+ )
800
+
801
+ guarded_code = GuardedCode(out_code, check_fn.check_fn, compile_id)
802
+
803
+ if not output.is_empty_graph() and hooks.guard_export_fn is not None:
804
+ # We should not run the guard_export_fn when Dynamo does not
805
+ # generate any graph. This can happen in export when TorchDynamo
806
+ # generated bytecode has some reconstruction logic for mutated
807
+ # variables which can trigger TorchDynamo on the children frames but
808
+ # they are benign and do not generate any new graphs.
809
+ hooks.guard_export_fn(output.guards)
810
+
811
+ return guarded_code
812
+
813
+ with _use_lazy_graph_module(config.use_lazy_graph_module), compile_context(
814
+ CompileContext(compile_id)
815
+ ):
816
+ restart_reasons: set[str] = set()
817
+ # This is shared across restarts
818
+ mutated_closure_cell_contents: Set[str] = set()
819
+ speculation_log = SpeculationLog()
820
+ if compile_pg := get_compile_pg():
821
+ distributed_state = DistributedState(compile_pg, LocalState())
822
+ else:
823
+ distributed_state = None
824
+ torch._dynamo.callback_handler.run_start_callbacks()
825
+
826
+ # Check recompilations
827
+ recompile_reasons = None
828
+ if is_recompilation(cache_size) and frame:
829
+ recompile_reasons = get_and_maybe_log_recompilation_reason(
830
+ cache_entry, frame
831
+ )
832
+
833
+ exceeded, limit_type = exceeds_cache_size_limit(cache_size, compile_id)
834
+ if exceeded:
835
+
836
+ def format_func_info(code: CodeType) -> str:
837
+ return f"'{code.co_name}' ({code.co_filename}:{code.co_firstlineno})"
838
+
839
+ def format_guard_failures() -> str:
840
+ if not recompile_reasons:
841
+ return "Unable to find recompilation reasons"
842
+ return recompile_reasons[-1]
843
+
844
+ log.warning(
845
+ "torch._dynamo hit config.%s (%s)\n"
846
+ " function: %s\n"
847
+ " last reason: %s\n"
848
+ 'To log all recompilation reasons, use TORCH_LOGS="recompiles".\n'
849
+ "To diagnose recompilation issues, see %s.",
850
+ limit_type,
851
+ getattr(config, limit_type),
852
+ format_func_info(code),
853
+ format_guard_failures(),
854
+ troubleshooting_url,
855
+ )
856
+ if config.skip_code_recursive_on_cache_limit_hit and justknobs_check(
857
+ "pytorch/compiler:skip_code_recursive_on_cache_limit_hit"
858
+ ):
859
+ raise CacheLimitExceeded(f"{limit_type} reached")
860
+ else:
861
+ # do not recursively skip frames
862
+ unimplemented(f"{limit_type} reached")
863
+
864
+ log.debug(
865
+ "torchdynamo start compiling %s %s:%s, stack (elided %s frames):\n%s",
866
+ code.co_name,
867
+ code.co_filename,
868
+ code.co_firstlineno,
869
+ skip + 2,
870
+ # -2: omit current frame, omit contextlib decorator
871
+ "".join(CapturedTraceback.extract(skip=2 + skip).format()),
872
+ )
873
+ # -4: -2 as above, plus trace_structured frames
874
+ #
875
+ # NB: the frame looks like this:
876
+ #
877
+ # # handled by skip argument
878
+ # torch/_dynamo/convert_frame.py:1069 in catch_errors
879
+ # torch/_dynamo/convert_frame.py:910 in _convert_frame
880
+ # torch/_dynamo/convert_frame.py:464 in _convert_frame_assert
881
+ # torch/_utils_internal.py:70 in wrapper_function
882
+ #
883
+ # # 2 current frame and context lib
884
+ # env/lib/python3.10/contextlib.py:79 in inner
885
+ # torch/_dynamo/convert_frame.py:776 in _compile
886
+ #
887
+ # # 2 extra here
888
+ # torch/_logging/_internal.py:1064 in trace_structured
889
+ # torch/_dynamo/convert_frame.py:780 in <lambda>
890
+ convert_frame_intern = structured.intern_string(__file__)
891
+ # Initialize the ChromiumEventLogger on start
892
+ chromium_event_log = get_chromium_event_logger()
893
+ chromium_event_log.reset()
894
+ torch._logging.trace_structured(
895
+ "dynamo_start",
896
+ lambda: {
897
+ "stack": list(
898
+ itertools.takewhile(
899
+ lambda f: f["filename"] != convert_frame_intern,
900
+ structured.from_traceback(
901
+ CapturedTraceback.extract(skip=4 + skip).summary()
902
+ ),
903
+ )
904
+ )
905
+ + [
906
+ {
907
+ "line": code.co_firstlineno,
908
+ "name": code.co_name,
909
+ "filename": structured.intern_string(code.co_filename),
910
+ }
911
+ ]
912
+ },
913
+ )
914
+ start_time = time.time()
915
+ fail_type: Optional[str] = None
916
+ fail_reason: Optional[str] = None
917
+ fail_user_frame_filename: Optional[str] = None
918
+ fail_user_frame_lineno: Optional[int] = None
919
+ start_possibly_missed_reinplacing_opportunities = torch._dynamo.utils.counters[
920
+ "inductor"
921
+ ]["possibly_missed_reinplacing_opportunities"]
922
+ guarded_code = None
923
+ try:
924
+ guarded_code = compile_inner(code, one_graph, hooks, transform)
925
+ return guarded_code
926
+ except Exception as e:
927
+ fail_type = type(e).__qualname__
928
+ fail_reason = str(e)
929
+ # NB: e's msg is mutated here to add user stack, but we DON'T want
930
+ # that stack in the Scuba logged fail_reason
931
+ exception_handler(e, code, frame, export=export)
932
+ fail_user_frame_filename, fail_user_frame_lineno = exc.get_exc_message(
933
+ e, compile_id
934
+ )
935
+ if isinstance(
936
+ e,
937
+ (
938
+ Unsupported,
939
+ TorchRuntimeError,
940
+ BackendCompilerFailed,
941
+ AssertionError,
942
+ ConstraintViolationError,
943
+ GuardOnDataDependentSymNode,
944
+ ValidationException,
945
+ UncapturedHigherOrderOpError,
946
+ BisectValidationException,
947
+ ),
948
+ ):
949
+ raise
950
+ else:
951
+ # Rewrap for clarity
952
+ raise InternalTorchDynamoError(
953
+ f"{type(e).__qualname__}: {str(e)}"
954
+ ).with_traceback(e.__traceback__) from None
955
+ finally:
956
+ if tracer:
957
+ tracer.output.local_scope = {}
958
+
959
+ from .utils import curr_frame
960
+
961
+ frame_key = str(curr_frame)
962
+ if (
963
+ fail_reason is None
964
+ and output is not None
965
+ and frame_key in frame_phase_timing
966
+ ):
967
+ guard_count = len(output.guards)
968
+ shape_env_guard_count = len(output.shape_env.guards)
969
+ graph_op_count = output.count_calls()
970
+ graph_node_count = len(output.graph.nodes)
971
+ graph_input_count = len(output.placeholders)
972
+ entire_frame_compile_time = frame_phase_timing[frame_key].get(
973
+ "entire_frame_compile", None
974
+ )
975
+ backend_compile_time = frame_phase_timing[frame_key].get(
976
+ "backend_compile", None
977
+ )
978
+ inductor_compile_time = frame_phase_timing[frame_key].get(
979
+ "inductor_compile", None
980
+ )
981
+ code_gen_time = frame_phase_timing[frame_key].get("code_gen", None)
982
+ non_compliant_ops = {op.__qualname__ for op in output.non_compliant_ops}
983
+ compliant_custom_ops = {
984
+ op.__qualname__ for op in output.compliant_custom_ops
985
+ }
986
+ possibly_missed_reinplacing_opportunities = (
987
+ torch._dynamo.utils.counters["inductor"][
988
+ "possibly_missed_reinplacing_opportunities"
989
+ ]
990
+ - start_possibly_missed_reinplacing_opportunities
991
+ )
992
+ else:
993
+ guard_count = None
994
+ shape_env_guard_count = None
995
+ graph_op_count = None
996
+ graph_node_count = None
997
+ graph_input_count = None
998
+ entire_frame_compile_time = None
999
+ backend_compile_time = None
1000
+ inductor_compile_time = None
1001
+ code_gen_time = None
1002
+ non_compliant_ops = set({})
1003
+ compliant_custom_ops = set({})
1004
+ restart_reasons = set()
1005
+ # If compilation failed, the entire time is wasted
1006
+ dynamo_time_before_restart = time.time() - start_time
1007
+ possibly_missed_reinplacing_opportunities = None
1008
+
1009
+ metrics = CompilationMetrics(
1010
+ str(compile_id),
1011
+ frame_key,
1012
+ code.co_name,
1013
+ code.co_filename,
1014
+ code.co_firstlineno,
1015
+ cache_size.num_cache_entries_with_same_id_matched_objs,
1016
+ cache_size.num_cache_entries,
1017
+ guard_count,
1018
+ shape_env_guard_count,
1019
+ graph_op_count,
1020
+ graph_node_count,
1021
+ graph_input_count,
1022
+ start_time,
1023
+ entire_frame_compile_time,
1024
+ backend_compile_time,
1025
+ inductor_compile_time,
1026
+ code_gen_time,
1027
+ fail_type,
1028
+ fail_reason,
1029
+ fail_user_frame_filename,
1030
+ fail_user_frame_lineno,
1031
+ non_compliant_ops,
1032
+ compliant_custom_ops,
1033
+ restart_reasons,
1034
+ dynamo_time_before_restart,
1035
+ guarded_code is not None,
1036
+ possibly_missed_reinplacing_opportunities,
1037
+ )
1038
+ record_compilation_metrics(metrics)
1039
+ torch._dynamo.callback_handler.run_end_callbacks()
1040
+
1041
+
1042
+ class ConvertFrame:
1043
+ def __init__(self, compiler_fn: CompilerFn, hooks: Hooks) -> None:
1044
+ self._torchdynamo_orig_callable = compiler_fn
1045
+ self._inner_convert = convert_frame_assert(compiler_fn, one_graph=False)
1046
+ self._hooks = hooks
1047
+
1048
+ @property
1049
+ def _clone_with_backend(self) -> Callable[[WrapBackendDebug], ConvertFrame]:
1050
+ return lambda backend: convert_frame(backend, self._hooks)
1051
+
1052
+ def __call__(
1053
+ self,
1054
+ frame: FrameType,
1055
+ cache_entry: Optional[CacheEntry],
1056
+ hooks: Hooks,
1057
+ frame_state: Dict[str, Union[int, FrameStateSizeEntry]],
1058
+ skip: int = 0,
1059
+ ) -> Optional[
1060
+ Union[GuardedCode, torch._C._dynamo.eval_frame.SkipCodeRecursiveFlag]
1061
+ ]:
1062
+ counters["frames"]["total"] += 1
1063
+ try:
1064
+ result = self._inner_convert(
1065
+ frame, cache_entry, hooks, frame_state, skip=skip + 1
1066
+ )
1067
+ counters["frames"]["ok"] += 1
1068
+ return result
1069
+ except Exception as e:
1070
+ # These two exception types are "soft" failure, in the sense that
1071
+ # we know this is due to something we didn't implement all the
1072
+ # way, scare the user less about it. That being said, if you
1073
+ # are trying to understand why a graph break happened, it's still
1074
+ # important to have this information, so offer it.
1075
+ #
1076
+ # NB: NotImplementedError used to be on this list, but actually
1077
+ # it is impossible for it to reach here, as it is converted into
1078
+ # InternalTorchDynamoError. This behavior seemed reasonable
1079
+ # to me (ezyang, Aug 2023) so I kept it, but maybe at some point
1080
+ # someone wanted these to also get suppressed. If so, you'll
1081
+ # need to make these exceptions not get wrapped
1082
+
1083
+ # We intentionally don't want to suppress error here.
1084
+ if isinstance(e, UncapturedHigherOrderOpError):
1085
+ raise
1086
+
1087
+ soft_fail = isinstance(e, Unsupported)
1088
+
1089
+ # This is a soft failure. In the sense, the code path reaches here
1090
+ # when we do not support graph breaks on bytecodes like LOAD_ATTR,
1091
+ # BUILD_SET etc. In such case, we can fallback to eager without
1092
+ # scaring users.
1093
+ if isinstance(e, Unsupported) and graph_break_log.isEnabledFor(
1094
+ logging.DEBUG
1095
+ ):
1096
+ # Log this message in the graph break. Also use the string
1097
+ # "skip: " to tell that the whole frame is falling back to
1098
+ # eager.
1099
+ if hasattr(e, "compile_id"):
1100
+ with compile_context(CompileContext(e.compile_id)): # type: ignore[attr-defined]
1101
+ user_stack = e.real_stack
1102
+ user_stack_formatted = "".join(
1103
+ traceback.format_list(user_stack)
1104
+ )
1105
+ graph_break_log.debug(
1106
+ "Graph break: skip: from user code at:\n%s",
1107
+ user_stack_formatted,
1108
+ exc_info=True,
1109
+ )
1110
+
1111
+ if not config.suppress_errors and not soft_fail:
1112
+ raise
1113
+
1114
+ # Suppress the error. NB: It's very important to do the
1115
+ # suppression logging HERE, where the actual suppression
1116
+ # happens. Previously it was somewhere else and so it was
1117
+ # possible to accidentally not log at all.
1118
+ record_filename = getattr(e, "record_filename", None)
1119
+ code = frame.f_code
1120
+ error_msg = format_error_msg(e, code, record_filename, frame)
1121
+
1122
+ if soft_fail:
1123
+ log.info(error_msg, exc_info=True)
1124
+ else:
1125
+ log.warning(error_msg, exc_info=True)
1126
+
1127
+ # If we encounter SkipCodeRecursiveException, return skip_code_recursive_flag
1128
+ # to signal to Dynamo eval frame to skip the current frame and any recursive calls.
1129
+ if isinstance(e, SkipCodeRecursiveException):
1130
+ return torch._C._dynamo.eval_frame.skip_code_recursive_flag
1131
+
1132
+ return None
1133
+
1134
+
1135
+ def convert_frame(compiler_fn: CompilerFn, hooks: Hooks) -> ConvertFrame:
1136
+ """Try to convert a frame into an FX graph, if error leave frame unmodified"""
1137
+ return ConvertFrame(compiler_fn, hooks)
1138
+
1139
+
1140
+ # TODO mlazos: add support for same args, or record them
1141
+ def replay(filename: str) -> None:
1142
+ from .backends.debugging import eager
1143
+
1144
+ original_replay_val = config.replay_record_enabled
1145
+ config.replay_record_enabled = False
1146
+ with open(filename, "rb") as in_file:
1147
+ record = ExecutionRecord.load(in_file)
1148
+ record.globals = dict(itertools.chain(record.globals.items(), globals().items()))
1149
+
1150
+ try:
1151
+ _compile(
1152
+ record.code,
1153
+ record.globals,
1154
+ record.locals,
1155
+ record.builtins,
1156
+ compiler_fn=eager,
1157
+ one_graph=False,
1158
+ export=False,
1159
+ export_constraints=None,
1160
+ hooks=Hooks(),
1161
+ cache_size=CacheSizeRelevantForFrame(0, 0),
1162
+ cache_entry=None,
1163
+ frame=None,
1164
+ frame_state={},
1165
+ compile_id=CompileId(42, 999),
1166
+ )
1167
+ finally:
1168
+ config.replay_record_enabled = original_replay_val
1169
+
1170
+
1171
+ def first_real_inst_idx(code: CodeType) -> int:
1172
+ if sys.version_info < (3, 11):
1173
+ return 0
1174
+ for inst in dis.get_instructions(code):
1175
+ if inst.opname == "RESUME":
1176
+ return inst.offset // 2
1177
+ raise RuntimeError("RESUME instruction not found in code")
1178
+
1179
+
1180
+ class ConvertFrameProtocol(typing.Protocol):
1181
+ def __call__(
1182
+ self,
1183
+ frame: FrameType,
1184
+ cache_entry: Optional[CacheEntry],
1185
+ hooks: Hooks,
1186
+ frame_state: Dict[str, Union[int, FrameStateSizeEntry]],
1187
+ *,
1188
+ skip: int = 0,
1189
+ ) -> Optional[GuardedCode]:
1190
+ ...
1191
+
1192
+
1193
+ class CatchErrorsWrapper:
1194
+ def __init__(self, callback: ConvertFrameProtocol, hooks: Hooks) -> None:
1195
+ functools.wraps(callback)(self)
1196
+ self._torchdynamo_orig_callable = callback
1197
+ self.hooks = hooks
1198
+
1199
+ def __call__(
1200
+ self,
1201
+ frame: FrameType,
1202
+ cache_entry: Optional[CacheEntry],
1203
+ frame_state: Dict[str, Union[int, FrameStateSizeEntry]],
1204
+ ) -> Optional[GuardedCode]:
1205
+ assert frame_state is not None
1206
+
1207
+ is_skipfile = trace_rules.check(frame.f_code)
1208
+ if sys.version_info >= (3, 13):
1209
+ has_started_execution = frame.f_lasti > first_real_inst_idx(frame.f_code)
1210
+ else:
1211
+ has_started_execution = frame.f_lasti >= first_real_inst_idx(frame.f_code)
1212
+ if (
1213
+ # TODO: the first condition is not covered by any test
1214
+ has_started_execution
1215
+ or is_skipfile
1216
+ or config.disable
1217
+ or (
1218
+ is_in_torch_dispatch_mode(include_infra_modes=False)
1219
+ and not getattr(self._torchdynamo_orig_callable, "_export", False)
1220
+ )
1221
+ ):
1222
+ if log.isEnabledFor(logging.DEBUG):
1223
+ print(frame.f_lasti, first_real_inst_idx(frame.f_code))
1224
+
1225
+ if has_started_execution:
1226
+ skip_reason = "traced frame already"
1227
+ elif trace_rules.check(frame.f_code):
1228
+ skip_reason = "in skipfiles"
1229
+ elif is_in_torch_dispatch_mode(include_infra_modes=False):
1230
+ skip_reason = "non-infra torch dispatch mode present, this is not supported today in torch.compile"
1231
+ else:
1232
+ skip_reason = "dynamo tracing is disabled"
1233
+
1234
+ log.debug(
1235
+ "skipping: %s (reason: %s, file: %s)",
1236
+ frame.f_code.co_name,
1237
+ skip_reason,
1238
+ frame.f_code.co_filename,
1239
+ )
1240
+ return None
1241
+
1242
+ if frame.f_code.co_filename == "<string>" and frame.f_code.co_name == "__new__":
1243
+ # nametuple constructor
1244
+ return None
1245
+ if config._get_optimize_ddp_mode() == "ddp_optimizer":
1246
+ ddp_module = DistributedDataParallel._get_active_ddp_module()
1247
+ if ddp_module:
1248
+ with compile_lock:
1249
+ from torch._dynamo.backends.distributed import DDPOptimizer
1250
+
1251
+ ddp_optimizer = DDPOptimizer(
1252
+ bucket_bytes_cap=ddp_module.bucket_bytes_cap,
1253
+ backend_compile_fn=self._torchdynamo_orig_callable._torchdynamo_orig_callable, # type: ignore[attr-defined]
1254
+ )
1255
+ assert hasattr(
1256
+ self._torchdynamo_orig_callable, "_clone_with_backend"
1257
+ ), "DDPOptimizer only supports callback fns that know how to clone themselves."
1258
+ hijacked_callback = (
1259
+ self._torchdynamo_orig_callable._clone_with_backend(
1260
+ ddp_optimizer.compile_fn,
1261
+ )
1262
+ )
1263
+ return hijacked_callback(
1264
+ frame, cache_entry, self.hooks, frame_state
1265
+ )
1266
+
1267
+ with compile_lock, _disable_current_modes():
1268
+ # skip=1: skip this frame
1269
+ return self._torchdynamo_orig_callable(
1270
+ frame, cache_entry, self.hooks, frame_state, skip=1
1271
+ )
1272
+
1273
+
1274
+ def catch_errors_wrapper(
1275
+ callback: ConvertFrameProtocol, hooks: Hooks
1276
+ ) -> CatchErrorsWrapper:
1277
+ return CatchErrorsWrapper(callback, hooks)
pllava/lib/python3.10/site-packages/torch/_dynamo/debug_utils.py ADDED
@@ -0,0 +1,824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # mypy: disable-error-code="method-assign"
3
+ import atexit
4
+ import copy
5
+ import cProfile
6
+ import functools
7
+ import getpass
8
+ import inspect
9
+ import itertools
10
+ import logging
11
+ import os
12
+ import re
13
+ import subprocess
14
+ import sys
15
+ import tempfile
16
+ import textwrap
17
+ from collections import Counter
18
+ from importlib import import_module
19
+ from typing import Any, Callable, Dict, List, Optional, TypeVar
20
+
21
+ import torch
22
+ import torch._prims_common as utils
23
+ import torch._subclasses.meta_utils
24
+ from torch import Tensor
25
+ from torch._dynamo.testing import rand_strided
26
+ from torch._prims_common import is_float_dtype
27
+ from torch.multiprocessing.reductions import StorageWeakRef
28
+ from torch.utils._content_store import ContentStoreReader, ContentStoreWriter
29
+
30
+ from . import config
31
+ from .utils import clone_inputs, get_debug_dir
32
+
33
+
34
+ log = logging.getLogger(__name__)
35
+
36
+ T = TypeVar("T")
37
+
38
+
39
+ inductor_config = import_module("torch._inductor.config")
40
+ use_buck = inductor_config.is_fbcode()
41
+
42
+ if use_buck:
43
+ import libfb.py.build_info
44
+
45
+
46
+ extra_deps = []
47
+ extra_imports = ""
48
+ if use_buck:
49
+ extra_deps = [
50
+ "//caffe2/torch/fb/sparsenn:sparsenn_operators_gpu",
51
+ "//caffe2/torch/fb/sparsenn:sparsenn_operators",
52
+ "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu",
53
+ "//deeplearning/fbgemm/fbgemm_gpu:sparse_ops",
54
+ ]
55
+ cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//") # type: ignore[possibly-undefined]
56
+ extra_imports = "\n".join([f'torch.ops.load_library("{x}")' for x in extra_deps])
57
+
58
+
59
+ BUCK_CMD_PREFIX = ["buck2", "run", "@mode/dev-nosan"]
60
+
61
+
62
+ class BuckTargetWriter:
63
+ def __init__(self, filename):
64
+ self.subdir, self.py_file = os.path.split(os.path.abspath(filename))
65
+ self.target = self.py_file.replace(".py", "")
66
+
67
+ # Get main_module path from fbcode
68
+ self.path = f'{self.subdir.replace("/", ".")}.{self.target}'
69
+ self.path = self.path[self.path.find("fbcode.") :]
70
+ self.path = self.path[7:]
71
+
72
+ # Get cmd line path
73
+ tmp = self.subdir
74
+ tmp = tmp[tmp.find("fbcode/") :][7:]
75
+ self.cmd_line_path = f"//{tmp}:{self.target}"
76
+
77
+ def build(self):
78
+ extra_cpp_deps = "\n".join([f' "{x}",' for x in extra_deps])
79
+ return textwrap.dedent(
80
+ f"""
81
+ load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
82
+
83
+ python_binary(
84
+ name="{self.target}",
85
+ srcs = ["{self.py_file}"],
86
+ compile = False,
87
+ deps = [
88
+ "//caffe2:torch",
89
+ "//caffe2/functorch:functorch",
90
+ "//triton:triton",
91
+ "{cur_target}",
92
+ ],
93
+ cpp_deps = [
94
+ {extra_cpp_deps}
95
+ ],
96
+ main_module = "{self.path}",
97
+ par_style = "xar",
98
+ )
99
+ """
100
+ )
101
+
102
+ def write(self, print_msg=True):
103
+ target_file = os.path.join(self.subdir, "TARGETS")
104
+ with open(target_file, "w") as fd:
105
+ fd.write(self.build())
106
+ # log.warning("Wrote isolation TARGETS file at %s", target_file)
107
+ cmd_split = BUCK_CMD_PREFIX + [self.cmd_line_path]
108
+ if print_msg:
109
+ log.warning(
110
+ "Found an example that reproduces the error. Run this cmd to repro - %s",
111
+ " ".join(cmd_split),
112
+ )
113
+ return cmd_split
114
+
115
+
116
+ def minifier_dir():
117
+ path = os.path.join(get_debug_dir(), "minifier")
118
+ if path is None:
119
+ path = f"{tempfile.gettempdir()}/minifier_{getpass.getuser()}"
120
+ if not os.path.exists(path):
121
+ os.makedirs(path, exist_ok=True)
122
+ return path
123
+
124
+
125
+ MAX_CONSTANT_NUMEL_INLINE = 4
126
+
127
+
128
+ class NNModuleToString:
129
+ safe_reprs = [
130
+ torch.nn.Linear,
131
+ torch.nn.Conv1d,
132
+ torch.nn.Conv2d,
133
+ torch.nn.Conv3d,
134
+ torch.nn.BatchNorm1d,
135
+ torch.nn.BatchNorm2d,
136
+ torch.nn.BatchNorm3d,
137
+ torch.nn.LayerNorm,
138
+ torch.nn.Dropout,
139
+ torch.nn.Softmax,
140
+ torch.nn.ReLU,
141
+ torch.nn.GELU,
142
+ torch.nn.Identity,
143
+ torch.nn.MaxPool2d,
144
+ torch.nn.Embedding,
145
+ torch.nn.Tanh,
146
+ torch.nn.ConvTranspose1d,
147
+ torch.nn.GLU,
148
+ torch.nn.LSTM,
149
+ torch.nn.Flatten,
150
+ torch.nn.AdaptiveAvgPool2d,
151
+ ]
152
+
153
+ @staticmethod
154
+ def can_convert_to_string(gm):
155
+ cant_convert = set()
156
+ for _, module in gm.named_children():
157
+ if type(module) not in NNModuleToString.safe_reprs:
158
+ cant_convert.add(module)
159
+
160
+ if len(cant_convert) > 0:
161
+ log.warning("We have not tested reprs of some modules - %s", cant_convert)
162
+ # TODO - Assuming that all modules can be safely repr'd. Check if that assumption is correct.
163
+ return True
164
+
165
+ @staticmethod
166
+ def convert(gm):
167
+ from torch.nn.modules.module import _addindent
168
+
169
+ tab = " " * 4
170
+
171
+ model_str = textwrap.dedent(
172
+ """
173
+ from torch.nn import *
174
+ class Repro(torch.nn.Module):
175
+ def __init__(self) -> None:
176
+ super().__init__()
177
+ """
178
+ )
179
+
180
+ for module_name, module in gm.named_children():
181
+ module_str = f"{module.__repr__()}"
182
+ # module should be a core torch.nn.Module, so all parameters
183
+ # should be on the same device.
184
+ example_param = next(module.parameters(), None)
185
+ if example_param is not None and example_param.is_cuda:
186
+ module_str = f"{module_str}.cuda()"
187
+ model_str += f"{tab*2}self.{module_name} = {module_str}\n"
188
+
189
+ for buffer_name, buffer in gm._buffers.items():
190
+ if buffer is None:
191
+ continue
192
+ # Serialize full data for small buffers
193
+ if buffer.numel() <= MAX_CONSTANT_NUMEL_INLINE:
194
+ from torch._tensor_str import PRINT_OPTS
195
+
196
+ assert PRINT_OPTS.threshold >= MAX_CONSTANT_NUMEL_INLINE
197
+ tensor_str = repr(buffer)
198
+ elif torch.is_floating_point(buffer):
199
+ tensor_str = f"torch.randn({list(buffer.shape)}, dtype={buffer.dtype})"
200
+ else:
201
+ tensor_str = (
202
+ f"torch.randint(1, size={list(buffer.shape)}, dtype={buffer.dtype})"
203
+ )
204
+ if buffer.is_cuda:
205
+ tensor_str = f"{tensor_str}.cuda()"
206
+ model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
207
+
208
+ for param_name, param in gm._parameters.items():
209
+ if param is None:
210
+ continue
211
+ maybe_device = ""
212
+ if param.is_cuda:
213
+ maybe_device = ', device="cuda"'
214
+ tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))"
215
+ model_str += f"{tab*2}self.{param_name} = {tensor_str}\n"
216
+
217
+ # TODO - Keep this code for now. But, I don't think we will need this.
218
+ # attrs = dir(gm)
219
+ # for attr in attrs:
220
+ # if "_tensor_constant" in attr:
221
+ # val = getattr(gm, attr)
222
+ # model_str += f" {attr} = {val!r}\n"
223
+
224
+ model_str += f"{_addindent(gm.code, 4)}\n"
225
+ return model_str
226
+
227
+
228
+ @functools.lru_cache(None) # subprocess is expensive
229
+ def _cuda_system_info_comment():
230
+ if not torch.cuda.is_available():
231
+ return "# torch.cuda.is_available()==False, no GPU info collected\n"
232
+
233
+ model_str = "# CUDA Info: \n"
234
+ try:
235
+ cuda_version_out = subprocess.check_output(["nvcc", "--version"])
236
+ cuda_version_lines = cuda_version_out.decode().split("\n")
237
+ comment = "".join([f"# {s} \n" for s in cuda_version_lines if s not in [""]])
238
+ model_str += f"{comment}\n"
239
+ except (FileNotFoundError, subprocess.CalledProcessError):
240
+ model_str += "# nvcc not found\n"
241
+
242
+ gpu_names = Counter(
243
+ torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())
244
+ )
245
+
246
+ model_str += "# GPU Hardware Info: \n"
247
+ for name, count in gpu_names.items():
248
+ model_str += f"# {name} : {count} \n"
249
+ model_str += "\n"
250
+ return model_str
251
+
252
+
253
+ def generate_config_string(*, stable_output=False):
254
+ import torch._functorch.config
255
+ import torch._inductor.config
256
+
257
+ if stable_output:
258
+ return "# config omitted due to stable_output=True"
259
+
260
+ experimental_config = torch.fx.experimental._config.codegen_config() # type: ignore[attr-defined]
261
+ return f"""\
262
+ import torch._dynamo.config
263
+ import torch._inductor.config
264
+ import torch._functorch.config
265
+ import torch.fx.experimental._config
266
+ {torch._dynamo.config.codegen_config()}
267
+ {torch._inductor.config.codegen_config()}
268
+ {torch._functorch.config.codegen_config()}
269
+ {experimental_config}
270
+ """
271
+
272
+
273
+ def get_minifier_repro_path():
274
+ return os.path.join(minifier_dir(), "minifier_launcher.py")
275
+
276
+
277
+ def helper_for_dump_minify(contents):
278
+ minified_repro_path = get_minifier_repro_path()
279
+ log.warning("Writing minified repro to:\n%s", minified_repro_path)
280
+
281
+ if use_buck:
282
+ BuckTargetWriter(minified_repro_path).write()
283
+ try:
284
+ with open(minified_repro_path, "w") as fd:
285
+ fd.write(contents)
286
+
287
+ except OSError as e:
288
+ log.exception("")
289
+ raise NotImplementedError("Could not write to {minified_repro_path}") from e
290
+
291
+
292
+ class AccuracyError(Exception):
293
+ pass
294
+
295
+
296
+ def clone_inputs_retaining_gradness(example_inputs):
297
+ """
298
+ This clone inputs is different from utils clone_input. In case of minifier,
299
+ all the tensors are leaf tensors while creating a new graph. So, we set the
300
+ requires_grad field w/o checking the leafness of the tensor.
301
+ """
302
+ cloned_inputs = clone_inputs(example_inputs)
303
+ for idx in range(len(example_inputs)):
304
+ if isinstance(cloned_inputs[idx], torch.Tensor):
305
+ cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad)
306
+ return cloned_inputs
307
+
308
+
309
+ def run_fwd_maybe_bwd(gm, args, only_fwd=False, disable_clone=False):
310
+ """
311
+ Runs a forward and possibly backward iteration for a given mod and args.
312
+
313
+ When disable_clone is True, we will use args as-is without cloning.
314
+ This is higher fidelity but we may destroy the args in the process.
315
+ """
316
+ from .testing import collect_results, reduce_to_scalar_loss, requires_bwd_pass
317
+
318
+ gm = copy.deepcopy(gm)
319
+ if not disable_clone:
320
+ args = clone_inputs_retaining_gradness(args)
321
+
322
+ if hasattr(gm, "zero_grad"):
323
+ gm.zero_grad(True)
324
+
325
+ # TorchInductor returned callable expects lists. So, may need a boxed calling convention.
326
+ out = gm(args) if hasattr(gm, "_boxed_call") else gm(*args)
327
+
328
+ if only_fwd:
329
+ return out
330
+ if requires_bwd_pass(out):
331
+ loss = reduce_to_scalar_loss(out)
332
+ loss.backward()
333
+ return collect_results(gm, out, None, args)
334
+
335
+
336
+ def same_two_models(
337
+ gm,
338
+ opt_gm,
339
+ example_inputs,
340
+ only_fwd=False,
341
+ *,
342
+ require_fp64=False,
343
+ ignore_non_fp=False,
344
+ ):
345
+ """
346
+ Check two models have same accuracy.
347
+
348
+ require_fp64: if True, raise an error if we unable to calculate the fp64 reference
349
+ ignore_non_fp: if True, do not compare outputs which are not floating point. This
350
+ is mostly useful for the minifier (which wants to avoid quantizing floating point
351
+ error into integer/boolean error)
352
+ """
353
+ from .utils import same
354
+
355
+ ref = run_fwd_maybe_bwd(gm, example_inputs, only_fwd)
356
+
357
+ fp64_ref = None
358
+ if config.same_two_models_use_fp64:
359
+ try:
360
+ fp64_model, fp64_examples = cast_to_fp64(
361
+ copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
362
+ )
363
+ fp64_ref = run_fwd_maybe_bwd(fp64_model, fp64_examples, only_fwd)
364
+ except Exception:
365
+ if require_fp64:
366
+ raise RuntimeError( # noqa: B904
367
+ "Could not generate fp64 outputs, workaround with torch._dynamo.config.same_two_models_use_fp64 = False"
368
+ )
369
+ log.warning("Could not generate fp64 outputs")
370
+
371
+ try:
372
+ res = run_fwd_maybe_bwd(opt_gm, example_inputs, only_fwd)
373
+ except Exception as e:
374
+ # This means that the minified graph is bad/exposes a different problem.
375
+ # As we are checking accuracy here, lets log the exception and return True.
376
+ log.exception(
377
+ "While minifying the program in accuracy minification mode, "
378
+ "ran into a runtime exception which is likely an unrelated issue."
379
+ " Skipping this graph."
380
+ )
381
+ return True
382
+
383
+ passing = same(
384
+ ref,
385
+ res,
386
+ fp64_ref,
387
+ tol=config.repro_tolerance,
388
+ equal_nan=True,
389
+ ignore_non_fp=ignore_non_fp,
390
+ )
391
+ return passing
392
+
393
+
394
+ def cast_dtype_args_to_fp64(model):
395
+ for node in model.graph.nodes:
396
+ if (
397
+ node.op == "call_function"
398
+ and node.target == torch.ops.prims.convert_element_type.default
399
+ ):
400
+ assert len(node.args) == 2
401
+ if is_float_dtype(node.args[1]) and node.args[1] != torch.float64:
402
+ node.args = (node.args[0], torch.float64)
403
+ if node.op == "call_function":
404
+ dtype = node.kwargs.get("dtype")
405
+ if dtype is not None and is_float_dtype(dtype):
406
+ new_kwargs = dict(node.kwargs)
407
+ new_kwargs["dtype"] = torch.float64
408
+ node.kwargs = new_kwargs
409
+
410
+ model.graph.lint()
411
+ model.recompile()
412
+ return model
413
+
414
+
415
+ def cast_to(dtype, model, inputs):
416
+ from torch.utils._pytree import tree_map
417
+
418
+ model = model.to(dtype)
419
+ if dtype == torch.float64:
420
+ # If casting to fp64 for accuracy comparison, we need to
421
+ # replace dtype arguments embedded in the graph with fp64
422
+ model = cast_dtype_args_to_fp64(model)
423
+
424
+ inputs = tree_map(
425
+ lambda x: x.to(dtype)
426
+ if isinstance(x, torch.Tensor) and x.is_floating_point()
427
+ else x,
428
+ inputs,
429
+ )
430
+ return model, inputs
431
+
432
+
433
+ def cast_to_fp64(model, inputs):
434
+ return cast_to(torch.float64, model, inputs)
435
+
436
+
437
+ def backend_accuracy_fails(
438
+ gm,
439
+ example_inputs,
440
+ compiler_fn,
441
+ only_fwd=False,
442
+ *,
443
+ require_fp64=False,
444
+ ignore_non_fp=False,
445
+ ):
446
+ try:
447
+ compiled_gm = compiler_fn(
448
+ copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
449
+ )
450
+ return not same_two_models(
451
+ gm,
452
+ compiled_gm,
453
+ example_inputs,
454
+ only_fwd,
455
+ require_fp64=require_fp64,
456
+ ignore_non_fp=ignore_non_fp,
457
+ )
458
+ except Exception as e:
459
+ # This means that the minified graph is bad/exposes a different problem.
460
+ # As we are checking accuracy here, lets log the exception and return False.
461
+ log.exception(
462
+ "While minifying the program in accuracy minification mode, "
463
+ "ran into a runtime exception which is likely an unrelated issue."
464
+ " Skipping this graph"
465
+ )
466
+ return False
467
+
468
+
469
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
470
+ # REPRO SUPPORT CODE
471
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
472
+
473
+
474
+ # Helper functions for computing what the default values of tensor
475
+ # values should be. These all coincide with factory functions, e.g., torch.empty
476
+
477
+
478
+ def _stride_or_default(
479
+ stride: Optional["torch._prims_common.StrideType"],
480
+ *,
481
+ shape: "torch._prims_common.ShapeType",
482
+ ) -> "torch._prims_common.StrideType":
483
+ return stride if stride is not None else utils.make_contiguous_strides_for(shape)
484
+
485
+
486
+ def _mk_defaulter(d: T) -> Callable[[Optional[T]], T]:
487
+ return lambda x: x if x is not None else d
488
+
489
+
490
+ _dtype_or_default = _mk_defaulter(torch.float32)
491
+ _device_or_default = _mk_defaulter(torch.device("cpu"))
492
+ _storage_offset_or_default = _mk_defaulter(0)
493
+ _requires_grad_or_default = _mk_defaulter(False)
494
+ _is_leaf_or_default = _mk_defaulter(False)
495
+
496
+
497
+ class NopInputReader:
498
+ def __init__(self) -> None:
499
+ self.total = 0
500
+
501
+ def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None):
502
+ self.total += 1
503
+
504
+ def tensor(self, *args, **kwargs):
505
+ pass
506
+
507
+ def symint(self, *args, **kwargs):
508
+ pass
509
+
510
+
511
+ # TODO: Support bundling the entire repro into a zip file for ease of
512
+ # transferring around
513
+ class InputReader:
514
+ def __init__(self, save_dir=None, *, pbar=None):
515
+ # If None, we will generate random data instead. It's important
516
+ # to natively support this use case as it will allow people to
517
+ # share repros without including the real data, if the problem
518
+ # reproduces even on random data.
519
+ if save_dir is None:
520
+ log.warning("no save_dir specified, will generate random data")
521
+ self.store = ContentStoreReader(save_dir) if save_dir is not None else None
522
+ self.args = []
523
+ self.pbar = pbar
524
+
525
+ def storage(self, storage_hash, nbytes, *, device=None, dtype_hint=None):
526
+ if self.pbar is not None:
527
+ self.pbar.update(1)
528
+ device = _device_or_default(device)
529
+ dtype_hint = _dtype_or_default(dtype_hint)
530
+ if self.store is not None and storage_hash is not None:
531
+ try:
532
+ storage = self.store.read_storage(storage_hash)
533
+ except FileNotFoundError:
534
+ pass
535
+ else:
536
+ if device != storage.device:
537
+ log.warning("device mismatch: %s != %s", device, storage.device)
538
+ # TODO: transfer it to the right device? But failing this
539
+ # way would be very mysterious! Would have been better
540
+ # not to store device in the serialized format...
541
+ return storage
542
+ log.warning("could not load %s, generating random data instead", storage_hash)
543
+ shape = (nbytes // dtype_hint.itemsize,)
544
+ stride = _stride_or_default(None, shape=shape)
545
+ return rand_strided(shape, stride, dtype_hint, device).untyped_storage()
546
+
547
+ def tensor(
548
+ self,
549
+ storage,
550
+ shape,
551
+ stride=None,
552
+ *,
553
+ storage_offset=None,
554
+ dtype=None,
555
+ requires_grad=None,
556
+ is_leaf=None,
557
+ **metadata,
558
+ ):
559
+ stride = _stride_or_default(stride, shape=shape)
560
+ storage_offset = _storage_offset_or_default(storage_offset)
561
+ dtype = _dtype_or_default(dtype)
562
+ is_leaf = _is_leaf_or_default(is_leaf)
563
+ requires_grad = _requires_grad_or_default(requires_grad)
564
+ t = torch.tensor(
565
+ [], dtype=dtype, device=storage.device, requires_grad=requires_grad
566
+ )
567
+ with torch.no_grad():
568
+ t.set_(storage, storage_offset, shape, stride)
569
+ if not is_leaf:
570
+ # Fake up some autograd history in a very naughty way
571
+ with torch.enable_grad():
572
+ t = t.clone(memory_format=torch.preserve_format)
573
+ with torch.no_grad():
574
+ t.set_(storage, storage_offset, shape, stride)
575
+ assert torch._subclasses.meta_utils.safe_is_leaf(t) == is_leaf
576
+ torch._utils.set_tensor_metadata(t, metadata)
577
+ self.args.append(t)
578
+ return t # for BC
579
+
580
+ def symint(self, val):
581
+ self.args.append(val)
582
+ return val # for BC
583
+
584
+
585
+ # Here is our writer strategy:
586
+ # 1. We will stream all of the inputs to disk
587
+ # 2. You can now deterministically randomize the inputs, or reload
588
+ # the inputs from disk
589
+ # 3. You can YOLO run the script without the inputs, in which case
590
+ # we'll fill the inputs with random data and pray. This is the
591
+ # legacy behavior, but it's also useful if you want to find out
592
+ # if we're so broken even random inputs trigger it
593
+ # 4. We could offer an in process "check if the randomized thing
594
+ # works too" but this is delicate so we don't do it
595
+
596
+
597
class InputWriter:
    """Serializes repro inputs into the source of a ``load_args(reader)`` script.

    Each ``storage``/``tensor``/``symint`` call appends a line of Python that
    reconstructs the corresponding input.  When ``save_dir`` is given, raw
    tensor bytes are streamed to disk via ``ContentStoreWriter`` so the repro
    can reload exact values; otherwise only metadata is recorded
    (``storage_hash`` stays ``None``).
    """

    def __init__(self, save_dir, *, stable_hash=False):
        self._lines = []
        # TODO: consider ensuring tensor and storage counters line up?
        self.storage_counter = itertools.count()
        self.save_dir = save_dir
        # Only persist tensor bytes when a directory was provided.
        self.store = (
            ContentStoreWriter(save_dir, stable_hash=stable_hash)
            if save_dir is not None
            else None
        )
        # StorageWeakRef -> emitted variable name, so aliased tensors
        # share a single buffer line in the generated script.
        self.seen_storages = {}

    def lines(self):
        """Render the accumulated calls as the full source of ``load_args``."""
        r = [
            "def load_args(reader):",
        ]
        r.extend(f" {l}" for l in self._lines)
        # In case we need to change the internal format of load_args
        # in an FC-breaking way
        r.append("load_args._version = 0")
        return r

    # Storages are untyped, but we need to initialize them with data if
    # we don't have the real data, so we give a hint saying what kind
    # of initialization may be appropriate
    #
    # If we had a FakeTensor, device_hint tells us what device should be
    def storage(self, untyped_storage, *, dtype_hint=None, device_hint=None) -> str:
        """Emit one ``reader.storage`` line per aliased storage; return its var name."""
        ws = StorageWeakRef(untyped_storage)
        v = self.seen_storages.get(ws)
        if v is not None:
            # Already emitted for an aliasing tensor; reuse the buffer var.
            return v
        v = f"buf{next(self.storage_counter)}"
        # Only emit keyword arguments that differ from the reader defaults,
        # keeping the generated script short.
        maybe_dtype_hint = ""
        if _dtype_or_default(None) != _dtype_or_default(dtype_hint):
            maybe_dtype_hint = f", dtype_hint={dtype_hint!r}"
        # TODO: being optional on device is kind of pointless as the default
        # is CPU but most repros we care about are CUDA
        maybe_device = ""
        device = untyped_storage.device
        if device.type == "meta":
            # Meta storages carry no usable device; fall back to the hint.
            assert device_hint is not None
            device = device_hint
        if _device_or_default(None) != device:
            maybe_device = f", device={device!r}"
        nbytes = untyped_storage.nbytes()
        storage_hash = None
        if self.store is not None and untyped_storage.device.type != "meta":
            storage_hash = self.store.write_storage(untyped_storage)
        self._lines.append(
            f"{v} = reader.storage({storage_hash!r}, {nbytes!r}{maybe_device}{maybe_dtype_hint})"
        )
        self.seen_storages[ws] = v
        return v

    def tensor(self, name, t) -> None:
        """Emit a ``reader.tensor`` line reconstructing ``t`` on top of its storage."""
        from torch.fx.experimental.symbolic_shapes import statically_known_true

        storage = self.storage(
            t.untyped_storage(), dtype_hint=t.dtype, device_hint=t.device
        )
        args = []
        # NB: this is positional, must come first
        if _stride_or_default(None, shape=t.shape) != t.stride():
            args.append(str(tuple(t.stride())))
        if _dtype_or_default(None) != t.dtype:
            args.append(f"dtype={t.dtype!r}")
        if not statically_known_true(
            _storage_offset_or_default(None) == t.storage_offset()
        ):
            args.append(f"storage_offset={t.storage_offset()!r}")
        tensor_metadata = torch._utils.get_tensor_metadata(t)
        if tensor_metadata:
            args.extend(f"{k}={v!r}" for k, v in tensor_metadata.items())
        if _requires_grad_or_default(None) != t.requires_grad:
            args.append(f"requires_grad={t.requires_grad!r}")
        is_leaf = torch._subclasses.meta_utils.safe_is_leaf(t)
        if _is_leaf_or_default(None) != is_leaf:
            args.append(f"is_leaf={is_leaf!r}")
        self._lines.append(
            "reader.tensor("
            + ", ".join([storage, str(tuple(t.shape)), *args])
            + f") # {name}"
        )

    # TODO: this doesn't actually symint atm
    def symint(self, name, val) -> None:
        """Emit a ``reader.symint`` line; SymInts are reduced to their hints."""
        if isinstance(val, torch.SymInt):
            val = val.node.hint
        self._lines.append(f"reader.symint({val!r}) # {name}")
688
+
689
+
690
def aot_graph_input_parser(
    func: Callable[[List[Tensor]], List[Tensor]],
    device: str = "cuda",
    sym_shapes: Optional[Dict[str, int]] = None,
    default_sym_shape: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Takes in a function which has been printed with print_readable() and constructs kwargs to run it.

    Handles Tensor inputs, Symints, and a graph module which might have tensor constants.

    Consider a function `forward` defined as follows:

    def forward(self, primals_1: "f32[1001, 6]", primals_2: "f32[s0]", primals_3: "Sym(s0)",):
        _tensor_constant0: "i64[4190]" = self._tensor_constant0
        # Further implementation

    kwargs = aot_graph_input_parser(forward)
    forward(**kwargs)
    """

    from torch.fx.graph import dtype_abbrs

    # Map abbreviation (e.g. "f32") back to the torch.dtype it stands for.
    dtype_map = {value: key for key, value in dtype_abbrs.items()}
    dtype_pattern = "|".join(dtype_abbrs.values())

    # Extracting the source code from the function
    source = inspect.getsource(func)

    # Regular expressions
    # Matches lines like: _tensor_constant0: "i64[4190]" = self._tensor_constant0
    tensor_assignment_regex = rf"(_tensor_constant\d+): \"({dtype_pattern})\[\s*(.*?)\s*\]\" = self\.(_tensor_constant\d+)"
    # Matches annotations like: "f32[1001, 6]"
    tensor_regex = rf"({dtype_pattern})\[\s*(.*?)\s*\]"
    # Matches annotations like: "Sym(s0)"
    sym_shape_regex = r"Sym\((s\d+)\)"

    class TensorContainer:
        "Container for tensors as attributes"

    # Dictionary for tensors from annotations
    kwargs: Dict[str, Any] = {}

    sym_shapes = sym_shapes or {}

    def get_sym_int(symint):
        # Fail loudly when a symbolic dim has neither a binding nor a default.
        torch._check(
            symint in sym_shapes or default_sym_shape is not None,
            lambda: f"{symint} not in symbolic_shapes and default sym shape not passed in",
        )
        return sym_shapes.get(symint, default_sym_shape)

    def gen_tensor(shape, dtype) -> Tensor:
        # Resolve symbolic shapes to concrete values
        resolved_shape = []
        dynamic_dims = []
        for i, dim in enumerate(shape):
            dim = dim.strip()
            if "s" in dim:
                # Symbolic dimension like "s0": resolve and remember it so
                # we can mark it dynamic below.
                s = get_sym_int(dim)
                resolved_shape.append(s)
                dynamic_dims.append(i)
            else:
                if dim:
                    resolved_shape.append(int(dim))

        # Integer dtypes can't be sampled from randn; use zeros instead.
        constructor = torch.randn if dtype.is_floating_point else torch.zeros
        out = constructor(resolved_shape, dtype=dtype, device=device)  # type: ignore[call-arg]
        for d in dynamic_dims:
            torch._dynamo.mark_dynamic(out, d)
        return out

    # Parse function annotations for tensor generation
    annotations = func.__annotations__
    for param, annotation in annotations.items():
        # Skip 'return' annotation
        if param == "return":
            continue

        match = re.search(tensor_regex, annotation)
        if match:
            data_type, shape_str = match.groups()
            shape = tuple(shape_str.split(","))
            dtype = dtype_map[data_type]
            kwargs[param] = gen_tensor(shape, dtype)

        match = re.search(sym_shape_regex, annotation)
        if match:
            kwargs[param] = get_sym_int(match.group(1))

    if "self" in inspect.signature(func).parameters:
        # The printed graph reads constants off `self`; synthesize a stand-in
        # object carrying freshly generated tensors for each constant.
        container = TensorContainer()
        kwargs["self"] = container
        for match in re.finditer(tensor_assignment_regex, source):
            attr_name, data_type, shape_str, _ = match.groups()
            shape = tuple(shape_str.split(","))
            dtype = dtype_map[data_type]
            setattr(container, attr_name, gen_tensor(shape, dtype))

    return kwargs
787
+
788
+
789
def profile_to_file(filename: str) -> Callable[[T], T]:
    """
    Decorator to cProfile a given function and save the result to disk on process exit.

    Note: a single shared ``cProfile.Profile`` instance backs all functions
    decorated through the returned decorator, so their stats are aggregated
    into the same output file.

    Args:
        filename: filename to save profile to
    """
    prof = cProfile.Profile()
    filename = os.path.abspath(os.path.expanduser(filename))

    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            prof.enable()
            try:
                return fn(*args, **kwargs)
            finally:
                # Always stop profiling, even if fn raises.
                prof.disable()

        return wrapper

    def save_it():
        prof.dump_stats(filename)
        # Bug fix: the message previously printed a literal placeholder
        # instead of the actual profile path, making it useless.
        sys.stderr.write(
            textwrap.dedent(
                f"""\
                Wrote profile to {filename}, view with:

                    snakeviz {filename}

                """
            )
        )

    atexit.register(save_it)
    return decorator
pllava/lib/python3.10/site-packages/torch/_dynamo/mutation_guard.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # mypy: disable-error-code="method-assign"
3
+
4
+ import functools
5
+ import weakref
6
+
7
+ import torch.nn
8
+ from torch.nn import Module
9
+
10
+ from . import config
11
+ from .utils import ExactWeakKeyDictionary, is_lazy_module, nn_module_has_global_hooks
12
+
13
+
14
+ unpatched_nn_module_init = torch.nn.Module.__init__
15
+
16
+
17
class MutationTracker:
    """Per-object registry that invalidates guarded code when the object mutates."""

    db = ExactWeakKeyDictionary()

    def __init__(self):
        self.mutation_count = 0
        self.watchers = []

    def on_mutation(self, name):
        # Bump the counter and fire every registered watcher exactly once;
        # the watcher list is swapped out first so callbacks can't re-fire.
        self.mutation_count += 1
        pending, self.watchers = self.watchers, []
        for watcher in pending:
            target = watcher()
            if target is not None:
                target.invalidate(watcher)

    def track(self, guarded_code):
        # Hold only a weak reference so tracking never extends lifetime.
        self.watchers.append(weakref.ref(guarded_code))
35
+
36
+
37
def watch(obj, guarded_code):
    """invalidate guarded_code when obj is mutated"""
    # Make sure __setattr__ on this type reports mutations before we subscribe.
    ensure_patched(type(obj))

    db = MutationTracker.db
    if obj not in db:
        db[obj] = MutationTracker()
    db[obj].track(guarded_code)
45
+
46
+
47
def ensure_patched(cls):
    """Patch ``cls.__setattr__`` (once) to notify the mutation tracker."""
    # Sentinel stored on the class guarantees at-most-once patching.
    if not getattr(cls, "___needs_mutation_patch", True):
        return
    cls.___needs_mutation_patch = False
    original_setattr = cls.__setattr__

    @functools.wraps(original_setattr)
    def custom_setattr(self, key, value):
        try:
            MutationTracker.db[self].on_mutation(key)
        except KeyError:
            # Object was never watched; nothing to invalidate.
            pass
        return original_setattr(self, key, value)

    cls.__setattr__ = custom_setattr
61
+
62
+
63
class GenerationTracker:
    """Tracks which 'generation' an nn.Module instance was created in."""

    # Monotonic counter, bumped each time generation tagging is (re)installed.
    generation = 0
    dynamic_classes = ExactWeakKeyDictionary()
    generation_values = ExactWeakKeyDictionary()

    @classmethod
    def tag(cls, obj):
        # Stamp obj with the generation current at creation time.
        cls.generation_values[obj] = cls.generation

    @staticmethod
    def mark_class_dynamic(cls):
        assert issubclass(cls, torch.nn.Module)
        GenerationTracker.dynamic_classes[cls] = True

    @classmethod
    def get_generation_value(cls, obj):
        # -1 means "never tagged".
        if obj in cls.generation_values:
            return cls.generation_values[obj]
        return -1

    @classmethod
    def check(cls, obj):
        # True only for objects tagged in the *current* generation.
        if obj not in cls.generation_values:
            return False
        return cls.generation_values[obj] == cls.generation

    @classmethod
    def clear(cls):
        cls.generation = 0
        cls.dynamic_classes = ExactWeakKeyDictionary()
        cls.generation_values = ExactWeakKeyDictionary()
95
+
96
+
97
def is_dynamic_nn_module(obj, is_export):
    """Check for nn.Modules() created dynamically or mutated"""
    is_module = isinstance(obj, torch.nn.Module)
    if is_module and "forward" in obj.__dict__:
        # A monkey patched `.forward` indicates something wacky is going on
        return True
    if hasattr(obj, "torchdynamo_force_dynamic"):
        return obj.torchdynamo_force_dynamic
    if is_lazy_module(obj):
        return False
    # For export, we will have to fix
    # 1) Input signature problem because params are lifted as inputs
    # 2) nn module stack info changes
    # 3) adjust failing tests
    if is_module and config.inline_inbuilt_nn_modules and not is_export:
        return True

    if is_module and nn_module_has_global_hooks():
        return True
    # Dynamic if the class was explicitly marked, or the instance was created
    # in the current generation (e.g. inside a compiled frame).
    return GenerationTracker.dynamic_classes.get(type(obj)) or GenerationTracker.check(
        obj
    )
123
+
124
+
125
def install_generation_tagging_init():
    """
    Monkey patch torch.nn.Module.__init__ and torch.nn.Module.__setstate__
    so we can detect nn.Module instances created dynamically inside forward methods.
    """

    # Patch exactly once; the sentinel lives on Module itself.
    if getattr(Module, "___needs_generation_tag_patch", True):
        original_init = Module.__init__

        def patched_init(self, *args, **kwargs):
            original_init(self, *args, **kwargs)
            GenerationTracker.tag(self)

        Module.__init__ = patched_init

        original_setstate = Module.__setstate__

        def patched_setstate(self, state):
            original_setstate(self, state)
            GenerationTracker.tag(self)

        Module.__setstate__ = patched_setstate

        Module.___needs_generation_tag_patch = False  # type: ignore[attr-defined]

    # Every (re)install starts a new generation.
    GenerationTracker.generation += 1
pllava/lib/python3.10/site-packages/torch/_dynamo/testing.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import dis
4
+ import functools
5
+ import logging
6
+ import os.path
7
+ import random
8
+ import re
9
+ import sys
10
+ import types
11
+ import unittest
12
+ from typing import List, Optional, Sequence, Union
13
+ from unittest.mock import patch
14
+
15
+ import torch
16
+ from torch import fx
17
+ from torch._dynamo.output_graph import OutputGraph
18
+
19
+ from . import config, eval_frame, optimize_assert, reset
20
+ from .bytecode_transformation import (
21
+ create_instruction,
22
+ debug_checks,
23
+ is_generator,
24
+ transform_code_object,
25
+ )
26
+ from .guards import CheckFunctionManager, CompileId, GuardedCode
27
+ from .utils import same
28
+
29
+
30
+ np: Optional[types.ModuleType] = None
31
+ try:
32
+ import numpy as np
33
+ except ModuleNotFoundError:
34
+ np = None
35
+
36
+
37
+ unsupported = eval_frame.unsupported
38
+ three = 3
39
+
40
+ log = logging.getLogger(__name__)
41
+
42
+
43
def clone_me(x):
    """Detached clone of ``x`` that keeps its requires_grad flag; None passes through."""
    if x is None:
        return None
    cloned = x.detach().clone()
    return cloned.requires_grad_(x.requires_grad)
47
+
48
+
49
def remove_optimized_module_prefix(name) -> str:
    """Strip one leading '_orig_mod.' prefix added by OptimizedModule wrapping."""
    prefix = "_orig_mod."
    if name.startswith(prefix):
        return name[len(prefix):]
    return name
51
+
52
+
53
def collect_results(model, prediction, loss, example_inputs):
    """Collect prediction, loss, grads, params, buffers and input grads for comparison."""
    results = [prediction, loss]
    # if isinstance(loss, torch.Tensor) and loss.item() > 1:
    #     log.warning(
    #         f"High loss value alert - {loss:.2f}. Can result in unstable gradients."
    #     )

    # Names on an OptimizedModule are prefixed with "_orig_mod."; strip that
    # so results line up with the eager model's names.
    is_optimized = isinstance(model, eval_frame.OptimizedModule)

    grads = {}
    params = {}
    for name, param in model.named_parameters():
        if is_optimized:
            name = remove_optimized_module_prefix(name)
        grad = param.grad
        # Treat None and zero grad as same
        if grad is None:
            grad = torch.zeros_like(param)
        grads[name + ".grad"] = grad
        params[name] = param
    results.append(grads)
    results.append(params)

    buffers = {}
    for name, buffer in model.named_buffers():
        if is_optimized:
            name = remove_optimized_module_prefix(name)
        buffers[name] = buffer
    results.append(buffers)

    for example in example_inputs:
        if isinstance(example, (tuple, list)):
            results.extend(
                inp.grad for inp in example if isinstance(inp, torch.Tensor)
            )
        elif isinstance(example, torch.Tensor):
            results.append(example.grad)
    return results
91
+
92
+
93
def requires_bwd_pass(out):
    """True if ``out`` (tensor, or nested list/tuple of them) needs a backward pass."""
    if isinstance(out, torch.Tensor):
        return out.requires_grad
    if isinstance(out, (list, tuple)):
        return any(map(requires_bwd_pass, out))
    if out is None or isinstance(out, int):
        return False
    raise NotImplementedError("Don't know how to reduce", type(out))
103
+
104
+
105
def reduce_to_scalar_loss(out):
    """Reduce the output of a model to get scalar loss"""
    if isinstance(out, torch.Tensor):
        # Mean does not work on integer tensors
        return out.sum() / out.numel()
    if isinstance(out, (list, tuple)):
        return sum(reduce_to_scalar_loss(x) for x in out) / len(out)
    type_name = type(out).__name__
    if type_name in (
        "MaskedLMOutput",
        "Seq2SeqLMOutput",
        "CausalLMOutputWithCrossAttentions",
    ):
        # HuggingFace-style outputs: reduce the logits field.
        return reduce_to_scalar_loss(out.logits)
    if type_name == "SquashedNormal":
        return out.mean.sum()
    if isinstance(out, dict):
        reduced = [reduce_to_scalar_loss(value) for value in out.values()]
        return sum(reduced) / len(reduced)
    raise NotImplementedError("Don't know how to reduce", type(out))
125
+
126
+
127
def debug_dir() -> str:
    """Path to the sibling ``../debug`` directory, created on first use."""
    debug_path = os.path.join(os.path.dirname(__file__), "../debug")
    if not os.path.exists(debug_path):
        os.mkdir(debug_path)
    return debug_path
132
+
133
+
134
def debug_dump(name, code: types.CodeType, extra="") -> None:
    """Write bytecode info and disassembly of ``code`` (plus ``extra``) into debug_dir()/name."""
    bytecode = dis.Bytecode(code)
    with open(os.path.join(debug_dir(), name), "w") as fd:
        fd.write(f"{bytecode.info()}\n\n{bytecode.dis()}\n\n{extra}\n")
139
+
140
+
141
def debug_insert_nops(
    frame, cache_size, hooks, _, *, skip: int = 0
) -> Optional[GuardedCode]:
    """used to debug jump updates"""

    def insert_nops(instructions, code_options):
        # Prepend two NOPs so every jump target shifts, exercising the
        # bytecode-retargeting logic inside transform_code_object.
        instructions.insert(0, create_instruction("NOP"))
        instructions.insert(0, create_instruction("NOP"))

    # Generators are not supported by this debug transform.
    if is_generator(frame.f_code):
        return None

    debug_checks(frame.f_code)
    code = transform_code_object(frame.f_code, insert_nops)
    # A mostly-empty OutputGraph: only needed so CheckFunctionManager can
    # build a (trivial) guard check function below.
    graph = OutputGraph(
        code_options={},
        compiler_fn=None,
        root_tx=None,
        export=False,
        export_constraints=None,
        frame_state={"_id": 0},
        # TODO: shouldn't this be f_locals/f_globals from frame?
        local_scope=locals(),
        global_scope=globals(),
        f_code=frame.f_code,
    )

    return GuardedCode(code, CheckFunctionManager(graph).check_fn, CompileId(0, 0))
169
+
170
+
171
class CompileCounter:
    """Backend that counts compiled frames and call ops, then runs graphs eagerly."""

    def __init__(self):
        self.frame_count = 0
        self.op_count = 0

    def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        self.frame_count += 1
        # Count call_function / call_method / call_module nodes.
        self.op_count += sum(1 for node in gm.graph.nodes if "call" in node.op)
        return gm.forward

    def clear(self):
        self.frame_count = 0
        self.op_count = 0
186
+
187
+
188
class CompileCounterWithBackend:
    """Like CompileCounter, but delegates compilation to a named backend and records graphs."""

    def __init__(self, backend):
        self.frame_count = 0
        self.op_count = 0
        self.backend = backend
        self.graphs = []

    def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        from .backends.registry import lookup_backend

        self.frame_count += 1
        self.op_count += sum(1 for node in gm.graph.nodes if "call" in node.op)
        self.graphs.append(gm)
        return lookup_backend(self.backend)(gm, example_inputs)
204
+
205
+
206
+ # Equivalent to backend="eager", but also records graphs that
207
+ # we can assert on
208
class EagerAndRecordGraphs:
    """Eager backend that also records every GraphModule it compiles, for assertions."""

    def __init__(self):
        self.graphs = []

    def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        self.graphs.append(gm)
        return gm.forward
215
+
216
+
217
def strip_comment(code) -> str:
    """Drop every line consisting solely of an (optionally indented) ``#`` comment."""
    text = str(code)
    return re.sub(r"(?m)^ *#.*\n?", "", text)
220
+
221
+
222
def remove_trailing_space(code) -> str:
    """Strip trailing whitespace from every line of ``code``."""
    return "\n".join(line.rstrip() for line in code.split("\n"))
224
+
225
+
226
def normalize_gm(gm_str) -> str:
    """Normalize a printed GraphModule for stable cross-machine comparison."""
    # strip comments as comments have path to files which may differ from
    # system to system.
    return remove_trailing_space(strip_comment(gm_str))
230
+
231
+
232
def empty_line_normalizer(code: str) -> str:
    """
    Normalize code: remove empty lines.
    """
    # Collapse any run of CR/LF characters into a single newline.
    return re.sub(r"[\r\n]+", "\n", code)
238
+
239
+
240
def standard_test(
    self,
    fn,
    nargs,
    expected_ops=None,
    expected_ops_dynamic=None,
    expected_frame_count=1,
):
    """Compile ``fn`` with optimize_assert, then check outputs, frame and op counts."""
    if not config.assume_static_by_default and expected_ops_dynamic is not None:
        expected_ops = expected_ops_dynamic

    counter = CompileCounter()

    inputs_a = [torch.randn(10, 10) for _ in range(nargs)]
    inputs_b = [torch.randn(10, 10) for _ in range(nargs)]
    correct_a = fn(*inputs_a)
    correct_b = fn(*inputs_b)

    reset()
    opt_fn = optimize_assert(counter)(fn)
    # Run each input set twice to confirm cache hits produce stable results.
    first_a = opt_fn(*inputs_a)
    first_b = opt_fn(*inputs_b)
    second_a = opt_fn(*inputs_a)
    second_b = opt_fn(*inputs_b)
    reset()

    self.assertTrue(same(first_a, correct_a))
    self.assertTrue(same(second_a, correct_a))
    self.assertTrue(same(first_b, correct_b))
    self.assertTrue(same(second_b, correct_b))
    self.assertEqual(counter.frame_count, expected_frame_count)
    if expected_ops is not None:
        self.assertEqual(counter.op_count, expected_ops)
271
+
272
+
273
def dummy_fx_compile(gm: fx.GraphModule, example_inputs):
    """No-op backend: ignore example_inputs and run the graph eagerly."""
    return gm.forward
275
+
276
+
277
def format_speedup(speedup, pvalue, is_correct=True, pvalue_threshold=0.1):
    """Render a speedup figure, flagging errors and statistically-insignificant results."""
    if not is_correct:
        return "ERROR"
    suffix = "SAME" if pvalue > pvalue_threshold else f"p={pvalue:.2f}"
    return f"{speedup:.3f}x {suffix}"
283
+
284
+
285
+ def rand_strided(
286
+ size: Sequence[int],
287
+ stride: Sequence[int],
288
+ dtype: torch.dtype = torch.float32,
289
+ device: Union[str, torch.device] = "cpu",
290
+ extra_size: int = 0,
291
+ ):
292
+ needed_size = (
293
+ sum((shape - 1) * stride for shape, stride in zip(size, stride))
294
+ + 1
295
+ + extra_size
296
+ )
297
+ if dtype.is_floating_point:
298
+ if dtype.itemsize == 1:
299
+ """
300
+ normal distribution kernel is not implemented for fp8..
301
+ Workaround that by creating a fp16 tensor and then cast.
302
+ """
303
+ buffer = torch.randn(needed_size, dtype=torch.float16, device=device).to(
304
+ dtype=dtype
305
+ )
306
+ else:
307
+ buffer = torch.randn(needed_size, dtype=dtype, device=device)
308
+ else:
309
+ buffer = torch.zeros(size=[needed_size], dtype=dtype, device=device)
310
+ return torch.as_strided(buffer, size, stride)
311
+
312
+
313
+ def _make_fn_with_patches(fn, *patches):
314
+ @functools.wraps(fn)
315
+ def _fn(*args, **kwargs):
316
+ with contextlib.ExitStack() as stack:
317
+ for module, attr, val in patches:
318
+ stack.enter_context(patch.object(module, attr, val))
319
+
320
+ return fn(*args, **kwargs)
321
+
322
+ return _fn
323
+
324
+
325
def make_test_cls_with_patches(
    cls, cls_prefix, fn_suffix, *patches, xfail_prop=None, decorator=lambda x: x
):
    """Clone test class ``cls`` so every ``test_*`` method runs under ``patches``.

    The clone is named ``cls_prefix + cls.__name__``; each test method is
    wrapped via ``_make_fn_with_patches`` and renamed with ``fn_suffix``.
    Methods carrying the attribute named by ``xfail_prop`` are marked
    expectedFailure, and ``decorator`` is applied to every generated test.
    """
    DummyTestClass = type(f"{cls_prefix}{cls.__name__}", cls.__bases__, {})
    DummyTestClass.__qualname__ = DummyTestClass.__name__

    for name in dir(cls):
        if name.startswith("test_"):
            fn = getattr(cls, name)
            if not callable(fn):
                # Non-callable attributes that happen to be named test_* are
                # copied through unchanged.
                setattr(DummyTestClass, name, getattr(cls, name))
                continue
            new_name = f"{name}{fn_suffix}"
            new_fn = _make_fn_with_patches(fn, *patches)
            new_fn.__name__ = new_name
            if xfail_prop is not None and hasattr(fn, xfail_prop):
                new_fn = unittest.expectedFailure(new_fn)
            setattr(DummyTestClass, new_name, decorator(new_fn))
        # NB: Doesn't handle slots correctly, but whatever
        elif not hasattr(DummyTestClass, name):
            setattr(DummyTestClass, name, getattr(cls, name))

    return DummyTestClass
348
+
349
+
350
+ # test Python 3.11+ specific features
351
def skipIfNotPy311(fn):
    """Run the test only on Python 3.11+; otherwise skip it."""
    return fn if sys.version_info >= (3, 11) else unittest.skip(fn)
355
+
356
+
357
def skipIfNotPy312(fn):
    """Run the test only on Python 3.12+; otherwise skip it."""
    return fn if sys.version_info >= (3, 12) else unittest.skip(fn)
361
+
362
+
363
def xfailIfPy312(fn):
    """Mark the test as an expected failure on Python 3.12+."""
    if sys.version_info < (3, 12):
        return fn
    return unittest.expectedFailure(fn)
367
+
368
+
369
def skipIfPy312(fn):
    """Skip the test on Python 3.12+."""
    if sys.version_info < (3, 12):
        return fn
    return unittest.skip(fn)
373
+
374
+
375
def requiresPy310(fn):
    """Run the test only on Python 3.10+; otherwise skip it.

    Bug fix: the original fell through without a ``return`` on older
    interpreters, so the decorator evaluated ``unittest.skip(fn)`` for its
    side effects only and replaced the test with ``None``.
    """
    if sys.version_info >= (3, 10):
        return fn
    else:
        return unittest.skip(fn)
380
+
381
+
382
+ # Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py
383
+ # and test/dynamo/test_dynamic_shapes.py
384
def expectedFailureDynamic(fn):
    """Tag ``fn`` so dynamic-shapes test generation marks it expected-failure."""
    setattr(fn, "_expected_failure_dynamic", True)
    return fn
387
+
388
+
389
+ # Controls tests generated in test/inductor/test_torchinductor_codegen_dynamic_shapes.py
390
def expectedFailureCodegenDynamic(fn):
    """Tag ``fn`` so codegen-dynamic-shapes test generation marks it expected-failure."""
    setattr(fn, "_expected_failure_codegen_dynamic", True)
    return fn
393
+
394
+
395
+ # Controls test generated in test/inductor/test_cpp_wrapper.py
396
def expectedFailureDynamicWrapper(fn):
    """Tag ``fn`` so cpp-wrapper test generation marks it expected-failure."""
    setattr(fn, "_expected_failure_dynamic_wrapper", True)
    return fn
399
+
400
+
401
def reset_rng_state(use_xla=False):
    """Seed torch, random and (when available) numpy RNGs with 1337 for reproducibility."""
    random.seed(1337)
    torch.manual_seed(1337)
    if np:
        np.random.seed(1337)
    if use_xla:
        import torch_xla.core.xla_model as xm

        xm.set_rng_state(1337, str(xm.xla_device()))
pllava/lib/python3.10/site-packages/torch/bin/protoc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3390873b2da56c1397adec3728f1588c51e182f15b123d3b4d4f248d31c1f4da
3
+ size 5330888
pllava/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3390873b2da56c1397adec3728f1588c51e182f15b123d3b4d4f248d31c1f4da
3
+ size 5330888
pllava/lib/python3.10/site-packages/torch/contrib/__init__.py ADDED
File without changes
pllava/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (165 Bytes). View file
 
pllava/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc ADDED
Binary file (5.3 kB). View file
 
pllava/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import time
3
+ from collections import defaultdict
4
+ from functools import partial
5
+ from typing import DefaultDict
6
+
7
+ import torch
8
+
9
+
10
+ # Unfortunately it doesn't seem as if there was any way to get TensorBoard to do
11
+ # anything without having TF installed, and so this file has a hard dependency on it
12
+ # as well. It really is a debugging tool, so it doesn't matter.
13
+ try:
14
+ from tensorflow.core.util import event_pb2
15
+ from tensorflow.core.framework import graph_pb2
16
+ from tensorflow.python.summary.writer.writer import FileWriter
17
+ except ImportError:
18
+ raise ImportError("TensorBoard visualization of GraphExecutors requires having "
19
+ "TensorFlow installed") from None
20
+
21
+
22
def dump_tensorboard_summary(graph_executor, logdir):
    """Serialize ``graph_executor`` as a GraphDef event into a TensorBoard logdir."""
    with FileWriter(logdir) as writer:
        proto = visualize(graph_executor)
        event = event_pb2.Event(wall_time=time.time(),
                                graph_def=proto.SerializeToString())
        writer.add_event(event)
27
+
28
+
29
def visualize(graph, name_prefix='', pb_graph=None, executors_it=None):
    """Visualizes an independent graph, or a graph executor."""
    # Maps JIT value unique() ids to the "node:port" strings used in GraphDef.
    value_map = {}
    pb_graph = pb_graph or graph_pb2.GraphDef()

    if isinstance(graph, torch._C.GraphExecutorState):
        # Executors hold multiple sub-graphs; delegate, passing ourselves back
        # as the inliner so nested graphs land in the same GraphDef.
        visualize_graph_executor(graph, name_prefix, pb_graph,
                                 partial(visualize, pb_graph=pb_graph))
        return pb_graph

    # Set up an input node
    input_node = pb_graph.node.add(op='input', name=name_prefix + 'input')
    for i, value in enumerate(graph.param_node().outputs()):
        value_map[value.unique()] = name_prefix + 'input:' + str(i)

    visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it)

    # Gather all outputs
    return_node = pb_graph.node.add(op='output', name=name_prefix + 'output')
    for value in graph.return_node().inputs():
        return_node.input.append(value_map[value.unique()])

    return pb_graph
52
+
53
+
54
def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):
    """Append the state of a given GraphExecutor to the graph protobuf.

    Args:
        state (GraphExecutor or GraphExecutorState): GraphExecutor to display.
        name_prefix (str): Name prefix of the containing subgraph.
        pb_graph (GraphDef): graph to append to.
        inline_graph (Callable): a function that handles setting up a value_map,
            so that some graphs in here can be inlined. This is necessary, because
            this will simply be `visualize` for the top-level GraphExecutor,
            or `inline_graph` for all nested ones.

            The signature should look like (Graph, name_prefix) -> ().
            It will be called exactly once.

    The strategy is to embed all different configurations as independent subgraphs,
    while inlining the original graph as the one that actually produces the values.
    """
    if state.autograd_fallback_graph is not None:
        visualize(graph=state.autograd_fallback_graph,
                  name_prefix=name_prefix + 'autograd_fallback/',
                  pb_graph=pb_graph,
                  executors_it=iter(state.autograd_fallback.executors()))

    # One independent subgraph per specialized execution plan.
    for i, (arg_spec, plan) in enumerate(state.execution_plans.items()):
        subgraph_name = name_prefix + f'plan{i}/'

        # Create a disconnected node that will keep information regarding the input
        # types of this trace. This is unfortunately a bit too verbose to be included
        # in the subgraph name.
        input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name)
        input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii')

        visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors()))

        # Show gradient as an independent subgraph of this plan
        if plan.grad_executor is not None:
            grad_subgraph_name = subgraph_name + 'grad/'
            visualize(plan.grad_executor, grad_subgraph_name, pb_graph)

    return inline_graph(state.graph, name_prefix + 'original/')
95
+
96
+
97
def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None):
    """Recursive part of visualize (basically skips setting up the input and output nodes)."""
    def inline_graph(subgraph, name, node):
        # Seed the subgraph's value_map from the enclosing node's inputs so
        # its nodes connect to the producers already present in pb_graph.
        rec_value_map = {inp.unique(): value_map[val.unique()]
                         for inp, val in zip(subgraph.inputs(), node.inputs())}
        visualize_rec(graph=subgraph,
                      value_map=rec_value_map,
                      name_prefix=name,
                      pb_graph=pb_graph)
        # Propagate the subgraph's outputs back out as this node's outputs.
        for out, val in zip(subgraph.outputs(), node.outputs()):
            value_map[val.unique()] = rec_value_map[out.unique()]

    # Per-kind counter used to generate unique, readable node names.
    op_id_counter: DefaultDict[str, int] = defaultdict(int)

    def name_for(node):
        # Strip the namespace (e.g. "aten::") and append a running index.
        kind = node.kind()[node.kind().index('::') + 2:]
        op_id_counter[kind] += 1
        return kind, name_prefix + kind + '_' + str(op_id_counter[kind])

    def add_fusion_group(node):
        # Fusion groups carry their fused subgraph in the 'Subgraph' attribute.
        op, name = name_for(node)
        inline_graph(node.g('Subgraph'), name + '/', node)

    def add_graph_executor(node):
        op, name = name_for(node)
        if executors_it is None:
            # No executor states supplied; render the node opaquely.
            add_node(node)
        else:
            ge = next(executors_it)
            visualize_graph_executor(ge, name + '/', pb_graph,
                                     partial(inline_graph, node=node))

    def add_node(node):
        if node.kind() == 'prim::FusionGroup':
            return add_fusion_group(node)
        elif node.kind() == 'prim::GraphExecutor':
            return add_graph_executor(node)
        op, name = name_for(node)
        pb_node = pb_graph.node.add(op=op, name=name)
        for value in node.inputs():
            pb_node.input.append(value_map[value.unique()])
        # TODO: handle attrs
        for i, value in enumerate(node.outputs()):
            value_map[value.unique()] = name + ':' + str(i)

    for node in graph.nodes():
        add_node(node)
pllava/lib/python3.10/site-packages/torch/nested/__init__.py ADDED
@@ -0,0 +1,465 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from torch import SymInt, Tensor
7
+ from torch._C import _add_docstr, _nested # type: ignore[attr-defined]
8
+
9
+ from torch.types import _device as Device, _dtype as DType
10
+
11
+ __all__ = [
12
+ "to_padded_tensor",
13
+ "as_nested_tensor",
14
+ "nested_tensor",
15
+ "nested_tensor_from_jagged",
16
+ "narrow",
17
+ "masked_select",
18
+ ]
19
+
20
+ # Nested Tensor constructor functions
21
+
22
+
23
+ def as_nested_tensor(
24
+ ts: Union[Tensor, List[Tensor], Tuple[Tensor, ...]],
25
+ dtype: Optional[DType] = None,
26
+ device: Optional[Device] = None,
27
+ layout=None
28
+ ) -> Tensor:
29
+ r"""
30
+ Constructs a nested tensor preserving autograd history from a tensor or a list / tuple of
31
+ tensors.
32
+
33
+ If a nested tensor is passed, it will be returned directly unless the device / dtype / layout
34
+ differ. Note that converting device / dtype will result in a copy, while converting layout
35
+ is not currently supported by this function.
36
+
37
+ If a non-nested tensor is passed, it is treated as a batch of constituents of consistent size.
38
+ A copy will be incurred if the passed device / dtype differ from those of the input OR if
39
+ the input is non-contiguous. Otherwise, the input's storage will be used directly.
40
+
41
+ If a tensor list is provided, tensors in the list are always copied during construction of
42
+ the nested tensor.
43
+
44
+ Args:
45
+ ts (Tensor or List[Tensor] or Tuple[Tensor]): a tensor to treat as a nested tensor OR a
46
+ list / tuple of tensors with the same ndim
47
+
48
+ Keyword arguments:
49
+ dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
50
+ Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
51
+ device (:class:`torch.device`, optional): the desired device of returned nested tensor.
52
+ Default: if None, same :class:`torch.device` as leftmost tensor in the list
53
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
54
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
55
+
56
+ Example::
57
+
58
+ >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
59
+ >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
60
+ >>> nt = torch.nested.as_nested_tensor([a, b])
61
+ >>> nt.is_leaf
62
+ False
63
+ >>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)])
64
+ >>> nt.backward(fake_grad)
65
+ >>> a.grad
66
+ tensor([1., 1., 1.])
67
+ >>> b.grad
68
+ tensor([0., 0., 0., 0., 0.])
69
+ >>> c = torch.randn(3, 5, requires_grad=True)
70
+ >>> nt2 = torch.nested.as_nested_tensor(c)
71
+ """
72
+ is_tensor_list = isinstance(ts, (list, tuple)) and all(isinstance(t, Tensor) for t in ts)
73
+ if not isinstance(ts, Tensor) and not is_tensor_list:
74
+ raise TypeError(
75
+ "as_nested_tensor(): Expected first argument to be a tensor or a list / tuple of tensors "
76
+ )
77
+ # convert tuple -> list if needed
78
+ if is_tensor_list and not isinstance(ts, list):
79
+ ts = list(ts)
80
+
81
+ if isinstance(ts, Tensor) and ts.dim() < 2:
82
+ raise RuntimeError("as_nested_tensor(): Expected tensor argument to have dim() > 1")
83
+
84
+ if isinstance(ts, Tensor) and ts.is_nested:
85
+ if layout == ts.layout:
86
+ # return input directly or input copied to device / dtype
87
+ return ts.to(device=device, dtype=dtype)
88
+ else:
89
+ # TODO: Just use nt.to(layout=layout) when it exists.
90
+ raise RuntimeError(
91
+ "as_nested_tensor(): Converting between nested tensor layouts is not supported")
92
+
93
+ if layout is None:
94
+ layout = torch.strided
95
+ if layout == torch.strided:
96
+ if isinstance(ts, Tensor):
97
+ # contiguous() might be necessary to get flattened view.
98
+ # we could probably be more precise about when to do this as an optimization
99
+ buffer = ts.contiguous().view(-1).to(device=device, dtype=dtype)
100
+ nested_sizes = torch.tensor([t.shape for t in ts])
101
+ return torch._nested_view_from_buffer(
102
+ buffer,
103
+ nested_sizes,
104
+ *torch._nested_compute_contiguous_strides_offsets(nested_sizes))
105
+ else:
106
+ assert isinstance(ts, list)
107
+ return torch._nested_tensor_from_tensor_list(ts, dtype, None, device, None)
108
+ elif layout == torch.jagged:
109
+ if isinstance(ts, Tensor):
110
+ if device is None:
111
+ device = ts.device
112
+
113
+ # contiguous() might be necessary to get flattened view.
114
+ # we could probably be more precise about when to do this as an optimization
115
+ values = ts.contiguous().flatten(0, 1).to(device=device, dtype=dtype)
116
+ batch_size = ts.shape[0]
117
+ seq_len = ts.shape[1]
118
+ offsets = torch.arange(0, batch_size * seq_len + 1, seq_len,
119
+ device=device, dtype=torch.int64)
120
+
121
+ from torch.nested._internal.nested_tensor import nested_view_from_values_offsets
122
+
123
+ return nested_view_from_values_offsets(
124
+ values, offsets, min_seqlen=seq_len, max_seqlen=seq_len
125
+ )
126
+ else:
127
+ from torch.nested._internal.nested_tensor import jagged_from_list
128
+
129
+ assert isinstance(ts, list)
130
+ nt, _ = jagged_from_list(ts, offsets=None, device=device, dtype=dtype)
131
+ return nt
132
+ else:
133
+ raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
134
+
135
+
136
+ # Note: This not only adds doc strings for the nested ops, but
137
+ # also connects the torch.nested Python namespace to the torch._C._nested builtins.
138
+
139
+ to_padded_tensor = _add_docstr(
140
+ _nested.nested_to_padded_tensor,
141
+ r"""
142
+ to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor
143
+
144
+ Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor.
145
+ The leading entries will be filled with the nested data,
146
+ while the trailing entries will be padded.
147
+
148
+ .. warning::
149
+
150
+ :func:`to_padded_tensor` always copies the underlying data,
151
+ since the nested and the non-nested tensors differ in memory layout.
152
+
153
+ Args:
154
+ padding (float): The padding value for the trailing entries.
155
+
156
+ Keyword args:
157
+ output_size (Tuple[int]): The size of the output tensor.
158
+ If given, it must be large enough to contain all nested data;
159
+ else, will infer by taking the max size of each nested sub-tensor along each dimension.
160
+ out (Tensor, optional): the output tensor.
161
+
162
+ Example::
163
+
164
+ >>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
165
+ nested_tensor([
166
+ tensor([[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
167
+ [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995]]),
168
+ tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
169
+ [ 0.2773, 0.8793, -0.5183, -0.6447],
170
+ [ 1.8009, 1.8468, -0.9832, -1.5272]])
171
+ ])
172
+ >>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0)
173
+ tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
174
+ [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995],
175
+ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]],
176
+ [[-1.8546, -0.7194, -0.2918, -0.1846, 0.0000],
177
+ [ 0.2773, 0.8793, -0.5183, -0.6447, 0.0000],
178
+ [ 1.8009, 1.8468, -0.9832, -1.5272, 0.0000]]])
179
+ >>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6))
180
+ tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276, 1.0000],
181
+ [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995, 1.0000],
182
+ [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
183
+ [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]],
184
+ [[-1.8546, -0.7194, -0.2918, -0.1846, 1.0000, 1.0000],
185
+ [ 0.2773, 0.8793, -0.5183, -0.6447, 1.0000, 1.0000],
186
+ [ 1.8009, 1.8468, -0.9832, -1.5272, 1.0000, 1.0000],
187
+ [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])
188
+ >>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2))
189
+ RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.
190
+
191
+ """,
192
+ )
193
+
194
+ def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor:
195
+ r"""
196
+ Constructs a nested tensor with no autograd history (also known as a "leaf tensor", see
197
+ :ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list` a list of tensors.
198
+
199
+ Args:
200
+ tensor_list (List[array_like]): a list of tensors, or anything that can be passed to torch.tensor,
201
+ where each element of the list has the same dimensionality.
202
+
203
+ Keyword arguments:
204
+ dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
205
+ Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
206
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
207
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
208
+ device (:class:`torch.device`, optional): the desired device of returned nested tensor.
209
+ Default: if None, same :class:`torch.device` as leftmost tensor in the list
210
+ requires_grad (bool, optional): If autograd should record operations on the
211
+ returned nested tensor. Default: ``False``.
212
+ pin_memory (bool, optional): If set, returned nested tensor would be allocated in
213
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
214
+
215
+ Example::
216
+
217
+ >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
218
+ >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
219
+ >>> nt = torch.nested.nested_tensor([a, b], requires_grad=True)
220
+ >>> nt.is_leaf
221
+ True
222
+ """
223
+ if layout is None:
224
+ layout = torch.strided
225
+ if layout == torch.strided:
226
+ return _nested.nested_tensor(
227
+ tensor_list,
228
+ dtype=dtype,
229
+ device=device,
230
+ requires_grad=requires_grad,
231
+ pin_memory=pin_memory)
232
+ elif layout == torch.jagged:
233
+ # Need to wrap lists of scalars as tensors
234
+ list_of_tensors = [t if isinstance(t, Tensor) else torch.as_tensor(t) for t in tensor_list]
235
+
236
+ from torch.nested._internal.nested_tensor import jagged_from_list
237
+
238
+ with torch.no_grad():
239
+ nt, _ = jagged_from_list(list_of_tensors, offsets=None, device=device, dtype=dtype)
240
+
241
+ nt.requires_grad_(requires_grad)
242
+ if pin_memory:
243
+ nt = nt.pin_memory() # type: ignore[assignment]
244
+
245
+ return nt
246
+ else:
247
+ raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
248
+
249
+
250
+ def narrow(tensor: Tensor, dim: int, start: Union[int, Tensor], length: Union[int, Tensor], layout=torch.strided) -> Tensor:
251
+ r"""
252
+ Constructs a nested tensor (which might be a view) from :attr:`tensor`, a strided tensor. This follows
253
+ similar semantics to torch.Tensor.narrow, where in the :attr:`dim`-th dimension the new nested tensor
254
+ shows only the elements in the interval `[start, start+length)`. As nested representations
255
+ allow for a different `start` and `length` at each 'row' of that dimension, :attr:`start` and :attr:`length`
256
+ can also be tensors of shape `tensor.shape[0]`.
257
+
258
+ There's some differences depending on the layout you use for the nested tensor. If using strided layout,
259
+ torch.narrow will do a copy of the narrowed data into a contiguous NT with strided layout, while
260
+ jagged layout narrow() will create a non-contiguous view of your original strided tensor. This particular
261
+ representation is really useful for representing kv-caches in Transformer models, as specialized
262
+ SDPA kernels can deal with format easily, resulting in performance improvements.
263
+
264
+
265
+ Args:
266
+ tensor (:class:`torch.Tensor`): a strided tensor, which will be used as the underlying data
267
+ for the nested tensor if using the jagged layout or will be copied for the strided layout.
268
+ dim (int): the dimension where narrow will be applied. Only `dim=1` is supported for the
269
+ jagged layout, while strided supports all dim
270
+ start (Union[int, :class:`torch.Tensor`]): starting element for the narrow operation
271
+ length (Union[int, :class:`torch.Tensor`]): number of elements taken during the narrow op
272
+
273
+ Keyword arguments:
274
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
275
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
276
+
277
+ Example::
278
+
279
+ >>> starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
280
+ >>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
281
+ >>> narrow_base = torch.randn(5, 10, 20)
282
+ >>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
283
+ >>> nt_narrowed.is_contiguous()
284
+ False
285
+ """
286
+ if not isinstance(start, (int, SymInt, Tensor)):
287
+ raise RuntimeError("start must be an integer or a tensor")
288
+
289
+ if not isinstance(length, (int, SymInt, Tensor)):
290
+ raise RuntimeError("length must be an integer or a tensor")
291
+
292
+ if layout == torch.strided:
293
+ if isinstance(start, Tensor) or isinstance(length, Tensor):
294
+ raise RuntimeError("start and length must be integers for the strided layout NT impl")
295
+ # TODO: switch to as_nested_tensor(tensor) when it is available
296
+ nt = as_nested_tensor(torch.unbind(tensor), layout=torch.strided).narrow(dim, start, length)
297
+ elif layout == torch.jagged:
298
+ if dim != 1:
299
+ raise RuntimeError("jagged layout only supports dim=1")
300
+
301
+ from torch.nested._internal.nested_tensor import jagged_from_tensor_and_lengths
302
+
303
+ if isinstance(start, (int, SymInt)):
304
+ start = torch.tensor([start], device=tensor.device, dtype=torch.int64)
305
+
306
+ if isinstance(length, (int, SymInt)):
307
+ length = torch.tensor([length], device=tensor.device, dtype=torch.int64)
308
+
309
+ nt, _, _ = jagged_from_tensor_and_lengths(tensor, start, length)
310
+ else:
311
+ raise RuntimeError(f"Specified layout is unsupported for nested narrow: {layout}")
312
+
313
+ return nt
314
+
315
+
316
+ def nested_tensor_from_jagged(
317
+ values: Tensor,
318
+ offsets: Optional[Tensor] = None,
319
+ lengths: Optional[Tensor] = None,
320
+ jagged_dim: Optional[int] = None,
321
+ min_seqlen: Optional[int] = None,
322
+ max_seqlen: Optional[int] = None,
323
+ ) -> Tensor:
324
+ r"""
325
+ Constructs a jagged layout nested tensor from the given jagged components. The jagged layout
326
+ consists of a required values buffer with the jagged dimension packed into a single dimension.
327
+ The offsets / lengths metadata determines how this dimension is split into batch elements
328
+ and are expected to be allocated on the same device as the values buffer.
329
+
330
+ Expected metadata formats:
331
+ * offsets: Indices within the packed dimension splitting it into heterogeneously-sized
332
+ batch elements. Example: [0, 2, 3, 6] indicates that a packed jagged dim of size 6
333
+ should be conceptually split into batch elements of length [2, 1, 3]. Note that both the
334
+ beginning and ending offsets are required for kernel convenience (i.e. shape batch_size + 1).
335
+ * lengths: Lengths of the individual batch elements; shape == batch_size. Example: [2, 1, 3]
336
+ indicates that a packed jagged dim of size 6 should be conceptually split into batch
337
+ elements of length [2, 1, 3].
338
+
339
+ Note that it can be useful to provide both offsets and lengths. This describes a nested tensor
340
+ with "holes", where the offsets indicate the start position of each batch item and the length
341
+ specifies the total number of elements (see example below).
342
+
343
+ The returned jagged layout nested tensor will be a view of the input values tensor.
344
+
345
+ Args:
346
+ values (:class:`torch.Tensor`): The underlying buffer in the shape of
347
+ (sum_B(*), D_1, ..., D_N). The jagged dimension is packed into a single dimension,
348
+ with the offsets / lengths metadata used to distinguish batch elements.
349
+ offsets (optional :class:`torch.Tensor`): Offsets into the jagged dimension of shape B + 1.
350
+ lengths (optional :class:`torch.Tensor`): Lengths of the batch elements of shape B.
351
+ jagged_dim (optional int): Indicates which dimension in values is the packed jagged
352
+ dimension. If None, this is set to dim=1 (i.e. the dimension immediately following
353
+ the batch dimension). Default: None
354
+ min_seqlen (optional int): If set, uses the specified value as the cached minimum sequence
355
+ length for the returned nested tensor. This can be a useful alternative to computing
356
+ this value on-demand, possibly avoiding a GPU -> CPU sync. Default: None
357
+ max_seqlen (optional int): If set, uses the specified value as the cached maximum sequence
358
+ length for the returned nested tensor. This can be a useful alternative to computing
359
+ this value on-demand, possibly avoiding a GPU -> CPU sync. Default: None
360
+
361
+ Example::
362
+
363
+ >>> values = torch.randn(12, 5)
364
+ >>> offsets = torch.tensor([0, 3, 5, 6, 10, 12])
365
+ >>> nt = nested_tensor_from_jagged(values, offsets)
366
+ >>> # 3D shape with the middle dimension jagged
367
+ >>> nt.shape
368
+ torch.Size([5, j2, 5])
369
+ >>> # Length of each item in the batch:
370
+ >>> offsets.diff()
371
+ tensor([3, 2, 1, 4, 2])
372
+
373
+ >>> values = torch.randn(6, 5)
374
+ >>> offsets = torch.tensor([0, 2, 3, 6])
375
+ >>> lengths = torch.tensor([1, 1, 2])
376
+ >>> # NT with holes
377
+ >>> nt = nested_tensor_from_jagged(values, offsets, lengths)
378
+ >>> a, b, c = nt.unbind()
379
+ >>> # Batch item 1 consists of indices [0, 1)
380
+ >>> torch.equal(a, values[0:1, :])
381
+ True
382
+ >>> # Batch item 2 consists of indices [2, 3)
383
+ >>> torch.equal(b, values[2:3, :])
384
+ True
385
+ >>> # Batch item 3 consists of indices [3, 5)
386
+ >>> torch.equal(c, values[3:5, :])
387
+ True
388
+ """
389
+ from torch.fx._symbolic_trace import is_fx_tracing
390
+ if is_fx_tracing():
391
+ raise RuntimeError(
392
+ "torch.nested.nested_tensor_from_jagged does not support tracing with fx.symbolic_trace. "
393
+ "Use fx.wrap to wrap the function that calls nested_tensor_from_jagged."
394
+ )
395
+
396
+ if offsets is None:
397
+ if lengths is None:
398
+ raise RuntimeError(
399
+ "nested_tensor_from_jagged(): At least one of offsets or lengths is required."
400
+ )
401
+ else:
402
+ # TODO: Truly support offsets=None at some point?
403
+ # For now, just convert lengths -> offsets for kernel convenience
404
+ offsets = F.pad(lengths.cumsum(0), (1, 0))
405
+ lengths = None
406
+
407
+ if jagged_dim is None:
408
+ jagged_dim = 1
409
+
410
+ from torch.nested._internal.nested_tensor import nested_view_from_values_offsets_lengths
411
+
412
+ return nested_view_from_values_offsets_lengths(
413
+ values, offsets, lengths, ragged_idx=jagged_dim, min_seqlen=min_seqlen, max_seqlen=max_seqlen)
414
+
415
+ def masked_select(tensor: Tensor, mask: Tensor) -> Tensor:
416
+ r"""
417
+ Constructs a nested tensor given a strided tensor input and a strided mask, the resulting jagged layout nested tensor
418
+ will have values retain values where the mask is equal to True. The dimensionality of the mask is preserved and is
419
+ represented with the offsets, this is unlike :func:`masked_select` where the output is collapsed to a 1D tensor.
420
+
421
+ Args:
422
+ tensor (:class:`torch.Tensor`): a strided tensor from which the jagged layout nested tensor is constructed from.
423
+ mask (:class:`torch.Tensor`): a strided mask tensor which is applied to the tensor input
424
+
425
+ Example::
426
+
427
+ >>> tensor = torch.randn(3, 3)
428
+ >>> mask = torch.tensor([[False, False, True], [True, False, True], [False, False, True]])
429
+ >>> nt = torch.nested.masked_select(tensor, mask)
430
+ >>> nt.shape
431
+ torch.Size([3, j4])
432
+ >>> # Length of each item in the batch:
433
+ >>> nt.offsets().diff()
434
+ tensor([1, 2, 1])
435
+
436
+ >>> tensor = torch.randn(6, 5)
437
+ >>> mask = torch.tensor([False])
438
+ >>> nt = torch.nested.masked_select(tensor, mask)
439
+ >>> nt.shape
440
+ torch.Size([6, j5])
441
+ >>> # Length of each item in the batch:
442
+ >>> nt.offsets().diff()
443
+ tensor([0, 0, 0, 0, 0, 0])
444
+ """
445
+ if tensor.layout != torch.strided:
446
+ raise RuntimeError(
447
+ f"torch.nested.masked_select requires a strided tensor, given {tensor.layout}"
448
+ )
449
+
450
+ if mask.layout != torch.strided:
451
+ raise RuntimeError(
452
+ f"torch.nested.masked_select requires a strided mask, given: {mask.layout}"
453
+ )
454
+ res_values = tensor.masked_select(mask)
455
+ expanded_mask = mask.expand(tensor.shape)
456
+ res_lengths = expanded_mask.sum(dim=tensor.ndim - 1).view(-1)
457
+
458
+ from torch.nested._internal.nested_tensor import (
459
+ nested_view_from_values_offsets,
460
+ )
461
+
462
+ return nested_view_from_values_offsets(
463
+ values=res_values,
464
+ offsets=F.pad(res_lengths.cumsum(dim=0), (1, 0)),
465
+ )
pllava/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
pllava/lib/python3.10/site-packages/torch/nested/_internal/__init__.py ADDED
File without changes
pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes). View file
 
pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc ADDED
Binary file (38 kB). View file
 
pllava/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
pllava/lib/python3.10/site-packages/torch/nested/_internal/nested_tensor.py ADDED
@@ -0,0 +1,564 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import * # noqa: F403
3
+ from typing import Tuple
4
+
5
+ import torch
6
+ from torch._C import DispatchKey, DispatchKeySet
7
+ from torch._prims_common import is_expandable_to
8
+ from torch.utils.weak import WeakTensorKeyDictionary
9
+
10
+
11
+ _tensor_id_counter = 0
12
+ _tensor_symint_registry = WeakTensorKeyDictionary()
13
+
14
+
15
+ def get_tensor_symint(tensor, *, coeff=1):
16
+ from torch._subclasses.fake_tensor import FakeTensor
17
+ from torch._subclasses.functional_tensor import mb_unwrap_functional_tensor
18
+
19
+ # NB: Only FakeTensor is associated with a memo
20
+ tensor = mb_unwrap_functional_tensor(tensor)
21
+ if isinstance(tensor, FakeTensor):
22
+ return tensor.get_nested_int(coeff=coeff)
23
+
24
+ global _tensor_id_counter
25
+
26
+ tensor_symint = _tensor_symint_registry.get(tensor)
27
+ if tensor_symint is None:
28
+ tensor_symint = torch._C._get_nested_int(_tensor_id_counter, coeff)
29
+ _tensor_id_counter += 1
30
+ _tensor_symint_registry[tensor] = tensor_symint
31
+ return tensor_symint
32
+
33
+
34
+ # SDPA metadata; max / min seqlens are needed for e.g. flash
35
+ def _get_sdpa_extreme_seqlen(func, tensor):
36
+ return int(func(tensor).item())
37
+
38
+
39
+ def _store_val_in_tensor(val) -> torch.Tensor:
40
+ # hack to get dynamic shapes support: store in a (val, 0) shaped tensor
41
+ return torch.zeros(val, 0)
42
+
43
+
44
+ def _load_val_from_tensor(t: torch.Tensor):
45
+ return t.shape[0]
46
+
47
+
48
+ class NestedTensor(torch.Tensor):
49
+ _values: torch.Tensor # type: ignore[assignment]
50
+ _offsets: torch.Tensor
51
+ _lengths: Optional[torch.Tensor]
52
+ # NOTE [ Nested ints for ragged sizes and strides ]
53
+ #
54
+ # Jagged layout tensors are tensors that represent a n-dim tensor with a
55
+ # ragged dimension, but are backed by an (n-1)-dim tensor underneath, e.g.,
56
+ # a jagged tensor with outer shape [B, x, D] is represented internally by a
57
+ # tensor with shape [sum(x), D] where we introduce what we call a nested int
58
+ # denoted as "x" here (but sometimes denoted with "*" to
59
+ # represent the ragged dimension, and sum(x) represents the dim of the inner
60
+ # tensor or equivalently the sum of all the sizes of the constituent
61
+ # tensors' varying lengths.
62
+ #
63
+ # We also use nested ints to represent the strides of this tensor.
64
+ # For example, a jagged tensor with shape [B, x, D] can be strided in two
65
+ # ways: [xD, D, 1] and [x, 1, sum(x)], where xD represents x multiplied by D
66
+ _size: Tuple[int, ...]
67
+ _strides: Tuple[int, ...]
68
+ # Indicates that the nth dimension is ragged
69
+ _ragged_idx: int
70
+ _metadata_cache: Dict[str, Any]
71
+
72
+ @staticmethod
73
+ def __new__(
74
+ cls,
75
+ values,
76
+ offsets,
77
+ *,
78
+ lengths=None,
79
+ **kwargs,
80
+ ):
81
+ ks = DispatchKeySet(DispatchKey.NestedTensor)
82
+ ks = ks.add(DispatchKey.AutogradNestedTensor)
83
+
84
+ # Only support jagged for now.
85
+ assert offsets is not None
86
+ assert offsets.ndim == 1
87
+ assert not isinstance(values, NestedTensor)
88
+ assert values.device == offsets.device
89
+
90
+ # Query cache for the symint associated with offsets or lengths
91
+ # (create a new one if needed).
92
+ ragged_source = offsets if lengths is None else lengths
93
+ ragged_size = get_tensor_symint(ragged_source, coeff=1)
94
+ _ragged_idx = kwargs.get("_ragged_idx", 1)
95
+ B = offsets.shape[0] - 1
96
+ if lengths is not None:
97
+ assert B == lengths.shape[0]
98
+
99
+ # subtract 1 to convert to values dim space
100
+ r = _ragged_idx - 1
101
+ _size = (B, *values.shape[:r], ragged_size, *values.shape[r + 1 :])
102
+ stride = values.stride()
103
+ _strides = (ragged_size * stride[r], *stride)
104
+
105
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
106
+ cls,
107
+ _size,
108
+ _strides,
109
+ 0,
110
+ torch.contiguous_format,
111
+ values.dtype,
112
+ torch.jagged,
113
+ values.device,
114
+ False,
115
+ kwargs.get("requires_grad", False),
116
+ "sizes",
117
+ False,
118
+ True, # dispatch_layout
119
+ ks,
120
+ # don't try to calculate storage based on non-zero size
121
+ storage_size=values.untyped_storage().size(),
122
+ )
123
+ r._ragged_idx = _ragged_idx
124
+ r._size = _size
125
+ r._strides = _strides
126
+
127
+ return r
128
+
129
+ def __init__(self, values, offsets, *, lengths=None, **kwargs):
130
+ super().__init__()
131
+
132
+ self._values = values
133
+ self._offsets = offsets
134
+ self._lengths = lengths
135
+
136
+ # holds properties that are computed lazily
137
+ self._metadata_cache = kwargs.get("_metadata_cache") or {}
138
+
139
+ # collapsed ragged dim must always be dynamic
140
+ torch._dynamo.maybe_mark_dynamic(self, self._ragged_idx)
141
+ torch._dynamo.maybe_mark_dynamic(self._values, self._ragged_idx - 1)
142
+
143
+ # min / max sequence length should be dynamic if present
144
+ max_seqlen_tensor = self._metadata_cache.get("max_seqlen", None)
145
+ if max_seqlen_tensor is not None:
146
+ torch._dynamo.mark_dynamic(max_seqlen_tensor, 0)
147
+ min_seqlen_tensor = self._metadata_cache.get("min_seqlen", None)
148
+ if min_seqlen_tensor is not None:
149
+ torch._dynamo.mark_dynamic(min_seqlen_tensor, 0)
150
+
151
+ def values(self):
152
+ # dispatch to get proper view relationship
153
+ return torch._nested_get_values(self) # type: ignore[attr-defined]
154
+
155
+ def offsets(self):
156
+ return self._offsets
157
+
158
+ def lengths(self):
159
+ return self._lengths
160
+
161
+ # Private accessor functions for min / max sequence length. They're
162
+ # purposefully not @properties because those don't work with PT2 (yet).
163
+ # These compute / cache if not present.
164
+ # TODO: Revisit this when @properties are better supported by PT2. I think the ideal
165
+ # state would be to have public @properties for min / max sequence length that compile
166
+ # (including setters).
167
+ def _get_max_seqlen(self):
168
+ max_seqlen_tensor = self._max_seqlen_tensor
169
+ if max_seqlen_tensor is None:
170
+ # compute & cache
171
+ max_val = _get_sdpa_extreme_seqlen(
172
+ torch.max,
173
+ self._offsets.diff() if self._lengths is None else self._lengths,
174
+ )
175
+ max_seqlen_tensor = _store_val_in_tensor(max_val)
176
+ self._metadata_cache["max_seqlen"] = max_seqlen_tensor
177
+ return _load_val_from_tensor(max_seqlen_tensor)
178
+
179
+ def _get_min_seqlen(self):
180
+ min_seqlen_tensor = self._min_seqlen_tensor
181
+ if min_seqlen_tensor is None:
182
+ # compute & cache
183
+ min_val = _get_sdpa_extreme_seqlen(
184
+ torch.min,
185
+ self._offsets.diff() if self._lengths is None else self._lengths,
186
+ )
187
+ min_seqlen_tensor = _store_val_in_tensor(min_val)
188
+ self._metadata_cache["min_seqlen"] = min_seqlen_tensor
189
+ return _load_val_from_tensor(min_seqlen_tensor)
190
+
191
+ # Private accessors used for treating min / max seqlen as inner tensors for
192
+ # flatten / unflatten. These must be properties to work with the traceable wrapper
193
+ # subclass logic. These do not compute / cache if not present.
194
+ @property
195
+ def _max_seqlen_tensor(self) -> Optional[torch.Tensor]:
196
+ return self._metadata_cache.get("max_seqlen", None)
197
+
198
+ @property
199
+ def _min_seqlen_tensor(self) -> Optional[torch.Tensor]:
200
+ return self._metadata_cache.get("min_seqlen", None)
201
+
202
+ # These are old private @property accessors that are kept around for internal BC
203
+ # reasons. TODO: Remove these!
204
+ @property
205
+ def _max_seqlen(self):
206
+ return self._get_max_seqlen()
207
+
208
+ @property
209
+ def _min_seqlen(self):
210
+ return self._get_min_seqlen()
211
+
212
+ def __repr__(self):
213
+ # We should implement this in torch/_tensor_str.py instead
214
+ grad_fn_str = (
215
+ f", requires_grad={self.requires_grad}" if self.requires_grad else ""
216
+ )
217
+ if self.grad_fn:
218
+ grad_fn_str = f", grad_fn={self.grad_fn}"
219
+ return f"NestedTensor(size={self._size}, offsets={self._offsets}{grad_fn_str}, contiguous={self._lengths is None})"
220
+
221
+ def __reduce_ex__(self, proto):
222
+ state = torch._utils._get_obj_state(self)
223
+
224
+ # SymNodes are not serializable
225
+ assert "_size" in state and "_strides" in state
226
+ state = dict(state)
227
+ del state["_size"]
228
+ del state["_strides"]
229
+
230
+ # TODO: Update this to handle the other inner tensors
231
+ func = NestedTensor
232
+ args = (self._values, self._offsets)
233
+ return (torch._tensor._rebuild_from_type_v2, (func, type(self), args, state))
234
+
235
+ def __tensor_flatten__(self):
236
+ ctx = {
237
+ "requires_grad": self.requires_grad,
238
+ "ragged_idx": self._ragged_idx,
239
+ }
240
+ inner_tensors = ["_values", "_offsets"]
241
+ if self._lengths is not None:
242
+ inner_tensors.append("_lengths")
243
+ if self._min_seqlen_tensor is not None:
244
+ inner_tensors.append("_min_seqlen_tensor")
245
+ if self._max_seqlen_tensor is not None:
246
+ inner_tensors.append("_max_seqlen_tensor")
247
+ return inner_tensors, ctx
248
+
249
+ @staticmethod
250
+ def __tensor_unflatten__(inner_tensors: Dict, meta, outer_size, outer_stride):
251
+ from torch._subclasses.fake_tensor import FakeTensor
252
+
253
+ # inner tensors: _values, _offsets, [_lengths], [_min_seqlen], [_max_seqlen]
254
+ assert len(inner_tensors) >= 2 and len(inner_tensors) <= 5
255
+ values = inner_tensors["_values"]
256
+ offsets = inner_tensors["_offsets"]
257
+ lengths = inner_tensors.get("_lengths", None)
258
+ min_seqlen_tensor = inner_tensors.get("_min_seqlen_tensor", None)
259
+ max_seqlen_tensor = inner_tensors.get("_max_seqlen_tensor", None)
260
+
261
+ metadata_cache = {}
262
+ if min_seqlen_tensor is not None:
263
+ metadata_cache["min_seqlen"] = min_seqlen_tensor
264
+ if max_seqlen_tensor is not None:
265
+ metadata_cache["max_seqlen"] = max_seqlen_tensor
266
+ ragged_idx = meta["ragged_idx"]
267
+
268
+ # Alternatively, we could make it the caller's responsibility to
269
+ # cache it. But this heuristic seems simple enough.
270
+ ragged_source = offsets if lengths is None else lengths
271
+ if isinstance(ragged_source, FakeTensor):
272
+ ragged_size = outer_size[ragged_idx]
273
+ ragged_source.nested_int_memo = ragged_size
274
+
275
+ return NestedTensor(
276
+ values,
277
+ offsets=offsets,
278
+ lengths=lengths,
279
+ requires_grad=meta["requires_grad"],
280
+ _ragged_idx=ragged_idx,
281
+ _metadata_cache=metadata_cache,
282
+ )
283
+
284
+ @classmethod
285
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
286
+ kwargs = {} if kwargs is None else kwargs
287
+
288
+ # Lazy import to avoid circular dependency
289
+ from .ops import lookup_jagged
290
+
291
+ fn = lookup_jagged(func, *args, **kwargs)
292
+ if fn is not None:
293
+ return fn(*args, **kwargs)
294
+
295
+ raise NotImplementedError(func)
296
+
297
+ @classmethod
298
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
299
+ if kwargs is None:
300
+ kwargs = {}
301
+
302
+ from torch.fx.experimental.proxy_tensor import maybe_enable_thunkify
303
+
304
+ from .ops import jagged_torch_function
305
+
306
+ # This should be removed after
307
+ # https://github.com/pytorch/pytorch/pull/125941/ lands
308
+ with maybe_enable_thunkify():
309
+ try:
310
+ return jagged_torch_function(func, *args, **kwargs)
311
+ except NotImplementedError:
312
+ pass
313
+ with torch._C.DisableTorchFunctionSubclass():
314
+ return func(*args, **kwargs)
315
+
316
+
317
+ # NB: These fake view autograd.Functions are superseded by real view ops. Don't use them!
318
+ # TODO: Remove ViewBufferFromNested, ViewNestedFromBuffer, and buffer_from_jagged once the
319
+ # internal BC period has passed.
320
+
321
+
322
# Not actually a view!
class ViewBufferFromNested(torch.autograd.Function):
    """Extracts the dense values buffer from an NJT (superseded; kept for BC)."""

    @staticmethod
    def forward(ctx, x: NestedTensor):  # type: ignore[override]
        ctx.save_for_backward(x.offsets())
        ctx.metadata_cache = x._metadata_cache
        ctx.ragged_idx = x._ragged_idx
        return x._values

    @staticmethod
    def backward(ctx, gO: torch.Tensor):  # type: ignore[override]
        (offsets,) = ctx.saved_tensors
        # Rewrap the incoming dense grad as an NJT using the saved metadata.
        return NestedTensor(
            gO,
            offsets=offsets,
            _metadata_cache=ctx.metadata_cache,
            _ragged_idx=ctx.ragged_idx,
        )
340
+
341
+
342
# Not actually a view!
class ViewNestedFromBuffer(torch.autograd.Function):
    """Wraps a dense values buffer into an NJT (superseded; kept for BC)."""

    @staticmethod
    def forward(
        ctx,
        values: torch.Tensor,
        offsets: torch.Tensor,
        metadata_cache: Optional[Dict[str, Any]] = None,
    ):  # type: ignore[override]
        # maintain BC with this usages of this where the seqlens are stuffed
        # directly into the metadata cache as non-Tensors / ints
        if metadata_cache is not None:
            for key in ("min_seqlen", "max_seqlen"):
                seqlen = metadata_cache.get(key, None)
                if seqlen is not None and not isinstance(seqlen, torch.Tensor):
                    metadata_cache[key] = _store_val_in_tensor(seqlen)
        return NestedTensor(
            values.detach(),
            offsets=offsets,
            _metadata_cache=metadata_cache,
        )

    @staticmethod
    def backward(ctx, gO: NestedTensor):  # type: ignore[override]
        # Gradient flows straight through to the values buffer.
        return gO._values, None, None
369
+
370
+
371
def buffer_from_jagged(jagged):
    """Deprecated BC shim: return the dense values buffer backing ``jagged``."""
    return ViewBufferFromNested.apply(jagged)
373
+
374
+
375
# Need to make it obvious that users should be passing in offsets
def jagged_from_list(
    tensors: List[torch.Tensor],
    offsets: Optional[torch.Tensor],
    dtype=None,
    device=None,
) -> Tuple[NestedTensor, torch.Tensor]:
    """Constructs a NestedTensor backed by jagged layout from a list of tensors"""

    if len({t.dtype for t in tensors}) != 1:
        raise RuntimeError(
            "When constructing a nested tensor, all tensors in list must have the same dtype"
        )
    if len({t.device for t in tensors}) != 1:
        raise RuntimeError(
            "When constructing a nested tensor, all tensors in list must be on the same device"
        )

    # Check that the NT is representable by the jagged layout.
    # Jagged layout represents (B, *, D_0, D_1, ..., D_N), where the only
    # raggedness allowed is for the single dim immediately adjacent to the batch dim.
    sizes = [t.shape for t in tensors]
    trailing_sizes = [s[1:] for s in sizes]
    if any(s != trailing_sizes[0] for s in trailing_sizes):
        raise RuntimeError(
            "Cannot represent given tensor list as a nested tensor with the jagged layout. "
            "Note that the jagged layout only represents shapes of the form "
            "(B, *, D_0, D_1, ..., D_N), with only * allowed to be ragged."
        )

    # Set properties appropriately.
    values = torch.cat(tensors, dim=0)
    to_kwargs = {}
    if device is not None:
        to_kwargs["device"] = device
    if dtype is not None:
        to_kwargs["dtype"] = dtype
    values = values.to(**to_kwargs)

    # Calculate jagged offsets if not provided.
    if offsets is None:
        # Jagged layout specifies that offsets are stored as int64 on the same device as values.
        # TODO: An alternative way to construct offsets is to use F.pad. This avoids creating
        # an extra leaf tensor during the forward, potentially resolving compatibility issues.
        batch_lengths = torch.tensor([s[0] for s in sizes], device=values.device)
        offsets = torch.cat(
            [
                torch.zeros(1, dtype=torch.int64, device=values.device),
                batch_lengths.cumsum(dim=0),
            ]
        )

    # compute this now since it's easy
    seq_lens = [t.shape[0] for t in tensors]
    ret_nt = nested_view_from_values_offsets(
        values, offsets, min_seqlen=min(seq_lens), max_seqlen=max(seq_lens)
    )
    return (ret_nt, offsets)  # type: ignore[return-value]
434
+
435
+
436
def jagged_from_tensor_and_lengths(
    tensor: torch.Tensor, starts: torch.Tensor, lengths: torch.Tensor
) -> Tuple[NestedTensor, torch.Tensor, Optional[torch.Tensor]]:
    """Constructs a NestedTensor backed by jagged layout from a tensor, starts of sequences, and sequence lengths"""
    batch_size = tensor.shape[0]
    if not (
        is_expandable_to(starts.shape, (batch_size,))
        and is_expandable_to(lengths.shape, (batch_size,))
    ):
        raise RuntimeError(
            "When constructing a jagged nested tensor using narrow(), "
            "your start and length must be Tensors that broadcast to input.shape[0]"
        )
    start_list = starts.expand(batch_size)
    length_list = lengths.expand(batch_size)

    # Calculate jagged offsets
    assert (
        len(tensor.shape) >= 2
    ), "tensor must at least be 2D for the nested narrow op to work"
    max_seq_len = tensor.shape[1]
    offset_lengths = max_seq_len * torch.arange(
        0, batch_size, dtype=torch.int64, device=tensor.device
    )
    # Jagged layout specifies that offsets are stored as int64 on the same device as values.
    offsets = torch.cat(
        [
            start_list + offset_lengths,
            (start_list[-1] + offset_lengths[-1] + length_list[-1]).unsqueeze(0),
        ]
    )

    # Reshape buffer to flatten the 1st and 2nd dimension (view used to enforce non-copy)
    if len(tensor.shape) > 2:
        values = tensor.view(-1, *tensor.shape[2:])
    else:
        values = tensor.view(-1)

    # Check if offsets and lengths make it possibly contiguous and return a regular NT
    orig_dim = tensor.shape[1]
    is_contiguous = (
        not bool(torch.any(length_list[1:-1].ne(orig_dim)))
        and not bool(torch.any(offsets[1:-2].diff().ne(orig_dim)))
        and bool(offsets[0] + length_list[0] == orig_dim)
    )

    actual_max_seqlen = int(torch.max(lengths).item())
    min_seqlen = int(torch.min(lengths).item())

    if is_contiguous:
        # Fully dense case: drop the lengths and shift offsets to start at 0.
        ret_nt = nested_view_from_values_offsets(
            values[offsets[0] : offsets[-1]],
            offsets - offsets[0],
            min_seqlen=min_seqlen,
            max_seqlen=actual_max_seqlen,
        )
    else:
        ret_nt = nested_view_from_values_offsets_lengths(
            values,
            offsets,
            length_list,
            min_seqlen=min_seqlen,
            max_seqlen=actual_max_seqlen,
        )

    return (ret_nt, offsets, None if is_contiguous else length_list)
504
+
505
+
506
# NB: A dummy arg is required so that NestedTensor.__torch_dispatch__() is invoked
# for _nested_view_from_values_offsets(). Sizes don't matter much, but they shouldn't be
# 0/1 because the dummy can be fake-ified and we want to avoid specializing.
# This arg is otherwise unused.
_dummy_instance: Optional[torch.Tensor] = None


def _nt_view_dummy() -> torch.Tensor:
    """Return the lazily-created singleton meta-device dummy NJT."""
    global _dummy_instance
    if _dummy_instance is None:
        _dummy_instance = NestedTensor(
            values=torch.zeros(3, 3, device="meta"),
            offsets=torch.zeros(3, device="meta", dtype=torch.int64),
        ).detach()
    return _dummy_instance
521
+
522
+
523
def nested_view_from_values_offsets(
    values, offsets, ragged_idx=1, min_seqlen=None, max_seqlen=None
):
    """Construct an NJT view over ``values`` using ``offsets`` only (no holes).

    This is exactly nested_view_from_values_offsets_lengths() with
    lengths=None; delegate there so the seqlen tensor-caching logic lives in
    one place instead of being duplicated.
    """
    return nested_view_from_values_offsets_lengths(
        values,
        offsets,
        None,
        ragged_idx=ragged_idx,
        min_seqlen=min_seqlen,
        max_seqlen=max_seqlen,
    )
543
+
544
+
545
def nested_view_from_values_offsets_lengths(
    values, offsets, lengths, ragged_idx=1, min_seqlen=None, max_seqlen=None
):
    """Construct an NJT view over ``values`` from offsets + per-sequence lengths."""

    def _maybe_tensorize(val):
        # Seqlen hints are cached in tensor form so they survive flatten/unflatten.
        return None if val is None else _store_val_in_tensor(val)

    return torch._nested_view_from_jagged(  # type: ignore[attr-defined]
        values,
        offsets,
        _nt_view_dummy(),
        lengths,
        ragged_idx,
        _maybe_tensorize(min_seqlen),
        _maybe_tensorize(max_seqlen),
    )  # type: ignore[return-value]
pllava/lib/python3.10/site-packages/torch/nested/_internal/ops.py ADDED
@@ -0,0 +1,1675 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import functools
3
+ import math
4
+ import operator
5
+ from typing import * # noqa: F403
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from torch.fx.operator_schemas import normalize_function
10
+ from torch.nested._internal.sdpa import jagged_scaled_dot_product_attention
11
+
12
+ from .nested_tensor import NestedTensor
13
+
14
+
15
+ __all__: List[Any] = []
16
+
17
+ JAGGED_OPS_TABLE: Dict[Any, Any] = {}
18
+
19
+
20
+ # Simplifying assumption: we assume that the batch dim is always the left-most
21
+ # dim, and the ragged dim is always the second dim.
22
+ def _outer_to_inner_dim(ndim, dim):
23
+ assert dim >= 0 and dim < ndim
24
+ return 0 if dim < 2 else dim - 1
25
+
26
+
27
+ def _wrap_jagged_dim(
28
+ ndim, dim, op_name, convert_to_inner_dim=True, allow_batch_dim=False
29
+ ):
30
+ from torch._prims_common import canonicalize_dims
31
+
32
+ wrapped = canonicalize_dims(ndim, dim)
33
+ if wrapped == 1:
34
+ raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=1")
35
+ elif wrapped == 0 and not allow_batch_dim:
36
+ raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=0")
37
+ return _outer_to_inner_dim(ndim, wrapped) if convert_to_inner_dim else wrapped
38
+
39
+
40
+ def _wrap_jagged_dims(ndim, dims, op_name, ragged_idx=1):
41
+ """
42
+ For NestedTensor operators,
43
+ wraps dimensions to non-negative values,
44
+ and returns metadata related to reduction dimension(s).
45
+ """
46
+ from torch._prims_common import canonicalize_dims
47
+
48
+ assert isinstance(
49
+ dims, (tuple, list)
50
+ ), f"_wrap_jagged_dims(): cannot iterate over dimensions of type {type(dims)}"
51
+
52
+ wrapped_dims = [
53
+ canonicalize_dims(ndim, d) for d in dims
54
+ ] # convert all indices to non-negative values
55
+
56
+ operate_on_batch = 0 in wrapped_dims
57
+ operate_on_ragged = ragged_idx in wrapped_dims
58
+ operate_on_non_batch = any(d != 0 and d != ragged_idx for d in wrapped_dims)
59
+
60
+ outer_to_inner_dim = tuple(
61
+ _outer_to_inner_dim(ndim, d) for d in wrapped_dims if d != 0
62
+ )
63
+
64
+ return outer_to_inner_dim, operate_on_batch, operate_on_ragged, operate_on_non_batch
65
+
66
+
67
def check_schema(schema_str: str, func, *args, **kwargs) -> None:
    """Validate ``args`` against a lightweight comma-separated schema string.

    Each entry has the form "name: type" where type is one of t / jt / jt_all /
    any, optionally suffixed with "?" for optional. A trailing "..." allows any
    number of extra, unchecked trailing args.
    """
    named_arg_types = schema_str.split(", ")
    num_optional_args = sum(1 for x in named_arg_types if x.endswith("?"))
    min_args = len(named_arg_types) - num_optional_args

    # special case: ellipses allows for any number of unchecked args at the end
    if named_arg_types[-1] == "...":
        named_arg_types = named_arg_types[:-1]
    elif not (min_args <= len(args) <= len(named_arg_types)):
        raise ValueError(
            f"NestedTensor {func.__name__}({schema_str}): expected at least {min_args} "
            f"arguments and at most {len(named_arg_types)} arguments, but got: "
            f"{len(args)} arguments"
        )

    arg_type_check_fns = {
        "t": lambda x: isinstance(x, torch.Tensor) and not isinstance(x, NestedTensor),
        "jt": lambda x: isinstance(x, NestedTensor)
        and x._lengths is None
        and x._ragged_idx == 1,  # ops with "jt" require contiguous JT only
        "jt_all": lambda x: isinstance(
            x, NestedTensor
        ),  # ops with "jt_all" can accept all kinds of JT
        "any": lambda x: True,
    }
    for i, named_arg_type in enumerate(named_arg_types):
        name, arg_type = named_arg_type.split(": ")
        is_optional = arg_type.endswith("?")
        normalized_arg_type = arg_type[:-1] if is_optional else arg_type
        if normalized_arg_type not in arg_type_check_fns:
            raise AssertionError(f"Unknown arg type: {normalized_arg_type}")

        if i >= len(args):
            # Schema entry with no corresponding positional arg.
            if not is_optional:
                raise ValueError(
                    f"NestedTensor {func.__name__}({schema_str}) "
                    f"missing required argument: {name}"
                )
            continue

        checker = arg_type_check_fns[normalized_arg_type]
        # Optional args additionally accept None.
        passes = (
            (args[i] is None or checker(args[i])) if is_optional else checker(args[i])
        )
        if not passes:
            # NOTE(review): lookup uses the raw arg_type; only "t?" has an
            # optional-suffixed entry here (matches original behavior).
            type_to_desc = {
                "t": "tensor",
                "t?": "optional tensor",
                "jt": "contiguous jagged layout NestedTensor",
                "jt_all": "jagged layout NestedTensor",
                "any": "<any type>",
            }

            raise ValueError(
                f"NestedTensor {func.__name__}({schema_str}): expected {name} to be a "
                f"{type_to_desc[arg_type]}"
            )
129
+
130
+
131
def check_ragged_dim_same(
    func, a: NestedTensor, a_name: str, b: NestedTensor, b_name: str
) -> None:
    """Raise unless ``a`` and ``b`` have the same symbolic ragged size."""
    # Calling into .shape here
    ragged_a = a._size[a._ragged_idx]
    ragged_b = b._size[b._ragged_idx]
    if ragged_a != ragged_b:
        raise RuntimeError(
            f"NestedTensor {func.__name__}: expected {a_name} and {b_name} to have the "
            "same exact offsets tensor."
        )
140
+
141
+
142
# returns True if the raggedness-relevant portions of the NT shape
# match those of the specified size
def raggedness_matches(nt, size):
    prefix_len = nt._ragged_idx + 1
    nt_prefix = nt._size[:prefix_len]
    size_prefix = size[:prefix_len]
    if len(nt_prefix) != len(size_prefix):
        return False
    # -1 acts as a wildcard in the requested size.
    return all(ns == s or s == -1 for ns, s in zip(nt_prefix, size_prefix))
151
+
152
+
153
def squeeze_leading_ones(t):
    # Note: [ Squeezing leading ones ]
    #
    # Squeeze leading ones from t.
    #
    # We want:
    #   (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
    #   (B, j0, ?, ?) + (1, 1, 1, ?, ?) -> (1, B, j0, ?, ?)  (not yet supported)
    #
    # 1) Squeeze extra ones and grab values from NT
    #   (1, 1, ?, ?) -> (?, ?)   and   (sum(*), ?, ?) -> (B, j0, ?, ?)
    # 2) Do dense broadcasting:
    #   (sum(*), ?, ?) + (?, ?) -> (sum(*), ?, ?)
    # 3) Construct nested tensor
    #   (sum(*), ?, ?) -> (B, j0, ?, ?)
    #
    # If unsqueezing on the 0th dim becomes supported, we would unsqueeze
    # at step (4) and we would need to update this function to record how
    # many ones we unsqueezed.
    while t.dim() and t.size(0) == 1:
        t = t.squeeze(0)
    return t
175
+
176
+
177
+ def register_func(tables, aten_ops, schema_str):
178
+ if not isinstance(aten_ops, list):
179
+ aten_ops = [aten_ops]
180
+ if not isinstance(tables, list):
181
+ tables = [tables]
182
+
183
+ def wrapper(func):
184
+ for aten_op in aten_ops:
185
+
186
+ def get_inner(aten_op):
187
+ def inner(*args, **kwargs):
188
+ check_schema(schema_str, func, *args, **kwargs)
189
+ return func(aten_op, *args, **kwargs)
190
+
191
+ return inner
192
+
193
+ for table in tables:
194
+ table[aten_op] = get_inner(aten_op)
195
+ return func
196
+
197
+ return wrapper
198
+
199
+
200
+ register_jagged_func = functools.partial(register_func, JAGGED_OPS_TABLE)
201
+
202
+
203
def lookup_jagged(func, *args, **kwargs) -> Optional[Callable]:
    """Find the jagged impl for ``func``: explicit table entry, else a
    pointwise fallback for ops tagged pointwise; None if unsupported."""
    registered = JAGGED_OPS_TABLE.get(func, None)
    if registered is not None:
        return registered

    # Handle pointwise fallbacks
    if torch.Tag.pointwise in func.tags:
        # Assume there aren't additional tensors that aren't the "unary/binary" args
        num_tensor_args = sum(isinstance(x, torch.Tensor) for x in args)
        if num_tensor_args == 1:
            # Build up the check schema string. The first tensor arg is assumed to be
            # an NJT and other args are sent through as-is.
            schema_parts = []
            for arg in func._schema.arguments:
                if isinstance(arg.type, torch.TensorType):
                    schema_parts.append(f"{arg.name}: jt_all")
                    break
                schema_parts.append(f"{arg.name}: any")
            schema_parts.append("...")
            check_schema(", ".join(schema_parts), func, *args, **kwargs)
            return functools.partial(jagged_unary_pointwise, func)
        if num_tensor_args == 2:
            check_schema("lhs: any, rhs: any, ...", func, *args, **kwargs)
            return functools.partial(jagged_binary_pointwise, func)

    return None
231
+
232
+
233
def extract_kwargs(arg):
    """Collect the NJT construction kwargs needed to rewrap an output like ``arg``."""
    return {
        "offsets": arg.offsets(),
        "_metadata_cache": arg._metadata_cache,
        "_ragged_idx": arg._ragged_idx,
    }
240
+
241
+
242
def jagged_unary_pointwise(func, *args, **kwargs):
    # assume if we get here that there is a single NJT input in the args
    njt = next(arg for arg in args if isinstance(arg, NestedTensor))
    # Run the op on the dense values buffer, passing all other args through.
    unwrapped = [arg._values if arg is njt else arg for arg in args]
    return NestedTensor(func(*unwrapped, **kwargs), **extract_kwargs(njt))
249
+
250
+
251
def jagged_binary_pointwise(func, *args, **kwargs):
    """Binary pointwise fallback: at least one of the first two args is an NJT."""
    a, b = args[0], args[1]
    assert isinstance(a, NestedTensor) or isinstance(b, NestedTensor)

    mismatch_error_msg = (
        "cannot call binary pointwise function {} with inputs of shapes {} and {}"
    )
    # Both sides NJT: raggedness must line up; operate on values buffers directly.
    if isinstance(a, NestedTensor) and isinstance(b, NestedTensor):
        # ex: (B, j0, D) + (B, j0, D)
        # ex: (B, j0, D) + (B, j0, 1)
        if raggedness_matches(a, b._size):
            out_values = func(a._values, b._values, *args[2:], **kwargs)
            return NestedTensor(out_values, **extract_kwargs(a))
        raise RuntimeError(mismatch_error_msg.format(func.__name__, a._size, b._size))

    # Exactly one side is an NJT from here on.
    a_is_nt = isinstance(a, NestedTensor)
    extracted_kwargs = extract_kwargs(a) if a_is_nt else extract_kwargs(b)

    # === Handle broadcasting across the batch / ragged dims ===

    # Easy case: take advantage of pre-existing broadcasting logic
    # ex: (B, j0, ?, ?) + (?) -> (B, j0, ?, ?)
    # ex: (B, j0, ?, ?) + (?, ?) -> (B, j0, ?, ?)
    # ex: (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
    nt, t = (a, b) if a_is_nt else (b, a)
    # See Note: [ Squeezing leading ones ]
    if t.dim() > nt.dim():
        raise NotImplementedError("NYI: broadcasting NT with T with larger dim")
    t_squeezed = squeeze_leading_ones(t)
    if nt.dim() >= t_squeezed.dim() + 2:
        lhs, rhs = (nt._values, t_squeezed) if a_is_nt else (t_squeezed, nt._values)
        return NestedTensor(func(lhs, rhs, *args[2:], **kwargs), **extracted_kwargs)

    # Harder case: do manual broadcasting over unbound components
    # when NT dim == non-NT dim
    # ex: (B, j0, D_0, D_1) + (B, 1, D_0, D_1) -> (B, j0, D_0, D_1)
    if a.dim() == b.dim():
        # ex: (B, j0, D_0, D_1) + (1, 1, D_0, D_1) -> should
        # be (B, j0, D_0, D_1) but not yet supported
        if a.shape[0] != b.shape[0]:
            raise RuntimeError(
                mismatch_error_msg.format(func.__name__, a.shape, b.shape)
            )

        # need to use offsets to broadcast across ragged dim properly
        # NB: inefficient fallback here; Triton codegen can help this
        # TODO: Make this work with autograd
        per_component = [
            func(a_comp, b_comp, *args[2:], **kwargs)
            for a_comp, b_comp in zip(a.unbind(), b.unbind())
        ]
        return NestedTensor(torch.cat(per_component, dim=0), **extracted_kwargs)

    # ex: (B, j0, D_0, D_1) + (A, B, 1, D_0, D_1) -> error because this breaks the invariant
    # that ragged dim is wrt left-most batch dim
    raise RuntimeError(mismatch_error_msg.format(func.__name__, a.shape, b.shape))
309
+
310
+
311
def _njt_flatten(*args, **kwargs):
    """flatten() fallback for NJT: compute the flattened shape and redispatch
    via reshape(), staying in outer (batch-inclusive) dim space."""

    def _flatten_sig(input, start_dim=0, end_dim=-1):
        pass

    _, new_kwargs = normalize_function(  # type: ignore[misc]
        _flatten_sig, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    # NB: stay in outer dim space because we're going to redispatch on a NT input
    start_dim = _wrap_jagged_dim(
        inp.dim(), new_kwargs["start_dim"], "flatten", convert_to_inner_dim=False
    )
    end_dim = _wrap_jagged_dim(
        inp.dim(), new_kwargs["end_dim"], "flatten", convert_to_inner_dim=False
    )

    if start_dim == end_dim:
        return inp

    product = functools.reduce(operator.mul, inp.shape[start_dim : end_dim + 1])
    new_shape = (*inp.shape[:start_dim], product, *inp.shape[end_dim + 1 :])

    return inp.reshape(*new_shape)


def jagged_torch_function(func, *args, **kwargs):
    # SDPA has special kernels that handle nested tensors.
    # Dispatch to the correct implementation here
    if func is torch._C._nn.scaled_dot_product_attention:
        return jagged_scaled_dot_product_attention(*args, **kwargs)

    if func.__name__ == "apply_":
        # In-place apply_ operates directly on the values buffer.
        func(args[0]._values, *args[1:], **kwargs)
        return args[0]

    # Handle flatten() here because it's CompositeImplicit.
    if func.__name__ == "flatten":
        return _njt_flatten(*args, **kwargs)

    raise NotImplementedError(func)
350
+
351
+
352
@register_jagged_func(
    [
        torch.ops.aten.is_non_overlapping_and_dense.default,
        torch.ops.aten.sym_size.default,
        torch.ops.aten.dim.default,
        torch.ops.aten.numel.default,
        torch.ops.aten.sym_numel.default,
        torch.ops.aten.sym_stride.default,
        torch.ops.aten.sym_storage_offset.default,
    ],
    "self: jt_all",
)
def tensor_attr_supported_getter(func, *args, **kwargs):
    """Serve simple tensor attribute queries directly from NJT metadata."""
    inp = args[0]

    if func == torch.ops.aten.is_non_overlapping_and_dense.default:
        return False

    if func == torch.ops.aten.sym_size.default:
        return inp._size

    if func == torch.ops.aten.dim.default:
        return len(inp._size)

    if func in (torch.ops.aten.sym_numel.default, torch.ops.aten.numel.default):
        if inp._lengths is not None:
            # Non-contiguous (holey) case: numel is the sum of per-sequence
            # lengths times the product of the trailing dense dims.
            return int(sum(inp._lengths) * math.prod(inp._size[2:]))
        return inp._values.numel()

    if func == torch.ops.aten.sym_stride.default:
        return inp._strides

    if func == torch.ops.aten.sym_storage_offset.default:
        return inp._values.storage_offset()
384
+
385
+
386
@register_jagged_func(torch.ops.prim.layout.default, "self: jt_all")
def prim_layout_default(func, *args, **kwargs):
    """Every NJT reports the jagged layout."""
    return torch.jagged
389
+
390
+
391
@register_jagged_func(
    [torch.ops.aten.size.default],
    "self: jt_all",
)
def tensor_attr_unsupported_getter(func, *args, **kwargs):
    # aten.size returns concrete ints, which cannot represent the symbolic
    # ragged dim; direct callers to NestedTensor.size() instead.
    if func == torch.ops.aten.size.default:
        raise RuntimeError(
            "NestedTensors does not support directly calling torch.ops.aten.size "
            "please use `nested_tensor.size()` instead."
        )
402
+
403
@register_jagged_func(torch.ops.aten.is_contiguous.default, "self: jt_all")
def is_contiguous_general(func, *args, **kwargs):
    """Contiguity check: holey NJTs are never contiguous; otherwise defer to
    the values buffer's contiguity for the requested memory format."""
    from torch._prims_common import is_contiguous_for_memory_format

    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")

    # If created from narrow() check for lengths
    if inp.lengths() is not None:
        return False

    new_kwargs.setdefault("memory_format", torch.contiguous_format)
    if new_kwargs["memory_format"] == torch.preserve_format:
        return True
    return is_contiguous_for_memory_format(inp._values, **new_kwargs)


# Same impl also serves the memory_format overload.
register_jagged_func(
    torch.ops.aten.is_contiguous.memory_format, "self: jt_all, memory_format: any?"
)(is_contiguous_general)
427
+
428
+
429
@register_jagged_func(
    torch.ops.aten.clone.default, "input: jt_all, memory_format: any?"
)
def clone_default(func, *args, **kwargs):
    """clone() for NJT; a contiguous-format clone of a holey NJT compacts it."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_meta = extract_kwargs(inp)

    if inp._lengths is not None:
        if new_kwargs["memory_format"] == torch.contiguous_format:
            # need to copy to remove "holes" non-contiguity / lengths metadata
            # TODO: write a kernel for this
            from .nested_tensor import jagged_from_list

            # TODO: We probably want the output to have the same ragged structure / nested int.
            assert (
                inp._ragged_idx == 1
            ), "NJT with ragged_idx != 1 not supported for contiguous clone"
            contig, _ = jagged_from_list(inp.unbind(), offsets=None)
            return contig
        # need to preserve any lengths metadata present
        new_meta["lengths"] = inp._lengths

    return NestedTensor(func(inp._values, **new_kwargs), **new_meta)
458
+
459
+
460
@register_jagged_func(torch.ops.aten.linear.default, "input: jt, weight: t, bias: t?")
def linear_default(func, *args, **kwargs):
    """linear for jagged NTs: apply to values, rewrap with input metadata."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    projected = func(nt._values, **norm_kwargs)
    return NestedTensor(projected, **extract_kwargs(nt))
@register_jagged_func(
    torch.ops.aten.linear_backward.default,
    "self: jt, grad_output: jt, weight: t, output_mask: any",
)
def linear_backward_default(func, *args, **kwargs):
    """Backward of linear for jagged NTs.

    Returns (grad_input NT, grad_weight, grad_bias); grad_bias is NYI (would
    require a reduction over the ragged dim) and is returned as None.
    """
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    upstream_grad = norm_kwargs.pop("grad_output")
    weight = norm_kwargs.pop("weight")

    check_ragged_dim_same(func, nt, "self", upstream_grad, "grad_output")
    ds = NestedTensor(
        torch.matmul(upstream_grad._values, weight), **extract_kwargs(upstream_grad)
    )
    dw = torch.matmul(upstream_grad._values.transpose(-2, -1), nt._values)
    # NYI: gradient for bias, need to reduce over ragged dim
    db = None
    return (ds, dw, db)
@register_jagged_func(torch.ops.aten.to.dtype, "input: jt_all, dtype: any")
def to_dtype(func, *args, **kwargs):
    """dtype conversion for jagged NTs: convert values, keep metadata."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    converted = func(nt._values, **norm_kwargs)
    return NestedTensor(converted, **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten._to_copy.default, "self: jt_all")
def to_copy_default(func, *args, **kwargs):
    """_to_copy for jagged NTs: copy values (dtype/device/etc.), move offsets to
    the target device, and carry over the nested-int association so the copy
    keeps the same symbolic ragged size."""
    from .nested_tensor import _tensor_symint_registry

    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    # don't change layout
    norm_kwargs.pop("layout")

    values_copy = func(nt._values, **norm_kwargs)
    offsets_copy = nt._offsets.to(device=values_copy.device)

    from torch._subclasses.fake_tensor import FakeTensor
    from torch._subclasses.functional_tensor import (
        FunctionalTensor,
        mb_unwrap_functional_tensor,
    )

    if isinstance(offsets_copy, (FakeTensor, FunctionalTensor)):
        # Temporary hack until we have the union find
        dst = mb_unwrap_functional_tensor(offsets_copy)
        origin = mb_unwrap_functional_tensor(nt._offsets)
        dst.nested_int_memo = origin.nested_int_memo
    else:
        _tensor_symint_registry[offsets_copy] = _tensor_symint_registry[nt._offsets]

    out_kwargs = extract_kwargs(nt)
    out_kwargs["offsets"] = offsets_copy

    return NestedTensor(values_copy, **out_kwargs)
@register_jagged_func(
    torch.ops.aten.copy_.default, "self: jt_all, src: jt_all, non_blocking: any?"
)
def copy_default(func, *args, **kwargs):
    """In-place copy_ between two jagged NTs with identical (symbolic) sizes."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    dst = norm_kwargs.pop("input")
    origin = norm_kwargs.pop("src")
    if dst._size != origin._size:
        raise RuntimeError(
            "copy_ only supports Nested Tensors that have same size and the exact same offset tensor."
        )
    dst.values().copy_(origin.values())
    return dst
# detach is pointwise on the values; reuse the generic unary-pointwise handler.
register_jagged_func(torch.ops.aten.detach.default, "self: jt_all")(jagged_unary_pointwise)
@register_jagged_func(
    [
        torch.ops.aten.empty_like.default,
        torch.ops.aten.ones_like.default,
        torch.ops.aten.zeros_like.default,
        torch.ops.aten.randn_like.default,
    ],
    "self: jt_all",
)
def like_factory_default(func, *args, **kwargs):
    """*_like factories for jagged NTs: build new values, share metadata."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    # Default layout is technically torch.strided but only jagged is supported here.
    # Rather than force users to specify the layout, assume jagged.
    # This should be set to strided for redispatching on values.
    norm_kwargs["layout"] = torch.strided

    return NestedTensor(func(nt._values, **norm_kwargs), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.zero_.default, "self: jt_all")
def zero__default(func, *args, **kwargs):
    """In-place zero_: zeroes the underlying values and returns the same NT."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    func(nt._values)  # mutates values in place
    return nt
@register_jagged_func(
    torch.ops.aten._softmax.default, "self: jt_all, dim: any, half_to_float: any"
)
def _softmax_default(func, *args, **kwargs):
    """softmax for jagged NTs.

    Reducing over the ragged dim goes through a pad -> softmax -> unpad round
    trip, padding with -inf so padded slots contribute e^-inf = 0; any other
    dim dispatches straight to the values tensor.
    """
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    if isinstance(norm_kwargs["dim"], tuple):
        raise RuntimeError(
            "softmax(): not supported for dimensions of type 'tuple' for NestedTensor"
        )

    nt = norm_kwargs.pop("input")

    (
        norm_kwargs["dim"],
        reduce_on_batch,
        reduce_on_ragged,
        reduce_on_non_batch,
    ) = _wrap_jagged_dims(
        nt.dim(),
        (norm_kwargs["dim"],),
        "softmax",
        nt._ragged_idx,
    )

    if reduce_on_batch:
        raise RuntimeError(
            "softmax(): not supported when reducing across the batch dimension for NestedTensor"
        )

    if reduce_on_ragged and nt._ragged_idx > 1:
        raise RuntimeError(
            "softmax(): not supported when reducing along the ragged dimension for ragged_idx > 1 for NestedTensor"
        )

    if reduce_on_ragged and nt._lengths is not None:
        raise RuntimeError(
            "softmax(): not supported where lengths is not None "
            + "if reducing across the ragged dimension for NestedTensor"
        )

    # torch.softmax takes in the reduction dimension as an integer
    norm_kwargs["dim"] = norm_kwargs["dim"][0]

    if not reduce_on_ragged:
        # non-ragged reduction: softmax directly on values
        return NestedTensor(func(nt._values, **norm_kwargs), **extract_kwargs(nt))

    padded_probs = torch.nn.functional.softmax(
        torch.ops.aten._jagged_to_padded_dense_forward(
            # values are required to be 2D tensors for j2pd
            nt._values.reshape(nt._values.shape[0], -1),
            [nt._offsets],
            max_lengths=[nt._max_seqlen],  # max length of ragged dimension
            padding_value=float("-inf"),  # e^-inf = 0
        ),
        dim=nt._ragged_idx,
    )

    probs_values = torch.ops.aten._padded_dense_to_jagged_forward(
        padded_probs,
        [nt._offsets],
        # providing total_L helps avoid a GPU/CPU sync
        total_L=nt._values.shape[0],
    ).reshape(
        -1, *nt._values.shape[1:]
    )  # expand back to the original values shape

    return NestedTensor(probs_values, **extract_kwargs(nt))
@register_jagged_func(
    torch.ops.aten._softmax_backward_data.default,
    "grad_output: jt, output: jt, dim: any, input_dtype: any",
)
def _softmax_backward(func, *args, **kwargs):
    """Backward of softmax: dispatch on the values of grad_output and output."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    upstream_grad = norm_kwargs.pop("grad_output")
    fwd_output = norm_kwargs.pop("output")
    grad_values = func(upstream_grad._values, fwd_output._values, **norm_kwargs)
    return NestedTensor(grad_values, **extract_kwargs(upstream_grad))
@register_jagged_func(
    torch.ops.aten.native_dropout.default, "self: jt, float: any, train: any?"
)
def native_dropout_default(func, *args, **kwargs):
    """Dropout on values; returns (output NT, mask NT) sharing input metadata."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    dropped, mask = func(nt._values, **norm_kwargs)
    return (
        NestedTensor(dropped, **extract_kwargs(nt)),
        NestedTensor(mask, **extract_kwargs(nt)),
    )
@register_jagged_func(
    torch.ops.aten.native_dropout_backward.default,
    "grad_output: jt, mask: jt, scale: any",
)
def native_dropout_backward_default(func, *args, **kwargs):
    """Backward of dropout: apply to values of grad_output and mask."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    upstream_grad = norm_kwargs.pop("grad_output")
    mask = norm_kwargs.pop("mask")
    grad_values = func(upstream_grad._values, mask._values, **norm_kwargs)
    return NestedTensor(grad_values, **extract_kwargs(upstream_grad))
@register_jagged_func(torch.ops.aten.prod.dim_int, "self: jt, dim: any, keepdim: any?")
def prod_dim_int(func, *args, **kwargs):
    """prod over a non-ragged dim of a jagged NT.

    keepdim=True is required so the result stays representable in jagged format.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    # TODO: Figure out how to handle this better
    # keep_dim is required to keep it in jagged format
    if not new_kwargs["keepdim"]:
        raise RuntimeError("prod(): keepdim=True must be set for NestedTensor")
    new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size), new_kwargs["dim"], "prod")

    # BUGFIX: use `inp` rather than args[0]. With
    # normalize_to_only_use_kwargs=True all inputs may arrive via kwargs, in
    # which case args is empty and args[0] raises IndexError; `inp` is the
    # same tensor either way.
    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(
    torch.ops.aten.split.Tensor, "self: jt, split_size: any, dim: any"
)
def split_tensor(func, *args, **kwargs):
    """split on a non-ragged inner dim; each piece shares the input metadata."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    norm_kwargs["dim"] = _wrap_jagged_dim(nt.dim(), norm_kwargs["dim"], "split")

    pieces = func(nt._values, **norm_kwargs)
    return tuple(NestedTensor(values=piece, **extract_kwargs(nt)) for piece in pieces)
@register_jagged_func(
    torch.ops.aten.split_with_sizes.default, "self: jt, split_sizes: any, dim: any"
)
def split_with_sizes_default(func, *args, **kwargs):
    """split_with_sizes on a non-ragged inner dim; returns a list of NTs."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    norm_kwargs["dim"] = _wrap_jagged_dim(
        nt.dim(), norm_kwargs["dim"], "split_with_sizes"
    )

    return [
        NestedTensor(values=piece, **extract_kwargs(nt))
        for piece in func(nt._values, **norm_kwargs)
    ]
@register_jagged_func(
    torch.ops.aten.narrow.default, "self: jt, dim: any, start: any, length: any"
)
def narrow(func, *args, **kwargs):
    """narrow on a non-ragged inner dim of a jagged NT."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    nt = norm_kwargs.pop("input")

    inner_dim = _wrap_jagged_dim(nt.dim(), norm_kwargs["dim"], "narrow")
    narrowed = func(
        nt._values,
        dim=inner_dim,
        start=norm_kwargs["start"],
        length=norm_kwargs["length"],
    )
    return NestedTensor(narrowed, **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.chunk.default, "self: jt, chunks: any, dim: any?")
def chunk_default(func, *args, **kwargs):
    """chunk for jagged NTs.

    Chunking along the batch dim partitions offsets/values manually; chunking
    along an inner dim dispatches to the values tensor. Returns a list of NTs.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(
        inp.dim(), new_kwargs["dim"], "chunk", allow_batch_dim=True
    )

    if new_kwargs["dim"] == 0:
        chunks = new_kwargs["chunks"]

        # get _offsets of the chunks from per-sample lengths
        lengths = inp._offsets.diff()
        chunked_lengths = lengths.chunk(chunks)
        chunked_offsets = [torch.cumsum(x, dim=0) for x in chunked_lengths]
        chunked_offsets = [F.pad(x, (1, 0), value=0) for x in chunked_offsets]  # type: ignore[arg-type]
        nested_kwargs = [
            {"offsets": per_offsets, "_ragged_idx": inp._ragged_idx}
            for per_offsets in chunked_offsets
        ]

        # get _values of the chunks (note: .item() incurs a GPU/CPU sync)
        split_sizes = [x.sum().item() for x in chunked_lengths]
        chunk_values = inp._values.split(split_sizes)

        # BUGFIX: iterate over the number of chunks actually produced.
        # torch.chunk() may return fewer than `chunks` pieces, and the old
        # bound ceil(dim0_size / chunks) is a per-chunk *size*, not a chunk
        # *count*, so it could drop chunks or index past the end.
        return [
            NestedTensor(values=chunk_values[i], **(nested_kwargs[i]))
            for i in range(len(chunked_lengths))
        ]
    else:
        return [
            NestedTensor(values=x, **extract_kwargs(inp))
            for x in func(inp._values, **new_kwargs)
        ]
@register_jagged_func(torch.ops.aten.unbind.int, "self: jt_all, dim: any?")
def unbind_int(func, *args, **kwargs):
    """unbind a jagged NT along dim 0 into its per-sample constituents.

    Note that this specializes on the length of the offsets.
    """
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    if norm_kwargs["dim"] != 0:
        raise RuntimeError("unbind(): only supported for NestedTensor on dim=0")

    nt = norm_kwargs.pop("input")
    values = nt.values()
    offsets = nt.offsets()
    lengths = nt.lengths()
    ragged_idx = nt._ragged_idx

    if lengths is None:
        # contiguous case: offsets fully describe the splits
        return torch.split(values, offsets.diff().tolist(), dim=(ragged_idx - 1))

    if ragged_idx <= 0:
        raise RuntimeError(
            "unbind(): nested tensor ragged_idx out of bounds (should be >= 1)"
        )
    n_components = lengths.shape[0]
    for idx in range(n_components):
        if offsets[idx] + lengths[idx] > values.shape[ragged_idx - 1]:
            raise RuntimeError(
                "unbind(): nested tensor offsets and lengths do not match ragged_idx dimension"
            )
    return [
        torch.narrow(
            values, dim=(ragged_idx - 1), start=offsets[idx], length=lengths[idx]
        )
        for idx in range(n_components)
    ]
@register_jagged_func(torch.ops.aten.squeeze.dim, "self: jt, dim: any")
def squeeze_dim(func, *args, **kwargs):
    """squeeze a non-ragged inner dim of a jagged NT."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    norm_kwargs["dim"] = _wrap_jagged_dim(len(nt._size), norm_kwargs["dim"], "squeeze")
    return NestedTensor(func(nt._values, **norm_kwargs), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.unsqueeze.default, "self: jt, dim: any")
def unsqueeze_default(func, *args, **kwargs):
    """unsqueeze a jagged NT at a non-ragged position."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    # Account for collapsed jagged dim: wrap against dim() + 1 output dims
    norm_kwargs["dim"] = _wrap_jagged_dim(
        len(nt._size) + 1, norm_kwargs["dim"], "unsqueeze"
    )
    return NestedTensor(func(nt._values, **norm_kwargs), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.cat.default, "tensors: any, dim: any")
def cat_default(func, *args, **kwargs):
    """cat over a mix of jagged and dense tensors; dense inputs are first
    broadcast (expand_as) to the shape of the first nested input."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    tensor_list = norm_kwargs.pop("tensors")

    # Convert any non-nested to nested
    nested_inputs = [t for t in tensor_list if t.is_nested]
    assert len(nested_inputs) > 0
    first = nested_inputs[0]
    tensor_list = [t if t.is_nested else t.expand_as(first) for t in tensor_list]

    # Account for collapsed jagged dim
    norm_kwargs["dim"] = _wrap_jagged_dim(len(first.shape), norm_kwargs["dim"], "cat")

    values_list = [t._values for t in tensor_list]
    return NestedTensor(
        func(values_list, **norm_kwargs), **extract_kwargs(tensor_list[0])
    )
@register_jagged_func(torch.ops.aten.matmul.default, "self: jt, other: any")
def matmul_default(func, *args, **kwargs):
    """matmul of an NT with a dense tensor (dispatch on values), or with
    another NT of matching raggedness (batched matmul on values)."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    other = norm_kwargs.pop("other")

    if nt.is_nested:
        if not other.is_nested:
            return NestedTensor(
                func(nt._values, other, **norm_kwargs), **extract_kwargs(nt)
            )
        # BMM with equivalent ragged dims between the two inputs
        if nt.dim() > 3 and other.dim() > 3 and raggedness_matches(nt, other._size):
            return NestedTensor(func(nt._values, other._values), **extract_kwargs(nt))

    raise RuntimeError(
        f"matmul(): not supported between inputs of shapes {nt._size} and {other.shape}"
    )
@register_jagged_func(
    torch.ops.aten.expand.default, "self: jt, size: any, implicit: any?"
)
def expand_default(func, *args, **kwargs):
    """expand a jagged NT; batch/ragged dims must match (values expand with -1
    passed through for the collapsed ragged dim)."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    size = norm_kwargs["size"]

    assert ("implicit" not in norm_kwargs) or (not norm_kwargs.pop("implicit"))
    if not raggedness_matches(nt, size):
        raise RuntimeError(f"expand(): cannot expand shape {nt._size} -> {size}")

    return NestedTensor(func(nt._values, [-1, *size[2:]]), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.expand_as.default, "self: t, other: jt")
def expand_as_default(func, *args, **kwargs):
    """Expand a dense tensor to an NT's shape by expanding against its values;
    the result adopts the NT's metadata."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    dense = norm_kwargs.pop("input")
    nt = norm_kwargs.pop("other")

    return NestedTensor(func(dense, nt._values), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.where.self, "condition: jt, self: jt, other: jt")
def where_self(func, *args, **kwargs):
    """where over three NTs of identical (symbolic) size; applied on values."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    cond = norm_kwargs.pop("condition")
    nt = norm_kwargs.pop("input")
    other = norm_kwargs.pop("other")

    assert cond._size == other._size == nt._size

    selected = func(cond._values, nt._values, other._values, **norm_kwargs)
    return NestedTensor(selected, **extract_kwargs(cond))
@register_jagged_func(torch.ops.aten._pin_memory.default, "self: jt, device: any?")
def _pin_memory_default(func, *args, **kwargs):
    """Pin the values tensor; metadata is carried over unchanged."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    return NestedTensor(func(nt._values, **norm_kwargs), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.is_pinned.default, "self: jt, device: any?")
def is_pinned_default(func, *args, **kwargs):
    """Pinned-memory query: defer to the values tensor."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    return func(nt._values, **norm_kwargs)
@register_jagged_func(
    torch.ops.aten.is_same_size.default, "self: jt_all, other: jt_all"
)
def is_same_size_default(func, *args, **kwargs):
    """True iff the two NTs have identical (symbolic) sizes."""
    first, second = args[0], args[1]
    return first._size == second._size
@register_jagged_func(
    torch.ops.aten.sum.dim_IntList,
    "self: jt_all, dim: any?, keepdim: any?, dtype: any?",
)
def sum_dim_IntList(func, *args, **kwargs):
    """
    Performs a sum along the provided tensor dimension.
    Returns a dense tensor if the ragged dimension is reduced away, else returns a nested tensor.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")

    (
        new_kwargs["dim"],
        reduce_on_batch,
        reduce_on_ragged,
        reduce_on_non_batch,
    ) = _wrap_jagged_dims(
        inp.dim(),
        new_kwargs["dim"],
        "sum",
        inp._ragged_idx,
    )

    if reduce_on_ragged and inp._lengths is not None:
        raise RuntimeError(
            "sum(): not supported where lengths is not None "
            + "if reducing across the ragged dimension for NestedTensor"
        )

    if reduce_on_ragged:  # raggedness reduced away --> return dense tensor
        if reduce_on_batch:
            # reduction cases: (batch, ragged), (batch, ragged, non-batch), etc.
            # no need to read offsets --> apply sum directly on values
            out = func(inp._values, **new_kwargs)
        else:
            if reduce_on_non_batch:
                # invalid reduction cases: (ragged, non-batch), etc.
                raise RuntimeError(
                    "sum(): not supported along a ragged and non-batch dimension for NestedTensor"
                )
            # reduction cases: (ragged)
            # shift reduction dimension of values backward to outer dimension
            values_ragged_dim_outer = inp._values.permute(
                inp._ragged_idx - 1,  # outer dimension
                *range(0, inp._ragged_idx - 1),
                *range(inp._ragged_idx, inp.dim() - 1),
            )

            # _jagged_to_padded_dense_forward requires values to be a 2D tensor
            # with the ragged dimension as the 0th dimension
            padded = torch.ops.aten._jagged_to_padded_dense_forward(
                values_ragged_dim_outer.reshape(values_ragged_dim_outer.shape[0], -1),
                [inp._offsets],
                max_lengths=[inp._max_seqlen],
            )

            # expand non-batch dimensions of the padded tensor, then shift the
            # reduction dimension forward to the original ragged position
            padded_ragged_dim_original = padded.view(
                padded.shape[0],
                inp._max_seqlen,
                *values_ragged_dim_outer.shape[1:],
            ).permute(
                0,
                *range(2, inp._ragged_idx + 1),
                1,
                *range(inp._ragged_idx + 1, inp.dim()),
            )

            # pad jagged dimension and apply sum
            out = torch.sum(
                padded_ragged_dim_original,
                dim=inp._ragged_idx,
            )

        if new_kwargs["keepdim"]:
            # BUGFIX (was a TODO): reinsert the size-1 dim where it was
            # actually removed. When the batch dim was reduced, the values-level
            # sum already kept the inner dims (keepdim was forwarded), so only
            # dim 0 is missing; otherwise only the ragged dim was removed, so
            # unsqueeze at ragged_idx. The old code always unsqueezed at 0,
            # misplacing the kept dim for ragged-only reductions.
            out = out.unsqueeze(0) if reduce_on_batch else out.unsqueeze(inp._ragged_idx)
        return out
    else:  # raggedness preserved --> return nested tensor
        if reduce_on_batch:
            # invalid reduction cases: (batch), (batch, non-batch), etc.
            raise RuntimeError(
                "sum(): not supported along the batch dimension but not the ragged dimension for NestedTensor"
            )
        # reduction cases: (non-batch), (non-batch, non-batch), etc.
        # apply sum directly on values
        return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(
    torch.ops.aten.transpose.int, "self: jt_all, dim0: any, dim1: any"
)
def transpose_int(func, *args, **kwargs):
    """transpose for jagged NTs. Swapping the ragged dim with another dim is
    implemented by retagging _ragged_idx and transposing the inner values;
    other swaps dispatch directly to the values tensor."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    from torch._prims_common import canonicalize_dims

    nt = norm_kwargs.pop("input")
    dim0, dim1 = canonicalize_dims(nt.dim(), (norm_kwargs["dim0"], norm_kwargs["dim1"]))

    if nt._lengths is not None:
        raise ValueError(
            "transpose(): not supported on jagged layout nested tensor with holes"
        )

    # To support the SDPA API, inputs need to have the ragged idx transposed to dim 2
    # instead of 1, although the internal Flash and mem-effn implementations will
    # use the inputs with raggedness in dim 1.
    if nt._ragged_idx in (dim0, dim1):
        if 0 in (dim0, dim1):
            raise ValueError(
                "Transpose is not supported on the batch dimension for jagged NT"
            )
        new_ragged_idx = dim1 if dim0 == nt._ragged_idx else dim0
        out_kwargs = extract_kwargs(nt)
        out_kwargs["_ragged_idx"] = new_ragged_idx
        return NestedTensor(
            nt.values().transpose(
                _outer_to_inner_dim(len(nt._size), dim0),
                _outer_to_inner_dim(len(nt._size), dim1),
            ),
            **out_kwargs,
        )

    norm_kwargs["dim0"] = _wrap_jagged_dim(nt.dim(), norm_kwargs["dim0"], "transpose")
    norm_kwargs["dim1"] = _wrap_jagged_dim(nt.dim(), norm_kwargs["dim1"], "transpose")

    return NestedTensor(func(nt._values, **norm_kwargs), **extract_kwargs(nt))
@register_jagged_func(torch.ops.aten.permute.default, "self: jt_all, dims: any")
def permute_default(func, *args, **kwargs):
    """permute for jagged NTs. The batch dim must stay in place; _ragged_idx is
    retagged to wherever the permutation sends the ragged dim, and the inner
    dims are permuted on the values tensor."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    nt = norm_kwargs.pop("input")
    dims = norm_kwargs.pop("dims")
    out_kwargs = extract_kwargs(nt)
    ndim = len(nt._size)

    # The first two checks are the same as the checks in the normal permute implementation
    if ndim != len(dims):
        raise ValueError(
            f"permute(): number of dimensions in the tensor input ({ndim}) "
            + f"does not match the length of the desired ordering of dimensions ({len(dims)}).",
        )

    from torch._prims_common import canonicalize_dims

    canon_dims = canonicalize_dims(ndim, dims)

    if len(set(canon_dims)) != len(canon_dims):
        raise ValueError("permute(): duplicate dims are not allowed.")

    if nt._lengths is not None:
        raise ValueError(
            "permute(): not supported on jagged layout nested tensor with holes"
        )
    if canon_dims[0] != 0:
        raise ValueError(
            "Permute is not supported on the batch dimension for jagged NT"
        )
    out_kwargs["_ragged_idx"] = canon_dims.index(nt._ragged_idx)
    norm_kwargs["dims"] = [_outer_to_inner_dim(ndim, d) for d in canon_dims[1:]]
    return NestedTensor(func(nt._values, **norm_kwargs), **out_kwargs)
@register_jagged_func(
    [torch.ops.aten.view.default, torch.ops.aten._unsafe_view.default],
    "self: jt_all, size: any",
)
def view_default(func, *args, **kwargs):
    """view/_unsafe_view for jagged NTs: translate the requested outer size
    (batch + ragged + inner dims) into an inner size for values, then view."""
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")
    size = norm_kwargs.pop("size")

    if nt._ragged_idx != 1 and tuple(nt._size) != tuple(size):
        raise RuntimeError(
            f"view(): does not support ragged_idx != 1 except when inp._size == size. "
            f"inp._size is ({nt._size}) and size is ({size})."
        )

    # Ensure specified size still includes batch and ragged dims
    if len(size) < 3 or not raggedness_matches(nt, size):
        raise RuntimeError(f"view(): cannot view shape {nt._size} as {size}")

    # outer size: the size of the NT, e.g. [3, j0, 10]
    # inner size: the size of the values, e.g. [8, 10] (for offsets = [0, 3, 5, 8])
    #
    # example: for outer size [a, b, c, j0, d, e, f] with ragged j0 and
    # ragged_idx=3, the inner size is [b, c, values.size(ragged_idx - 1), d, e, f]:
    # inner dim i maps to outer dim i + 1, except at the ragged position
    # (inner index ragged_idx - 1), which keeps the values tensor's own extent.
    inner_size = [
        nt._values.size(i) if i == nt._ragged_idx - 1 else size[i + 1]
        for i in range(len(size) - 1)
    ]

    return NestedTensor(func(nt._values, inner_size), **extract_kwargs(nt))
@register_jagged_func(
    torch.ops.aten.native_layer_norm.default,
    "input: jt_all, normalized_shape: any, weight: any?, bias: any?, eps: any",
)
def native_layer_norm_default(func, *args, **kwargs):
    """native_layer_norm for jagged NTs.

    When the normalized shape covers the ragged dim, statistics are computed
    over a masked padded-dense intermediate; otherwise dispatch on values.

    NOTE(review): aten::native_layer_norm is specified to return rstd, but the
    ragged branch returns std = sqrt(var + eps) — preserved as-is; confirm
    before relying on the third output. Likewise, weight/bias are not applied
    in the ragged branch — preserved as-is; confirm against upstream.
    """
    _, norm_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = norm_kwargs.pop("input")

    if nt.dim() <= 2:
        raise RuntimeError(
            "layer_norm(): not supported for NestedTensor objects with 2 or fewer dimensions"
        )

    normalized_shape = norm_kwargs["normalized_shape"]
    ragged_size = nt.shape[nt._ragged_idx]

    if nt.dim() - len(normalized_shape) == 0:
        # normalizing over the batch dimension is not supported
        raise RuntimeError(
            "layer_norm(): not supported when normalizing over the batch dimension for NestedTensor"
        )

    if ragged_size in normalized_shape:
        # special handling for normalizing over the ragged dimension
        if nt._lengths is not None:
            raise RuntimeError(
                "layer_norm(): not supported where lengths is not None if operating on the ragged dimension for NestedTensor"
            )

        # _jagged_to_padded_dense_forward requires values to be a 2D tensor
        padded_values = torch.ops.aten._jagged_to_padded_dense_forward(
            nt._values.flatten(start_dim=nt._ragged_idx),
            [nt._offsets],
            max_lengths=[nt._max_seqlen],  # max length of ragged dimension
        )

        # mask of valid (non-padding) positions, expanded to the padded shape
        # (3D dense tensor)
        valid_mask = torch.ops.aten._jagged_to_padded_dense_forward(
            torch.ones((nt._values.shape[0], 1), device=nt.device, dtype=nt.dtype),
            [nt._offsets],
            max_lengths=[nt._max_seqlen],
        ).expand(padded_values.shape)

        # ragged dim * inner dim: element count per normalization layer, since
        # we sum over dims (1, 2) (the layer on which we normalize)
        elements_per_layer = (
            nt._offsets.diff().unsqueeze(1).unsqueeze(1) * padded_values.shape[2]
        )

        # summing over (1, 2) gives layer norm; summing over (1) alone would
        # be an instance norm
        mean = (
            torch.sum(
                padded_values,
                dim=(1, 2),
                keepdim=True,
            )
            / elements_per_layer
        )

        # mask padding so it cannot perturb the variance
        centered = (padded_values - mean) * valid_mask

        variance = (
            torch.sum(
                torch.square(centered),
                dim=(1, 2),
                keepdim=True,
            )
            / elements_per_layer
        )

        std = torch.sqrt(variance + norm_kwargs["eps"])
        padded_layer_norm = centered / std

        jagged_layer_norm_values = torch.ops.aten._padded_dense_to_jagged_forward(
            padded_layer_norm,
            [nt._offsets],
            # providing total_L helps avoid a GPU/CPU sync
            total_L=nt._values.shape[0],
        ).unflatten(
            -1, nt.shape[nt._ragged_idx + 1 :]
        )  # restore the original inner shape, e.g. (B, *, WH) --> (B, *, W, H)

        return (
            NestedTensor(jagged_layer_norm_values, **extract_kwargs(nt)),
            mean,
            std,
        )

    output, mean, std = func(nt._values, **norm_kwargs)
    return (NestedTensor(output, **extract_kwargs(nt)), mean, std)
+ @register_jagged_func(
1350
+ torch.ops.aten.native_layer_norm_backward.default,
1351
+ "grad_out: jt, input: jt, normalized_shape: any, mean: any, rstd: any, weight: any?, bias: any?, output_mask: any",
1352
+ )
1353
+ def native_layer_norm_backward_default(func, *args, **kwargs):
1354
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1355
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1356
+ )
1357
+ grad_out = new_kwargs.pop("grad_out")
1358
+ inp = new_kwargs.pop("input")
1359
+ d_input, d_gamma, d_beta = func(grad_out._values, inp._values, **new_kwargs)
1360
+ if d_input is None:
1361
+ return (None, d_gamma, d_beta)
1362
+
1363
+ return (NestedTensor(d_input, **extract_kwargs(inp)), d_gamma, d_beta)
1364
+
1365
+
1366
+ @register_jagged_func(torch.ops.aten.select.int, "self: jt, dim: any, index: any")
1367
+ def select_int(func, *args, **kwargs):
1368
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1369
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1370
+ )
1371
+
1372
+ inp = new_kwargs.pop("input")
1373
+ new_kwargs["dim"] = _wrap_jagged_dim(
1374
+ inp.dim(), new_kwargs["dim"], "select", allow_batch_dim=True
1375
+ )
1376
+
1377
+ # handle batch dim slicing via unbind() for now
1378
+ # TODO: make this more efficient
1379
+ if new_kwargs["dim"] == 0:
1380
+ return inp.unbind()[new_kwargs["index"]]
1381
+
1382
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
1383
+
1384
+
1385
+ @register_jagged_func(
1386
+ torch.ops.aten.slice.Tensor,
1387
+ "self: jt, dim: any?, start: any?, end: any?, step: any?",
1388
+ )
1389
+ def slice_tensor(func, *args, **kwargs):
1390
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1391
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1392
+ )
1393
+
1394
+ inp = new_kwargs.pop("input")
1395
+ new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "slice")
1396
+
1397
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
1398
+
1399
+
1400
+ @register_jagged_func(
1401
+ torch.ops.aten.convolution.default,
1402
+ "input: jt, weight: t, bias: t?, stride: any, padding: any, "
1403
+ "dilation: any, transposed: any, output_padding: any, groups: any",
1404
+ )
1405
+ def convolution_default(func, *args, **kwargs):
1406
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1407
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1408
+ )
1409
+
1410
+ inp = new_kwargs.pop("input")
1411
+
1412
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
1413
+
1414
+
1415
+ @register_jagged_func(
1416
+ torch.ops.aten.mean.dim, "self: jt_all, dim: any?, keepdim: any?, dtype: any?"
1417
+ )
1418
+ def mean_dim(func, *args, **kwargs):
1419
+ """
1420
+ Performs a mean along the provided tensor dimension.
1421
+ Returns a dense tensor if the ragged dimension is reduced away, else returns a nested tensor.
1422
+ """
1423
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1424
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1425
+ )
1426
+
1427
+ if len(new_kwargs["dim"]) > 1:
1428
+ raise RuntimeError(
1429
+ "mean(): not supported across multiple dimensions for NestedTensor"
1430
+ )
1431
+
1432
+ inp = new_kwargs.pop("input")
1433
+
1434
+ (
1435
+ new_kwargs["dim"],
1436
+ reduce_on_batch,
1437
+ reduce_on_ragged,
1438
+ reduce_on_non_batch,
1439
+ ) = _wrap_jagged_dims(
1440
+ inp.dim(),
1441
+ new_kwargs["dim"],
1442
+ "mean",
1443
+ inp._ragged_idx,
1444
+ )
1445
+
1446
+ if reduce_on_batch:
1447
+ raise RuntimeError(
1448
+ "mean(): not supported along the batch dimension but not the ragged dimension for NestedTensor"
1449
+ )
1450
+
1451
+ if reduce_on_ragged and inp._lengths is not None:
1452
+ raise RuntimeError(
1453
+ "mean(): not supported where lengths is not None "
1454
+ + "if reducing across the ragged dimension for NestedTensor"
1455
+ )
1456
+
1457
+ if not new_kwargs["keepdim"]:
1458
+ raise RuntimeError("mean(): not supported when keepdim=False for NestedTensor")
1459
+
1460
+ if reduce_on_ragged: # raggedness reduced away
1461
+ torch_sum = torch.sum(inp, dim=inp._ragged_idx, keepdim=new_kwargs["keepdim"])
1462
+
1463
+ # for every non-batch dimension,
1464
+ # unsqueeze lengths into the same shape as the PyTorch sum,
1465
+ # as the extra dimensions must all be divided by the same length
1466
+ lengths = inp._offsets.diff()
1467
+ for _ in range(inp.dim() - 2):
1468
+ lengths = lengths.unsqueeze(-1)
1469
+
1470
+ return torch_sum / lengths.broadcast_to(torch_sum.shape)
1471
+
1472
+ return NestedTensor(
1473
+ func(inp._values, **new_kwargs), **extract_kwargs(inp)
1474
+ ) # raggedness preserved
1475
+
1476
+
1477
+ @register_jagged_func(torch.ops.aten.stack.default, "tensors: any, dim: any")
1478
+ def stack_default(func, *args, **kwargs):
1479
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1480
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1481
+ )
1482
+
1483
+ # guaranteed this is non-empty if we got here
1484
+ tensors = new_kwargs.pop("tensors")
1485
+ for t in tensors:
1486
+ if not isinstance(t, NestedTensor):
1487
+ raise RuntimeError("stack(): expected all nested tensors inputs")
1488
+
1489
+ if t.dim() != tensors[0].dim():
1490
+ raise RuntimeError(
1491
+ "stack(): expected all nested tensors to have the same dim"
1492
+ )
1493
+
1494
+ if not raggedness_matches(t, tensors[0].shape):
1495
+ raise RuntimeError(
1496
+ "stack(): expected all nested tensors to have the same nested structure"
1497
+ )
1498
+
1499
+ new_kwargs["dim"] = _wrap_jagged_dim(
1500
+ tensors[0].dim() + 1, new_kwargs["dim"], "stack"
1501
+ )
1502
+
1503
+ return NestedTensor(
1504
+ func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
1505
+ )
1506
+
1507
+
1508
+ @register_jagged_func(
1509
+ torch.ops.aten.embedding.default,
1510
+ "weight: t, indices: jt, padding_idx: any?, scale_grad_by_freq: any?, sparse: any?",
1511
+ )
1512
+ def embedding_default(func, *args, **kwargs):
1513
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1514
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1515
+ )
1516
+
1517
+ # guaranteed this is non-empty if we got here
1518
+ indices = new_kwargs.pop("indices")
1519
+ weight = new_kwargs.pop("weight")
1520
+
1521
+ return NestedTensor(
1522
+ func(weight, indices._values, **new_kwargs), **extract_kwargs(indices)
1523
+ )
1524
+
1525
+
1526
+ @register_jagged_func(
1527
+ [
1528
+ torch.ops.aten.values.default,
1529
+ torch.ops.aten._nested_get_values.default,
1530
+ ],
1531
+ "self: jt_all",
1532
+ )
1533
+ def values_default(func, *args, **kwargs):
1534
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1535
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1536
+ )
1537
+
1538
+ inp = new_kwargs.pop("input")
1539
+
1540
+ # TODO: Handle inference mode properly.
1541
+ # See https://github.com/pytorch/pytorch/issues/112024#issuecomment-1779554292
1542
+ return inp._values.detach()
1543
+
1544
+
1545
+ @register_jagged_func(torch.ops.aten.all.default, "self: jt_all")
1546
+ def all_default(func, *args, **kwargs):
1547
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1548
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1549
+ )
1550
+
1551
+ inp = new_kwargs.pop("input")
1552
+
1553
+ return func(inp._values)
1554
+
1555
+
1556
+ @register_jagged_func(
1557
+ torch.ops.aten._nested_view_from_jagged.default,
1558
+ "values: t, offsets: t, dummy: jt_all, lengths: t?, ragged_idx: any?, min_seqlen: t?, max_seqlen: t?",
1559
+ )
1560
+ def _nested_view_from_jagged_default(func, *args, **kwargs):
1561
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1562
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1563
+ )
1564
+
1565
+ values, offsets, lengths = (
1566
+ new_kwargs["input"],
1567
+ new_kwargs["offsets"],
1568
+ new_kwargs["lengths"],
1569
+ )
1570
+ ragged_idx = new_kwargs["ragged_idx"]
1571
+ min_seqlen = new_kwargs["min_seqlen"]
1572
+ max_seqlen = new_kwargs["max_seqlen"]
1573
+ metadata_cache = {}
1574
+ if min_seqlen is not None:
1575
+ metadata_cache["min_seqlen"] = min_seqlen
1576
+ if max_seqlen is not None:
1577
+ metadata_cache["max_seqlen"] = max_seqlen
1578
+
1579
+ return NestedTensor(
1580
+ values,
1581
+ offsets,
1582
+ lengths=lengths,
1583
+ _ragged_idx=ragged_idx,
1584
+ _metadata_cache=metadata_cache,
1585
+ )
1586
+
1587
+
1588
+ @register_jagged_func(torch.ops.aten._nested_get_offsets.default, "self: jt_all")
1589
+ def _nested_get_offsets(func, *args, **kwargs):
1590
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1591
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1592
+ )
1593
+
1594
+ inp = new_kwargs.pop("input")
1595
+ return inp._offsets
1596
+
1597
+
1598
+ @register_jagged_func(torch.ops.aten._nested_get_lengths.default, "self: jt_all")
1599
+ def _nested_get_lengths(func, *args, **kwargs):
1600
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1601
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1602
+ )
1603
+
1604
+ inp = new_kwargs.pop("input")
1605
+ return inp._lengths
1606
+
1607
+
1608
+ @register_jagged_func(torch.ops.aten._nested_get_ragged_idx.default, "self: jt_all")
1609
+ def _nested_get_ragged_idx(func, *args, **kwargs):
1610
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1611
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1612
+ )
1613
+
1614
+ inp = new_kwargs.pop("input")
1615
+ return inp._ragged_idx
1616
+
1617
+
1618
+ @register_jagged_func(torch.ops.aten._nested_get_min_seqlen.default, "self: jt_all")
1619
+ def _nested_get_min_seqlen(func, *args, **kwargs):
1620
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1621
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1622
+ )
1623
+
1624
+ inp = new_kwargs.pop("input")
1625
+ return inp._metadata_cache.get("min_seqlen", None)
1626
+
1627
+
1628
+ @register_jagged_func(torch.ops.aten._nested_get_max_seqlen.default, "self: jt_all")
1629
+ def _nested_get_max_seqlen(func, *args, **kwargs):
1630
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1631
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1632
+ )
1633
+
1634
+ inp = new_kwargs.pop("input")
1635
+ return inp._metadata_cache.get("max_seqlen", None)
1636
+
1637
+
1638
+ # If a section of the Nested Tensor is fully masked out we still retain the section with a length of 0
1639
+ @register_jagged_func(torch.ops.aten.masked_select.default, "self: jt, mask: any")
1640
+ def masked_select_default(func, *args, **kwargs):
1641
+ _, new_kwargs = normalize_function( # type: ignore[misc]
1642
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1643
+ )
1644
+ inp = new_kwargs.pop("input")
1645
+ mask = new_kwargs.pop("mask")
1646
+
1647
+ if inp.ndim > 2:
1648
+ raise RuntimeError("masked_select only support 2-D selections currently")
1649
+ elif inp.shape != mask.shape:
1650
+ raise RuntimeError(
1651
+ f"Mask with shape {mask.shape} is not compatible with input's shape {inp.shape}"
1652
+ )
1653
+ res_values = inp._values.masked_select(mask.values())
1654
+ mask_cumsum = F.pad(mask.values().cumsum(dim=0), (1, 0)) # type: ignore[arg-type]
1655
+
1656
+ args = extract_kwargs(inp)
1657
+ args["offsets"] = mask_cumsum[inp._offsets]
1658
+ return NestedTensor(
1659
+ values=res_values,
1660
+ **args,
1661
+ )
1662
+
1663
+
1664
+ # Make the dummy available on the C++ side.
1665
+ @register_jagged_func(torch.ops.aten._nested_get_jagged_dummy.default, "self: any")
1666
+ def _nested_get_jagged_dummy(func, *args, **kwargs):
1667
+ from torch.nested._internal.nested_tensor import _nt_view_dummy
1668
+
1669
+ return _nt_view_dummy()
1670
+
1671
+
1672
+ with torch.library._scoped_library("aten", "IMPL") as aten:
1673
+ aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CPU")
1674
+ aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CUDA")
1675
+ aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "Meta")
pllava/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py ADDED
@@ -0,0 +1,871 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ from typing import Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn
7
+ import torch.nn.functional as F
8
+ from torch.backends.cuda import (
9
+ can_use_efficient_attention,
10
+ can_use_flash_attention,
11
+ flash_sdp_enabled,
12
+ math_sdp_enabled,
13
+ mem_efficient_sdp_enabled,
14
+ SDPAParams,
15
+ )
16
+ from torch.nn.attention import SDPBackend
17
+
18
+ from .nested_tensor import NestedTensor
19
+
20
+
21
+ log = logging.getLogger(__name__)
22
+
23
+
24
+ def _validate_sdpa_input(
25
+ query: torch.Tensor,
26
+ key: torch.Tensor,
27
+ value: torch.Tensor,
28
+ attn_mask: Optional[torch.Tensor] = None,
29
+ dropout_p=0.0,
30
+ is_causal=False,
31
+ scale=None,
32
+ ):
33
+ if (
34
+ not isinstance(query, NestedTensor)
35
+ or not isinstance(key, NestedTensor)
36
+ or not isinstance(value, NestedTensor)
37
+ ):
38
+ raise ValueError(
39
+ f"Expected query, key, and value to be nested tensors, "
40
+ f"but got query.is_nested: {query.is_nested}, key.is_nested: {key.is_nested}, "
41
+ f"and value.is_nested: {value.is_nested} instead."
42
+ )
43
+ if query.dtype != key.dtype or query.dtype != value.dtype:
44
+ raise ValueError(
45
+ f"Expected query, key, and value to have the same dtype, "
46
+ f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
47
+ f"and value.dtype: {value.dtype} instead."
48
+ )
49
+ if query.device != key.device or query.device != value.device:
50
+ raise ValueError(
51
+ f"Expected query, key, and value to have the same device type, "
52
+ f"but got query.device: {query.device}, key.device: {key.device}, "
53
+ f"and value.device: {value.device} instead."
54
+ )
55
+ if query.dim() < 3 or key.dim() < 3 or value.dim() < 3:
56
+ raise ValueError(
57
+ f"Expected query, key, and value to all be at least 3 dimensional, but got query.dim: "
58
+ f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
59
+ )
60
+ if query._ragged_idx != key._ragged_idx or query._ragged_idx != value._ragged_idx:
61
+ raise ValueError(
62
+ f"Expected query, key, and value to all be ragged on the same dimension, but got ragged "
63
+ f"dims {query._ragged_idx}, {key._ragged_idx}, and {value._ragged_idx}, respectively."
64
+ )
65
+ if attn_mask is not None:
66
+ # TODO: Figure out whether masks are actually supported for this layout or not
67
+ raise ValueError("Masks are not yet supported!")
68
+ if attn_mask.dtype != torch.bool and attn_mask.dtype != query.dtype:
69
+ raise ValueError(
70
+ f"Expected attn_mask dtype to be bool or to match query dtype, but got attn_mask.dtype: "
71
+ f"{attn_mask.dtype}, and query.dtype: {query.dtype} instead."
72
+ )
73
+
74
+
75
+ def _check_batch_size_nested(params: SDPAParams, debug=False) -> bool:
76
+ # This is expected to be called after check_tensor_shapes ensuring that the
77
+ # size() calls won't error since the inputs are all 4 dimensional
78
+ q_batch_size = params.query.size(0)
79
+ k_batch_size = params.key.size(0)
80
+ v_batch_size = params.value.size(0)
81
+
82
+ # num_heads logic for nested input is checked in
83
+ # check_for_seq_len_0_nested_tensor as there is handling there to make sure
84
+ # num_heads is not ragged
85
+ return q_batch_size == k_batch_size and q_batch_size == v_batch_size
86
+
87
+
88
+ def _check_head_dim_size_flash_nested(params: SDPAParams, debug=False) -> bool:
89
+ max_size = 256
90
+ query_size_last = params.query.size(-1)
91
+ key_size_last = params.key.size(-1)
92
+ value_size_last = params.value.size(-1)
93
+ same_head_dim_size = (
94
+ query_size_last == key_size_last and query_size_last == value_size_last
95
+ )
96
+ if not (
97
+ same_head_dim_size
98
+ and (query_size_last % 8 == 0)
99
+ and (query_size_last <= max_size)
100
+ ):
101
+ if debug:
102
+ log.warning(
103
+ "For NestedTensor inputs, Flash attention requires q,k,v to have the same "
104
+ "last dimension and to be a multiple of 8 and less than or equal to 256. "
105
+ "Got Query.size(-1): %d, Key.size(-1): %d, Value.size(-1): %d instead.",
106
+ query_size_last,
107
+ key_size_last,
108
+ value_size_last,
109
+ )
110
+ return False
111
+ return True
112
+
113
+
114
+ def _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
115
+ param: torch.Tensor, param_name: str, debug=False
116
+ ) -> bool:
117
+ assert isinstance(param, NestedTensor), "param should be a jagged NT"
118
+
119
+ if param._ragged_idx == 1:
120
+ # num_head_dims is ragged
121
+ if debug:
122
+ log.warning(
123
+ "Fused kernels do not support ragged num_head_dims, %s has a ragged num_heads.",
124
+ param_name,
125
+ )
126
+ return False
127
+
128
+ # This is being called inside sdp with shape [batch, heads, {seq_len}, dim]
129
+ if param._get_min_seqlen() == 0:
130
+ if debug:
131
+ log.warning(
132
+ "Fused kernels do not support seq_len == 0, %s has a seq len of 0.",
133
+ param_name,
134
+ )
135
+ return False
136
+
137
+ return True
138
+
139
+
140
+ def _try_broadcast_param_size(q_size, k_size, v_size, param_name, debug=False) -> bool:
141
+ max_size = max(q_size, k_size, v_size)
142
+ if (
143
+ (q_size != max_size and q_size != 1)
144
+ or (k_size != max_size and k_size != 1)
145
+ or (v_size != max_size and v_size != 1)
146
+ ):
147
+ if debug:
148
+ log.warning(
149
+ "Both fused kernels require query, key and value to have broadcastable %s, "
150
+ "got Query %s %d, Key %s %d, Value %s %d instead.",
151
+ param_name,
152
+ param_name,
153
+ q_size,
154
+ param_name,
155
+ k_size,
156
+ param_name,
157
+ v_size,
158
+ )
159
+ return False
160
+ return True
161
+
162
+
163
+ def _check_for_seq_len_0_nested(params: SDPAParams, debug=False) -> bool:
164
+ # When this function is called we are assured that the nt is dim==4
165
+ q_is_safe = (
166
+ _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
167
+ params.query, "query", debug
168
+ )
169
+ if params.query.is_nested
170
+ else True
171
+ )
172
+ # short circuit if any is unsafe
173
+ if not q_is_safe:
174
+ return False
175
+
176
+ k_is_safe = (
177
+ _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
178
+ params.key, "key", debug
179
+ )
180
+ if params.key.is_nested
181
+ else True
182
+ )
183
+ # short circuit if any is unsafe
184
+ if not k_is_safe:
185
+ return False
186
+
187
+ v_is_safe = (
188
+ _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
189
+ params.value, "value", debug
190
+ )
191
+ if params.value.is_nested
192
+ else True
193
+ )
194
+ # short circuit if any is unsafe
195
+ if not v_is_safe:
196
+ return False
197
+
198
+ # We now know none of the inputs have ragged num_heads, so we can safely
199
+ # access .size(1)
200
+ q_num_heads = params.query.size(1)
201
+ k_num_heads = params.key.size(1)
202
+ v_num_heads = params.value.size(1)
203
+ same_num_heads = q_num_heads == k_num_heads and q_num_heads == v_num_heads
204
+
205
+ if not same_num_heads:
206
+ if (
207
+ params.query.requires_grad
208
+ or params.key.requires_grad
209
+ or params.value.requires_grad
210
+ ):
211
+ if debug:
212
+ log.warning(
213
+ "Both fused kernels do not support training with broadcasted NT inputs."
214
+ )
215
+ return False
216
+ return _try_broadcast_param_size(
217
+ q_num_heads, k_num_heads, v_num_heads, "num heads", debug
218
+ )
219
+ return True
220
+
221
+
222
+ def _can_use_flash_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
223
+ constraints = (
224
+ _check_batch_size_nested,
225
+ _check_head_dim_size_flash_nested,
226
+ _check_for_seq_len_0_nested,
227
+ )
228
+ for constraint in constraints:
229
+ if not constraint(params, debug):
230
+ return False
231
+ return True
232
+
233
+
234
+ def _can_use_efficient_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
235
+ constraints = (
236
+ _check_batch_size_nested,
237
+ _check_for_seq_len_0_nested,
238
+ )
239
+ for constraint in constraints:
240
+ if not constraint(params, debug):
241
+ return False
242
+ return True
243
+
244
+
245
+ def _can_use_math_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
246
+ if (
247
+ not params.query.transpose(1, 2).is_contiguous()
248
+ or not params.key.transpose(1, 2).is_contiguous()
249
+ or not params.value.transpose(1, 2).is_contiguous()
250
+ ):
251
+ if debug:
252
+ log.warning(
253
+ "If inputs are nested tensors they must be contiguous after transposing."
254
+ )
255
+ return False
256
+ if params.is_causal:
257
+ if debug:
258
+ log.warning(
259
+ "Nested tensors for query / key are not supported when is_causal=True."
260
+ )
261
+ return False
262
+ return True
263
+
264
+
265
+ def _select_sdp_backend(query, key, value, attn_mask, dropout, is_causal, enable_gqa):
266
+ if (
267
+ not flash_sdp_enabled()
268
+ and not mem_efficient_sdp_enabled()
269
+ and not math_sdp_enabled()
270
+ ):
271
+ return SDPBackend.ERROR
272
+
273
+ ordering = (
274
+ SDPBackend.FLASH_ATTENTION,
275
+ SDPBackend.EFFICIENT_ATTENTION,
276
+ SDPBackend.MATH,
277
+ )
278
+
279
+ params = SDPAParams(query, key, value, attn_mask, dropout, is_causal, enable_gqa)
280
+
281
+ for backend in ordering:
282
+ if backend == SDPBackend.FLASH_ATTENTION:
283
+ if can_use_flash_attention(params) and _can_use_flash_sdpa_jagged(params):
284
+ return SDPBackend.FLASH_ATTENTION
285
+ if backend == SDPBackend.EFFICIENT_ATTENTION:
286
+ if can_use_efficient_attention(params) and _can_use_efficient_sdpa_jagged(
287
+ params
288
+ ):
289
+ return SDPBackend.EFFICIENT_ATTENTION
290
+ if backend == SDPBackend.MATH:
291
+ if math_sdp_enabled() and _can_use_math_sdpa_jagged(params):
292
+ return SDPBackend.MATH
293
+
294
+ log.warning("Memory efficient kernel not used because:")
295
+ can_use_efficient_attention(params, debug=True)
296
+ _can_use_efficient_sdpa_jagged(params, debug=True)
297
+ log.warning("Flash attention kernel not used because:")
298
+ can_use_flash_attention(params, debug=True)
299
+ _can_use_flash_sdpa_jagged(params, debug=True)
300
+ log.warning("Math attention kernel not used because:")
301
+ _can_use_math_sdpa_jagged(params, debug=True)
302
+ return SDPBackend.ERROR
303
+
304
+
305
+ def _cumulative_and_max_seq_len_nnz(qkv: torch.Tensor) -> Tuple[torch.Tensor, int, int]:
306
+ # This function is used to calculate two pieces of metadata that are needed
307
+ # for use with flash-attention and efficient_attention kernels. They are the
308
+ # cumulative sequence_length over a batch of sequences and the maximum
309
+ # sequence length.
310
+
311
+ # It returns a tuple of cumulative sequence lengths and the maximum sequence
312
+ # length, and the last element in the cumulative_sequence_lengths
313
+ if not isinstance(qkv, NestedTensor):
314
+ raise ValueError("QKV must be nested for flash cumulative_seq_len calculation.")
315
+
316
+ if qkv.lengths() is None:
317
+ # TODO: Explore performance impact of copying
318
+ cumulative_seqlen = qkv.offsets().to(dtype=torch.int32, device=qkv.device)
319
+ max_seqlen = qkv._get_max_seqlen()
320
+ n_elem = qkv.values().shape[0]
321
+ else:
322
+ # TODO: Explore performance impact of copying
323
+ cumulative_seqlen = (
324
+ qkv.lengths().cumsum(0).to(dtype=torch.int32, device=qkv.device)
325
+ )
326
+ batch_size = qkv.size(0)
327
+ max_seqlen = qkv._get_max_seqlen()
328
+ # TODO: Explore performance impact when compiling
329
+ n_elem = int(cumulative_seqlen[-1].item())
330
+ return cumulative_seqlen, max_seqlen, n_elem
331
+
332
+
333
+ def _is_safe_to_get_storage_as_tensor(tensor: torch.Tensor):
334
+ # This function checks if a nested tensor is valid for
335
+ # use with the flash-attention and efficient_attention kernels without
336
+ # needing to call contiguous on the nested tensor input.
337
+ # It checks that the storage offsets' adjacent_differences are a constant
338
+ # mutiple of the previous tensor in the nested tensor and that the strides
339
+ # are monitonically decreasing. This check is done after calling transpose on
340
+ # the nested tensor resulting in a Nt of shape [bsz, {seq_len}, num_heads, dim]
341
+
342
+ # Returns a boolean indicating if contiguous needs to be called for input
343
+ assert isinstance(tensor, NestedTensor)
344
+ offsets = tensor.offsets()
345
+ strides = tensor._strides
346
+
347
+ n_tensors = offsets.size(0) - 1
348
+ if n_tensors <= 1:
349
+ return True
350
+
351
+ # Check initially that the tensor strides are in strictly descending order
352
+ prev_stride = strides[1]
353
+ for stride in strides[2:]:
354
+ if prev_stride <= stride:
355
+ # This would mean that the last stride is greater than the seq_len
356
+ # stride
357
+ return False
358
+ prev_stride = stride
359
+
360
+ # Congrats you made it!
361
+ return True
362
+
363
+
364
+ def _view_as_dense(
365
+ tensor: torch.Tensor, Nnz: int, num_heads: int, head_dim: int
366
+ ) -> torch.Tensor:
367
+ if tensor.is_nested:
368
+ return tensor.values()
369
+ return tensor.view(Nnz, num_heads, head_dim)
370
+
371
+
372
+ # TODO: Next iteration should add test cases and check it works
373
+ # def _sdpa_nested_preprocessing_with_broadcast(query, key, value):
374
+ # # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
375
+ # # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
376
+ # # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
377
+ # q_batch_size = query.size(0)
378
+ # k_batch_size = key.size(0)
379
+ # v_batch_size = value.size(0)
380
+
381
+ # output_batch_size = max(q_batch_size, k_batch_size, v_batch_size)
382
+
383
+ # q_num_heads = query.size(1)
384
+ # k_num_heads = key.size(1)
385
+ # v_num_heads = value.size(1)
386
+
387
+ # output_num_heads = max(q_num_heads, k_num_heads, v_num_heads)
388
+
389
+ # head_dim_qk = query.size(3)
390
+ # head_dim_v = value.size(3)
391
+
392
+ # q_t = query.transpose(1, 2)
393
+ # k_t = key.transpose(1, 2)
394
+ # v_t = value.transpose(1, 2)
395
+
396
+ # # Checks in sdp_utils ensure that if {*}_batch_size/{*}_num_heads !=
397
+ # # output_batch_size/num_heads then they are 1
398
+ # q_batch_size_needs_broadcast = q_batch_size != output_batch_size
399
+ # k_batch_size_needs_broadcast = k_batch_size != output_batch_size
400
+ # v_batch_size_needs_broadcast = v_batch_size != output_batch_size
401
+
402
+ # # If {*}_batch_size_needs_broadcast, then
403
+ # # (1) max_seqlen_batch_{*} is given by {*}_t.size(1)
404
+ # # this is because needs_broadcast indicates that the batch_size is 1
405
+ # # and hence there is only 1 value for seq_len
406
+ # # (2) The cum_seq_lens are given by [0, {*}_t.size(1), 2 * {*}_t.size(1),
407
+ # # ..., outut_batch_size * {*}_t.size(1)]
408
+ # # (3) Nnz_{*} is given by output_batch_size * {*}_t.size(1)
409
+
410
+ # if q_batch_size_needs_broadcast or not q_t.is_nested:
411
+ # max_seqlen_batch_q = q_t.size(1)
412
+ # cumulative_sequence_length_q = torch.arange(
413
+ # 0,
414
+ # (output_batch_size + 1) * max_seqlen_batch_q,
415
+ # max_seqlen_batch_q,
416
+ # device=q_t.device,
417
+ # dtype=torch.int32,
418
+ # )
419
+ # Nnz_q = output_batch_size * max_seqlen_batch_q
420
+ # else:
421
+ # (
422
+ # cumulative_sequence_length_q,
423
+ # max_seqlen_batch_q,
424
+ # Nnz_q,
425
+ # ) = _cumulative_and_max_seq_len_nnz(q_t)
426
+
427
+ # if k_batch_size_needs_broadcast and v_batch_size_needs_broadcast:
428
+ # assert k_t.size(1) == v_t.size(1)
429
+ # max_seqlen_batch_kv = k_t.size(1)
430
+ # cumulative_sequence_length_kv = torch.arange(
431
+ # 0,
432
+ # (output_batch_size + 1) * max_seqlen_batch_kv,
433
+ # max_seqlen_batch_kv,
434
+ # device=k_t.device,
435
+ # dtype=torch.int32,
436
+ # )
437
+ # Nnz_kv = output_batch_size * max_seqlen_batch_kv
438
+ # else:
439
+ # cumulative_sequence_length_kv, max_seqlen_batch_kv, Nnz_kv = (
440
+ # _cumulative_and_max_seq_len_nnz(v_t)
441
+ # if k_batch_size_needs_broadcast
442
+ # else _cumulative_and_max_seq_len_nnz(k_t)
443
+ # )
444
+
445
+ # q_num_heads_needs_broadcast = q_num_heads != output_num_heads
446
+ # k_num_heads_needs_broadcast = k_num_heads != output_num_heads
447
+ # v_num_heads_needs_broadcast = v_num_heads != output_num_heads
448
+
449
+ # if not q_t.is_nested:
450
+ # query_buffer_reshaped = q_t.expand(
451
+ # output_batch_size, q_t.size(1), output_num_heads, head_dim_qk
452
+ # )
453
+ # query_buffer_reshaped = query_buffer_reshaped.reshape(
454
+ # Nnz_q, output_num_heads, head_dim_qk
455
+ # )
456
+ # else:
457
+ # if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
458
+ # q_t = q_t.contiguous()
459
+ # # If we are broadcasting then Nnz_q will be the output_batch_size since
460
+ # # seq_len is 1
461
+ # effective_batch_size_q = (
462
+ # output_batch_size if q_batch_size_needs_broadcast else Nnz_q
463
+ # )
464
+ # query_buffer_reshaped = _view_as_dense(
465
+ # q_t, effective_batch_size_q, output_num_heads, head_dim_qk
466
+ # )
467
+
468
+ # # If the physical layout of the NestedTensor's storage
469
+ # # is not: batch, {seq_len}, num_heads, head_dim then we need
470
+ # # to call contiguous
471
+ # if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
472
+ # k_t = k_t.contiguous()
473
+ # if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
474
+ # v_t = v_t.contiguous()
475
+
476
+ # effective_batch_size_k = (
477
+ # output_batch_size if k_batch_size_needs_broadcast else Nnz_kv
478
+ # )
479
+ # key_buffer_reshaped = _view_as_dense(
480
+ # k_t, effective_batch_size_k, output_num_heads, head_dim_qk
481
+ # )
482
+
483
+ # effective_batch_size_v = (
484
+ # output_batch_size if v_batch_size_needs_broadcast else Nnz_kv
485
+ # )
486
+ # value_buffer_reshaped = _view_as_dense(
487
+ # v_t, effective_batch_size_v, output_num_heads, head_dim_v
488
+ # )
489
+
490
+ # if not q_batch_size_needs_broadcast:
491
+ # output_shape = q_t._size
492
+ # if head_dim_v != head_dim_qk:
493
+ # output_shape[-1] = head_dim_v
494
+ # if q_num_heads_needs_broadcast:
495
+ # output_shape[1] = output_num_heads
496
+ # else:
497
+ # output_shape = torch.empty(3, dtype=torch.int64, device=torch.device("cpu"))
498
+ # output_shape[0] = q_t.size(1)
499
+ # output_shape[1] = output_num_heads
500
+ # output_shape[2] = head_dim_v
501
+
502
+ # return (
503
+ # query_buffer_reshaped,
504
+ # key_buffer_reshaped,
505
+ # value_buffer_reshaped,
506
+ # cumulative_sequence_length_q,
507
+ # cumulative_sequence_length_kv,
508
+ # max_seqlen_batch_q,
509
+ # max_seqlen_batch_kv,
510
+ # output_shape,
511
+ # )
512
+
513
+
514
+ def _sdpa_nested_preprocessing(query, key, value):
515
+ # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
516
+ # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
517
+ # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
518
+ q_batch_size = query.size(0)
519
+ k_batch_size = key.size(0)
520
+ v_batch_size = value.size(0)
521
+
522
+ q_num_heads = query.size(1)
523
+ k_num_heads = key.size(1)
524
+ v_num_heads = value.size(1)
525
+
526
+ if not (q_batch_size == k_batch_size and q_batch_size == v_batch_size) or not (
527
+ q_num_heads == k_num_heads and k_num_heads == v_num_heads
528
+ ):
529
+ raise RuntimeError(
530
+ "This path is currently not implemented for jagged layout NT."
531
+ )
532
+ # return _sdpa_nested_preprocessing_with_broadcast(query, key, value)
533
+
534
+ num_heads = query.size(1)
535
+ head_dim_qk = query.size(3)
536
+ head_dim_v = value.size(3)
537
+ q_t = query.transpose(1, 2)
538
+ k_t = key.transpose(1, 2)
539
+ v_t = value.transpose(1, 2)
540
+
541
+ (
542
+ cumulative_sequence_length_q,
543
+ max_seqlen_batch_q,
544
+ Nnz_q,
545
+ ) = _cumulative_and_max_seq_len_nnz(q_t)
546
+ (
547
+ cumulative_sequence_length_kv,
548
+ max_seqlen_batch_kv,
549
+ Nnz_kv,
550
+ ) = _cumulative_and_max_seq_len_nnz(k_t)
551
+
552
+ # [TODO] K and V have to have the same Nnz, should probably torch_check
553
+ # assume in order to not iterate over v
554
+
555
+ # If the physical layout of the NestedTensor's storage
556
+ # is not: batch, {seq_len}, num_heads, head_dim then we need
557
+ # to call contiguous
558
+ if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
559
+ q_t = q_t.contiguous()
560
+ if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
561
+ k_t = k_t.contiguous()
562
+ if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
563
+ v_t = v_t.contiguous()
564
+
565
+ query_buffer_reshaped = _view_as_dense(q_t, Nnz_q, num_heads, head_dim_qk)
566
+ key_buffer_reshaped = _view_as_dense(k_t, Nnz_kv, num_heads, head_dim_qk)
567
+ value_buffer_reshaped = _view_as_dense(v_t, Nnz_kv, num_heads, head_dim_v)
568
+
569
+ output_nt_info = {
570
+ "offsets": q_t.offsets(),
571
+ "_max_seqlen": q_t._get_max_seqlen(),
572
+ "_min_seqlen": q_t._get_min_seqlen(),
573
+ }
574
+
575
+ return (
576
+ query_buffer_reshaped,
577
+ key_buffer_reshaped,
578
+ value_buffer_reshaped,
579
+ cumulative_sequence_length_q,
580
+ cumulative_sequence_length_kv,
581
+ max_seqlen_batch_q,
582
+ max_seqlen_batch_kv,
583
+ output_nt_info,
584
+ )
585
+
586
+
587
+ def _pad_last_dim(
588
+ tensor: torch.Tensor, alignment_size: int, slice: bool
589
+ ) -> torch.Tensor:
590
+ # FlashAttentionV2 requires that head dimension be a multiple of 8
591
+ # This was previously done within the kernel, however
592
+ # This causes the kernel to maybe alias query, key, value
593
+ # So instead we pad the head_dimensions to be a multiple of 8
594
+ # in the composite region
595
+ last_dim_size = tensor.size(-1)
596
+ if last_dim_size % alignment_size == 0:
597
+ return tensor
598
+ pad_count = alignment_size - (last_dim_size % alignment_size)
599
+ tensor = torch.nn.functional.pad(tensor, [0, pad_count])
600
+ if slice:
601
+ return tensor[..., 0:last_dim_size]
602
+ return tensor
603
+
604
+
605
+ # TODO: coalesce with torch/nn/utils/attention.py
606
+ def _calculate_scale(query, scale):
607
+ # TODO: Investigate why math.sqrt() isn't properly handled by Dynamo?
608
+ softmax_scale = scale if scale is not None else torch.sym_sqrt(1.0 / query.size(-1))
609
+ return softmax_scale
610
+
611
+
612
+ def _post_process_flash_output(out: torch.Tensor, og_size):
613
+ if not out.is_nested and out.size(-1) != og_size:
614
+ out = out[..., 0:og_size]
615
+ return out
616
+
617
+
618
+ def _is_computing_meta_flops(x):
619
+ # Note: there's a use case of using meta tensors & the dispatch-based flop counter.
620
+ # We can use this function to check for this scenario in order to handle it specially.
621
+ if not torch.jit.is_scripting() and x.device.type == "meta":
622
+ torch_dispatch_mode_stack = (
623
+ torch.utils._python_dispatch._get_current_dispatch_mode_stack()
624
+ )
625
+ return any(
626
+ type(x) == torch.utils.flop_counter.FlopCounterMode
627
+ for x in torch_dispatch_mode_stack
628
+ )
629
+ return False
630
+
631
+
632
+ def _autocast(
633
+ query: torch.Tensor,
634
+ key: torch.Tensor,
635
+ value: torch.Tensor,
636
+ attn_mask: Optional[torch.Tensor],
637
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
638
+ """
639
+ [Autocasting SDPA for NJT]
640
+
641
+ Normal autocasting doesn't work for NJT+SDPA right now:
642
+ * NJT intercepts the __torch_function__ call for scaled_dot_product_attention, which happens
643
+ before we get to any aten ops or dispatcher logic; then the torch_function logic calls into
644
+ efficient attention or flash attention. So, autocasting on the scaled_dot_product_attention
645
+ op won't work because we never see that aten op.
646
+ * If we put autocasting on `_flash_attention_forward`, then we'll get autocasting to run, but
647
+ the kernel selection logic in torch_function handling (ie. jagged_scaled_dot_product_attention)
648
+ won't work correctly: the kernel selection logic will run before autocasting, and choose
649
+ a kernel based on the un-autocasted dtypes; but then autocasting will run and the actual
650
+ attention computation will happen in a different dtype.
651
+
652
+ An alternative is to just change the backend selection logic for SDPA+NJT to be autocast-aware
653
+ and rely on autocasting to do the actual conversions for flash attention / efficient attention.
654
+ However, by manually doing the actual autocast before the backend selection, we ensure that the
655
+ autocast handling for backend selection doesn't diverge from the autocast handling for the
656
+ actual dtype conversions.
657
+ """
658
+ device_type = query.device.type
659
+ # meta device is not supported by autocast, so break early for it
660
+ if _is_computing_meta_flops(query) or not torch.is_autocast_enabled(device_type):
661
+ return query, key, value, attn_mask
662
+
663
+ def cvt(x):
664
+ if x is None:
665
+ return x
666
+ target_dtype = torch.get_autocast_dtype(device_type)
667
+ if (
668
+ (not x.dtype.is_floating_point)
669
+ or x.dtype == target_dtype
670
+ or x.dtype == torch.float64
671
+ ):
672
+ return x
673
+ return x.to(target_dtype)
674
+
675
+ return cvt(query), cvt(key), cvt(value), cvt(attn_mask)
676
+
677
+
678
+ def jagged_scaled_dot_product_attention(
679
+ query: torch.Tensor,
680
+ key: torch.Tensor,
681
+ value: torch.Tensor,
682
+ attn_mask: Optional[torch.Tensor] = None,
683
+ dropout_p=0.0,
684
+ is_causal=False,
685
+ scale=None,
686
+ enable_gqa=False,
687
+ ):
688
+ query, key, value, attn_mask = _autocast(query, key, value, attn_mask)
689
+ _validate_sdpa_input(query, key, value, attn_mask, dropout_p, is_causal, scale)
690
+ # for mypy, ugh
691
+ assert (
692
+ isinstance(query, NestedTensor)
693
+ and isinstance(key, NestedTensor)
694
+ and isinstance(value, NestedTensor)
695
+ )
696
+ from torch.nested._internal.nested_tensor import nested_view_from_values_offsets
697
+
698
+ # Special path for non-ragged sequence length (e.g. for SAM where we have a ragged
699
+ # second batch dim instead). For this case, we can just send the dense buffers through
700
+ # vanilla SDPA.
701
+ if query.dim() > 3 and key.dim() > 3 and value.dim() > 3 and query._ragged_idx == 1:
702
+ output = F.scaled_dot_product_attention(
703
+ query.values(),
704
+ key.values(),
705
+ value.values(),
706
+ attn_mask=(
707
+ attn_mask.values() if isinstance(attn_mask, NestedTensor) else attn_mask
708
+ ),
709
+ dropout_p=dropout_p,
710
+ is_causal=is_causal,
711
+ scale=scale,
712
+ )
713
+ return nested_view_from_values_offsets(output, query.offsets())
714
+
715
+ compute_logsumexp = query.requires_grad or key.requires_grad or value.requires_grad
716
+
717
+ backend_choice = _select_sdp_backend(
718
+ query, key, value, attn_mask, dropout_p, is_causal, enable_gqa
719
+ )
720
+
721
+ if _is_computing_meta_flops(query):
722
+ # Backend choice will probably not be correct if we have a meta device,
723
+ # because backend choice is device-aware. In this case, we mostly just
724
+ # want to avoid using math backend (which does a .item() call).
725
+ # Arbitrarily choose flash attention.
726
+ backend_choice = SDPBackend.FLASH_ATTENTION
727
+
728
+ if backend_choice == SDPBackend.FLASH_ATTENTION:
729
+ og_size = query.size(-1)
730
+ query_padded = _pad_last_dim(query, 8, False)
731
+ key_padded = _pad_last_dim(key, 8, False)
732
+ value_padded = _pad_last_dim(value, 8, False)
733
+ # We need to calculate the scale based off the OG head dim size
734
+ og_scale = _calculate_scale(query, scale)
735
+ (
736
+ query_buffer_reshaped,
737
+ key_buffer_reshaped,
738
+ value_buffer_reshaped,
739
+ cumulative_sequence_length_q,
740
+ cumulative_sequence_length_kv,
741
+ max_seqlen_batch_q,
742
+ max_seqlen_batch_kv,
743
+ output_nt_info,
744
+ ) = _sdpa_nested_preprocessing(query_padded, key_padded, value_padded)
745
+
746
+ (
747
+ attention,
748
+ logsumexp,
749
+ philox_seed,
750
+ philox_offset,
751
+ debug_attn_mask,
752
+ ) = torch.ops.aten._flash_attention_forward(
753
+ query_buffer_reshaped,
754
+ key_buffer_reshaped,
755
+ value_buffer_reshaped,
756
+ cumulative_sequence_length_q,
757
+ cumulative_sequence_length_kv,
758
+ max_seqlen_batch_q,
759
+ max_seqlen_batch_kv,
760
+ dropout_p,
761
+ is_causal,
762
+ False,
763
+ scale=og_scale,
764
+ )
765
+
766
+ # Reshape output to convert nnz to batch_size and seq_len
767
+ attention = nested_view_from_values_offsets(
768
+ attention, # output from flash_attn is [total_q, num_heads, head_size_og]
769
+ output_nt_info["offsets"],
770
+ min_seqlen=output_nt_info["_min_seqlen"],
771
+ max_seqlen=output_nt_info["_max_seqlen"],
772
+ ).transpose(1, 2)
773
+ return _post_process_flash_output(attention, og_size)
774
+ elif backend_choice == SDPBackend.EFFICIENT_ATTENTION:
775
+ (
776
+ query_reshaped,
777
+ key_reshaped,
778
+ value_reshaped,
779
+ cumulative_sequence_length_q,
780
+ cumulative_sequence_length_kv,
781
+ max_seqlen_batch_q,
782
+ max_seqlen_batch_kv,
783
+ output_nt_info,
784
+ ) = _sdpa_nested_preprocessing(query, key, value)
785
+ (
786
+ attention,
787
+ log_sumexp,
788
+ seed,
789
+ offset,
790
+ max_seqlen_q,
791
+ max_seqlen_batch_kv,
792
+ ) = torch.ops.aten._efficient_attention_forward(
793
+ query_reshaped.unsqueeze(0),
794
+ key_reshaped.unsqueeze(0),
795
+ value_reshaped.unsqueeze(0),
796
+ None,
797
+ cumulative_sequence_length_q,
798
+ cumulative_sequence_length_kv,
799
+ max_seqlen_batch_q,
800
+ max_seqlen_batch_kv,
801
+ dropout_p,
802
+ int(is_causal),
803
+ compute_logsumexp,
804
+ scale=scale,
805
+ )
806
+
807
+ # Reshape output to convert nnz to batch_size and seq_len
808
+ return nested_view_from_values_offsets(
809
+ attention.squeeze(0),
810
+ output_nt_info["offsets"],
811
+ min_seqlen=output_nt_info["_min_seqlen"],
812
+ max_seqlen=output_nt_info["_max_seqlen"],
813
+ ).transpose(1, 2)
814
+ elif backend_choice == SDPBackend.MATH:
815
+ # save the offsets and shape of the inputs, so we can reshape the final output
816
+ # query @ key = attn: [B, D1, j0, D'] @ [B, D1, D' j1] = [B, D1, j0, j1]
817
+ # attn @ value = out: [B, D1, j0, j1] @ [B, D1, j1, D2] = [B, D1, j0, D2]
818
+ offsets = query.offsets()
819
+ d1 = query._size[1]
820
+ d2 = value._size[-1]
821
+
822
+ min_seqlen_tensor = query._metadata_cache.get(
823
+ "min_seqlen", None
824
+ ) # type: ignore[attr-defined]
825
+ max_seqlen_tensor = query._metadata_cache.get(
826
+ "max_seqlen", None
827
+ ) # type: ignore[attr-defined]
828
+
829
+ # convert jagged layout Nested Tensor to strided layout Nested Tensor
830
+ # which support the math implementation of SDPA
831
+ def get_strided_layout_nested_tensor(jagged_layout_nt):
832
+ lengths = jagged_layout_nt._offsets[1:] - jagged_layout_nt._offsets[:-1]
833
+ transpose = torch.transpose(jagged_layout_nt, 1, 2)
834
+ tensor_list = transpose.values().split(list(lengths), dim=0)
835
+ strided_nt = torch.nested.as_nested_tensor(list(tensor_list))
836
+ strided_nt = strided_nt.transpose(1, 2).contiguous()
837
+ return strided_nt
838
+
839
+ query = get_strided_layout_nested_tensor(query)
840
+ key = get_strided_layout_nested_tensor(key)
841
+ value = get_strided_layout_nested_tensor(value)
842
+
843
+ attn_out = torch._scaled_dot_product_attention_math(
844
+ query, key, value, attn_mask, dropout_p, is_causal, scale=scale
845
+ )[0]
846
+
847
+ from torch.nested._internal.nested_tensor import _load_val_from_tensor
848
+
849
+ # convert strided layout Nested Tensor back to jagged layout Nested Tensor
850
+ attn_out = attn_out.transpose(1, 2).contiguous().values()
851
+ attn_out = attn_out.view(-1, d1, d2)
852
+ attn_out = nested_view_from_values_offsets(
853
+ attn_out,
854
+ offsets,
855
+ min_seqlen=(
856
+ None
857
+ if min_seqlen_tensor is None
858
+ else _load_val_from_tensor(min_seqlen_tensor)
859
+ ),
860
+ max_seqlen=(
861
+ None
862
+ if max_seqlen_tensor is None
863
+ else _load_val_from_tensor(max_seqlen_tensor)
864
+ ),
865
+ ).transpose(1, 2)
866
+
867
+ return attn_out
868
+ else:
869
+ raise RuntimeError(
870
+ "No viable backend for scaled_dot_product_attention was found."
871
+ )
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/_adafactor.cpython-310.pyc ADDED
Binary file (20.8 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc ADDED
Binary file (2.08 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc ADDED
Binary file (9.43 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (35.6 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc ADDED
Binary file (16.3 kB). View file
 
pllava/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ :mod:`torch.optim._multi_tensor` is a package implementing various optimization algorithms.
3
+
4
+ Most commonly used methods are already supported, and the interface is general
5
+ enough, so that more sophisticated ones can be also easily integrated in the
6
+ future.
7
+ """
8
+ from functools import partialmethod
9
+
10
+ from torch import optim
11
+
12
+
13
+ def partialclass(cls, *args, **kwargs): # noqa: D103
14
+ class NewCls(cls):
15
+ __init__ = partialmethod(cls.__init__, *args, **kwargs)
16
+
17
+ return NewCls
18
+
19
+
20
+ Adam = partialclass(optim.Adam, foreach=True)
21
+ AdamW = partialclass(optim.AdamW, foreach=True)
22
+ NAdam = partialclass(optim.NAdam, foreach=True)
23
+ SGD = partialclass(optim.SGD, foreach=True)
24
+ RAdam = partialclass(optim.RAdam, foreach=True)
25
+ RMSprop = partialclass(optim.RMSprop, foreach=True)
26
+ Rprop = partialclass(optim.Rprop, foreach=True)
27
+ ASGD = partialclass(optim.ASGD, foreach=True)
28
+ Adamax = partialclass(optim.Adamax, foreach=True)
29
+ Adadelta = partialclass(optim.Adadelta, foreach=True)
30
+ Adagrad = partialclass(optim.Adagrad, foreach=True)
pllava/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
pllava/lib/python3.10/site-packages/torch/quantization/__init__.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from .fake_quantize import * # noqa: F403
3
+ from .fuse_modules import fuse_modules
4
+ from .fuser_method_mappings import * # noqa: F403
5
+ from .observer import * # noqa: F403
6
+ from .qconfig import * # noqa: F403
7
+ from .quant_type import * # noqa: F403
8
+ from .quantization_mappings import * # noqa: F403
9
+ from .quantize import * # noqa: F403
10
+ from .quantize_jit import * # noqa: F403
11
+ from .stubs import * # noqa: F403
12
+
13
+
14
+ def default_eval_fn(model, calib_data):
15
+ r"""
16
+ Default evaluation function takes a torch.utils.data.Dataset or a list of
17
+ input Tensors and run the model on the dataset
18
+ """
19
+ for data, target in calib_data:
20
+ model(data)
21
+
22
+
23
+ __all__ = [
24
+ "QuantWrapper",
25
+ "QuantStub",
26
+ "DeQuantStub",
27
+ # Top level API for eager mode quantization
28
+ "quantize",
29
+ "quantize_dynamic",
30
+ "quantize_qat",
31
+ "prepare",
32
+ "convert",
33
+ "prepare_qat",
34
+ # Top level API for graph mode quantization on TorchScript
35
+ "quantize_jit",
36
+ "quantize_dynamic_jit",
37
+ "_prepare_ondevice_dynamic_jit",
38
+ "_convert_ondevice_dynamic_jit",
39
+ "_quantize_ondevice_dynamic_jit",
40
+ # Top level API for graph mode quantization on GraphModule(torch.fx)
41
+ # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
42
+ # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
43
+ "QuantType", # quantization type
44
+ # custom module APIs
45
+ "get_default_static_quant_module_mappings",
46
+ "get_static_quant_module_class",
47
+ "get_default_dynamic_quant_module_mappings",
48
+ "get_default_qat_module_mappings",
49
+ "get_default_qconfig_propagation_list",
50
+ "get_default_compare_output_module_list",
51
+ "get_quantized_operator",
52
+ "get_fuser_method",
53
+ # Sub functions for `prepare` and `swap_module`
54
+ "propagate_qconfig_",
55
+ "add_quant_dequant",
56
+ "swap_module",
57
+ "default_eval_fn",
58
+ # Observers
59
+ "ObserverBase",
60
+ "WeightObserver",
61
+ "HistogramObserver",
62
+ "observer",
63
+ "default_observer",
64
+ "default_weight_observer",
65
+ "default_placeholder_observer",
66
+ "default_per_channel_weight_observer",
67
+ # FakeQuantize (for qat)
68
+ "default_fake_quant",
69
+ "default_weight_fake_quant",
70
+ "default_fixed_qparams_range_neg1to1_fake_quant",
71
+ "default_fixed_qparams_range_0to1_fake_quant",
72
+ "default_per_channel_weight_fake_quant",
73
+ "default_histogram_fake_quant",
74
+ # QConfig
75
+ "QConfig",
76
+ "default_qconfig",
77
+ "default_dynamic_qconfig",
78
+ "float16_dynamic_qconfig",
79
+ "float_qparams_weight_only_qconfig",
80
+ # QAT utilities
81
+ "default_qat_qconfig",
82
+ "prepare_qat",
83
+ "quantize_qat",
84
+ # module transformations
85
+ "fuse_modules",
86
+ ]
pllava/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (993 Bytes). View file
 
pllava/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc ADDED
Binary file (789 Bytes). View file