ZTWHHH committed on
Commit
c0208d8
·
verified ·
1 Parent(s): 029e743

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. llava_next/share/terminfo/n/nansi.sysk +0 -0
  3. llava_next/share/terminfo/n/ncr160vt100wpp +0 -0
  4. llava_next/share/terminfo/n/ncr160vt200pp +0 -0
  5. llava_next/share/terminfo/n/ncr260intan +0 -0
  6. llava_next/share/terminfo/n/ncr260vt100wpp +0 -0
  7. llava_next/share/terminfo/n/ncr260vt300an +0 -0
  8. llava_next/share/terminfo/n/ncr260wy50+pp +0 -0
  9. llava_next/share/terminfo/n/ncrvt100an +0 -0
  10. llava_next/share/terminfo/n/ncrvt100wpp +0 -0
  11. llava_next/share/terminfo/n/ncsa-m +0 -0
  12. llava_next/share/terminfo/n/ncsa-vt220 +0 -0
  13. llava_next/share/terminfo/n/ndr9500-25-nl +0 -0
  14. llava_next/share/terminfo/n/netbsd6 +0 -0
  15. llava_next/share/terminfo/n/news-42-euc +0 -0
  16. llava_next/share/terminfo/n/news29 +0 -0
  17. llava_next/share/terminfo/n/nsterm-256color +0 -0
  18. llava_next/share/terminfo/n/nsterm-7-s +0 -0
  19. llava_next/share/terminfo/n/nsterm-acs +0 -0
  20. llava_next/share/terminfo/n/nsterm-acs-m +0 -0
  21. llava_next/share/terminfo/n/nsterm-c-s-7 +0 -0
  22. llava_next/share/terminfo/n/ntconsole-35 +0 -0
  23. llava_next/share/terminfo/n/ntconsole-35-nti +0 -0
  24. llava_next/share/terminfo/n/ntconsole-50-nti +0 -0
  25. llava_next/share/terminfo/n/nwe501 +0 -0
  26. parrot/lib/python3.10/site-packages/torch/_custom_op/__init__.py +0 -0
  27. parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/torch/_custom_op/autograd.py +275 -0
  32. parrot/lib/python3.10/site-packages/torch/_custom_op/functional.py +188 -0
  33. parrot/lib/python3.10/site-packages/torch/_custom_op/impl.py +873 -0
  34. parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc +3 -0
  35. parrot/lib/python3.10/site-packages/torch/_library/__init__.py +6 -0
  36. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/autograd.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/custom_ops.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/fake_class_registry.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/infer_schema.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/torch/_library/abstract_impl.py +209 -0
  44. parrot/lib/python3.10/site-packages/torch/_library/autograd.py +226 -0
  45. parrot/lib/python3.10/site-packages/torch/_library/custom_ops.py +573 -0
  46. parrot/lib/python3.10/site-packages/torch/_library/fake_class_registry.py +293 -0
  47. parrot/lib/python3.10/site-packages/torch/_library/infer_schema.py +164 -0
  48. parrot/lib/python3.10/site-packages/torch/_library/simple_registry.py +44 -0
  49. parrot/lib/python3.10/site-packages/torch/_library/utils.py +258 -0
  50. parrot/lib/python3.10/site-packages/torch/ao/__init__.py +17 -0
.gitattributes CHANGED
@@ -824,3 +824,5 @@ pllava/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filter=lfs d
824
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_dtypes.so filter=lfs diff=lfs merge=lfs -text
825
  videochat2/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text
826
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_python_memory_checker_helper.so filter=lfs diff=lfs merge=lfs -text
 
 
 
824
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_dtypes.so filter=lfs diff=lfs merge=lfs -text
825
  videochat2/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text
826
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_python_memory_checker_helper.so filter=lfs diff=lfs merge=lfs -text
827
+ parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
828
+ parrot/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 filter=lfs diff=lfs merge=lfs -text
llava_next/share/terminfo/n/nansi.sysk ADDED
Binary file (1.88 kB). View file
 
llava_next/share/terminfo/n/ncr160vt100wpp ADDED
Binary file (1.61 kB). View file
 
llava_next/share/terminfo/n/ncr160vt200pp ADDED
Binary file (1.82 kB). View file
 
llava_next/share/terminfo/n/ncr260intan ADDED
Binary file (2 kB). View file
 
llava_next/share/terminfo/n/ncr260vt100wpp ADDED
Binary file (1.61 kB). View file
 
llava_next/share/terminfo/n/ncr260vt300an ADDED
Binary file (1.82 kB). View file
 
llava_next/share/terminfo/n/ncr260wy50+pp ADDED
Binary file (1.2 kB). View file
 
llava_next/share/terminfo/n/ncrvt100an ADDED
Binary file (1.49 kB). View file
 
llava_next/share/terminfo/n/ncrvt100wpp ADDED
Binary file (1.51 kB). View file
 
llava_next/share/terminfo/n/ncsa-m ADDED
Binary file (1.94 kB). View file
 
llava_next/share/terminfo/n/ncsa-vt220 ADDED
Binary file (2.15 kB). View file
 
llava_next/share/terminfo/n/ndr9500-25-nl ADDED
Binary file (955 Bytes). View file
 
llava_next/share/terminfo/n/netbsd6 ADDED
Binary file (1.85 kB). View file
 
llava_next/share/terminfo/n/news-42-euc ADDED
Binary file (1.45 kB). View file
 
llava_next/share/terminfo/n/news29 ADDED
Binary file (1.43 kB). View file
 
llava_next/share/terminfo/n/nsterm-256color ADDED
Binary file (1.99 kB). View file
 
llava_next/share/terminfo/n/nsterm-7-s ADDED
Binary file (1.35 kB). View file
 
llava_next/share/terminfo/n/nsterm-acs ADDED
Binary file (1.38 kB). View file
 
llava_next/share/terminfo/n/nsterm-acs-m ADDED
Binary file (1.22 kB). View file
 
llava_next/share/terminfo/n/nsterm-c-s-7 ADDED
Binary file (1.63 kB). View file
 
llava_next/share/terminfo/n/ntconsole-35 ADDED
Binary file (1.46 kB). View file
 
llava_next/share/terminfo/n/ntconsole-35-nti ADDED
Binary file (1.45 kB). View file
 
llava_next/share/terminfo/n/ntconsole-50-nti ADDED
Binary file (1.45 kB). View file
 
llava_next/share/terminfo/n/nwe501 ADDED
Binary file (1.51 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_custom_op/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (168 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc ADDED
Binary file (8.88 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc ADDED
Binary file (5.95 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc ADDED
Binary file (30.9 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_custom_op/autograd.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.utils._pytree as pytree
4
+ from collections import namedtuple
5
+ import functools
6
+
7
+
8
# NOTE [CustomOp autograd kernel indirection]
# `inner` is registered as the autograd kernel for this custom_op. It either
# dispatches to the autograd formula the user registered, or falls back to an
# `autograd_not_implemented` kernel.
#
# The indirection exists so that the effective autograd kernel can be swapped
# out later (the PyTorch dispatcher does not let us re-register one directly):
# the default is the `autograd_not_implemented` behavior, and the user may
# later register an actual backward formula.
def autograd_kernel_indirection(custom_op):
    autograd_fallback = autograd_not_implemented(custom_op)

    def inner(*args, **kwargs):
        if custom_op._has_impl('autograd'):
            return custom_op._get_impl('autograd').func(*args, **kwargs)
        # Per NOTE ["backward", "save_for_backward", and "autograd"]: the
        # "autograd" impl is generated once the user supplies BOTH "backward"
        # and "save_for_backward". If only one half of the pair exists, the
        # user made a mistake -- tell them which half is missing.
        has_backward = custom_op._has_impl('backward')
        has_save = custom_op._has_impl('save_for_backward')
        if has_save or has_backward:
            missing = 'save_for_backward' if has_backward else 'backward'
            found = 'save_for_backward' if missing == 'backward' else 'backward'
            loc = custom_op._get_impl(found).location
            raise RuntimeError(
                f"We found a '{found}' registration for {custom_op} at "
                f"{loc} but were unable to find a '{missing}' registration. "
                f"To use the CustomOp API to register a backward formula, "
                f"please provide us both a backward function and a "
                f"'save for backward' function via `impl_backward` and "
                f"`impl_save_for_backward` respectively.")
        return autograd_fallback(*args, **kwargs)
    return inner
45
+
46
+
47
+ # TODO(#101191): Use the actual C++ autograd not implemented fallback,
48
+ # or change the default autograd fallback to the autograd not implemented fallback.
49
+ def autograd_not_implemented(custom_op):
50
+ def kernel(*args, **kwargs):
51
+ if torch.is_grad_enabled() and pytree.tree_any(
52
+ lambda x: isinstance(x, torch.Tensor) and x.requires_grad, (args, kwargs)
53
+ ):
54
+ raise RuntimeError("Autograd has not been implemented for operator")
55
+ with torch._C._AutoDispatchBelowAutograd():
56
+ return custom_op(*args, **kwargs)
57
+ return kernel
58
+
59
+
60
def mark_non_differentiable(ctx, output, output_differentiability):
    """Tell autograd which outputs must not receive gradients.

    Output types are restricted to be:
    - Tensor
    - Tensor[]
    - int, bool, Scalar, float
    See _check_can_register_backward
    """
    if output_differentiability is None:
        return
    tuple_output = output if isinstance(output, tuple) else (output,)  # type: ignore[assignment]
    assert len(output_differentiability) == len(tuple_output)
    non_differentiable_tensors = []
    for idx, (differentiable, out) in enumerate(
            zip(output_differentiability, tuple_output)):
        if isinstance(out, torch.Tensor):
            if not differentiable:
                non_differentiable_tensors.append(out)
            continue
        if isinstance(out, list):
            if not differentiable:
                non_differentiable_tensors.extend(out)
            continue
        # Non-Tensor outputs can never be marked differentiable.
        if differentiable:
            raise RuntimeError(
                f"With output_differentiability={output_differentiability}. "
                f"At idx {idx}, we received an object of type {type(out)} that "
                f"is not a Tensor, so it cannot have be marked as differentiable in "
                f"output_differentiability.")
    if non_differentiable_tensors:
        ctx.mark_non_differentiable(*non_differentiable_tensors)
90
+
91
+
92
def construct_autograd_kernel(
        schema,
        output_differentiability,
        custom_op,
        op_overload,
        save_for_backward_fn,
        backward_fn):
    """Glue the user's save_for_backward/backward functions into a dynamically
    generated torch.autograd.Function and return a kernel that applies it."""

    def apply(*args):
        leaves, in_spec = pytree.tree_flatten(args)
        out_spec = None

        def forward(ctx, *flat_args):
            ctx.set_materialize_grads(True)
            unflat_args = pytree.tree_unflatten(list(flat_args), in_spec)
            with torch._C._AutoDispatchBelowAutograd():
                output = op_overload(*unflat_args)

            # Remember the *types* of the args so backward can emit better
            # error messages.
            args_info = namedtuple_args(
                schema, pytree.tree_map(type, unflat_args))

            to_save = save_for_backward_fn(
                namedtuple_args(schema, unflat_args), output)

            save_pytree_for_backward(ctx, (to_save, args_info))
            mark_non_differentiable(ctx, output, output_differentiability)

            nonlocal out_spec
            flat_output, out_spec = pytree.tree_flatten(output)
            return tuple(flat_output)

        def backward(ctx, *flat_grad_output):
            assert out_spec is not None
            grads = pytree.tree_unflatten(list(flat_grad_output), out_spec)
            saved, args_info = unpack_saved(ctx)
            # The inner ctx is an empty placeholder for now; fields may be
            # added to it in the future.
            inner_ctx = object()
            if not isinstance(grads, tuple):
                grads = (grads,)
            grad_inputs_dict = backward_fn(inner_ctx, saved, *grads)

            # Massage the grad_inputs_dict into the flat tuple that
            # autograd.Function expects.
            validate_grad_inputs_dict(grad_inputs_dict, custom_op, args_info)
            return grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info)

        autograd_cls = gen_autograd_function(
            custom_op._opname + '_customop', forward, backward)

        flat_output = autograd_cls.apply(*leaves)
        assert out_spec is not None
        return pytree.tree_unflatten(list(flat_output), out_spec)
    return apply
147
+
148
+
149
def gen_autograd_function(name, forward, backward):
    """Dynamically create a torch.autograd.Function subclass called *name*
    whose forward/backward are the given callables (as staticmethods)."""
    members = {
        'forward': staticmethod(forward),
        'backward': staticmethod(backward),
    }
    return type(name, (torch.autograd.Function,), members)
159
+
160
+
161
@functools.lru_cache
def namedtuple_args_cls(schema):
    """Return (and memoize, keyed on *schema*) a namedtuple type whose fields
    mirror the schema's argument names."""
    field_names = [arg.name for arg in schema.arguments.flat_all]
    # mypy doesn't support dynamic namedtuple name
    return namedtuple(str(schema.name) + "_args", field_names)  # type: ignore[misc]
168
+
169
+
170
def namedtuple_args(schema, args):
    """Pack the tuple *args* into the schema's argument namedtuple."""
    assert isinstance(args, tuple)
    return namedtuple_args_cls(schema)(*args)
174
+
175
+
176
def validate_grad_inputs_dict(grad_inputs_dict, forward_op, args_info):
    """Check that the dict returned by a user backward function is well
    formed: keyed exactly by the tensor-like arg names, with each value being
    None, a Tensor, or a list thereof matching the arg's structure.

    Raises RuntimeError (with the backward's registration location) otherwise.
    """
    def error(what):
        backward = forward_op._get_impl('backward')
        raise RuntimeError(
            f"In the backward function defined for {forward_op} at "
            f"{backward.location} using the CustomOp API, {what}")

    if not isinstance(grad_inputs_dict, dict):
        error(f"expected the output of the backward function to be a dict but "
              f"got {type(grad_inputs_dict)}")

    expected_keys = {arg.name for arg in forward_op._schema.arguments.flat_all
                     if arg.type.is_tensor_like()}
    actual_keys = grad_inputs_dict.keys()
    if expected_keys != actual_keys:
        error(f"expected the returned grad_input dict to have keys "
              f"{expected_keys} but got {actual_keys}. The backward "
              f"function must return a gradient (can be None) for each arg "
              f"to the CustomOp that may be a Tensor or Sequence[Tensor]. "
              f"Args declared to be non-Tensor-like types should not appear "
              f"in the grad_input dict")

    for name, grad in grad_inputs_dict.items():
        arg_info = getattr(args_info, name)

        if isinstance(arg_info, list):
            # Sequence[Tensor] arg: grad must be a same-length sequence of
            # None-or-Tensor entries.
            if not isinstance(grad, (tuple, list)):
                error(f"for input '{name}' expected the grad_input dict to "
                      f"hold a list of gradients but got object of type "
                      f"{type(grad)}.")
            if len(grad) != len(arg_info):
                error(f"for input '{name}' expected the grad_input dict to "
                      f"hold a list of {len(arg_info)} gradients but got "
                      f"{len(grad)}")
            for idx, (g, info) in enumerate(zip(grad, arg_info)):
                if g is None:
                    continue
                if not isinstance(g, torch.Tensor):
                    error(f"for input '{name}' expected the grad_input dict to "
                          f"hold a list of None or Tensor gradients but got "
                          f"object of {type(g)} at index {idx}")
                if not issubclass(info, torch.Tensor):
                    error(f"for input '{name}', got a Tensor as the gradient "
                          f"for the {idx}-th value but expected None because "
                          f"the {idx}-th value was not a Tensor (it was "
                          f"type {arg_info}")
            continue

        # Scalar Tensor arg: grad must be None or a Tensor.
        if grad is None:
            continue
        if not isinstance(grad, torch.Tensor):
            error(f"got object of type {type(grad)} as the gradient for input "
                  f"'{name}', "
                  f"but expected the gradient to be either None or a Tensor")
        if not issubclass(arg_info, torch.Tensor):
            error(f"got a Tensor as the gradient for input '{name}' but "
                  f"expected None as the gradient because input '{name}' "
                  f"was not a Tensor (it was type {arg_info}).")
234
+
235
+
236
+ def grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info):
237
+ result = []
238
+ for name, arg_info in args_info._asdict().items():
239
+ if name not in grad_inputs_dict:
240
+ result.append(pytree.tree_map(lambda x: None, arg_info))
241
+ continue
242
+ result.append(grad_inputs_dict[name])
243
+ return tuple(pytree.tree_leaves(result))
244
+
245
+ # Saves "stuff" (a pytree) onto the ctx object. Use unpack_saved to unpack it.
246
+ # autograd.Function prefers that users use ctx.save_for_backward to
247
+ # save Tensors (to avoid reference cycles) and for non-Tensors to go onto the
248
+ # ctx object.
249
+ def save_pytree_for_backward(ctx, stuff):
250
+ flat_stuff, spec = pytree.tree_flatten(stuff)
251
+ num_elts = len(flat_stuff)
252
+ tensor_idxs = [idx for idx, thing in enumerate(flat_stuff)
253
+ if isinstance(thing, torch.Tensor)]
254
+ non_tensor_idxs = [idx for idx, thing in enumerate(flat_stuff)
255
+ if not isinstance(thing, torch.Tensor)]
256
+ tensors = [thing for thing in flat_stuff if isinstance(thing, torch.Tensor)]
257
+ non_tensors = [thing for thing in flat_stuff if not isinstance(thing, torch.Tensor)]
258
+
259
+ ctx.spec = spec
260
+ ctx.num_elts = num_elts
261
+ ctx.save_for_backward(*tensors)
262
+ ctx.tensor_idxs = tensor_idxs
263
+ ctx.saved_non_tensors = non_tensors
264
+ ctx.non_tensor_idxs = non_tensor_idxs
265
+
266
+
267
# Inverse operation to save_pytree_for_backward
def unpack_saved(ctx):
    leaves = [None] * ctx.num_elts
    # Reinterleave tensors and non-tensors back into their original slots.
    for idx, tensor in zip(ctx.tensor_idxs, ctx.saved_tensors):
        leaves[idx] = tensor
    for idx, non_tensor in zip(ctx.non_tensor_idxs, ctx.saved_non_tensors):
        leaves[idx] = non_tensor
    return pytree.tree_unflatten(leaves, ctx.spec)
parrot/lib/python3.10/site-packages/torch/_custom_op/functional.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import weakref
3
+
4
+ import torch
5
+ import torch.utils._pytree as pytree
6
+ from torch._C import _ExcludeDispatchKeyGuard, DispatchKey, DispatchKeySet
7
+ from torch._ops import OpOverload
8
+ from torch.library import Library
9
+ from torchgen.model import (
10
+ BaseTy,
11
+ BaseType,
12
+ FunctionSchema,
13
+ OperatorName,
14
+ OptionalType,
15
+ SchemaKind,
16
+ )
17
+
18
+ from .autograd import autograd_not_implemented
19
+
20
+
21
+ def register_functional_op(
22
+ lib: Library,
23
+ new_op_name: str,
24
+ mutable_op: OpOverload,
25
+ ) -> None:
26
+ """Given a mutable operator, registers the functional variant.
27
+
28
+ This API also correctly links the functional variant with the mutable
29
+ operator for the purposes of functionalization.
30
+
31
+ All of the new registrations are performed on the ``lib`` passed in.
32
+
33
+ Arguments:
34
+ lib (Library): Should be a torch.library.Library object that has
35
+ the same namespace as ``mutable_op``'s namespace.
36
+ lib will be used to register the new functional op as well
37
+ as a functionalization kernel for the ``mutable_op``
38
+ If you don't have a library handy, use
39
+ ``torch.library.Library(ns, 'FRAGMENT')`` to construct one.
40
+ new_op_name (str): The name of the functional operator (without the
41
+ namespace). If no namespace, the new functional variant will be
42
+ accessible under ``torch.ops.{lib.ns}.new_op_name``.
43
+ mutable_op (OpOverload): The mutable custom operator. Note
44
+ that you may need to add a `.default` to it, like
45
+ `torch.ops.aten.abs_.default`.
46
+
47
+ """
48
+ validate(mutable_op)
49
+ schema = functional_schema(new_op_name, mutable_op)
50
+ lib.define(schema)
51
+
52
+ functional_impl = construct_functional_impl(mutable_op)
53
+ lib.impl(new_op_name, functional_impl, 'CompositeExplicitAutograd')
54
+
55
+ functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default
56
+
57
+ # There's no easy way for us to generate the autograd kernel, so we
58
+ # use autograd_not_implemented. Also, this makes it so that the user
59
+ # is unable to register an autograd formula themselves. This shouldn't
60
+ # be a problem if the user doesn't use the functional op direclty
61
+ # in their program, but we may need to revist this in the future.
62
+ lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd')
63
+
64
+ f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op)
65
+
66
+ lib.impl(mutable_op, f_kernel, 'Functionalize')
67
+
68
+
69
+ def construct_functional_impl(mutable_op):
70
+ def functional_impl(*args):
71
+ # Strategy:
72
+ # - clone args that would have been mutated
73
+ # - run mutable_op
74
+ # - return the cloned args as additional outputs
75
+ new_args = []
76
+ extra_rets = []
77
+ for is_write, arg in zip(mutable_args(mutable_op), args):
78
+ if is_write:
79
+ cloned = arg.clone() if arg is not None else None
80
+ new_args.append(cloned)
81
+ extra_rets.append(cloned)
82
+ else:
83
+ new_args.append(arg)
84
+ result = mutable_op(*new_args)
85
+ if result is None:
86
+ return tuple(extra_rets)
87
+ if isinstance(result, tuple):
88
+ return (*result, *extra_rets)
89
+ return (result, *extra_rets)
90
+ return functional_impl
91
+
92
+
93
+ def construct_functionalization_kernel(mutable_op, functional_op):
94
+ def kernel(*args):
95
+ # There's nothing to be functionalized!
96
+ # We can still end up here because DispatchKey::Functionalize is a mode key
97
+ if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args):
98
+ with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
99
+ return mutable_op(*args)
100
+
101
+ # NB: This differs from the codegen -- codegen handles cases where there
102
+ # are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper.
103
+ # This only really matters for XLA (mixed CPU-XLA tensors) and
104
+ # running functionalization without the PT2 stack (which guarantees to us that
105
+ # all tensors are FunctionalTensorWrapper).
106
+ if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args):
107
+ raise RuntimeError("{mutable_op}: expected all args to be FunctionalTensorWrapper")
108
+
109
+ unwrapped_args = []
110
+ for arg in args:
111
+ if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg):
112
+ torch._sync(arg)
113
+ unwrapped = torch._from_functional_tensor(arg)
114
+ unwrapped_args.append(unwrapped)
115
+ else:
116
+ unwrapped_args.append(arg)
117
+
118
+ with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
119
+ output = functional_op(*unwrapped_args)
120
+
121
+ num_actual_output = len(mutable_op._schema.returns)
122
+ actual_output = pytree.tree_map(
123
+ torch._to_functional_tensor, output[:num_actual_output])
124
+
125
+ new_values_to_propagate = output[num_actual_output:]
126
+ inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args)
127
+ if is_write]
128
+ assert len(new_values_to_propagate) == len(inputs_to_replace)
129
+ for new_value, arg in zip(new_values_to_propagate, inputs_to_replace):
130
+ if (arg is None and new_value is None) or (arg is not None and new_value is not None):
131
+ continue
132
+ torch._C._propagate_xla_data(arg, new_value)
133
+ torch._C._replace_(arg, new_value)
134
+ torch._C._commit_update(arg)
135
+ torch._sync(arg)
136
+
137
+ if len(actual_output) == 1:
138
+ return actual_output[0]
139
+ elif len(actual_output) == 0:
140
+ return None
141
+ return actual_output
142
+
143
+ return kernel
144
+
145
+
146
+ def validate(mutable_op: OpOverload):
147
+ if not isinstance(mutable_op, OpOverload):
148
+ raise TypeError(
149
+ f"register_functional_op(mutable_op): expected mutable_op to be instance of "
150
+ f"OpOverload but got {type(mutable_op)}")
151
+
152
+ # There are generally three types of "in-place" or "mutable" ops.
153
+ # Each of them have their own conventions:
154
+ # - inplace (first input modified in-place and returned as only output)
155
+ # - out= (some args modified in-place and returned as outputs)
156
+ # - mutable (some args modified in-place but none of those returned as outputs)
157
+ # In theory we can support all three, but we'll just support the last
158
+ # option right now for simplicity.
159
+ schema = FunctionSchema.parse(str(mutable_op._schema))
160
+ if not schema.kind() == SchemaKind.mutable:
161
+ raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)")
162
+ for ret in schema.returns:
163
+ # construct_functionalization_kernel assumes this for simplicity
164
+ if ret.annotation is not None:
165
+ raise NotImplementedError(
166
+ "NYI: register_functional_op(op) where op returns a mutated or aliased value. "
167
+ "Please file an issue (and as a workaround, modify your operator to "
168
+ "not return the mutated value or aliases)")
169
+ for arg in schema.arguments.flat_all:
170
+ # construct_functionalization_kernel assumes this for simplicity
171
+ if arg.type.is_tensor_like() and (
172
+ arg.type != BaseType(BaseTy.Tensor)
173
+ and arg.type != OptionalType(BaseType(BaseTy.Tensor))
174
+ ):
175
+ raise NotImplementedError(
176
+ "NYI: register_functional_op(op) where op has a List[Tensor] input."
177
+ "Please file an issue.")
178
+
179
+
180
+ def functional_schema(new_op_name, op: OpOverload):
181
+ schema = FunctionSchema.parse(str(op._schema))
182
+ schema = schema.signature().with_name(OperatorName.parse(new_op_name))
183
+ return str(schema)
184
+
185
+
186
+ def mutable_args(op: OpOverload):
187
+ return tuple(False if arg.alias_info is None else arg.alias_info.is_write
188
+ for arg in op._schema.arguments)
parrot/lib/python3.10/site-packages/torch/_custom_op/impl.py ADDED
@@ -0,0 +1,873 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import dataclasses
3
+ import functools
4
+ import inspect
5
+ import sys
6
+ import typing
7
+ import weakref
8
+
9
+ from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseType, ListType, BaseTy
10
+
11
+ import torch
12
+ import torch._C as _C
13
+ import torch.library as library
14
+ from torch._library.abstract_impl import AbstractImplCtx
15
+ from torch.library import get_ctx
16
+
17
+ from .autograd import autograd_kernel_indirection, construct_autograd_kernel
18
+ import torch._library.infer_schema
19
+ from torch._library.infer_schema import infer_schema
20
+
21
+ """
22
+ For a detailed guide on custom ops, please see
23
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
24
+
25
+ This file includes pieces of the implementation of our custom operator API.
26
+ """
27
+
28
+ __all__ = ["custom_op", "CustomOp", "get_ctx", "AbstractImplCtx"]
29
+
30
+
31
+ SUPPORTED_DEVICE_TYPE_TO_KEY = {
32
+ "cpu": "CPU",
33
+ "cuda": "CUDA",
34
+ }
35
+
36
+ # We will not let users register CustomOps with anything that could look like
37
+ # PyTorch internals to avoid confusion.
38
+ RESERVED_NS = {
39
+ "prim",
40
+ "prims",
41
+ "aten",
42
+ "at",
43
+ "torch",
44
+ "pytorch",
45
+ }
46
+
47
+
48
+ def custom_op(
49
+ qualname: str, manual_schema: typing.Optional[str] = None
50
+ ) -> typing.Callable:
51
+ r"""Creates a new CustomOp object.
52
+
53
+ WARNING: if you're a user, please do not use this directly
54
+ (instead use the torch._custom_ops APIs).
55
+ Also please see the following for a detailed guide on custom ops.
56
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
57
+
58
+ In PyTorch, defining an op (short for "operator") is a two step-process:
59
+ - we need to define (create) the op
60
+ - we need to implement behavior for how the operator interacts with
61
+ various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
62
+
63
+ This entrypoint defines the CustomOp object (the first step);
64
+ you must then perform the second step by calling various methods on
65
+ the CustomOp object.
66
+
67
+ This API is used as a decorator (see examples).
68
+
69
+ Arguments:
70
+ qualname (str): Should be a string that looks like
71
+ "namespace::operator_name". Operators in PyTorch need a namespace to
72
+ avoid name collisions; a given operator may only be created once.
73
+ If you are writing a Python library, we recommend the namespace to
74
+ be the name of your top-level module. The operator_name must be
75
+ the same as the name of the function you pass to custom_op
76
+ (see examples).
77
+ manual_schema (Optional[str]): Each PyTorch operator needs a schema that
78
+ tells PyTorch the types of the inputs/outputs. If None (default),
79
+ we will infer the schema from the type annotations on the function
80
+ (see examples). Otherwise, if you don't want to use type annotations,
81
+ you may provide us the schema string.
82
+
83
+ Example::
84
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
85
+ >>> import numpy as np
86
+ >>> from torch import Tensor
87
+ >>>
88
+ >>> # Step 1: define the CustomOp.
89
+ >>> # We need to provide the decorator a "prototype function"
90
+ >>> # (a function with Python ellipses as the body).
91
+ >>> @custom_op("my_library::numpy_sin")
92
+ >>> def numpy_sin(x: Tensor) -> Tensor:
93
+ >>> ...
94
+ >>>
95
+ >>> # numpy_sin is now an instance of class CustomOp
96
+ >>> print(type(numpy_sin))
97
+ >>>
98
+ >>> # Step 2: Register an implementation for various PyTorch subsystems
99
+ >>>
100
+ >>> # Register an implementation for CPU tensors
101
+ >>> @numpy_sin.impl('cpu')
102
+ >>> def numpy_sin_impl_cpu(x):
103
+ >>> return torch.from_numpy(np.sin(x.numpy()))
104
+ >>>
105
+ >>> # Register an implementation for CUDA tensors
106
+ >>> @numpy_sin.impl('cuda')
107
+ >>> def numpy_sin_impl_cuda(x):
108
+ >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
109
+ >>>
110
+ >>> x = torch.randn(3)
111
+ >>> numpy_sin(x) # calls numpy_sin_impl_cpu
112
+ >>>
113
+ >>> x_cuda = x.cuda()
114
+ >>> numpy_sin(x) # calls numpy_sin_impl_cuda
115
+
116
+ """
117
+
118
+ def inner(func):
119
+ if not inspect.isfunction(func):
120
+ raise ValueError(
121
+ f"custom_op(...)(func): Expected `func` to be a Python "
122
+ f"function, got: {type(func)}"
123
+ )
124
+
125
+ ns, name = parse_qualname(qualname)
126
+ validate_namespace(ns)
127
+ if func.__name__ != name:
128
+ raise ValueError(
129
+ f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
130
+ f"to have name '{name}' but got '{func.__name__}'. "
131
+ f"Please either change the name of `func` or the qualname that "
132
+ f"is passed to `custom_op`"
133
+ )
134
+
135
+ schema = infer_schema(func) if manual_schema is None else manual_schema
136
+ schema_str = f"{name}{schema}"
137
+ function_schema = FunctionSchema.parse(schema_str)
138
+ validate_schema(function_schema)
139
+ if manual_schema is not None:
140
+ validate_function_matches_schema(function_schema, func)
141
+
142
+ lib = library.Library(ns, "FRAGMENT")
143
+ lib.define(schema_str)
144
+ ophandle = find_ophandle_or_throw(ns, function_schema.name)
145
+ result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
146
+
147
+ result.__name__ = func.__name__
148
+ result.__module__ = func.__module__
149
+ result.__doc__ = func.__doc__
150
+
151
+ library.impl(lib, result._opname, "Autograd")(
152
+ autograd_kernel_indirection(weakref.proxy(result))
153
+ )
154
+
155
+ torch._C._dispatch_set_report_error_callback(
156
+ ophandle, functools.partial(report_error_callback, weakref.proxy(result))
157
+ )
158
+
159
+ return result
160
+
161
+ return inner
162
+
163
+
164
+ # Global dictionary holding references to all CustomOp objects
165
+ # Yes, it keeps all CustomOps alive (see NOTE [CustomOp lifetime])
166
+ # Used to query the CustomOp associated with a specific C++ dispatcher operator.
167
+ # An example usage is FakeTensor: FakeTensor checks if a specific operator
168
+ # has an implementation registered via the CustomOp API.
169
+ # Indexed by qualname (e.g. aten::foo)
170
+ global_registry: typing.Dict[str, "CustomOp"] = {}
171
+
172
+
173
+ class CustomOp:
174
+ r"""Class for custom operators in PyTorch.
175
+
176
+ Use the CustomOp API to create user-defined custom operators that behave
177
+ just like regular PyTorch operators (e.g. torch.sin, torch.mm) when it
178
+ comes to various PyTorch subsystems (like torch.compile).
179
+
180
+ To construct a `CustomOp`, use `custom_op`.
181
+ """
182
+
183
+ def __init__(self, lib, cpp_ns, schema, operator_name, ophandle, *, _private_access=False):
184
+ super().__init__()
185
+ if not _private_access:
186
+ raise RuntimeError(
187
+ "The CustomOp constructor is private and we do not guarantee "
188
+ "BC for it. Please use custom_op(...) to create a CustomOp object"
189
+ )
190
+ name = f"{cpp_ns}::{operator_name}"
191
+ self._schema = schema
192
+ self._cpp_ns = cpp_ns
193
+ self._lib: library.Library = lib
194
+ self._ophandle: _C._DispatchOperatorHandle = ophandle
195
+ # Has the name of the op, e.g. "foo". We cache here for convenience.
196
+ self._opname: str = operator_name
197
+ # this is _opname but with namespace. e.g. "custom::foo"
198
+ self._qualname: str = name
199
+ self.__name__ = None # mypy requires this
200
+ # NB: Some of these impls are registered as kernels to DispatchKeys.
201
+ # Modifying the _impls dict directly won't do anything in that case.
202
+ self._impls: typing.Dict[str, typing.Optional[FuncAndLocation]] = {}
203
+ # See NOTE [CustomOp autograd kernel indirection]
204
+ self._registered_autograd_kernel_indirection = False
205
+
206
+ global_registry[self._qualname] = self
207
+
208
+ def _register_autograd_kernel_indirection(self):
209
+ assert not self._registered_autograd_kernel_indirection
210
+ self._lib.impl(self._opname, autograd_kernel_indirection(weakref.proxy(self)), "Autograd")
211
+ self._registered_autograd_kernel_indirection = True
212
+
213
+ # Records the impl and the source location in self._impls
214
+ # Note that this doesn't cause torch.library to use the impl, that
215
+ # needs to be done in a separate self._lib.impl call.
216
+ def _register_impl(self, kind, func, stacklevel=2):
217
+ if self._has_impl(kind):
218
+ func_and_location = self._impls[kind]
219
+ assert func_and_location is not None # Pacify mypy
220
+ location = func_and_location.location
221
+ raise RuntimeError(
222
+ f"Attempting to register a {kind} impl for operator {self._qualname} "
223
+ f"that already has a {kind} impl registered from Python at "
224
+ f"{location}. This is not supported."
225
+ )
226
+ frame = inspect.getframeinfo(sys._getframe(stacklevel))
227
+ location = f"{frame.filename}:{frame.lineno}"
228
+ self._impls[kind] = FuncAndLocation(func, location)
229
+
230
+ def _get_impl(self, kind):
231
+ return self._impls[kind]
232
+
233
+ def _has_impl(self, kind):
234
+ return kind in self._impls
235
+
236
+ def _destroy(self):
237
+ # NOTE: [CustomOp lifetime]
238
+ # A CustomOp, once created, lives forever. The mechanism is that the
239
+ # global registry holds a reference to it. However, to make testing
240
+ # easier, we want to be able to destroy CustomOp objects.
241
+ # CustomOp._destroy does the job, though it leaves the CustomOp
242
+ # in a garbage state.
243
+ del self._lib
244
+
245
+ opnamespace = getattr(torch.ops, self._cpp_ns)
246
+ if hasattr(opnamespace, self._opname):
247
+ delattr(opnamespace, self._opname)
248
+
249
+ del global_registry[self._qualname]
250
+
251
+ def __repr__(self):
252
+ return f'<CustomOp(op="{self._qualname}")>'
253
+
254
+ def __call__(self, *args, **kwargs):
255
+ # Bypass torch.ops.* and directly do OperatorHandle::callBoxed.
256
+ # Using torch.ops.* is a bit of a pain (it can be slow and it has lifetime
257
+ # issues from caching operators that make testing CustomOp difficult).
258
+ result = _C._dispatch_call_boxed(self._ophandle, *args, **kwargs)
259
+ return result
260
+
261
+ def impl(
262
+ self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2,
263
+ ) -> typing.Callable:
264
+ r"""Register an implementation for a device type for this CustomOp object.
265
+
266
+ WARNING: if you're a user, please do not use this directly
267
+ (instead use the torch._custom_ops APIs).
268
+ Also please see the following for a detailed guide on custom ops.
269
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
270
+
271
+ If the CustomOp is passed multiple Tensor inputs with different device
272
+ types, it will dispatch to the registered implementation for the highest
273
+ priority device type among those present.
274
+ The supported device types, in order of priority, are {'cuda', 'cpu'}.
275
+
276
+ This API is used as a decorator (see examples).
277
+
278
+ Arguments:
279
+ device_types (str or Iterable[str]): the device type(s) to register the function for.
280
+
281
+ Examples::
282
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
283
+ >>> import numpy as np
284
+ >>> from torch import Tensor
285
+ >>>
286
+ >>> @custom_op("my_library::numpy_cos")
287
+ >>> def numpy_cos(x: Tensor) -> Tensor:
288
+ >>> ...
289
+ >>>
290
+ >>> # Register an implementation for CPU Tensors
291
+ >>> @numpy_cos.impl('cpu')
292
+ >>> def numpy_cos_impl_cpu(x):
293
+ >>> return torch.from_numpy(np.cos(x.numpy()))
294
+ >>>
295
+ >>> # Register an implementation for CUDA Tensors
296
+ >>> @numpy_cos.impl('cuda')
297
+ >>> def numpy_cos_impl_cuda(x):
298
+ >>> return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device)
299
+ >>>
300
+ >>> x = torch.randn(3)
301
+ >>> numpy_cos(x) # calls numpy_cos_impl_cpu
302
+ >>>
303
+ >>> x_cuda = x.cuda()
304
+ >>> numpy_cos(x) # calls numpy_cos_impl_cuda
305
+
306
+ """
307
+ if isinstance(device_types, str):
308
+ device_types = [device_types]
309
+ for device_type in device_types:
310
+ validate_device_type(device_type)
311
+
312
+ def inner(f):
313
+ for device_type in set(device_types):
314
+ self._check_doesnt_have_library_impl(device_type)
315
+ self._register_impl(device_type, f, stacklevel=_stacklevel)
316
+ dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
317
+ library.impl(self._lib, self._opname, dispatch_key)(f)
318
+ return f
319
+
320
+ return inner
321
+
322
+ def _check_doesnt_have_library_impl(self, device_type):
323
+ if self._has_impl(device_type):
324
+ return
325
+ key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
326
+ if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, key):
327
+ raise RuntimeError(
328
+ f"impl(..., device_types={device_type}): the operator {self._qualname} "
329
+ f"already has an implementation for this device type via a "
330
+ f"pre-existing torch.library or TORCH_LIBRARY registration.")
331
+
332
+ def impl_factory(self) -> typing.Callable:
333
+ r"""Register an implementation for a factory function."""
334
+
335
+ def inner(f):
336
+ self._register_impl("factory", f)
337
+ library.impl(self._lib, self._opname, "BackendSelect")(f)
338
+ return f
339
+
340
+ return inner
341
+
342
+ def impl_abstract(self, _stacklevel=2) -> typing.Callable:
343
+ r"""Register an abstract implementation for this operator.
344
+
345
+ WARNING: please do not use this directly (and instead use the torch._custom_ops
346
+ APIs). Also please see the following for a detailed guide on custom ops.
347
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
348
+
349
+ An "abstract implementation" specifies the behavior of this operator on
350
+ Tensors that carry no data. Given some input Tensors with certain properties
351
+ (sizes/strides/storage_offset/device), it specifies what the properties of
352
+ the output Tensors are.
353
+
354
+ The abstract implementation has the same signature as the operator.
355
+ It is run for both FakeTensors and meta tensors. To write an abstract
356
+ implementation, assume that all Tensor inputs to the operator are
357
+ regular CPU/CUDA/Meta tensors, but they do not have storage, and
358
+ you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
359
+ The abstract implementation must consist of only PyTorch operations
360
+ (and may not directly access the storage or data of any input or
361
+ intermediate Tensors).
362
+
363
+ This API is used as a decorator (see examples).
364
+
365
+ Examples::
366
+ >>> import numpy as np
367
+ >>> from torch import Tensor
368
+ >>>
369
+ >>> # Example 1: an operator without data-dependent output shape
370
+ >>> @custom_op('my_library::custom_linear')
371
+ >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
372
+ >>> ...
373
+ >>>
374
+ >>> @custom_linear.impl_abstract()
375
+ >>> def custom_linear_abstract(x, weight):
376
+ >>> assert x.dim() == 2
377
+ >>> assert weight.dim() == 2
378
+ >>> assert bias.dim() == 1
379
+ >>> assert x.shape[1] == weight.shape[1]
380
+ >>> assert weight.shape[0] == bias.shape[0]
381
+ >>> assert x.device == weight.device
382
+ >>>
383
+ >>> return (x @ weight.t()) + bias
384
+ >>>
385
+ >>> # Example 2: an operator with data-dependent output shape
386
+ >>> @custom_op('my_library::custom_nonzero')
387
+ >>> def custom_nonzero(x: Tensor) -> Tensor:
388
+ >>> ...
389
+ >>>
390
+ >>> @custom_nonzero.impl_abstract()
391
+ >>> def custom_nonzero_abstract(x):
392
+ >>> # Number of nonzero-elements is data-dependent.
393
+ >>> # Since we cannot peek at the data in an abstract impl,
394
+ >>> # we use the ctx object to construct a new symint that
395
+ >>> # represents the data-dependent size.
396
+ >>> ctx = torch._custom_op.get_ctx()
397
+ >>> nnz = ctx.create_unbacked_symint()
398
+ >>> shape = [x.dim(), nnz]
399
+ >>> result = x.new_empty(shape, dtype=torch.long)
400
+ >>> return result
401
+ >>>
402
+ >>> @custom_nonzero.impl(['cpu', 'cuda'])
403
+ >>> def custom_nonzero_impl(x):
404
+ >>> x_np = to_numpy(x)
405
+ >>> res = np.stack(np.nonzero(x_np), axis=1)
406
+ >>> # unbacked symbolic ints in PyTorch must be >= 2, so we
407
+ >>> # constrain the range to at least 2
408
+ >>> if res.shape[0] <= 1:
409
+ >>> raise RuntimeError("not supported")
410
+ >>> return torch.tensor(res, device=x.device)
411
+
412
+ """
413
+
414
+ def inner(f):
415
+ self._check_doesnt_have_library_meta_impl()
416
+ self._register_impl("abstract", f, stacklevel=_stacklevel)
417
+ location = self._get_impl("abstract").location
418
+
419
+ qualname = self._qualname
420
+
421
+ # Handle DispatchKey.Meta registration
422
+ @functools.wraps(f)
423
+ def f_with_ctx(*args, **kwargs):
424
+ def error_on_ctx():
425
+ raise RuntimeError(
426
+ f"Attempted to call get_ctx() for the meta implementation "
427
+ f"for {qualname}."
428
+ f"You have presumably called get_ctx() because the operator "
429
+ f"has a data-dependent output shape; if so, there is no "
430
+ f"such meta implementation and this error is the correct "
431
+ f"behavior. Otherwise, please remove the call to get_ctx() "
432
+ f"in the implementation registered with impl_abstract "
433
+ f"at {location}"
434
+ )
435
+
436
+ with torch._library.abstract_impl.set_ctx_getter(error_on_ctx):
437
+ return f(*args, **kwargs)
438
+
439
+ self._lib.impl(self._opname, f_with_ctx, "Meta")
440
+ return f
441
+
442
+ return inner
443
+
444
+ def _check_can_register_backward(self):
445
+ def error(detail):
446
+ raise RuntimeError(
447
+ f"Cannot use torch._custom_ops APIs to register backward "
448
+ f"formula for {detail}. Got operator "
449
+ f"{self._qualname} with schema: {schema}"
450
+ )
451
+
452
+ schema = self._schema
453
+ if schema.kind() != SchemaKind.functional:
454
+ error("non-functional operator")
455
+
456
+ rets = schema.returns
457
+ if not schema.returns:
458
+ error("operator with no returns")
459
+
460
+ assert len(rets) > 0
461
+ is_non_mutating_view = any(
462
+ r.annotation is not None and not r.annotation.is_write for r in rets
463
+ )
464
+ if is_non_mutating_view:
465
+ error("operator that returns views")
466
+
467
+ # We make assumptions about the schema's return types.
468
+ allowed_return_types = {
469
+ BaseType(BaseTy.int): "int",
470
+ BaseType(BaseTy.SymInt): "SymInt",
471
+ BaseType(BaseTy.bool): "bool",
472
+ BaseType(BaseTy.float): "float",
473
+ BaseType(BaseTy.Tensor): "Tensor",
474
+ ListType(BaseType(BaseTy.Tensor), None): "List[Tensor]",
475
+ }
476
+ for ret in schema.returns:
477
+ if ret.type in allowed_return_types:
478
+ continue
479
+ error(f"operator with return not in {list(allowed_return_types.values())} (got {ret.type})")
480
+
481
+ def _check_doesnt_have_library_autograd_impl(self):
482
+ if self._registered_autograd_kernel_indirection:
483
+ return
484
+
485
+ if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
486
+ raise RuntimeError(
487
+ f"impl_backward/impl_save_for_backward: the operator {self._qualname} "
488
+ f"already has an implementation for this device type via a "
489
+ f"pre-existing registration to DispatchKey::CompositeImplicitAutograd."
490
+ f"CompositeImplicitAutograd operators do not need an autograd formula; "
491
+ f"instead, the operator will decompose into its constituents and those "
492
+ f"can have autograd formulas defined on them.")
493
+
494
+ # We can improve this by adding "all Autograd<BACKEND> keys", but
495
+ # realistically people will just be using this API for CPU/CUDA for now.
496
+ for key in ["Autograd", "AutogradCPU", "AutogradCUDA"]:
497
+ if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, key):
498
+ raise RuntimeError(
499
+ f"impl_backward/impl_save_for_backward: "
500
+ f"the operator {self._qualname} already has an Autograd kernel "
501
+ f"registered to DispatchKey::{key} vi a pre-existing "
502
+ f"torch.library or TORCH_LIBRARY registration. Please either "
503
+ f"remove those registrations or don't use the torch._custom_ops APIs")
504
+
505
+ def _check_doesnt_have_library_meta_impl(self):
506
+ if self._has_impl("abstract"):
507
+ return
508
+
509
+ # If the user's operator is CompositeExplicitAutograd,
510
+ # allow them to impl_abstract. This is being pragmatic
511
+ # (existing custom ops may have CompositeExplicitAutograd
512
+ # registration that don't work with Meta kernels, so this
513
+ # gives them an escape hatch).
514
+ if (
515
+ _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeExplicitAutograd")
516
+ and not _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta")
517
+ ):
518
+ return
519
+
520
+ # Otherwise, if the user's already has a Meta kernel or their
521
+ # op is CompositeImplicitAutograd or some other alias dispatch key,
522
+ # raise.
523
+
524
+ # Special case for CompositeImplicitAutograd
525
+ if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
526
+ raise RuntimeError(
527
+ f"impl_abstract(...): the operator {self._qualname} "
528
+ f"already has an implementation for this device type via a "
529
+ f"pre-existing registration to DispatchKey::CompositeImplicitAutograd."
530
+ f"CompositeImplicitAutograd operators do not need an abstract impl; "
531
+ f"instead, the operator will decompose into its constituents and those "
532
+ f"can have abstract impls defined on them.")
533
+
534
+ if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta"):
535
+ raise RuntimeError(
536
+ f"impl_abstract(...): the operator {self._qualname} "
537
+ f"already has an DispatchKey::Meta implementation via a "
538
+ f"pre-existing torch.library or TORCH_LIBRARY registration. "
539
+ f"Please either remove that registration or don't call impl_abstract.")
540
+
541
+ # NOTE ["backward", "save_for_backward", and "autograd"]
542
+ # As a part of the explicit autograd API, a user must provide us
543
+ # a "save_for_backward" function and a "backward" function.
544
+ # When both of these have been provided, then we automatically
545
+ # construct the "autograd" kernel.
546
+ def _register_autograd_kernel(self):
547
+ assert self._has_impl("backward")
548
+ assert self._has_impl("save_for_backward")
549
+ kernel = construct_autograd_kernel(
550
+ self._schema,
551
+ self._output_differentiability,
552
+ self,
553
+ get_op(self._qualname),
554
+ self._get_impl("save_for_backward").func,
555
+ self._get_impl("backward").func)
556
+ self._register_impl("autograd", kernel)
557
+
558
+ def impl_save_for_backward(self, _stacklevel=2):
559
+ r"""Register a function that tells us what to save for backward.
560
+
561
+ Please see impl_backward for more details.
562
+ """
563
+ def inner(f):
564
+ self._check_can_register_backward()
565
+ self._check_doesnt_have_library_autograd_impl()
566
+ if not self._registered_autograd_kernel_indirection:
567
+ self._register_autograd_kernel_indirection()
568
+ self._register_impl("save_for_backward", f, stacklevel=_stacklevel)
569
+ if self._has_impl("backward"):
570
+ self._register_autograd_kernel()
571
+ return inner
572
+
573
+ def impl_backward(self, output_differentiability=None, _stacklevel=2):
574
+ r"""Registers a backward formula.
575
+
576
+ WARNING: if you're a user, please do not use this directly
577
+ (instead use the torch._custom_ops APIs).
578
+ Also please see the following for a detailed guide on custom ops.
579
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
580
+
581
+ In order for the CustomOp to work with autograd, you need to register
582
+ a backward formula. There are two pieces to this:
583
+ 1. You must give us a function to specify what to save for backward.
584
+ Call this the "save for backward" function.
585
+ 2. You must give us a function that computes gradients. Call this the
586
+ "backward" function.
587
+
588
+ Use `impl_save_for_backward` to define a "save for backward" function
589
+ that specifies what gets saved for backward. The function should accept
590
+ two arguments ``(inputs, output)`` and return the quantities to be saved
591
+ for backward.
592
+
593
+ During runtime, when you call the CustomOp, PyTorch will invoke the
594
+ "save for backward" function with the inputs and output of the CustomOp.
595
+
596
+ Use `impl_backward` to define the "backward" function. The backward
597
+ function must accept ``(ctx, saved, *grads)``:
598
+ - ``ctx`` is a context object where we may provide information
599
+ - ``saved`` is exactly what gets returned from the "save for backward"
600
+ function
601
+ - ``grads`` is one or more gradients. The number of gradients matches
602
+ the number of outputs of the CustomOp.
603
+
604
+ The backward function must return a dict that maps the name of
605
+ an input to the CustomOp to its corresponding gradient. All inputs that
606
+ were declared to be Tensors in the CustomOp definition must be accounted
607
+ for in the dict. The gradient may be a Tensor or None.
608
+
609
+ """
610
+ if output_differentiability is not None:
611
+ def yell():
612
+ raise RuntimeError(
613
+ f"impl_backward(output_differentiability): expected "
614
+ f"output_differentiability to be a list of bools with "
615
+ f"length equal to the number of outputs of this CustomOp "
616
+ f"got: {output_differentiability}")
617
+
618
+ if not isinstance(output_differentiability, list):
619
+ yell()
620
+ for diff in output_differentiability:
621
+ if not isinstance(diff, bool):
622
+ yell()
623
+ if len(self._schema.returns) != len(output_differentiability):
624
+ yell()
625
+
626
+ def inner(f):
627
+ self._check_can_register_backward()
628
+ self._check_doesnt_have_library_autograd_impl()
629
+ if not self._registered_autograd_kernel_indirection:
630
+ self._register_autograd_kernel_indirection()
631
+ self._register_impl("backward", f, stacklevel=_stacklevel)
632
+ self._output_differentiability = output_differentiability
633
+ if self._has_impl("save_for_backward"):
634
+ self._register_autograd_kernel()
635
+ return inner
636
+
637
+
638
+ @dataclasses.dataclass
639
+ class FuncAndLocation:
640
+ func: typing.Callable
641
+ location: str
642
+
643
+
644
+ def find_ophandle_or_throw(cpp_ns: str, operator_name: OperatorName):
645
+ overload_name = (
646
+ "" if operator_name.overload_name is None else operator_name.overload_name
647
+ )
648
+ return _C._dispatch_find_schema_or_throw(
649
+ f"{cpp_ns}::{str(operator_name.name)}", overload_name
650
+ )
651
+
652
+
653
+ def validate_namespace(ns: str) -> None:
654
+ if "." in ns:
655
+ raise ValueError(
656
+ f'custom_op(..., ns="{ns}"): expected ns to not contain any . (and be a '
657
+ f"valid variable name)"
658
+ )
659
+ if ns in RESERVED_NS:
660
+ raise ValueError(
661
+ f"custom_op(..., ns='{ns}'): '{ns}' is a reserved namespace, "
662
+ f"please choose something else. "
663
+ )
664
+
665
+ def validate_schema(schema: FunctionSchema) -> None:
666
+ if not torch._library.utils.is_functional_schema(schema):
667
+ raise ValueError(
668
+ f"custom_op only supports functional operators "
669
+ f"(ops that do not mutate any inputs, do not return "
670
+ f"views of the inputs, and has at least one return). "
671
+ f"Got the following non-functional schema: {schema}"
672
+ )
673
+
674
+ # For simplicity: don't allow self arguments
675
+ if schema.arguments.self_arg is not None:
676
+ raise ValueError(
677
+ f"custom_op does not support arguments named 'self'. Please "
678
+ f"rename your argument. Got: {schema}"
679
+ )
680
+
681
+
682
+ def parse_qualname(qualname: str) -> typing.Tuple[str, str]:
683
+ names = qualname.split("::", 1)
684
+ if len(names) != 2:
685
+ raise ValueError(f"Expected there to be a namespace in {qualname}, i.e. The "
686
+ f"operator name should look something like ns::foo")
687
+ if '.' in names[1]:
688
+ raise ValueError(f"The torch.custom_ops APIs do not handle overloads, "
689
+ f"i.e. operator names with '.' in them. "
690
+ f"Please name your operator something like ns::foo. "
691
+ f"Got: {qualname}")
692
+ return names[0], names[1]
693
+
694
+
695
+ def validate_device_type(device_type: str) -> None:
696
+ if device_type not in SUPPORTED_DEVICE_TYPE_TO_KEY:
697
+ raise ValueError(
698
+ f"CustomOp.impl(device_types=[{device_type}, ...]): we only support device_type "
699
+ f"in {SUPPORTED_DEVICE_TYPE_TO_KEY.keys()}."
700
+ )
701
+
702
+
703
+ def supported_param(param: inspect.Parameter) -> bool:
704
+ return param.kind in (
705
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
706
+ inspect.Parameter.KEYWORD_ONLY,
707
+ )
708
+
709
+
710
+ def validate_function_matches_schema(
711
+ schema: FunctionSchema, func: typing.Callable
712
+ ) -> None:
713
+ sig = inspect.signature(func)
714
+
715
+ if not all(supported_param(p) for _, p in sig.parameters.items()):
716
+ raise ValueError(
717
+ f"custom_op(..., manual_schema)(func): positional-only args, "
718
+ f"varargs, and kwargs are not supported. Please rewrite `func` "
719
+ f"to not have them. Got `func` with signature: {sig}"
720
+ )
721
+
722
+ if (
723
+ any(
724
+ p.annotation is not inspect.Parameter.empty
725
+ for _, p in sig.parameters.items()
726
+ )
727
+ or sig.return_annotation is not inspect.Signature.empty
728
+ ):
729
+ raise ValueError(
730
+ f"custom_op(..., manual_schema)(func): When passing in a manual "
731
+ f"schema, we expect `func` to have no type annotations to avoid "
732
+ f"ambiguity. Got `func` with signature: {sig}"
733
+ )
734
+
735
+ positional = [
736
+ (name, param)
737
+ for name, param in sig.parameters.items()
738
+ if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
739
+ ]
740
+ kwargonly = [
741
+ (name, param)
742
+ for name, param in sig.parameters.items()
743
+ if param.kind == inspect.Parameter.KEYWORD_ONLY
744
+ ]
745
+
746
+ def error():
747
+ raise ValueError(
748
+ f"custom_op(..., manual_schema)(func): When passing in a manual "
749
+ f"schema, we expect `func`'s signature to match `manual_schema` "
750
+ f"(aside from type annotations). "
751
+ f"func's signature: {sig}, manual_schema: {schema}"
752
+ )
753
+
754
+ def error_default_args():
755
+ raise ValueError(
756
+ f"custom_op(..., manual_schema)(func): "
757
+ f"neither func nor manual_schema should have default "
758
+ f"arguments. Got "
759
+ f"func's signature: {sig}, manual_schema: {schema}"
760
+ )
761
+
762
+ def compare(sig_args, schema_args):
763
+ if len(sig_args) != len(schema_args):
764
+ error()
765
+ for (name, param), arg in zip(sig_args, schema_args):
766
+ if name != arg.name:
767
+ error()
768
+ if param.default is not inspect.Parameter.empty or arg.default is not None:
769
+ error_default_args()
770
+
771
+ compare(positional, schema.arguments.flat_positional)
772
+ compare(kwargonly, schema.arguments.flat_kwarg_only)
773
+
774
+
775
+ def report_error_callback(custom_op: typing.Any, key: str) -> None:
776
+ if key == "Undefined":
777
+ raise NotImplementedError(
778
+ f"{custom_op}: There were no Tensor inputs to this operator "
779
+ f"(e.g. you passed an empty list of Tensors). If your operator is a "
780
+ f"factory function (that is, it takes no Tensors and constructs "
781
+ f"a new one), then please use CustomOp.impl_factory to register "
782
+ f"an implementation for it"
783
+ )
784
+ if key == "Meta":
785
+ raise NotImplementedError(
786
+ f"{custom_op}: when running with device='Meta' tensors: there is no "
787
+ f"abstract impl registered for this CustomOp. Please register one via "
788
+ f"CustomOp.impl_abstract to get this CustomOp to work with Meta tensors"
789
+ )
790
+ if key in ("CPU", "CUDA"):
791
+ device = key.lower()
792
+ raise NotImplementedError(
793
+ f"{custom_op}: when running with device='{device}' tensors: there is no "
794
+ f"{device} impl registered for this CustomOp. Please register one via "
795
+ f"CustomOp.impl(device_type='{device}')"
796
+ )
797
+ raise NotImplementedError(
798
+ f"{custom_op}: No implementation for dispatch key {key}. It is likely "
799
+ f"that we have not added this functionality yet, please either open an "
800
+ f"issue or if you're feeling adventurous, use the low-level "
801
+ f"torch.library API"
802
+ )
803
+
804
+
805
+ def custom_op_from_existing(op):
806
+ ns = op.namespace
807
+ lib = torch.library.Library(ns, "FRAGMENT")
808
+ name = op.name().split("::")[-1]
809
+ schema_str = str(op._schema)
810
+ # CustomOp expects the schema string without the namespace
811
+ schema_str = schema_str.split("::")[-1]
812
+ schema = FunctionSchema.parse(schema_str)
813
+ return CustomOp(lib, ns, schema, name, op, _private_access=True)
814
+
815
+
816
+ def get_op(qualname):
817
+ def error_not_found():
818
+ raise ValueError(
819
+ f"Could not find the operator {qualname}. Please make sure you have "
820
+ f"already registered the operator and (if registered from C++) "
821
+ f"loaded it via torch.ops.load_library.")
822
+
823
+ ns, name = parse_qualname(qualname)
824
+ if not hasattr(torch.ops, ns):
825
+ error_not_found()
826
+ opnamespace = getattr(torch.ops, ns)
827
+ if not hasattr(opnamespace, name):
828
+ error_not_found()
829
+ packet = getattr(opnamespace, name)
830
+ if not hasattr(packet, 'default'):
831
+ error_not_found()
832
+ return packet.default
833
+
834
+
835
+ def _find_custom_op(qualname, also_check_torch_library=False):
836
+ if qualname in global_registry:
837
+ return global_registry[qualname]
838
+ if not also_check_torch_library:
839
+ raise RuntimeError(
840
+ f'Could not find custom op "{qualname}". Did you register it via '
841
+ f"the torch._custom_ops API?")
842
+ overload = get_op(qualname)
843
+ result = custom_op_from_existing(overload)
844
+ return result
845
+
846
+
847
+ def get_abstract_impl(qualname):
848
+ if qualname not in torch._custom_op.impl.global_registry:
849
+ return None
850
+ custom_op = torch._custom_op.impl.global_registry[qualname]
851
+ if custom_op is None:
852
+ return None
853
+ if not custom_op._has_impl("abstract"):
854
+ return None
855
+ return custom_op._get_impl("abstract").func
856
+
857
+
858
+ def _custom_op_with_schema(qualname, schema, needs_fixed_stride_order=True):
859
+ ns, name = qualname.split("::")
860
+ schema_str = f"{name}{schema}"
861
+ function_schema = FunctionSchema.parse(schema_str)
862
+ validate_schema(function_schema)
863
+ tags = [torch._C.Tag.needs_fixed_stride_order] if needs_fixed_stride_order else []
864
+ lib = library.Library(ns, "FRAGMENT")
865
+ lib.define(schema_str, tags=tags)
866
+ ophandle = find_ophandle_or_throw(ns, function_schema.name)
867
+ result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
868
+ result._register_autograd_kernel_indirection()
869
+
870
+ torch._C._dispatch_set_report_error_callback(
871
+ ophandle, functools.partial(report_error_callback, weakref.proxy(result))
872
+ )
873
+ return get_op(qualname)
parrot/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2496c22e9e92909b5b195598282bae47f73a07f52940bd2129e955d1549ad5fe
3
+ size 124962
parrot/lib/python3.10/site-packages/torch/_library/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import torch._library.abstract_impl
2
+ import torch._library.autograd
3
+ import torch._library.simple_registry
4
+ import torch._library.utils
5
+
6
+ from torch._library.fake_class_registry import register_fake_class
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc ADDED
Binary file (7.6 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/autograd.cpython-310.pyc ADDED
Binary file (6.53 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/custom_ops.cpython-310.pyc ADDED
Binary file (20.6 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/fake_class_registry.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/infer_schema.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc ADDED
Binary file (1.9 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc ADDED
Binary file (7.78 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/abstract_impl.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import functools
4
+ from typing import Callable, Optional
5
+ from typing_extensions import deprecated
6
+
7
+ import torch
8
+ from torch._library.utils import Kernel, RegistrationHandle
9
+
10
+
11
+ class AbstractImplHolder:
12
+ """A holder where one can register an fake impl to."""
13
+
14
+ def __init__(self, qualname: str):
15
+ self.qualname: str = qualname
16
+ self.kernel: Optional[Kernel] = None
17
+ self.lib: Optional[torch.library.Library] = None
18
+
19
+ def register(self, func: Callable, source: str) -> RegistrationHandle:
20
+ """Register an fake impl.
21
+
22
+ Returns a RegistrationHandle that one can use to de-register this
23
+ fake impl.
24
+ """
25
+ if self.kernel is not None:
26
+ raise RuntimeError(
27
+ f"register_fake(...): the operator {self.qualname} "
28
+ f"already has an fake impl registered at "
29
+ f"{self.kernel.source}."
30
+ )
31
+ if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
32
+ raise RuntimeError(
33
+ f"register_fake(...): the operator {self.qualname} "
34
+ f"already has an DispatchKey::Meta implementation via a "
35
+ f"pre-existing torch.library or TORCH_LIBRARY registration. "
36
+ f"Please either remove that registration or don't call "
37
+ f"register_fake."
38
+ )
39
+
40
+ if torch._C._dispatch_has_kernel_for_dispatch_key(
41
+ self.qualname, "CompositeImplicitAutograd"
42
+ ):
43
+ raise RuntimeError(
44
+ f"register_fake(...): the operator {self.qualname} "
45
+ f"already has an implementation for this device type via a "
46
+ f"pre-existing registration to "
47
+ f"DispatchKey::CompositeImplicitAutograd."
48
+ f"CompositeImplicitAutograd operators do not need an fake "
49
+ f"impl; "
50
+ f"instead, the operator will decompose into its constituents "
51
+ f"and those "
52
+ f"can have fake impls defined on them."
53
+ )
54
+
55
+ # Store the kernel in this holder
56
+ self.kernel = Kernel(func, source)
57
+
58
+ # Also register the fake impl to Meta key
59
+ if self.lib is None:
60
+ ns = self.qualname.split("::")[0]
61
+ self.lib = torch.library.Library(ns, "FRAGMENT")
62
+ meta_kernel = construct_meta_kernel(self.qualname, self)
63
+ self.lib.impl(self.qualname, meta_kernel, "Meta")
64
+
65
+ def deregister_fake_class():
66
+ if self.lib:
67
+ self.lib._destroy()
68
+ self.lib = None
69
+ self.kernel = None
70
+
71
+ return RegistrationHandle(deregister_fake_class)
72
+
73
+
74
+ def construct_meta_kernel(
75
+ qualname: str, abstract_impl_holder: AbstractImplHolder
76
+ ) -> Callable:
77
+ assert abstract_impl_holder.kernel is not None
78
+
79
+ @functools.wraps(abstract_impl_holder.kernel.func)
80
+ def meta_kernel(*args, **kwargs):
81
+ assert abstract_impl_holder.kernel is not None
82
+ source = abstract_impl_holder.kernel.source
83
+
84
+ def error_on_ctx():
85
+ raise RuntimeError(
86
+ f"Attempted to call get_ctx() for the meta implementation "
87
+ f"for {qualname} (implemented at {source})"
88
+ f"You have presumably called get_ctx() because the operator "
89
+ f"has a data-dependent output shape; if so, there is no "
90
+ f"such meta implementation and this error is the correct "
91
+ f"behavior."
92
+ )
93
+
94
+ with set_ctx_getter(error_on_ctx):
95
+ return abstract_impl_holder.kernel(*args, **kwargs)
96
+
97
+ return meta_kernel
98
+
99
+
100
+ def get_none():
101
+ return None
102
+
103
+
104
+ global_ctx_getter: Callable = get_none
105
+
106
+
107
+ @contextlib.contextmanager
108
+ def set_ctx_getter(ctx_getter):
109
+ global global_ctx_getter
110
+ prev = global_ctx_getter
111
+ try:
112
+ global_ctx_getter = ctx_getter
113
+ yield
114
+ finally:
115
+ global_ctx_getter = prev
116
+
117
+
118
+ class AbstractImplCtx:
119
+ """
120
+ Context object for writing fake implementations for custom operators.
121
+ """
122
+
123
+ def __init__(self, _fake_mode, _op):
124
+ self._fake_mode = _fake_mode
125
+ self._shape_env = _fake_mode.shape_env
126
+ self._op = _op
127
+
128
+ @deprecated(
129
+ "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
130
+ category=FutureWarning,
131
+ )
132
+ def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
133
+ return self.new_dynamic_size(min=min, max=max)
134
+
135
+ def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
136
+ """Constructs a new symint (symbolic int) representing a data-dependent value.
137
+
138
+ This is useful for writing the fake implementation (which is necessary
139
+ for torch.compile) for a CustomOp where an output Tensor has a size
140
+ that depends on the data of the input Tensors.
141
+
142
+ Args:
143
+ min (int): A statically known inclusive lower bound for this symint. Default: 0
144
+ max (Optional[int]): A statically known inclusive upper bound for this
145
+ symint. Default: None
146
+
147
+ .. warning:
148
+
149
+ It is important that the ``min`` and ``max`` (if not None) values are set
150
+ correctly, otherwise, there will be undefined behavior under
151
+ torch.compile. The default value of ``min`` is 2 due to torch.compile
152
+ specializing on 0/1 sizes.
153
+
154
+ You must also verify that your implementation on concrete Tensors
155
+ (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
156
+ to the symint also has respects these constraint.
157
+ The easiest way to do this is to add an assertion in the CPU/CUDA/etc
158
+ implementation that the size follows these bounds.
159
+
160
+ Example::
161
+
162
+ >>> # An operator with data-dependent output shape
163
+ >>> lib = torch.library.Library("mymodule", "FRAGMENT")
164
+ >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
165
+ >>>
166
+ >>> @torch.library.register_fake("mymodule::custom_nonzero")
167
+ >>> def _(x):
168
+ >>> # Number of nonzero-elements is data-dependent.
169
+ >>> # Since we cannot peek at the data in an fake impl,
170
+ >>> # we use the ctx object to construct a new symint that
171
+ >>> # represents the data-dependent size.
172
+ >>> ctx = torch.library.get_ctx()
173
+ >>> nnz = ctx.new_dynamic_size()
174
+ >>> shape = [nnz, x.dim()]
175
+ >>> result = x.new_empty(shape, dtype=torch.int64)
176
+ >>> return result
177
+ >>>
178
+ >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
179
+ >>> def _(x):
180
+ >>> x_np = x.numpy()
181
+ >>> res = np.stack(np.nonzero(x_np), axis=1)
182
+ >>> return torch.tensor(res, device=x.device)
183
+
184
+ """
185
+ if (
186
+ self._shape_env is None
187
+ or not self._shape_env.allow_dynamic_output_shape_ops
188
+ ):
189
+ raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)
190
+
191
+ if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
192
+ raise ValueError(
193
+ f"ctx.new_dynamic_size(min={min}, max={max}): expected "
194
+ f"min and max to be statically known ints but got SymInt. "
195
+ f"This is not supported."
196
+ )
197
+
198
+ if min < 0:
199
+ raise ValueError(
200
+ f"ctx.new_dynamic_size(min={min}, ...): expected min to be "
201
+ f"greater than or equal to 0: this API can only create "
202
+ f"non-negative sizes."
203
+ )
204
+
205
+ result = self._shape_env.create_unbacked_symint()
206
+ torch.fx.experimental.symbolic_shapes._constrain_range_for_size(
207
+ result, min=min, max=max
208
+ )
209
+ return result
parrot/lib/python3.10/site-packages/torch/_library/autograd.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import dataclasses
3
+ from dataclasses import dataclass
4
+ from typing import Any, Callable, Dict, Optional, Protocol
5
+
6
+ from .. import _C, _ops, autograd, Tensor
7
+
8
+ from ..utils import _pytree
9
+ from . import utils
10
+
11
+
12
+ class InfoProtocol(Protocol):
13
+ _backward_fn: Optional[Callable]
14
+ _setup_context_fn: Optional[Callable]
15
+
16
+
17
+ @dataclasses.dataclass
18
+ class Info:
19
+ _backward_fn: Optional[Callable]
20
+ _setup_context_fn: Optional[Callable]
21
+
22
+
23
+ def make_autograd_impl(op: _ops.OpOverload, info: InfoProtocol) -> Callable:
24
+ name: str = f"GeneratedBackwardFor_{op._namespace}_{op._opname}_{op._overloadname}"
25
+
26
+ has_kwarg_only_args = utils.has_kwarg_only_args(op._schema)
27
+
28
+ @dataclass
29
+ class Metadata:
30
+ keyset: _C.DispatchKeySet
31
+ keyword_only_args: Dict[str, Any]
32
+
33
+ def forward(ctx, *args):
34
+ metadata = args[-1]
35
+ args = args[:-1]
36
+
37
+ with _C._AutoDispatchBelowAutograd():
38
+ keyset = metadata.keyset
39
+ kwargs = metadata.keyword_only_args
40
+ result = op.redispatch(keyset & _C._after_autograd_keyset, *args, **kwargs)
41
+ if info._setup_context_fn:
42
+ # The Dispatcher will remove args that are equal to their default
43
+ # values from (args, kwargs). We're going to add it back so that
44
+ # the user can access them.
45
+ #
46
+ # This is OK to do: The Dispatcher removed the args for serialization
47
+ # FC/BC reasons (that is, a graph will not store args that are equal
48
+ # to their default values), but that doesn't matter here. If the user
49
+ # adds a new default arg, then they must update
50
+ # their setup_context (along with the rest of their operator
51
+ # registrations)
52
+ args, kwargs = utils.fill_defaults(op._schema, args, kwargs)
53
+
54
+ if has_kwarg_only_args:
55
+ info._setup_context_fn(
56
+ ctx=ctx, inputs=args, keyword_only_inputs=kwargs, output=result
57
+ )
58
+ else:
59
+ info._setup_context_fn(ctx=ctx, inputs=args, output=result)
60
+ return result
61
+
62
+ def backward(ctx, *grads):
63
+ if info._backward_fn:
64
+ try:
65
+ prev_needs_input_grad = ctx.needs_input_grad
66
+ ctx.needs_input_grad = ctx.needs_input_grad[:-1]
67
+ result = info._backward_fn(ctx, *grads)
68
+ finally:
69
+ ctx.needs_input_grad = prev_needs_input_grad
70
+ if isinstance(result, tuple):
71
+ return (*result, None)
72
+ return result, None
73
+ raise RuntimeError(
74
+ f"Trying to backward through {op} but no autograd "
75
+ f"formula was registered. "
76
+ f"Please use register_autograd to add one."
77
+ )
78
+
79
+ Generated = type(
80
+ name,
81
+ (autograd.Function,),
82
+ {
83
+ "forward": staticmethod(forward),
84
+ "backward": staticmethod(backward),
85
+ },
86
+ )
87
+
88
+ schema = op._schema
89
+ if any(
90
+ utils.is_tensorlist_like_type(a.type)
91
+ for a in (*schema.arguments, *schema.returns)
92
+ ):
93
+ Generated = supports_tensorlist(Generated)
94
+
95
+ # The dispatcher passes any keyword-only-args as kwargs and the
96
+ # rest of the args (even if specified as kwargs) as args.
97
+ def autograd_impl(keyset, *args, **keyword_only_args):
98
+ result = Generated.apply(*args, Metadata(keyset, keyword_only_args)) # type: ignore[attr-defined]
99
+ return result
100
+
101
+ return autograd_impl
102
+
103
+
104
+ def supports_tensorlist(cls: Any) -> Any:
105
+ """Allows a given autograd.Function class to support List[Tensor] inputs/outputs.
106
+
107
+ Regular autograd.Function has a constraint that it only directly supports autograd for
108
+ Tensors. Applying @supports_tensorlist enables an autograd.Function to support
109
+ autograd for List[Tensor] inputs and outputs.
110
+ """
111
+ orig_forward = cls.forward
112
+ orig_backward = cls.backward
113
+ orig_apply = cls.apply
114
+
115
+ @dataclass
116
+ class Metadata:
117
+ input_spec: spec_t
118
+ output_spec: Optional[spec_t] = None
119
+ result_is_tuple: Optional[bool] = None
120
+
121
+ def new_forward(ctx, *args):
122
+ metadata = args[-1]
123
+ args = args[:-1]
124
+ if not isinstance(metadata, Metadata):
125
+ raise NotImplementedError(
126
+ "NYI: calling supports_tensorlist autograd.Function.forward directly. "
127
+ "You should probably be calling .apply instead. "
128
+ "Please file an issue if not."
129
+ )
130
+ args = unflatten(list(args), metadata.input_spec)
131
+ result = orig_forward(ctx, *args)
132
+ metadata.result_is_tuple = isinstance(result, tuple)
133
+ if not metadata.result_is_tuple:
134
+ result = (result,)
135
+ flat_result, output_spec = flatten(result, not_list_of_tensor)
136
+ metadata.output_spec = output_spec
137
+
138
+ if hasattr(ctx, "_pt_metadata"):
139
+ raise RuntimeError(
140
+ "Please don't set ctx._pt_metadata; PyTorch uses it to store info"
141
+ )
142
+ ctx._pt_metadata = metadata
143
+
144
+ return tuple(flat_result)
145
+
146
+ def new_backward(ctx, *grads):
147
+ if not hasattr(ctx, "_pt_metadata"):
148
+ raise NotImplementedError(
149
+ "NYI: calling supports_tensorlist autograd.Function.backward directly. "
150
+ "This will automatically get called by PyTorch autograd. "
151
+ "Please file an issue if you need this."
152
+ )
153
+
154
+ metadata = ctx._pt_metadata
155
+ grads = unflatten(list(grads), metadata.output_spec)
156
+
157
+ # If the user's input is ([x, y, z], w),
158
+ # then needs_input_grad is (bool, bool, bool, bool, bool).
159
+ # We need to
160
+ # 1. get rid of the additional bool (which comes from the extra
161
+ # `metadata input`)
162
+ # 2. unflatten to get the right structure.
163
+ prev_needs_input_grad = ctx.needs_input_grad
164
+ try:
165
+ ctx.needs_input_grad = unflatten(
166
+ list(ctx.needs_input_grad[:-1]), metadata.input_spec
167
+ )
168
+ grad_inputs = orig_backward(ctx, *grads)
169
+ finally:
170
+ ctx.needs_input_grad = prev_needs_input_grad
171
+
172
+ if not isinstance(grad_inputs, tuple):
173
+ grad_inputs = (grad_inputs,)
174
+ # Assume that any Nones in the backward are Tensors.
175
+ # If the forward has an arg that is [1, 2, 3], the backward should
176
+ # return None as the grad.
177
+ # If the forward has an arg that is [tensor, tensor], the backward
178
+ # may return [None, None], [grad, None], [None, grad], or [grad, grad].
179
+ flat_grad_inputs, grad_inputs_spec = flatten(
180
+ grad_inputs, not_list_of_optional_tensor
181
+ )
182
+ if grad_inputs_spec != metadata.input_spec:
183
+ raise RuntimeError(
184
+ f"Expected the return from backward to be of the same structure "
185
+ f"as the inputs. Got: {grad_inputs_spec} (return from backward), "
186
+ f"{metadata.input_spec} (inputs)"
187
+ )
188
+ return tuple(flat_grad_inputs + [None])
189
+
190
+ def new_apply(*args):
191
+ flat_args, input_spec = flatten(args, is_leaf=not_list_of_tensor)
192
+ metadata = Metadata(input_spec)
193
+ result = orig_apply(*flat_args, metadata) # type: ignore[misc]
194
+ assert metadata.output_spec is not None
195
+ result = unflatten(list(result), metadata.output_spec)
196
+ if not metadata.result_is_tuple:
197
+ assert isinstance(result, tuple)
198
+ assert len(result) == 1
199
+ return result[0]
200
+ return result
201
+
202
+ cls.forward = new_forward
203
+ cls.backward = new_backward
204
+ cls.apply = new_apply
205
+ return cls
206
+
207
+
208
+ def not_list_of_tensor(tree):
209
+ if isinstance(tree, tuple):
210
+ return False
211
+ if isinstance(tree, list):
212
+ return any(not isinstance(l, Tensor) for l in tree)
213
+ return True
214
+
215
+
216
+ def not_list_of_optional_tensor(tree):
217
+ if isinstance(tree, tuple):
218
+ return False
219
+ if isinstance(tree, list):
220
+ return any(l is not None and not isinstance(l, Tensor) for l in tree)
221
+ return True
222
+
223
+
224
+ flatten = _pytree.tree_flatten
225
+ unflatten = _pytree.tree_unflatten
226
+ spec_t = _pytree.TreeSpec
parrot/lib/python3.10/site-packages/torch/_library/custom_ops.py ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import inspect
3
+ import weakref
4
+ from typing import (
5
+ Any,
6
+ Callable,
7
+ Dict,
8
+ Iterable,
9
+ Iterator,
10
+ List,
11
+ Optional,
12
+ Sequence,
13
+ Tuple,
14
+ Union,
15
+ )
16
+
17
+ from torch.utils._exposed_in import exposed_in
18
+
19
+ from .. import _C, _library, _ops, autograd, library, Tensor
20
+ from . import utils
21
+
22
+
23
+ device_types_t = Optional[Union[str, Sequence[str]]]
24
+
25
+
26
+ @exposed_in("torch.library")
27
+ def custom_op(
28
+ name: str,
29
+ fn: Optional[Callable] = None,
30
+ /,
31
+ *,
32
+ mutates_args: Iterable[str],
33
+ device_types: device_types_t = None,
34
+ schema: Optional[str] = None,
35
+ ) -> Callable:
36
+ """Wraps a function into custom operator.
37
+
38
+ Reasons why you may want to create a custom op include:
39
+ - Wrapping a third-party library or custom kernel to work with PyTorch
40
+ subsystems like Autograd.
41
+ - Preventing torch.compile/export/FX tracing from peeking inside your function.
42
+
43
+ This API is used as a decorator around a function (please see examples).
44
+ The provided function must have type hints; these are needed to interface
45
+ with PyTorch's various subsystems.
46
+
47
+ Args:
48
+ name (str): A name for the custom op that looks like "{namespace}::{name}",
49
+ e.g. "mylib::my_linear". The name is used as the op's stable identifier
50
+ in PyTorch subsystems (e.g. torch.export, FX graphs).
51
+ To avoid name collisions, please use your project name as the namespace;
52
+ e.g. all custom ops in pytorch/fbgemm use "fbgemm" as the namespace.
53
+ mutates_args (Iterable[str]): The names of args that the function mutates.
54
+ This MUST be accurate, otherwise, the behavior is undefined.
55
+ device_types (None | str | Sequence[str]): The device type(s) the function
56
+ is valid for. If no device type is provided, then the function
57
+ is used as the default implementation for all device types.
58
+ Examples: "cpu", "cuda".
59
+ schema (None | str): A schema string for the operator. If None
60
+ (recommended) we'll infer a schema for the operator from its type
61
+ annotations. We recommend letting us infer a schema unless you
62
+ have a specific reason not to.
63
+ Example: "(Tensor x, int y) -> (Tensor, Tensor)".
64
+
65
+ .. note::
66
+ We recommend not passing in a ``schema`` arg and instead letting us infer
67
+ it from the type annotations. It is error-prone to write your own schema.
68
+ You may wish to provide your own schema if our interpretation of
69
+ the type annotation is not what you want.
70
+ For more info on how to write a schema string, see
71
+ `here <https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#func>`_
72
+
73
+ Examples::
74
+ >>> import torch
75
+ >>> from torch import Tensor
76
+ >>> from torch.library import custom_op
77
+ >>> import numpy as np
78
+ >>>
79
+ >>> @custom_op("mylib::numpy_sin", mutates_args=())
80
+ >>> def numpy_sin(x: Tensor) -> Tensor:
81
+ >>> x_np = x.cpu().numpy()
82
+ >>> y_np = np.sin(x_np)
83
+ >>> return torch.from_numpy(y_np).to(device=x.device)
84
+ >>>
85
+ >>> x = torch.randn(3)
86
+ >>> y = numpy_sin(x)
87
+ >>> assert torch.allclose(y, x.sin())
88
+ >>>
89
+ >>> # Example of a custom op that only works for one device type.
90
+ >>> @custom_op("mylib::numpy_sin_cpu", mutates_args=(), device_types="cpu")
91
+ >>> def numpy_sin_cpu(x: Tensor) -> Tensor:
92
+ >>> x_np = x.numpy()
93
+ >>> y_np = np.sin(x_np)
94
+ >>> return torch.from_numpy(y_np)
95
+ >>>
96
+ >>> x = torch.randn(3)
97
+ >>> y = numpy_sin_cpu(x)
98
+ >>> assert torch.allclose(y, x.sin())
99
+ >>>
100
+ >>> # Example of a custom op that mutates an input
101
+ >>> @custom_op("mylib::numpy_sin_inplace", mutates_args={"x"}, device_types="cpu")
102
+ >>> def numpy_sin_inplace(x: Tensor) -> None:
103
+ >>> x_np = x.numpy()
104
+ >>> np.sin(x_np, out=x_np)
105
+ >>>
106
+ >>> x = torch.randn(3)
107
+ >>> expected = x.sin()
108
+ >>> numpy_sin_inplace(x)
109
+ >>> assert torch.allclose(x, expected)
110
+
111
+ """
112
+
113
+ def inner(fn):
114
+ import torch
115
+
116
+ if schema is None:
117
+ import torch._custom_op.impl
118
+
119
+ schema_str = torch._custom_op.impl.infer_schema(fn, mutates_args)
120
+ else:
121
+ schema_str = schema
122
+ namespace, opname = name.split("::")
123
+ result = CustomOpDef(namespace, opname, schema_str, fn)
124
+ if schema is not None:
125
+ # Check that schema's alias annotations match those of `mutates_args`.
126
+ expected = set()
127
+ for arg in result._opoverload._schema.arguments:
128
+ if arg.alias_info is not None and arg.alias_info.is_write:
129
+ expected.add(arg.name)
130
+ if expected != set(mutates_args):
131
+ raise ValueError(
132
+ f"Attempted to create a custom op with `mutates_args={mutates_args}` "
133
+ f"and `schema={schema}. The schema suggests that the op mutates {expected}"
134
+ f"which is different from what was provided to us in `mutates_args`. "
135
+ f"Please make these consistent."
136
+ )
137
+ result.register_kernel(device_types)(fn)
138
+ return result
139
+
140
+ if fn is None:
141
+ return inner
142
+ return inner(fn)
143
+
144
+
145
+ class CustomOpDef:
146
+ """CustomOpDef is a wrapper around a function that turns it into a custom op.
147
+
148
+ It has various methods for registering additional behavior for this
149
+ custom op.
150
+
151
+ You should not instantiate CustomOpDef directly; instead, use the
152
+ :func:`torch.library.custom_op` API.
153
+ """
154
+
155
+ def __init__(self, namespace: str, name: str, schema: str, fn: Callable) -> None:
156
+ # Fields used to interface with the PyTorch dispatcher
157
+ self._namespace = namespace
158
+ self._name = name
159
+ self._schema = schema
160
+
161
+ self._init_fn = fn
162
+
163
+ self._backend_fns: Dict[Union[str, None], Callable] = {}
164
+ self._abstract_fn: Optional[Callable] = None
165
+ self._setup_context_fn: Optional[Callable] = None
166
+ self._backward_fn: Optional[Callable] = None
167
+
168
+ self._lib = get_library_allowing_overwrite(self._namespace, self._name)
169
+ self._register_to_dispatcher()
170
+ OPDEFS[self._qualname] = self
171
+
172
+ @property
173
+ def _qualname(self) -> str:
174
+ return f"{self._namespace}::{self._name}"
175
+
176
+ def __repr__(self) -> str:
177
+ return f"<CustomOpDef({self._qualname})>"
178
+
179
+ def register_kernel(
180
+ self, device_types: device_types_t, fn: Optional[Callable] = None, /
181
+ ) -> Callable:
182
+ """Register an implementation for a device type for this operator.
183
+
184
+ Some valid device_types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu".
185
+ This API may be used as a decorator.
186
+
187
+ Args:
188
+ fn (Callable): The function to register as the implementation for
189
+ the given device types.
190
+ device_types (str | Sequence[str]): The device device_types to register an impl to.
191
+
192
+ Examples::
193
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
194
+ >>> import torch
195
+ >>> from torch import Tensor
196
+ >>> from torch.library import custom_op
197
+ >>> import numpy as np
198
+ >>>
199
+ >>> # Create a custom op that works on cpu
200
+ >>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu")
201
+ >>> def numpy_sin(x: Tensor) -> Tensor:
202
+ >>> x_np = x.numpy()
203
+ >>> y_np = np.sin(x_np)
204
+ >>> return torch.from_numpy(y_np)
205
+ >>>
206
+ >>> # Add implementations for the cuda device
207
+ >>> @numpy_sin.register_kernel("cuda")
208
+ >>> def _(x):
209
+ >>> x_np = x.cpu().numpy()
210
+ >>> y_np = np.sin(x_np)
211
+ >>> return torch.from_numpy(y_np).to(device=x.device)
212
+ >>>
213
+ >>> x_cpu = torch.randn(3)
214
+ >>> x_cuda = x_cpu.cuda()
215
+ >>> assert torch.allclose(numpy_sin(x_cpu), x_cpu.sin())
216
+ >>> assert torch.allclose(numpy_sin(x_cuda), x_cuda.sin())
217
+
218
+ """
219
+
220
+ def inner(fn):
221
+ if device_types is None or isinstance(device_types, str):
222
+ dtypes: List[Union[str, None]] = [device_types]
223
+ else:
224
+ dtypes = list(device_types)
225
+ for device_type in dtypes:
226
+ if device_type not in self._backend_fns:
227
+
228
+ def backend_impl(*args, **kwargs):
229
+ # Checks the assumption that outputs cannot alias
230
+ # inputs or other outputs.
231
+ storages = {
232
+ id(tensor.untyped_storage())
233
+ for tensor in iter_tensors(args, kwargs)
234
+ }
235
+
236
+ result = self._backend_fns[device_type](*args, **kwargs)
237
+
238
+ tuple_result = result
239
+ if not isinstance(result, tuple):
240
+ tuple_result = (result,)
241
+ for tensor in iter_tensors(tuple_result, {}):
242
+ key = id(tensor.untyped_storage())
243
+ if id(tensor.untyped_storage()) in storages:
244
+ fn = self._backend_fns[device_type]
245
+ module = inspect.getmodule(fn)
246
+ raise RuntimeError(
247
+ f"Tensors returned from custom ops (1) must not "
248
+ f"be inputs to the custom op and (2) may not alias "
249
+ f"any inputs or other returns. Please clone the "
250
+ f"the offending output tensors (e.g. output.clone()) "
251
+ f"or refactor your code. "
252
+ f"Offending op: {self._name} (with implementation in {module})"
253
+ )
254
+ storages.add(key)
255
+ return result
256
+
257
+ if device_type is None:
258
+ self._lib.impl(
259
+ self._name, backend_impl, "CompositeExplicitAutograd"
260
+ )
261
+ else:
262
+ self._lib.impl(
263
+ self._name,
264
+ backend_impl,
265
+ _C._dispatch_key_for_device(device_type),
266
+ )
267
+ self._backend_fns[device_type] = fn
268
+ return fn
269
+
270
+ # See NOTE: [Supporting decorator and non-decorator usage]
271
+ if fn is None:
272
+ return inner
273
+ return inner(fn)
274
+
275
+ def register_fake(self, fn: Callable, /) -> Callable:
276
+ r"""Register a FakeTensor implementation for this custom op.
277
+
278
+ This is necessary to get the operator to work efficiently with torch.compile.
279
+
280
+ The Fake impl (sometimes also known as a meta kernel or abstract impl)
281
+ specifies the behavior of this operator on Tensors that carry no data.
282
+ Given some input Tensors with certain properties
283
+ (sizes/strides/storage_offset/device), it specifies what the properties of
284
+ the output Tensors are.
285
+
286
+ Please see :func:`torch.library.impl_abstract` for more details.
287
+
288
+ Args:
289
+ fn (Callable): The function to register as the FakeTensor
290
+ implementation.
291
+
292
+ Examples:
293
+ >>> import torch
294
+ >>> import numpy as np
295
+ >>> from torch import Tensor
296
+ >>>
297
+ >>> # Example 1: an operator without data-dependent output shape
298
+ >>> @torch.library.custom_op("mylib::linear", mutates_args=())
299
+ >>> def linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
300
+ >>> return (x @ weight.t()) + bias
301
+ >>>
302
+ >>> @linear.register_fake
303
+ >>> def _(x, weight, bias):
304
+ >>> assert x.dim() == 2
305
+ >>> assert weight.dim() == 2
306
+ >>> assert bias.dim() == 1
307
+ >>> assert x.shape[1] == weight.shape[1]
308
+ >>> assert weight.shape[0] == bias.shape[0]
309
+ >>> assert x.device == weight.device
310
+ >>> return x.new_empty(x.size(0), weight.size(0))
311
+ >>>
312
+ >>> x = torch.randn(2, 2)
313
+ >>> weight = torch.randn(2, 2)
314
+ >>> bias = torch.randn(2)
315
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
316
+ >>> out = torch.compile(linear, fullgraph=True)(x, weight, bias)
317
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
318
+ >>> assert torch.allclose(out, torch.nn.functional.linear(x, weight, bias))
319
+ >>>
320
+ >>> # Example 2: an operator with data-dependent output shape
321
+ >>> @torch.library.custom_op("mylib::nonzero", mutates_args=())
322
+ >>> def nonzero(x: Tensor) -> Tensor:
323
+ >>> x_np = x.cpu().numpy()
324
+ >>> res = np.stack(np.nonzero(x_np), axis=1)
325
+ >>> return torch.tensor(res, device=x.device)
326
+ >>>
327
+ >>> @nonzero.register_fake
328
+ >>> def _(x):
329
+ >>> # Number of nonzero-elements is data-dependent.
330
+ >>> # Since we cannot peek at the data in an abstract impl,
331
+ >>> # we use the ctx object to construct a new symint that
332
+ >>> # represents the data-dependent size.
333
+ >>> ctx = torch.library.get_ctx()
334
+ >>> nnz = ctx.new_dynamic_size()
335
+ >>> shape = [nnz, x.dim()]
336
+ >>> result = x.new_empty(shape, dtype=torch.int64)
337
+ >>> return result
338
+ >>>
339
+ >>> x = torch.tensor([0, 1, 2, 0, 0, 1])
340
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
341
+ >>> out = torch.compile(nonzero, fullgraph=True)(x)
342
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
343
+ >>> assert torch.allclose(out, x.nonzero())
344
+
345
+ """
346
+ self._abstract_fn = fn
347
+ return fn
348
+
349
    def register_autograd(
        self,
        backward: Callable,
        /,
        *,
        setup_context: Optional[Callable] = None,
    ) -> None:
        r"""Register a backward formula for this custom op.

        In order for an operator to work with autograd, you need to register
        a backward formula:
        1. You must tell us how to compute gradients during the backward pass
        by providing us a "backward" function.
        2. If you need any values from the forward to compute gradients, you can
        use `setup_context` to save values for backward.

        ``backward_fn`` runs during the backward pass. It accepts ``(ctx, *grads)``:
        - ``grads`` is one or more gradients. The number of gradients matches
        the number of outputs of the operator.
        The ``ctx`` object is `the same ctx object <context_method_mixins>`_ used by
        :class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the
        same as :meth:`torch.autograd.Function.backward`.

        ``setup_context(ctx, inputs, output)`` runs during the forward pass.
        Please save quantities needed for backward onto the ``ctx`` object via
        either :meth:`torch.autograd.function.FunctionCtx.save_for_backward`
        or assigning them as attributes of ``ctx``. If your custom op has
        kwarg-only arguments, we expect the signature of ``setup_context``
        to be ``setup_context(ctx, inputs, keyword_only_inputs, output)``.

        Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is,
        they may not directly access :meth:`torch.Tensor.data_ptr` and they must
        not depend on or mutate global state. If you need a non-traceable backward,
        you can make it a separate custom_op that you call inside ``backward_fn``.

        Examples:
            >>> import torch
            >>> import numpy as np
            >>> from torch import Tensor
            >>>
            >>> @torch.library.custom_op("mylib::numpy_sin", mutates_args=())
            >>> def numpy_sin(x: Tensor) -> Tensor:
            >>>     x_np = x.cpu().numpy()
            >>>     y_np = np.sin(x_np)
            >>>     return torch.from_numpy(y_np).to(device=x.device)
            >>>
            >>> def setup_context(ctx, inputs, output):
            >>>     x, = inputs
            >>>     ctx.save_for_backward(x)
            >>>
            >>> def backward(ctx, grad):
            >>>     x, = ctx.saved_tensors
            >>>     return grad * x.cos()
            >>>
            >>> numpy_sin.register_autograd(backward, setup_context=setup_context)
            >>>
            >>> x = torch.randn(3, requires_grad=True)
            >>> y = numpy_sin(x)
            >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y))
            >>> assert torch.allclose(grad_x, x.cos())
            >>>
            >>> # Example with a keyword-only arg
            >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
            >>> def numpy_mul(x: Tensor, *, val: float) -> Tensor:
            >>>     x_np = x.cpu().numpy()
            >>>     y_np = x_np * val
            >>>     return torch.from_numpy(y_np).to(device=x.device)
            >>>
            >>> def setup_context(ctx, inputs, keyword_only_inputs, output):
            >>>     ctx.val = keyword_only_inputs["val"]
            >>>
            >>> def backward(ctx, grad):
            >>>     return grad * ctx.val
            >>>
            >>> numpy_mul.register_autograd(backward, setup_context=setup_context)
            >>>
            >>> x = torch.randn(3, requires_grad=True)
            >>> y = numpy_mul(x, val=3.14)
            >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y))
            >>> assert torch.allclose(grad_x, torch.full_like(x, 3.14))

        """
        # Autograd formulas only make sense for functional ops (no mutation,
        # no aliasing, at least one return) — reject anything else up front.
        schema = self._opoverload._schema
        if not _library.utils.is_functional_schema(schema):
            raise RuntimeError(
                f"Cannot register autograd formula for non-functional operator "
                f"{self} with schema {schema}. Please create "
                f"a functional operator and register an autograd formula for that."
            )

        # Stored on the opdef; consumed by the autograd kernel that
        # _register_to_dispatcher installs via make_autograd_impl(self).
        self._backward_fn = backward
        self._setup_context_fn = setup_context
441
+
442
    def _register_to_dispatcher(self) -> None:
        """Define this op in the dispatcher and install its default kernels.

        In order, this registers:
        - the schema itself (tagged pt2_compliant / needs_fixed_stride_order),
        - a fake kernel that defers to ``self._abstract_fn``,
        - an Autograd kernel built by ``make_autograd_impl``,
        - for mutable schemas, an ADInplaceOrView kernel that bumps the
          version counter of every mutated tensor argument.
        """
        lib = self._lib
        schema_str = self._name + self._schema
        cpp_schema = _C.parse_schema(schema_str)
        if utils.has_kwarg_only_tensors(cpp_schema):
            # If you want to support this, the progression is:
            # - supporting kwarg-only Tensors that are non-differentiable
            # - supporting kwarg-only Tensors (regardless of differentiability)
            raise NotImplementedError(
                f"custom_op with kwarg-only Tensor args. Please make your "
                f"tensors not kwarg-only. Got: {schema_str}"
            )

        lib.define(
            schema_str,
            tags=[_C.Tag.pt2_compliant_tag, _C.Tag.needs_fixed_stride_order],
        )
        self._opoverload = _library.utils.lookup_op(self._qualname)

        def fake_impl(*args, **kwargs):
            # _abstract_fn is resolved at call time, so register_fake may run
            # after this setup and still take effect.
            if self._abstract_fn is None:
                if _library.utils.can_generate_trivial_fake_impl(self._opoverload):
                    return None
                raise RuntimeError(
                    f"There was no fake impl registered for {self}. "
                    f"This is necessary for torch.compile/export/fx tracing to work. "
                    f"Please use `{self._init_fn.__name__}.register_fake` to add an "
                    f"fake impl."
                )
            return self._abstract_fn(*args, **kwargs)

        lib._register_fake(self._name, fake_impl, _stacklevel=4)

        autograd_impl = _library.autograd.make_autograd_impl(self._opoverload, self)
        lib.impl(self._name, autograd_impl, "Autograd", with_keyset=True)

        schema = self._opoverload._schema
        if schema.is_mutable:

            def adinplaceorview_impl(keyset, *args, **kwargs):
                # Bump the version counter of every argument the schema marks
                # as written-to, so autograd can detect in-place modification
                # of saved tensors.
                for arg, val in _library.utils.zip_schema(schema, args, kwargs):
                    if not arg.alias_info:
                        continue
                    if not arg.alias_info.is_write:
                        continue
                    if isinstance(val, Tensor):
                        autograd.graph.increment_version(val)
                    elif isinstance(val, (tuple, list)):
                        for v in val:
                            if isinstance(v, Tensor):
                                autograd.graph.increment_version(v)
                # Re-dispatch below ADInplaceOrView to run the actual kernel.
                with _C._AutoDispatchBelowADInplaceOrView():
                    return self._opoverload.redispatch(
                        keyset & _C._after_ADInplaceOrView_keyset, *args, **kwargs
                    )

            lib.impl(
                self._name,
                adinplaceorview_impl,
                "ADInplaceOrView",
                with_keyset=True,
            )
504
+
505
    def __call__(self, *args, **kwargs):
        """Invoke the custom op by dispatching through the registered overload."""
        return self._opoverload(*args, **kwargs)
507
+
508
+
509
+ # NOTE: [Supporting decorator and non-decorator usage]
510
+ #
511
+ # Some APIs may be both used as a decorator and not as a decorator.
512
+ # For example:
513
+ #
514
+ # >>> def fn(x):
515
+ # >>> return x.sin()
516
+ # >>>
517
+ # >>> # Usage 1: not as a decorator
518
+ # >>> numpy_sin.register_kernel("cuda", fn)
519
+ # >>>
520
+ # >>> # Usage 2: as a decorator
521
+ # >>> @numpy_sin.register_kernel("cuda")
522
+ # >>> def fn2(x):
523
+ # >>>     return x.sin()
524
+ #
525
+ # The way we support this is that `register_kernel` accepts an optional `fn`.
526
+ # If `fn` is provided (Usage 1), then we know that the user is using it not
527
+ # as a decorator.
528
+ # If `fn` is not provided (Usage 2), then `register_kernel` needs to return a
529
+ # decorator.
530
+
531
+
532
+ OPDEF_TO_LIB: Dict[str, "library.Library"] = {}
533
+ OPDEFS: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
534
+
535
+
536
def get_library_allowing_overwrite(namespace: str, name: str) -> "library.Library":
    """Return a fresh FRAGMENT Library for ``namespace::name``.

    If an earlier definition of the same qualname already handed out a
    Library, that Library is destroyed and forgotten first so the new one
    can overwrite the registration.
    """
    qualname = f"{namespace}::{name}"

    if qualname in OPDEF_TO_LIB:
        stale = OPDEF_TO_LIB[qualname]
        stale._destroy()
        del OPDEF_TO_LIB[qualname]

    fresh = library.Library(namespace, "FRAGMENT")
    OPDEF_TO_LIB[qualname] = fresh
    return fresh
546
+
547
+
548
def iter_tensors(
    args: Tuple[Any, ...], kwargs: Dict[str, Any], allowed_nesting: int = 1
) -> Iterator[Tensor]:
    """Yield every Tensor found in ``args`` and ``kwargs``, in argument order.

    Tensors nested inside tuples/lists are yielded as well, up to
    ``allowed_nesting`` levels deep (one level by default). All non-tensor
    values are ignored.

    Args:
        args: positional arguments to scan.
        kwargs: keyword arguments to scan (only the values are inspected).
        allowed_nesting: how many levels of tuple/list nesting to descend into.

    Yields:
        Each Tensor encountered.
    """
    # Fix: the annotation was ``Tuple[Any]`` (a 1-tuple); callers pass tuples
    # of arbitrary length, so ``Tuple[Any, ...]`` is the correct type.

    def check(arg):
        if isinstance(arg, Tensor):
            yield arg
        elif allowed_nesting > 0 and isinstance(arg, (tuple, list)):
            # Recurse with one fewer nesting level allowed.
            yield from iter_tensors(tuple(arg), {}, allowed_nesting - 1)

    for arg in args:
        yield from check(arg)
    for kwarg in kwargs.values():
        yield from check(kwarg)
561
+
562
+
563
def _maybe_get_opdef(
    op: Union[CustomOpDef, _ops.OpOverload, str]
) -> Optional[CustomOpDef]:
    """Resolve ``op`` to its CustomOpDef, or None if none was registered.

    Accepts a CustomOpDef (returned unchanged), an OpOverload (looked up by
    its qualified name), or the qualified name string itself.
    """
    if isinstance(op, CustomOpDef):
        return op
    qualname = op._name if isinstance(op, _ops.OpOverload) else op
    assert isinstance(qualname, str)
    return OPDEFS.get(qualname)
parrot/lib/python3.10/site-packages/torch/_library/fake_class_registry.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ from typing import Any, Dict, Optional, Protocol, Tuple
4
+
5
+ import torch
6
+
7
+ from torch._library.utils import parse_namespace
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+
12
class FakeScriptObject:
    """Python stand-in for a torchbind ScriptObject during tracing."""

    def __init__(self, wrapped_obj: Any, script_class_name: str):
        # The user-provided fake object backing this wrapper.
        self.wrapped_obj = wrapped_obj
        # The fully qualified name of the class of original script object.
        self.script_class_name = script_class_name
18
+
19
+
20
class FakeScriptMethod:
    """Bound-method proxy attached to a FakeScriptObject.

    Calling the proxy routes through the ``call_torchbind`` higher-order op,
    keeping method invocations on fake script objects visible to tracing.
    """

    def __init__(
        self,
        self_fake_obj: FakeScriptObject,
        method_name: str,
        schema: Optional[torch.FunctionSchema],
    ):
        # The fake object this method is bound to.
        self.self_fake_obj = self_fake_obj
        # Name of the method on the original script class.
        self.method_name = method_name
        # Schema of the real ScriptMethod when one exists; None for attrs
        # that are not ScriptMethods (e.g. __init__, __eq__ — see to_fake_obj).
        self.schema = schema

    def __call__(self, *args, **kwargs):
        # Local import; presumably deferred to avoid an import cycle at module
        # load time — confirm before hoisting to the top of the file.
        from torch._higher_order_ops.torchbind import call_torchbind

        return call_torchbind(self.self_fake_obj, self.method_name, *args, **kwargs)
35
+
36
+
37
class HasStaticMethodFromReal(Protocol):
    """Structural type for fake classes constructible from a real ScriptObject."""

    @classmethod
    def from_real(cls, real_obj: torch.ScriptObject):
        """Build the fake counterpart of ``real_obj``."""
        pass
41
+
42
+
43
class FakeClassRegistry:
    """In-memory mapping from a script class's full qualname to its fake class.

    Fixes in this revision: the warning messages contained typos
    ("overrided", "dereigster") — corrected to proper English.
    """

    def __init__(self):
        self._registered_class: Dict[str, Any] = {}

    def has_impl(self, full_qualname: str) -> bool:
        """Return True if a fake class is registered for ``full_qualname``."""
        return full_qualname in self._registered_class

    def get_impl(self, full_qualname: str) -> Any:
        """Return the registered fake class; raise RuntimeError if missing."""
        self._check_registered(full_qualname)
        return self._registered_class[full_qualname]

    def register(self, full_qualname: str, fake_class=None) -> None:
        """Register ``fake_class``, warning when an earlier entry is replaced."""
        if self.has_impl(full_qualname):
            log.warning(
                "%s is already registered. Previous fake class is overridden with %s.",
                full_qualname,
                fake_class,
            )
        self._registered_class[full_qualname] = fake_class

    def deregister(self, full_qualname: str) -> Any:
        """Remove and return the registered fake class; warn (and return None) if absent."""
        if not self.has_impl(full_qualname):
            log.warning(
                "Cannot deregister %s. Please use register_fake_class to register it first."
                " Or did you deregister it twice?",
                full_qualname,
            )
        else:
            return self._registered_class.pop(full_qualname)

    def clear(self) -> None:
        """Drop every registration."""
        self._registered_class.clear()

    def _check_registered(self, full_qualname: str) -> None:
        # Internal guard shared by the accessors above.
        if full_qualname not in self._registered_class:
            raise RuntimeError(
                f"{full_qualname} is not registered. Please use register_fake_class to register it first."
            )
81
+
82
+
83
+ global_fake_class_registry = FakeClassRegistry()
84
+
85
+
86
+ # TODO: add this check at compile time for __obj_flatten__.
87
+ def _check_valid_flat_script_obj(flat_x):
88
+ if not isinstance(flat_x, tuple):
89
+ raise RuntimeError("Expect flat x to be a tuple.")
90
+
91
+ for tp in flat_x:
92
+ if not isinstance(tp, tuple):
93
+ raise RuntimeError("Expect flat x to be a tuple of tuples.")
94
+
95
+ if not len(tp) == 2 or not isinstance(tp[0], str):
96
+ raise RuntimeError(
97
+ "Expect element of flat x to be a tuple of two elements with first element being a string"
98
+ )
99
+
100
+
101
def to_fake_obj(fake_mode, x: torch.ScriptObject) -> FakeScriptObject:
    """Convert a real ScriptObject into a FakeScriptObject.

    Flattens ``x`` via its ``__obj_flatten__`` protocol, fakifies every tensor
    in the flattened state under ``fake_mode``, rebuilds a fake instance with
    the registered fake class's ``__obj_unflatten__``, and wraps it together
    with method proxies for each of the real object's methods.
    """
    import torch.utils._pytree as pytree

    flat_x = x.__obj_flatten__()  # type: ignore[attr-defined]

    _check_valid_flat_script_obj(flat_x)

    # Replace every real tensor in the flattened state with a fake tensor.
    fake_flattened = pytree.tree_map_only(
        torch.Tensor,
        lambda t: fake_mode.from_tensor(t),
        flat_x,
    )

    fake_x = _find_fake_class_for_script_object(x).__obj_unflatten__(fake_flattened)

    fake_x_wrapped = FakeScriptObject(fake_x, x._type().qualified_name())  # type: ignore[attr-defined]

    # Mirror each method of the real object onto the wrapper as a
    # FakeScriptMethod so invocations route through call_torchbind.
    for name in x._method_names():  # type: ignore[attr-defined]
        attr = getattr(fake_x, name, None)
        if attr:
            if not callable(attr):
                raise RuntimeError(f"Expect {name} to be a callable but got {attr}.")

            real_attr = getattr(x, name)  # type: ignore[attr-defined]

            # real attr sometimes is not torch.ScriptMethod thus doesn't have schema e.g. __init___ or __eq__
            method_schema: Optional[torch.FunctionSchema] = None
            if isinstance(real_attr, torch.ScriptMethod):
                method_schema = real_attr.schema  # type: ignore[attr-defined]

            setattr(
                fake_x_wrapped,
                name,
                FakeScriptMethod(fake_x_wrapped, name, method_schema),
            )
        else:
            log.warning("fake object of %s doesn't implement method %s.", x, name)
    return fake_x_wrapped
139
+
140
+
141
def register_fake_class(qualname, fake_class: Optional[HasStaticMethodFromReal] = None):
    r"""Register a fake implementation for this class.

    It's in the same spirit of registering a fake implementation for
    an operator but with the difference that it
    associates a fake class with the original torch bind class (registered
    with torch::class_). In this way, torch.compile can handle them properly
    in components such as Dynamo and AOTAutograd.

    This API may be used as a decorator (see example). For the fake class, users
    are required to provide a from_real classmethod that takes a real object and
    returns an instance of the fake class. All tensors in the fake object should also
    be properly fakified with to_fake_tensor() in from_real.


    Examples:
        # For a custom class Foo defined in test_custom_class_registration.cpp:

        TORCH_LIBRARY(_TorchScriptTesting, m) {
          m.class_<TensorQueue>("_TensorQueue")
            .def(torch::init<at::Tensor>())
            .def("push", &TensorQueue::push)
            .def("pop", &TensorQueue::pop)
            .def("top", &TensorQueue::top)
            .def("size", &TensorQueue::size)
            .def("clone_queue", &TensorQueue::clone_queue)
            .def("__obj_flatten__", &TensorQueue::__obj_flatten__)
            .def_pickle(
                // __getstate__
                [](const c10::intrusive_ptr<TensorQueue>& self)
                    -> c10::Dict<std::string, at::Tensor> {
                  return self->serialize();
                },
                // __setstate__
                [](c10::Dict<std::string, at::Tensor> data)
                    -> c10::intrusive_ptr<TensorQueue> {
                  return c10::make_intrusive<TensorQueue>(std::move(data));
                });
        };
        # We could register a fake class FakeTensorQueue in Python as follows:
        import torch

        @torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue")
        class FakeTensorQueue:
            def __init__(self, queue):
                self.queue = queue

            @classmethod
            def __obj_unflatten__(cls, flattened_ctx):
                return cls(**dict(flattened_ctx))

            def push(self, x):
                self.queue.append(x)

            def pop(self):
                return self.queue.pop(0)

            def size(self):
                return len(self.queue)

    In this example, the original TensorQueue needs to add a __obj_flatten__ method
    to the class TensorQueue and the flattened result is passed into FakeTensorQueue's
    __obj_unflatten__ as inputs to create a fake class. This protocol allows pytorch to look
    at the contents of the script object and properly handle them in the subsystems
    like dynamo, aot_autograd or more.
    """

    def inner(fake_class: HasStaticMethodFromReal):
        ns, name = parse_namespace(qualname)

        # This also checks whether the referred torch::class_ exists.
        torchbind_class = torch._C._get_custom_class_python_wrapper(ns, name)

        from_method = getattr(fake_class, _CONVERT_FROM_REAL_NAME, None)
        if not from_method:
            raise RuntimeError(
                f"{fake_class} doesn't define a classmethod {_CONVERT_FROM_REAL_NAME}."
            )

        if not isinstance(fake_class.__dict__[_CONVERT_FROM_REAL_NAME], classmethod):
            raise RuntimeError(
                f"{_CONVERT_FROM_REAL_NAME} method is not a classmethod."
            )

        global_fake_class_registry.register(_full_qual_class_name(qualname), fake_class)
        return fake_class

    # Support both decorator usage (fake_class omitted) and direct calls.
    if fake_class is None:
        return inner
    return inner(fake_class)
231
+
232
+
233
def deregister_fake_class(qualname):
    """Remove the fake class registered for ``qualname`` (``ns::Name`` form)."""
    return global_fake_class_registry.deregister(_full_qual_class_name(qualname))
235
+
236
+
237
def has_fake_class(full_qualname) -> bool:
    """Return True if a fake class is registered for the fully qualified name."""
    return global_fake_class_registry.has_impl(full_qualname)
239
+
240
+
241
def find_fake_class(full_qualname) -> Optional[Any]:
    """Return the fake class registered under ``full_qualname``, or None."""
    if has_fake_class(full_qualname):
        return global_fake_class_registry.get_impl(full_qualname)
    return None
245
+
246
+
247
+ def _full_qual_class_name(qualname: str) -> str:
248
+ ns, name = parse_namespace(qualname)
249
+ return "__torch__.torch.classes." + ns + "." + name
250
+
251
+
252
+ # Return the namespace and class name from fully qualified name.
253
+ def _ns_and_class_name(full_qualname: str) -> Tuple[str, str]:
254
+ splits = full_qualname.split(".")
255
+ assert len(splits) == 5
256
+ _torch, torch_ns, classes, ns, class_name = splits
257
+ return ns, class_name
258
+
259
+
260
def _find_fake_class_for_script_object(x: torch.ScriptObject) -> Any:
    """Look up the fake class registered for ``x``'s script class.

    Raises RuntimeError with registration instructions when no fake class has
    been registered under the object's fully qualified class name.
    """
    full_qualname = x._type().qualified_name()  # type: ignore[attr-defined]
    ns, class_name = _ns_and_class_name(full_qualname)
    fake_class = find_fake_class(full_qualname)
    if fake_class is None:
        raise RuntimeError(
            f" ScriptObject's {full_qualname} haven't registered a fake class."
            f" Please use register_fake_class({ns}::{class_name}) to annotate a fake class for the script obj."
            f" Specifically, create a python class that implements a fake version for all the methods"
            f" that're used in the program and put annotated class in the program e.g. after loading the library."
            f" The fake methods can be written in the same way as a meta kernel for an operator but need to additionally"
            f" simulate the object's states. Be sure to add a {_CONVERT_FROM_REAL_NAME} classmethod"
            f" to enable creating a fake obj from a real one."
        )
    return fake_class
275
+
276
+
277
+ _CONVERT_FROM_REAL_NAME = "__obj_unflatten__"
278
+
279
+
280
def _fake_obj_from_real(fake_mode, x) -> Any:
    """Create a fake object from the real script object ``x``.

    Looks up the registered fake class and invokes its conversion classmethod
    with a fakification ctx installed, so user code can fakify tensor state
    under ``fake_mode``.
    """
    fake_class = _find_fake_class_for_script_object(x)

    from_real_method = getattr(fake_class, _CONVERT_FROM_REAL_NAME, None)
    if not from_real_method:
        raise RuntimeError(
            f"{fake_class} must define a classmethod {_CONVERT_FROM_REAL_NAME}"
            f" that converts the real object to the fake object."
        )

    # NOTE(review): the existence check above is for _CONVERT_FROM_REAL_NAME
    # ("__obj_unflatten__") while the call below is ``from_real`` — confirm
    # this asymmetry is intended.
    # from_real defined by user need the ctx to fakify the tensor states.
    ctx = torch._library.abstract_impl.AbstractImplCtx(fake_mode, None)
    with torch._library.abstract_impl.set_ctx_getter(lambda: ctx):
        return fake_class.from_real(x)
parrot/lib/python3.10/site-packages/torch/_library/infer_schema.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import inspect
3
+ import typing
4
+
5
+ from .. import device, dtype, Tensor, types
6
+
7
+
8
def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str:
    """Given a function with type hints, parses a schema.

    We make some assumptions to make our lives easier that correspond to how people
    write custom ops in real life:
    - none of the outputs alias any of the inputs or each other.
    - only the args listed in mutates_args are being mutated.

    Callers (e.g. the custom ops API) are responsible for checking these assumptions.
    """
    sig = inspect.signature(prototype_function)

    def error_fn(what):
        # All validation failures funnel through one ValueError format.
        raise ValueError(
            f"infer_schema(func): {what} " f"Got func with signature {sig})"
        )

    params = []
    seen_args = set()
    saw_kwarg_only_arg = False
    for idx, (name, param) in enumerate(sig.parameters.items()):
        if not supported_param(param):
            error_fn("We do not support positional-only args, varargs, or varkwargs.")

        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            # The first time we see a kwarg-only arg, add "*" to the schema.
            if not saw_kwarg_only_arg:
                params.append("*")
                saw_kwarg_only_arg = True

        if param.annotation is inspect.Parameter.empty:
            error_fn(f"Parameter {name} must have a type annotation.")

        if param.annotation not in SUPPORTED_PARAM_TYPES.keys():
            error_fn(
                f"Parameter {name} has unsupported type {param.annotation}. "
                f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}."
            )

        schema_type = SUPPORTED_PARAM_TYPES[param.annotation]
        if name in mutates_args:
            if not schema_type.startswith("Tensor"):
                error_fn(
                    f"Parameter {name} is in mutable_args but only Tensors or collections of Tensors can be mutated"
                )
            # Mark the arg as mutated with a unique alias set, e.g. Tensor(a0!).
            schema_type = f"Tensor(a{idx}!){schema_type[len('Tensor'):]}"
        seen_args.add(name)
        if param.default is inspect.Parameter.empty:
            params.append(f"{schema_type} {name}")
        else:
            if param.default is not None and not isinstance(
                param.default, (int, float, bool)
            ):
                error_fn(
                    f"Parameter {name} has an unsupported default value (we only support "
                    f"int, float, bool, None). Please file an issue on GitHub so we can "
                    f"prioritize this."
                )
            params.append(f"{schema_type} {name}={param.default}")
    # Every name in mutates_args must correspond to an actual parameter.
    mutates_args_not_seen = set(mutates_args) - seen_args
    if len(mutates_args_not_seen) > 0:
        error_fn(
            f"{mutates_args_not_seen} in mutates_args were not found in "
            f"the custom op's signature. "
            f"mutates_args should contain the names of all args that the "
            f"custom op mutates."
        )
    ret = parse_return(sig.return_annotation, error_fn)
    return f"({', '.join(params)}) -> {ret}"
77
+
78
+
79
def derived_types(
    base_type, cpp_type, list_base, optional_base_list, optional_list_base
):
    """Expand one (python type, schema type) pair into its derived variants.

    Always yields the base type and Optional[base]; the three flags control
    whether list (``T[]``), optional-element list (``T?[]``) and optional
    list (``T[]?``) variants are added, each in both Sequence and List form.
    """

    def seq_forms(typ):
        # Abstract and concrete sequence annotations map to the same schema type.
        return (typing.Sequence[typ], typing.List[typ])  # type: ignore[valid-type]

    pairs = [
        (base_type, cpp_type),
        (typing.Optional[base_type], f"{cpp_type}?"),
    ]
    if list_base:
        pairs.extend((s, f"{cpp_type}[]") for s in seq_forms(base_type))
    if optional_base_list:
        pairs.extend(
            (s, f"{cpp_type}?[]") for s in seq_forms(typing.Optional[base_type])
        )
    if optional_list_base:
        pairs.extend(
            (typing.Optional[s], f"{cpp_type}[]?") for s in seq_forms(base_type)  # type: ignore[valid-type]
        )
    return pairs
103
+
104
+
105
def get_supported_param_types():
    """Build the mapping from python parameter annotation to schema type.

    Each row lists a base python type, its schema spelling, and which derived
    list/optional variants it supports (expanded by ``derived_types``).
    """
    # (python type, schema type, has T[], has T?[], has T[]?)
    table = [
        (Tensor, "Tensor", True, True, False),
        (int, "SymInt", True, False, True),
        (float, "float", True, False, True),
        (bool, "bool", True, False, True),
        (str, "str", False, False, False),
        (types.Number, "Scalar", True, False, False),
        (dtype, "ScalarType", False, False, False),
        (device, "Device", False, False, False),
    ]
    pairs = []
    for row in table:
        pairs.extend(derived_types(*row))
    return dict(pairs)
121
+
122
+
123
# Allowed return annotations for custom ops and their schema spellings.
# Tuples of these are also accepted (see parse_return).
SUPPORTED_RETURN_TYPES = {
    Tensor: "Tensor",
    typing.List[Tensor]: "Tensor[]",
    int: "SymInt",
    float: "float",
    bool: "bool",
    types.Number: "Scalar",
}
131
+
132
+
133
def parse_return(annotation, error_fn):
    """Translate a return annotation into its schema return string.

    ``None`` means no returns ("()"); a Tuple[...] annotation becomes a
    parenthesized list; any single supported type maps directly. ``error_fn``
    is called with a message for unsupported annotations.
    """
    if annotation is None:
        return "()"

    unsupported_msg = (
        f"Return has unsupported type {annotation}. "
        f"The valid types are: {SUPPORTED_RETURN_TYPES}."
    )

    if typing.get_origin(annotation) is not tuple:
        # Single (non-tuple) return value.
        if annotation not in SUPPORTED_RETURN_TYPES.keys():
            error_fn(unsupported_msg)
        return SUPPORTED_RETURN_TYPES[annotation]

    elements = typing.get_args(annotation)
    for element in elements:
        if element not in SUPPORTED_RETURN_TYPES:
            error_fn(unsupported_msg)

    return "(" + ", ".join(SUPPORTED_RETURN_TYPES[e] for e in elements) + ")"
155
+
156
+
157
+ SUPPORTED_PARAM_TYPES = get_supported_param_types()
158
+
159
+
160
def supported_param(param: inspect.Parameter) -> bool:
    """Only plain (positional-or-keyword) and keyword-only parameters are supported."""
    allowed_kinds = (
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    )
    return param.kind in allowed_kinds
parrot/lib/python3.10/site-packages/torch/_library/simple_registry.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from .abstract_impl import AbstractImplHolder
3
+
4
+ __all__ = ["SimpleLibraryRegistry", "SimpleOperatorEntry", "singleton"]
5
+
6
+
7
class SimpleLibraryRegistry:
    """Registry for the "simple" torch.library APIs

    The "simple" torch.library APIs are a higher-level API on top of the
    raw PyTorch DispatchKey registration APIs that includes:
    - fake impl

    Registrations for these APIs do not go into the PyTorch dispatcher's
    table because they may not directly involve a DispatchKey. For example,
    the fake impl is a Python function that gets invoked by FakeTensor.
    Instead, we manage them here.

    SimpleLibraryRegistry is a mapping from a fully qualified operator name
    (including the overload) to SimpleOperatorEntry.
    """

    def __init__(self):
        self._data = {}

    def find(self, qualname: str) -> "SimpleOperatorEntry":
        # Create the entry lazily on first lookup; later lookups reuse it.
        entry = self._data.get(qualname)
        if entry is None:
            entry = SimpleOperatorEntry(qualname)
            self._data[qualname] = entry
        return entry
30
+
31
+
32
+ singleton: SimpleLibraryRegistry = SimpleLibraryRegistry()
33
+
34
+
35
class SimpleOperatorEntry:
    """This is 1:1 to an operator overload.

    The fields of SimpleOperatorEntry are Holders where kernels can be
    registered to.
    """

    def __init__(self, qualname: str):
        # Fully qualified operator name, including the overload.
        self.qualname: str = qualname
        # Holder for the operator's fake impl registration.
        self.abstract_impl: AbstractImplHolder = AbstractImplHolder(qualname)
parrot/lib/python3.10/site-packages/torch/_library/utils.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import dataclasses
3
+ import inspect
4
+ import sys
5
+ from typing import Any, Callable, Dict, Iterable, Tuple
6
+
7
+ import torch
8
+ import torch._utils_internal as _utils_internal
9
+ from torch import _C
10
+
11
+
12
@dataclasses.dataclass
class Kernel:
    """A kernel function paired with the source location that registered it."""

    func: Callable
    source: str

    def __call__(self, *args, **kwargs):
        # Calling the Kernel delegates straight to the wrapped function.
        return self.func(*args, **kwargs)
21
+
22
+
23
class RegistrationHandle:
    """Handle that runs a teardown callback when ``destroy()`` is called."""

    def __init__(self, on_destroy: Callable):
        # Stored callback; invoked once per destroy() call.
        self._on_destroy = on_destroy

    def destroy(self) -> None:
        """Run the teardown callback."""
        self._on_destroy()
31
+
32
+
33
def get_source(stacklevel: int) -> str:
    """Get a string that represents the caller.

    Example: "/path/to/foo.py:42"

    Use stacklevel=1 to get the caller's source
    Use stacklevel=2 to get the caller's caller's source
    etc.
    """
    info = inspect.getframeinfo(sys._getframe(stacklevel))
    return f"{info.filename}:{info.lineno}"
45
+
46
+
47
def parse_namespace(qualname: str) -> Tuple[str, str]:
    """Split ``"namespace::name"`` into its two parts.

    Raises ValueError when ``qualname`` is not exactly of that form.
    """
    pieces = qualname.split("::")
    if len(pieces) != 2:
        raise ValueError(
            f"Expected `qualname` to be of the form "
            f'"namespace::name", but got {qualname}. '
            f"The qualname passed to the torch.library APIs must consist "
            f"of a namespace and a name, e.g. aten::sin"
        )
    namespace, name = pieces
    return namespace, name
57
+
58
+
59
+ def lookup_op(qualname: str) -> torch._ops.OpOverload:
60
+ namespace, name = parse_namespace(qualname)
61
+ if "." in name:
62
+ name, overload = name.split(".")
63
+ else:
64
+ overload = "default"
65
+ ns = getattr(torch.ops, namespace)
66
+ packet = getattr(ns, name)
67
+ return getattr(packet, overload)
68
+
69
+
70
def is_builtin(op: torch._ops.OpOverload) -> bool:
    """True for ops living in the namespaces that ship with PyTorch itself."""
    assert isinstance(op, torch._ops.OpOverload)
    builtin_namespaces = {"aten", "prim", "prims"}
    return op.namespace in builtin_namespaces
73
+
74
+
75
def is_functional_schema(schema: Any) -> bool:
    """Check if the schema is functional.

    An operator is functional if:
    - it does not mutate any of its inputs
    - it does not return a view on any of its inputs
    - it has at least one return

    Accepts a ``torch._C.FunctionSchema``, a torchgen ``FunctionSchema``,
    or a schema string (parsed with torchgen).
    """

    def is_functional(schema):
        if schema.is_mutable:
            return False
        rets = schema.returns
        # A return carrying non-write alias info is a view of some input.
        is_non_mutating_view = len(rets) > 0 and any(
            r.alias_info is not None and not r.alias_info.is_write for r in rets
        )
        if is_non_mutating_view:
            return False
        if not schema.returns:
            return False
        return True

    if isinstance(schema, torch._C.FunctionSchema):
        return is_functional(schema)

    # Lazy import because not all PyTorch builds have torchgen
    from torchgen.model import FunctionSchema

    if isinstance(schema, str):
        schema = FunctionSchema.parse(schema)
    assert isinstance(schema, FunctionSchema)
    return is_functional(schema)
107
+
108
+
109
+ # should be torch._C.JitType but that annotation is busted
110
+ def is_tensorlist_like_type(typ: Any) -> bool:
111
+ return (
112
+ typ == _C.ListType(_C.TensorType.get())
113
+ or typ == _C.ListType(_C.OptionalType(_C.TensorType.get()))
114
+ or typ == _C.OptionalType(_C.ListType(_C.TensorType.get()))
115
+ or typ == _C.OptionalType(_C.ListType(_C.OptionalType(_C.TensorType.get())))
116
+ )
117
+
118
+
119
+ # should be torch._C.JitType but that annotation is busted
120
+ def is_tensor_like_type(typ: Any) -> bool:
121
+ return typ == _C.TensorType.get() or typ == _C.OptionalType(_C.TensorType.get())
122
+
123
+
124
def mutates_and_returns_first_arg(op: torch._ops.OpOverload):
    """Check if an op is an inplace aten op, i.e. it mutates and returns the first arg.

    TODO: torchgen/model.py's FunctionSchema.parse is the source of truth for this,
    but not all PyTorch builds have torchgen (due to the yaml dependency being weird).
    Figure this out.

    Example: add_(Tensor(a!) x, Tensor y) -> Tensor(a)
    """
    if op.namespace != "aten":
        return False
    schema = op._schema
    if not len(schema.returns) == 1:
        return False
    if schema.returns[0].alias_info is None:
        return False
    # The single return must alias exactly one set...
    alias_set = schema.returns[0].alias_info.after_set
    if len(alias_set) != 1:
        return False
    loc = next(iter(alias_set))
    if len(schema.arguments) < 1:
        return False
    first_arg = schema.arguments[0]
    if first_arg.alias_info is None:
        return False
    if not first_arg.alias_info.is_write:
        return False
    # ...and that set must be the one the (written-to) first argument belongs to.
    alias_set = first_arg.alias_info.after_set
    if len(alias_set) != 1:
        return False
    if loc != next(iter(alias_set)):
        return False
    # No other argument may carry alias info.
    for arg in schema.arguments[1:]:
        if arg.alias_info is not None:
            return False
    return True
160
+
161
+
162
def fill_defaults(schema, args, kwargs):
    """Normalize ``(args, kwargs)`` against ``schema``: every positional
    argument appears in the returned tuple and every kwarg-only argument
    appears in the returned dict, with missing values filled in from the
    schema's defaults.

    Returns ``(new_args, new_kwargs)``.
    """
    positional = []
    keyword = {}
    for index, info in enumerate(schema.arguments):
        if info.kwarg_only:
            # Kwarg-only args live in the dict; absent ones get their default.
            keyword[info.name] = kwargs.get(info.name, info.default_value)
        elif index < len(args):
            positional.append(args[index])
        else:
            # Caller omitted this trailing positional arg; use the default.
            positional.append(info.default_value)
    return tuple(positional), keyword
178
+
179
+
180
def zip_schema(
    schema: _C.FunctionSchema, args: Tuple[Any, ...], kwargs: Dict[str, Any]
) -> Iterable[Tuple[_C.Argument, Any]]:
    """zips schema.arguments and (args, kwargs) together.

    Assumes that (args, kwargs) were the inputs to some torch._ops.OpOverload:
    that is, kwargs must be keyword-only arguments and default values may be omitted.
    """
    assert len(schema.arguments) >= len(args) + len(kwargs)
    for position, info in enumerate(schema.arguments):
        if info.kwarg_only:
            if info.name in kwargs:
                yield info, kwargs[info.name]
            # Absent kwarg-only args were left at their defaults; skip them.
        elif position < len(args):
            yield info, args[position]
        # else: trailing positional args equal to their defaults are not
        # populated by the dispatcher, so there is nothing to yield.
202
+
203
+
204
def can_generate_trivial_fake_impl(op: torch._ops.OpOverload) -> bool:
    """Return True if ``op`` can get an auto-generated no-op fake impl:
    a mutable op that returns nothing."""
    assert isinstance(op, torch._ops.OpOverload)
    # We control the built-ins. These may (in rare cases)
    # do input metadata mutation (which we have banned on custom ops).
    if is_builtin(op):
        return False
    schema = op._schema
    # It's suspicious if the op is not mutable but returns nothing, so we
    # return False out of an abundance of caution. Otherwise, an op that
    # returns nothing has a trivial fake impl.
    return schema.is_mutable and not schema.returns
218
+
219
+
220
def requires_set_python_module() -> bool:
    """If an op was defined in C++ and extended from Python using the
    torch.library APIs, returns if we require that there have been a
    m.set_python_module("mylib.ops") call from C++ that associates
    the C++ op with a python module.
    """
    # Internal builds may opt out via _utils_internal; when the flag is
    # absent the requirement defaults to on.
    if hasattr(_utils_internal, "REQUIRES_SET_PYTHON_MODULE"):
        return _utils_internal.REQUIRES_SET_PYTHON_MODULE
    return True
227
+
228
+
229
def handle_dispatch_mode(curr_mode, op_overload, *args, **kwargs):
    """Invoke ``curr_mode.__torch_dispatch__`` for ``op_overload`` on the
    given ``(args, kwargs)``, collecting the overload types from the
    flattened tensor arguments."""
    assert isinstance(curr_mode, torch.utils._python_dispatch.TorchDispatchMode)
    flat_args, _ = torch.utils._pytree.tree_flatten((args, kwargs.values()))
    # TODO: need to double check the semantics of the "types" argument to torch_dispatch.
    # It's generated in PyInterpreter.cpp, but seems to be generated in two places,
    # where in one case we only include tensors with the python key, and in another
    # we include **all** tensors.
    python_key = torch._C.DispatchKey.Python
    overload_types = [
        type(arg)
        for arg in flat_args
        if isinstance(arg, torch.Tensor)
        and torch._C._dispatch_keys(arg).has(python_key)
    ]
    # TODO: check that I got these args correct (in C++, we pass in "0000"??)
    return curr_mode.__torch_dispatch__(op_overload, overload_types, args, kwargs)
245
+
246
+
247
def has_kwarg_only_args(schema: _C.FunctionSchema):
    """Return True if the schema declares at least one keyword-only argument."""
    for argument in schema.arguments:
        if argument.kwarg_only:
            return True
    return False
249
+
250
+
251
+ def has_kwarg_only_tensors(schema: _C.FunctionSchema):
252
+ for a in schema.arguments:
253
+ if not (is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type)):
254
+ continue
255
+ if not a.kwarg_only:
256
+ continue
257
+ return True
258
+ return False
parrot/lib/python3.10/site-packages/torch/ao/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mypy: allow-untyped-defs
# torch.ao is a package with a lot of interdependencies.
# We will use lazy import to avoid cyclic dependencies here.


# Public submodules; each is imported on first attribute access rather
# than eagerly at package import time.
__all__ = [
    "nn",
    "ns",
    "quantization",
    "pruning",
]
12
+
13
def __getattr__(name):
    """Lazily import the public submodule *name* on first attribute access
    (PEP 562 module-level __getattr__)."""
    if name not in __all__:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # Import lazily to avoid the cyclic dependencies within torch.ao.
    import importlib
    return importlib.import_module("." + name, __name__)