ZTWHHH committed on
Commit
a805f9f
·
verified ·
1 Parent(s): fcaa79d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. llava_next/lib/python3.10/site-packages/torch/_custom_op/__init__.py +0 -0
  3. llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc +0 -0
  5. llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc +0 -0
  6. llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc +0 -0
  7. llava_next/lib/python3.10/site-packages/torch/_custom_op/autograd.py +273 -0
  8. llava_next/lib/python3.10/site-packages/torch/_custom_op/functional.py +173 -0
  9. llava_next/lib/python3.10/site-packages/torch/_custom_op/impl.py +1096 -0
  10. llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc +3 -0
  11. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc +0 -0
  13. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc +0 -0
  14. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc +0 -0
  15. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc +0 -0
  16. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc +0 -0
  17. llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc +0 -0
  18. llava_next/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py +84 -0
  19. llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes.py +422 -0
  20. llava_next/lib/python3.10/site-packages/torch/_numpy/_funcs.py +73 -0
  21. llava_next/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py +2056 -0
  22. llava_next/lib/python3.10/site-packages/torch/_numpy/_getlimits.py +13 -0
  23. llava_next/lib/python3.10/site-packages/torch/_numpy/_normalizations.py +243 -0
  24. llava_next/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py +437 -0
  25. llava_next/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py +71 -0
  26. llava_next/lib/python3.10/site-packages/torch/_numpy/fft.py +128 -0
  27. llava_next/lib/python3.10/site-packages/torch/_numpy/linalg.py +237 -0
  28. llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc +0 -0
  29. llava_next/lib/python3.10/site-packages/torch/_numpy/testing/utils.py +2381 -0
  30. llava_next/lib/python3.10/site-packages/torch/ao/__init__.py +16 -0
  31. llava_next/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc +0 -0
  32. llava_next/lib/python3.10/site-packages/torch/mps/__init__.py +130 -0
  33. llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc +0 -0
  35. llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc +0 -0
  36. llava_next/lib/python3.10/site-packages/torch/mps/event.py +45 -0
  37. llava_next/lib/python3.10/site-packages/torch/mps/profiler.py +59 -0
  38. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py +8 -0
  39. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py +37 -0
  45. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py +30 -0
  46. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py +39 -0
  47. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp +102 -0
  48. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h +67 -0
  49. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu +490 -0
  50. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1205,3 +1205,4 @@ valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lf
1205
  llava_next/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1206
  llava_next/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1207
  vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
1205
  llava_next/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1206
  llava_next/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1207
  vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1208
+ llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/torch/_custom_op/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc ADDED
Binary file (9.03 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc ADDED
Binary file (5.87 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc ADDED
Binary file (37.6 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_custom_op/autograd.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.utils._pytree as pytree
3
+ from collections import namedtuple
4
+ import functools
5
+
6
+
7
# NOTE [CustomOp autograd kernel indirection]
# We register `inner` as the autograd kernel for this custom_op.
# `inner` either calls the autograd formula registered by the user,
# or goes into an `autograd_not_implemented` kernel.
#
# The reason why this indirection exists is
# so that we can swap out the autograd kernel (the PyTorch dispatcher
# doesn't actually allow us to do this). By default, we want
# the `autograd_not_implemented` behavior, but then the user may come
# and register something that is actually a backward formula
def autograd_kernel_indirection(custom_op):
    """Return the autograd kernel for ``custom_op``.

    The returned ``inner`` dispatches, at call time, to whichever 'autograd'
    impl is currently registered on ``custom_op``; if none is registered it
    falls back to ``autograd_not_implemented``. Registering only one of
    'backward' / 'save_for_backward' is an error (both are required to
    generate the 'autograd' impl) and raises RuntimeError.
    """
    autograd_fallback = autograd_not_implemented(custom_op)

    def inner(*args, **kwargs):
        # The lookup happens on every call so that impls registered after
        # this kernel was installed are still picked up.
        if custom_op._has_impl('autograd'):
            kernel = custom_op._get_impl('autograd').func
            return kernel(*args, **kwargs)
        # As explained in NOTE ["backward", "save_for_backward", and "autograd"],
        # after the user gives us "backward" and "save_for_backward", we generate
        # the "autograd" impl. If the user only provided one, then we tell
        # the user they've done something wrong.
        if custom_op._has_impl('save_for_backward') or custom_op._has_impl('backward'):
            missing = (
                'save_for_backward' if custom_op._has_impl('backward')
                else 'backward'
            )
            found = 'save_for_backward' if missing == 'backward' else 'backward'
            loc = custom_op._get_impl(found).location
            raise RuntimeError(
                f"We found a '{found}' registration for {custom_op} at "
                f"{loc} but were unable to find a '{missing}' registration. "
                f"To use the CustomOp API to register a backward formula, "
                f"please provide us both a backward function and a "
                f"'save for backward' function via `impl_backward` and "
                f"`impl_save_for_backward` respectively.")
        return autograd_fallback(*args, **kwargs)
    return inner
44
+
45
+
46
+ # TODO(#101191): Use the actual C++ autograd not implemented fallback,
47
+ # or change the default autograd fallback to the autograd not implemented fallback.
48
+ def autograd_not_implemented(custom_op):
49
+ def kernel(*args, **kwargs):
50
+ if torch.is_grad_enabled() and pytree.tree_any(
51
+ lambda x: isinstance(x, torch.Tensor) and x.requires_grad, (args, kwargs)
52
+ ):
53
+ raise RuntimeError("Autograd has not been implemented for operator")
54
+ with torch._C._AutoDispatchBelowAutograd():
55
+ return custom_op(*args, **kwargs)
56
+ return kernel
57
+
58
+
59
def mark_non_differentiable(ctx, output, output_differentiability):
    """Mark outputs flagged non-differentiable on the autograd ``ctx``.

    Output types are restricted to be:
    - Tensor
    - Tensor[]
    - int, bool, Scalar, float
    See _check_can_register_backward

    Args:
        ctx: an autograd.Function context (must expose mark_non_differentiable).
        output: the op's output (a single value or a tuple of values).
        output_differentiability: optional list of bools, one per output;
            when None this function is a no-op.

    Raises:
        RuntimeError: if a non-Tensor output is declared differentiable.
    """
    if output_differentiability is not None:
        if not isinstance(output, tuple):
            tuple_output = (output,)
        else:
            tuple_output = output  # type: ignore[assignment]
        assert len(output_differentiability) == len(tuple_output)
        non_differentiable_tensors = []
        for idx, (differentiable, out) in enumerate(zip(output_differentiability, tuple_output)):
            if isinstance(out, torch.Tensor):
                if not differentiable:
                    non_differentiable_tensors.append(out)
                continue
            if isinstance(out, list):
                # Tensor[] output: all of its elements are marked together.
                if not differentiable:
                    non_differentiable_tensors.extend(out)
                continue
            if differentiable:
                # Bug fix: the original message read "cannot have be marked".
                raise RuntimeError(
                    f"With output_differentiability={output_differentiability}. "
                    f"At idx {idx}, we received an object of type {type(out)} that "
                    f"is not a Tensor, so it cannot be marked as differentiable in "
                    f"output_differentiability.")
        if non_differentiable_tensors:
            ctx.mark_non_differentiable(*non_differentiable_tensors)
89
+
90
+
91
def construct_autograd_kernel(
        schema,
        output_differentiability,
        custom_op,
        op_overload,
        save_for_backward_fn,
        backward_fn):
    """Build an autograd kernel for ``custom_op`` from user-supplied
    ``save_for_backward_fn`` and ``backward_fn``.

    Returns ``apply``, which flattens its args, runs a dynamically generated
    torch.autograd.Function (forward redispatches below Autograd to
    ``op_overload``; backward calls ``backward_fn``), and unflattens the
    result back to the op's output structure.
    """

    def apply(*args):
        flat_args, spec = pytree.tree_flatten(args)
        # Captured by `forward` (writer) and `backward`/`apply` (readers);
        # autograd.Function.apply only accepts flat tensor args, so the
        # output structure has to be smuggled out through this nonlocal.
        out_spec = None

        def forward(ctx, *flat_args):
            ctx.set_materialize_grads(True)
            args = pytree.tree_unflatten(list(flat_args), spec)
            with torch._C._AutoDispatchBelowAutograd():
                output = op_overload(*args)

            # We use the info about args to give better error messages in backward
            args_info = namedtuple_args(
                schema, pytree.tree_map(lambda arg: type(arg), args))

            save_for_backward_fn_inputs = namedtuple_args(schema, args)
            to_save = save_for_backward_fn(save_for_backward_fn_inputs, output)

            save_pytree_for_backward(ctx, (to_save, args_info))
            mark_non_differentiable(ctx, output, output_differentiability)

            nonlocal out_spec
            flat_output, out_spec = pytree.tree_flatten(output)
            return tuple(flat_output)

        def backward(ctx, *flat_grad_output):
            assert out_spec is not None
            grads = pytree.tree_unflatten(list(flat_grad_output), out_spec)
            saved, args_info = unpack_saved(ctx)
            # There is nothing on the ctx object for now, it is just there so
            # that we can add additional things in the future.
            inner_ctx = object()
            if not isinstance(grads, tuple):
                grads = (grads,)
            grad_inputs_dict = backward_fn(inner_ctx, saved, *grads)

            # Massage the grad_inputs_dict to a form acceptable by
            # autograd.Function.
            validate_grad_inputs_dict(grad_inputs_dict, custom_op, args_info)
            return grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info)

        generated_cls = gen_autograd_function(
            custom_op._opname + '_customop', forward, backward)

        flat_output = generated_cls.apply(*flat_args)
        assert out_spec is not None
        return pytree.tree_unflatten(list(flat_output), out_spec)
    return apply
146
+
147
+
148
def gen_autograd_function(name, forward, backward):
    """Dynamically create a torch.autograd.Function subclass named ``name``
    whose forward/backward staticmethods are the given callables."""
    members = {
        'forward': staticmethod(forward),
        'backward': staticmethod(backward),
    }
    return type(name, (torch.autograd.Function,), members)
158
+
159
+
160
@functools.lru_cache
def namedtuple_args_cls(schema):
    """Build (and memoize, keyed on the schema object) a namedtuple class
    whose fields mirror the schema's argument names."""
    field_names = [arg.name for arg in schema.arguments.flat_all]
    cls_name = f"{schema.name}_args"
    # mypy doesn't support dynamic namedtuple name
    return namedtuple(cls_name, field_names)  # type: ignore[misc]
167
+
168
+
169
def namedtuple_args(schema, args):
    """Pack a positional-args tuple into the schema's namedtuple type."""
    assert isinstance(args, tuple)
    return namedtuple_args_cls(schema)(*args)
173
+
174
+
175
def validate_grad_inputs_dict(grad_inputs_dict, forward_op, args_info):
    """Validate the dict returned by a user backward function.

    ``grad_inputs_dict`` must be a dict mapping each tensor-like argument
    name of ``forward_op`` to its gradient (None, a Tensor, or a list of
    None/Tensor matching a Tensor[] argument). ``args_info`` is the
    namedtuple of argument *types* recorded at forward time, used to check
    that gradients are only supplied for Tensor inputs. Any violation
    raises RuntimeError via ``error``.
    """
    def error(what):
        # Point the user at the location where their backward was registered.
        backward = forward_op._get_impl('backward')
        raise RuntimeError(
            f"In the backward function defined for {forward_op} at "
            f"{backward.location} using the CustomOp API, {what}")

    if not isinstance(grad_inputs_dict, dict):
        error(f"expected the output of the backward function to be a dict but "
              f"got {type(grad_inputs_dict)}")

    # Exactly the tensor-like args must appear as keys — no more, no fewer.
    expected_keys = {arg.name for arg in forward_op._schema.arguments.flat_all
                     if arg.type.is_tensor_like()}
    actual_keys = grad_inputs_dict.keys()
    if expected_keys != actual_keys:
        error(f"expected the returned grad_input dict to have keys "
              f"{expected_keys} but got {actual_keys}. The backward "
              f"function must return a gradient (can be None) for each arg "
              f"to the CustomOp that may be a Tensor or Sequence[Tensor]. "
              f"Args declared to be non-Tensor-like types should not appear "
              f"in the grad_input dict")

    for name, grad in grad_inputs_dict.items():
        arg_info = getattr(args_info, name)

        # Case 1: the forward arg was a list (Tensor[]) — the gradient must
        # be a sequence of matching length holding None/Tensor entries.
        if isinstance(arg_info, list):
            if not isinstance(grad, (tuple, list)):
                error(f"for input '{name}' expected the grad_input dict to "
                      f"hold a list of gradients but got object of type "
                      f"{type(grad)}.")
            if not len(grad) == len(arg_info):
                error(f"for input '{name}' expected the grad_input dict to "
                      f"hold a list of {len(arg_info)} gradients but got "
                      f"{len(grad)}")
            for idx, (g, info) in enumerate(zip(grad, arg_info)):
                if g is None:
                    continue
                if not isinstance(g, torch.Tensor):
                    error(f"for input '{name}' expected the grad_input dict to "
                          f"hold a list of None or Tensor gradients but got "
                          f"object of {type(g)} at index {idx}")
                # info is the recorded *type* of the idx-th forward element;
                # a Tensor gradient is only valid where a Tensor went in.
                if info != torch.Tensor:
                    error(f"for input '{name}', got a Tensor as the gradient "
                          f"for the {idx}-th value but expected None because "
                          f"the {idx}-th value was not a Tensor (it was "
                          f"type {arg_info}")
            continue

        # Case 2: scalar-ish or Tensor forward arg — gradient is None or Tensor.
        if grad is None:
            continue
        if not isinstance(grad, torch.Tensor):
            error(f"got object of type {type(grad)} as the gradient for input "
                  f"'{name}', "
                  f"but expected the gradient to be either None or a Tensor")
        if arg_info != torch.Tensor:
            error(f"got a Tensor as the gradient for input '{name}' but "
                  f"expected None as the gradient because input '{name}' "
                  f"was not a Tensor (it was type {arg_info}).")
233
+
234
def grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info):
    """Convert a per-arg gradient dict into the flat tuple that
    autograd.Function.backward must return (None for args without grads)."""
    per_arg = []
    for name, arg_info in args_info._asdict().items():
        if name in grad_inputs_dict:
            per_arg.append(grad_inputs_dict[name])
        else:
            # Non-tensor args get a None of the same (pytree) shape.
            per_arg.append(pytree.tree_map(lambda x: None, arg_info))
    flat, _ = pytree.tree_flatten(per_arg)
    return tuple(flat)
242
+
243
+ # Saves "stuff" (a pytree) onto the ctx object. Use unpack_saved to unpack it.
244
+ # autograd.Function prefers that users use ctx.save_for_backward to
245
+ # save Tensors (to avoid reference cycles) and for non-Tensors to go onto the
246
+ # ctx object.
247
+ def save_pytree_for_backward(ctx, stuff):
248
+ flat_stuff, spec = pytree.tree_flatten(stuff)
249
+ num_elts = len(flat_stuff)
250
+ tensor_idxs = [idx for idx, thing in enumerate(flat_stuff)
251
+ if isinstance(thing, torch.Tensor)]
252
+ non_tensor_idxs = [idx for idx, thing in enumerate(flat_stuff)
253
+ if not isinstance(thing, torch.Tensor)]
254
+ tensors = [thing for thing in flat_stuff if isinstance(thing, torch.Tensor)]
255
+ non_tensors = [thing for thing in flat_stuff if not isinstance(thing, torch.Tensor)]
256
+
257
+ ctx.spec = spec
258
+ ctx.num_elts = num_elts
259
+ ctx.save_for_backward(*tensors)
260
+ ctx.tensor_idxs = tensor_idxs
261
+ ctx.saved_non_tensors = non_tensors
262
+ ctx.non_tensor_idxs = non_tensor_idxs
263
+
264
+
265
# Inverse operation to save_pytree_for_backward
def unpack_saved(ctx):
    """Rebuild the pytree stored on ``ctx`` by save_pytree_for_backward."""
    flat = [None] * ctx.num_elts
    # Slot tensors and non-tensors back into their original flat positions.
    for idx, tensor in zip(ctx.tensor_idxs, ctx.saved_tensors):
        flat[idx] = tensor
    for idx, obj in zip(ctx.non_tensor_idxs, ctx.saved_non_tensors):
        flat[idx] = obj
    return pytree.tree_unflatten(flat, ctx.spec)
llava_next/lib/python3.10/site-packages/torch/_custom_op/functional.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.library import Library
3
+ from torch._ops import OpOverload
4
+ from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseTy, BaseType
5
+ from torch._C import _ExcludeDispatchKeyGuard, DispatchKeySet, DispatchKey
6
+ from .autograd import autograd_not_implemented
7
+ import torch.utils._pytree as pytree
8
+ import weakref
9
+
10
+
11
def register_functional_op(
    lib: Library,
    new_op_name: str,
    mutable_op: OpOverload,
) -> None:
    """Given a mutable operator, registers the functional variant.

    This API also correctly links the functional variant with the mutable
    operator for the purposes of functionalization.

    All of the new registrations are performed on the ``lib`` passed in.

    Arguments:
        lib (Library): Should be a torch.library.Library object that has
            the same namespace as ``mutable_op``'s namespace.
            lib will be used to register the new functional op as well
            as a functionalization kernel for the ``mutable_op``
            If you don't have a library handy, use
            ``torch.library.Library(ns, 'FRAGMENT')`` to construct one.
        new_op_name (str): The name of the functional operator (without the
            namespace). If no namespace, the new functional variant will be
            accessible under ``torch.ops.{lib.ns}.new_op_name``.
        mutable_op (OpOverload): The mutable custom operator. Note
            that you may need to add a `.default` to it, like
            `torch.ops.aten.abs_.default`.

    """
    # Reject ops whose schema we can't handle (must be SchemaKind.mutable,
    # no aliased returns, plain-Tensor args only).
    validate(mutable_op)
    schema = functional_schema(new_op_name, mutable_op)
    lib.define(schema)

    functional_impl = construct_functional_impl(mutable_op)
    lib.impl(new_op_name, functional_impl, 'CompositeExplicitAutograd')

    functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default

    # There's no easy way for us to generate the autograd kernel, so we
    # use autograd_not_implemented. Also, this makes it so that the user
    # is unable to register an autograd formula themselves. This shouldn't
    # be a problem if the user doesn't use the functional op directly
    # in their program, but we may need to revisit this in the future.
    lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd')

    # weakref.proxy avoids the kernel closure keeping mutable_op alive.
    f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op)

    lib.impl(mutable_op, f_kernel, 'Functionalize')
57
+
58
+
59
def construct_functional_impl(mutable_op):
    """Build the CompositeExplicitAutograd impl of the functional variant.

    The impl clones every written-to arg, runs ``mutable_op`` on the clones,
    and returns the mutated clones as extra outputs after the op's own
    outputs.
    """
    def functional_impl(*args):
        # Strategy:
        # - clone args that would have been mutated
        # - run mutable_op
        # - return the cloned args as additional outputs
        call_args = []
        mutated_clones = []
        for is_write, arg in zip(mutable_args(mutable_op), args):
            if not is_write:
                call_args.append(arg)
                continue
            clone = arg.clone()
            call_args.append(clone)
            mutated_clones.append(clone)

        result = mutable_op(*call_args)
        if result is None:
            return tuple(mutated_clones)
        if isinstance(result, tuple):
            return (*result, *mutated_clones)
        return (result, *mutated_clones)

    return functional_impl
81
+
82
+
83
+ def construct_functionalization_kernel(mutable_op, functional_op):
84
+ def kernel(*args):
85
+ # There's nothing to be functionalized!
86
+ # We can still end up here because DispatchKey::Functionalize is a mode key
87
+ if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args):
88
+ with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
89
+ return mutable_op(*args)
90
+
91
+ # NB: This differs from the codegen -- codegen handles cases where there
92
+ # are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper.
93
+ # This only really matters for XLA (mixed CPU-XLA tensors) and
94
+ # running functionalization without the PT2 stack (which guarantees to us that
95
+ # all tensors are FunctionalTensorWrapper).
96
+ if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args):
97
+ raise RuntimeError("{mutable_op}: expected all args to be FunctionalTensorWrapper")
98
+
99
+ unwrapped_args = []
100
+ for arg in args:
101
+ if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg):
102
+ torch._sync(arg)
103
+ unwrapped = torch._from_functional_tensor(arg)
104
+ unwrapped_args.append(unwrapped)
105
+ else:
106
+ unwrapped_args.append(arg)
107
+
108
+ with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
109
+ output = functional_op(*unwrapped_args)
110
+
111
+ num_actual_output = len(mutable_op._schema.returns)
112
+ actual_output = pytree.tree_map(
113
+ torch._to_functional_tensor, output[:num_actual_output])
114
+
115
+ new_values_to_propagate = output[num_actual_output:]
116
+ inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args)
117
+ if is_write]
118
+ assert len(new_values_to_propagate) == len(inputs_to_replace)
119
+ for new_value, arg in zip(new_values_to_propagate, inputs_to_replace):
120
+ torch._C._propagate_xla_data(arg, new_value)
121
+ torch._C._replace_(arg, new_value)
122
+ torch._C._commit_update(arg)
123
+ torch._sync(arg)
124
+
125
+ if len(actual_output) == 1:
126
+ return actual_output[0]
127
+ elif len(actual_output) == 0:
128
+ return None
129
+ return actual_output
130
+
131
+ return kernel
132
+
133
+
134
def validate(mutable_op: OpOverload):
    """Check that ``mutable_op`` is eligible for register_functional_op.

    Raises TypeError for non-OpOverload inputs, RuntimeError for non-mutable
    schema kinds, and NotImplementedError for the aliased-return and
    optional/list-tensor-arg cases we don't support yet.
    """
    if not isinstance(mutable_op, OpOverload):
        raise TypeError(
            f"register_functional_op(mutable_op): expected mutable_op to be instance of "
            f"OpOverload but got {type(mutable_op)}")

    # There are generally three types of "in-place" or "mutable" ops.
    # Each of them have their own conventions:
    # - inplace (first input modified in-place and returned as only output)
    # - out= (some args modified in-place and returned as outputs)
    # - mutable (some args modified in-place but none of those returned as outputs)
    # In theory we can support all three, but we'll just support the last
    # option right now for simplicity.
    schema = FunctionSchema.parse(str(mutable_op._schema))
    if not schema.kind() == SchemaKind.mutable:
        raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)")
    for ret in schema.returns:
        # construct_functionalization_kernel assumes this for simplicity
        if ret.annotation is not None:
            raise NotImplementedError(
                "NYI: register_functional_op(op) where op returns a mutated or aliased value. "
                "Please file an issue (and as a workaround, modify your operator to "
                "not return the mutated value or aliases)")
    for arg in schema.arguments.flat_all:
        # construct_functionalization_kernel assumes this for simplicity
        if arg.type.is_tensor_like() and arg.type != BaseType(BaseTy.Tensor):
            raise NotImplementedError(
                "NYI: register_functional_op(op) where op accepts Optional or List of tensors."
                "Please file an issue.")
163
+
164
+
165
def functional_schema(new_op_name, op: OpOverload):
    """Derive the schema string of the functional variant of ``op``:
    same signature, alias annotations stripped, renamed to ``new_op_name``."""
    parsed = FunctionSchema.parse(str(op._schema))
    renamed = parsed.signature().with_name(OperatorName.parse(new_op_name))
    return str(renamed)
169
+
170
+
171
def mutable_args(op: OpOverload):
    """One bool per argument of ``op``: True iff the arg is written to
    (i.e. it has alias info with is_write set)."""
    flags = []
    for arg in op._schema.arguments:
        info = arg.alias_info
        flags.append(False if info is None else info.is_write)
    return tuple(flags)
llava_next/lib/python3.10/site-packages/torch/_custom_op/impl.py ADDED
@@ -0,0 +1,1096 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import dataclasses
3
+ import functools
4
+ import inspect
5
+ import sys
6
+ import typing
7
+ import weakref
8
+
9
+ from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseType, ListType, BaseTy
10
+
11
+ import torch
12
+ import torch._C as _C
13
+ import torch.library as library
14
+
15
+ from .autograd import autograd_kernel_indirection, construct_autograd_kernel
16
+
17
+ """
18
+ For a detailed guide on custom ops, please see
19
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
20
+
21
+ This file includes pieces of the implementation of our custom operator API.
22
+ """
23
+
24
__all__ = ["custom_op", "CustomOp", "get_ctx", "AbstractImplCtx"]


# Device-type strings accepted by the custom-op API, mapped to the
# corresponding dispatch-key names.
SUPPORTED_DEVICE_TYPE_TO_KEY = {
    "cpu": "CPU",
    "cuda": "CUDA",
}

# We will not let users register CustomOps with anything that could look like
# PyTorch internals to avoid confusion.
RESERVED_NS = {
    "prim",
    "prims",
    "aten",
    "at",
    "torch",
    "pytorch",
}
42
+
43
+
44
+ def custom_op(
45
+ qualname: str, manual_schema: typing.Optional[str] = None
46
+ ) -> typing.Callable:
47
+ r"""Creates a new CustomOp object.
48
+
49
+ WARNING: if you're a user, please do not use this directly
50
+ (instead use the torch._custom_ops APIs).
51
+ Also please see the following for a detailed guide on custom ops.
52
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
53
+
54
+ In PyTorch, defining an op (short for "operator") is a two step-process:
55
+ - we need to define (create) the op
56
+ - we need to implement behavior for how the operator interacts with
57
+ various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
58
+
59
+ This entrypoint defines the CustomOp object (the first step);
60
+ you must then perform the second step by calling various methods on
61
+ the CustomOp object.
62
+
63
+ This API is used as a decorator (see examples).
64
+
65
+ Arguments:
66
+ qualname (str): Should be a string that looks like
67
+ "namespace::operator_name". Operators in PyTorch need a namespace to
68
+ avoid name collisions; a given operator may only be created once.
69
+ If you are writing a Python library, we recommend the namespace to
70
+ be the name of your top-level module. The operator_name must be
71
+ the same as the name of the function you pass to custom_op
72
+ (see examples).
73
+ manual_schema (Optional[str]): Each PyTorch operator needs a schema that
74
+ tells PyTorch the types of the inputs/outputs. If None (default),
75
+ we will infer the schema from the type annotations on the function
76
+ (see examples). Otherwise, if you don't want to use type annotations,
77
+ you may provide us the schema string.
78
+
79
+ Example::
80
+ >>> import numpy as np
81
+ >>> from torch import Tensor
82
+ >>>
83
+ >>> # Step 1: define the CustomOp.
84
+ >>> # We need to provide the decorator a "prototype function"
85
+ >>> # (a function with Python ellipses as the body).
86
+ >>> @custom_op("mylibrary::numpy_sin")
87
+ >>> def numpy_sin(x: Tensor) -> Tensor:
88
+ >>> ...
89
+ >>>
90
+ >>> # numpy_sin is now an instance of class CustomOp
91
+ >>> print(type(numpy_sin))
92
+ >>>
93
+ >>> # Step 2: Register an implementation for various PyTorch subsystems
94
+ >>>
95
+ >>> # Register an implementation for CPU tensors
96
+ >>> @numpy_sin.impl('cpu'):
97
+ >>> def numpy_sin_impl_cpu(x):
98
+ >>> return torch.from_numpy(np.sin(x.numpy()))
99
+ >>>
100
+ >>> # Register an implementation for CUDA tensors
101
+ >>> @numpy_sin.impl('cuda'):
102
+ >>> def numpy_sin_impl_cuda(x):
103
+ >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
104
+ >>>
105
+ >>> x = torch.randn(3)
106
+ >>> numpy_sin(x) # calls numpy_sin_impl_cpu
107
+ >>>
108
+ >>> x_cuda = x.cuda()
109
+ >>> numpy_sin(x) # calls numpy_sin_impl_cuda
110
+
111
+ """
112
+
113
+ def inner(func):
114
+ if not inspect.isfunction(func):
115
+ raise ValueError(
116
+ f"custom_op(...)(func): Expected `func` to be a Python "
117
+ f"function, got: {type(func)}"
118
+ )
119
+
120
+ ns, name = parse_qualname(qualname)
121
+ validate_namespace(ns)
122
+ if func.__name__ != name:
123
+ raise ValueError(
124
+ f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
125
+ f"to have name '{name}' but got '{func.__name__}'. "
126
+ f"Please either change the name of `func` or the qualname that "
127
+ f"is passed to `custom_op`"
128
+ )
129
+
130
+ schema = infer_schema(func) if manual_schema is None else manual_schema
131
+ schema_str = f"{name}{schema}"
132
+ function_schema = FunctionSchema.parse(schema_str)
133
+ validate_schema(function_schema)
134
+ if manual_schema is not None:
135
+ validate_function_matches_schema(function_schema, func)
136
+
137
+ lib = library.Library(ns, "FRAGMENT")
138
+ lib.define(schema_str)
139
+ ophandle = find_ophandle_or_throw(ns, function_schema.name)
140
+ result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
141
+
142
+ result.__name__ = func.__name__
143
+ result.__module__ = func.__module__
144
+ result.__doc__ = func.__doc__
145
+
146
+ library.impl(lib, result._opname, "Autograd")(
147
+ autograd_kernel_indirection(weakref.proxy(result))
148
+ )
149
+
150
+ torch._C._dispatch_set_report_error_callback(
151
+ ophandle, functools.partial(report_error_callback, weakref.proxy(result))
152
+ )
153
+
154
+ return result
155
+
156
+ return inner
157
+
158
+
159
# Global dictionary holding references to all CustomOp objects.
# Yes, it keeps all CustomOps alive (see NOTE [CustomOp lifetime]);
# entries are only removed by CustomOp._destroy (test-only teardown).
# Used to query the CustomOp associated with a specific C++ dispatcher operator.
# An example usage is FakeTensor: FakeTensor checks if a specific operator
# has an implementation registered via the CustomOp API.
# Indexed by qualname (e.g. aten::foo)
global_registry: typing.Dict[str, "CustomOp"] = {}
166
+
167
+
168
class CustomOp:
    r"""Class for custom operators in PyTorch.

    Use the CustomOp API to create user-defined custom operators that behave
    just like regular PyTorch operators (e.g. torch.sin, torch.mm) when it
    comes to various PyTorch subsystems (like torch.compile).

    To construct a `CustomOp`, use `custom_op`.
    """

    def __init__(self, lib, cpp_ns, schema, operator_name, ophandle, *, _private_access=False):
        super().__init__()
        if not _private_access:
            raise RuntimeError(
                "The CustomOp constructor is private and we do not guarantee "
                "BC for it. Please use custom_op(...) to create a CustomOp object"
            )
        name = f"{cpp_ns}::{operator_name}"
        self._schema = schema
        self._cpp_ns = cpp_ns
        self._lib: library.Library = lib
        self._ophandle: _C._DispatchOperatorHandle = ophandle
        # Has the name of the op, e.g. "foo". We cache here for convenience.
        self._opname: str = operator_name
        # this is _opname but with namespace. e.g. "custom::foo"
        self._qualname: str = name
        self.__name__ = None  # mypy requires this
        # NB: Some of these impls are registered as kernels to DispatchKeys.
        # Modifying the _impls dict directly won't do anything in that case.
        self._impls: typing.Dict[str, typing.Optional[FuncAndLocation]] = {}
        # See NOTE [CustomOp autograd kernel indirection]
        self._registered_autograd_kernel_indirection = False

        global_registry[self._qualname] = self

    def _register_autograd_kernel_indirection(self):
        # Register the lazy autograd dispatcher entry exactly once; it forwards
        # to whatever "autograd" impl exists at call time.
        assert not self._registered_autograd_kernel_indirection
        self._lib.impl(self._opname, autograd_kernel_indirection(weakref.proxy(self)), "Autograd")
        self._registered_autograd_kernel_indirection = True

    # Records the impl and the source location in self._impls
    # Note that this doesn't cause torch.library to use the impl, that
    # needs to be done in a separate self._lib.impl call.
    def _register_impl(self, kind, func, stacklevel=2):
        if self._has_impl(kind):
            func_and_location = self._impls[kind]
            assert func_and_location is not None  # Pacify mypy
            location = func_and_location.location
            raise RuntimeError(
                f"Attempting to register a {kind} impl for operator {self._qualname} "
                f"that already has a {kind} impl registered from Python at "
                f"{location}. This is not supported."
            )
        # Record the user's registration site so duplicate-registration errors
        # can point at it.
        frame = inspect.getframeinfo(sys._getframe(stacklevel))
        location = f"{frame.filename}:{frame.lineno}"
        self._impls[kind] = FuncAndLocation(func, location)

    def _get_impl(self, kind):
        return self._impls[kind]

    def _has_impl(self, kind):
        return kind in self._impls

    def _destroy(self):
        # NOTE: [CustomOp lifetime]
        # A CustomOp, once created, lives forever. The mechanism is that the
        # global registry holds a reference to it. However, to make testing
        # easier, we want to be able to destroy CustomOp objects.
        # CustomOp._destroy does the job, though it leaves the CustomOp
        # in a garbage state.
        del self._lib

        opnamespace = getattr(torch.ops, self._cpp_ns)
        if hasattr(opnamespace, self._opname):
            delattr(opnamespace, self._opname)

        del global_registry[self._qualname]

    def __repr__(self):
        return f'<CustomOp(op="{self._qualname}")>'

    def __call__(self, *args, **kwargs):
        # Bypass torch.ops.* and directly do OperatorHandle::callBoxed.
        # Using torch.ops.* is a bit of a pain (it can be slow and it has lifetime
        # issues from caching operators that make testing CustomOp difficult).
        result = _C._dispatch_call_boxed(self._ophandle, *args, **kwargs)
        return result

    def impl(
        self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2,
    ) -> typing.Callable:
        r"""Register an implementation for a device type for this CustomOp object.

        WARNING: if you're a user, please do not use this directly
        (instead use the torch._custom_ops APIs).
        Also please see the following for a detailed guide on custom ops.
        https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk

        If the CustomOp is passed multiple Tensor inputs with different device
        types, it will dispatch to the registered implementation for the highest
        priority device type among those present.
        The supported device types, in order of priority, are {'cuda', 'cpu'}.

        This API is used as a decorator (see examples).

        Arguments:
            device_types (str or Iterable[str]): the device type(s) to register the function for.

        Examples::
            >>> import numpy as np
            >>> from torch import Tensor
            >>>
            >>> @custom_op("mylibrary::numpy_sin")
            >>> def numpy_sin(x: Tensor) -> Tensor:
            >>>     ...
            >>>
            >>> # Register an implementation for CPU Tensors
            >>> @numpy_sin.impl('cpu')
            >>> def numpy_sin_impl_cpu(x):
            >>>     return torch.from_numpy(np.sin(x.numpy()))
            >>>
            >>> # Register an implementation for CUDA Tensors
            >>> @numpy_sin.impl('cuda')
            >>> def numpy_sin_impl_cuda(x):
            >>>     return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
            >>>
            >>> x = torch.randn(3)
            >>> numpy_sin(x)  # calls numpy_sin_impl_cpu
            >>>
            >>> x_cuda = x.cuda()
            >>> numpy_sin(x_cuda)  # calls numpy_sin_impl_cuda

        """
        if isinstance(device_types, str):
            device_types = [device_types]
        for device_type in device_types:
            validate_device_type(device_type)

        def inner(f):
            for device_type in set(device_types):
                self._check_doesnt_have_library_impl(device_type)
                self._register_impl(device_type, f, stacklevel=_stacklevel)
                dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
                library.impl(self._lib, self._opname, dispatch_key)(f)
            return f

        return inner

    def _check_doesnt_have_library_impl(self, device_type):
        if self._has_impl(device_type):
            return
        key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
        if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, key):
            raise RuntimeError(
                f"impl(..., device_types={device_type}): the operator {self._qualname} "
                f"already has an implementation for this device type via a "
                f"pre-existing torch.library or TORCH_LIBRARY registration.")

    def impl_factory(self) -> typing.Callable:
        r"""Register an implementation for a factory function."""

        def inner(f):
            self._register_impl("factory", f)
            library.impl(self._lib, self._opname, "BackendSelect")(f)
            return f

        return inner

    def impl_abstract(self, _stacklevel=2) -> typing.Callable:
        r"""Register an abstract implementation for this operator.

        WARNING: please do not use this directly (and instead use the torch._custom_ops
        APIs). Also please see the following for a detailed guide on custom ops.
        https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk

        An "abstract implementation" specifies the behavior of this operator on
        Tensors that carry no data. Given some input Tensors with certain properties
        (sizes/strides/storage_offset/device), it specifies what the properties of
        the output Tensors are.

        The abstract implementation has the same signature as the operator.
        It is run for both FakeTensors and meta tensors. To write an abstract
        implementation, assume that all Tensor inputs to the operator are
        regular CPU/CUDA/Meta tensors, but they do not have storage, and
        you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
        The abstract implementation must consist of only PyTorch operations
        (and may not directly access the storage or data of any input or
        intermediate Tensors).

        This API is used as a decorator (see examples).

        Examples::
            >>> import numpy as np
            >>> from torch import Tensor
            >>>
            >>> # Example 1: an operator without data-dependent output shape
            >>> @custom_op('mylibrary::custom_linear')
            >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
            >>>     ...
            >>>
            >>> @custom_linear.impl_abstract()
            >>> def custom_linear_abstract(x, weight, bias):
            >>>     assert x.dim() == 2
            >>>     assert weight.dim() == 2
            >>>     assert bias.dim() == 1
            >>>     assert x.shape[1] == weight.shape[1]
            >>>     assert weight.shape[0] == bias.shape[0]
            >>>     assert x.device == weight.device
            >>>
            >>>     return (x @ weight.t()) + bias
            >>>
            >>> # Example 2: an operator with data-dependent output shape
            >>> @custom_op('mylibrary::custom_nonzero')
            >>> def custom_nonzero(x: Tensor) -> Tensor:
            >>>     ...
            >>>
            >>> @custom_nonzero.impl_abstract()
            >>> def custom_nonzero_abstract(x):
            >>>     # Number of nonzero-elements is data-dependent.
            >>>     # Since we cannot peek at the data in an abstract impl,
            >>>     # we use the ctx object to construct a new symint that
            >>>     # represents the data-dependent size.
            >>>     ctx = torch._custom_op.get_ctx()
            >>>     nnz = ctx.create_unbacked_symint()
            >>>     shape = [x.dim(), nnz]
            >>>     result = x.new_empty(shape, dtype=torch.long)
            >>>     return result
            >>>
            >>> @custom_nonzero.impl(['cpu', 'cuda'])
            >>> def custom_nonzero_impl(x):
            >>>     x_np = to_numpy(x)
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     # unbacked symbolic ints in PyTorch must be >= 2, so we
            >>>     # constrain the range to at least 2
            >>>     if res.shape[0] <= 1:
            >>>         raise RuntimeError("not supported")
            >>>     return torch.tensor(res, device=x.device)

        """

        def inner(f):
            self._check_doesnt_have_library_meta_impl()
            self._register_impl("abstract", f, stacklevel=_stacklevel)
            location = self._get_impl("abstract").location

            qualname = self._qualname

            # Handle DispatchKey.Meta registration
            @functools.wraps(f)
            def f_with_ctx(*args, **kwargs):
                def error_on_ctx():
                    raise RuntimeError(
                        f"Attempted to call get_ctx() for the meta implementation "
                        f"for {qualname}. "
                        f"You have presumably called get_ctx() because the operator "
                        f"has a data-dependent output shape; if so, there is no "
                        f"such meta implementation and this error is the correct "
                        f"behavior. Otherwise, please remove the call to get_ctx() "
                        f"in the implementation registered with impl_abstract "
                        f"at {location}"
                    )

                with set_ctx_getter(error_on_ctx):
                    return f(*args, **kwargs)

            self._lib.impl(self._opname, f_with_ctx, "Meta")
            return f

        return inner

    def _check_can_register_backward(self):
        def error(detail):
            raise RuntimeError(
                f"Cannot use torch._custom_ops APIs to register backward "
                f"formula for {detail}. Got operator "
                f"{self._qualname} with schema: {schema}"
            )

        schema = self._schema
        if schema.kind() != SchemaKind.functional:
            error("non-functional operator")

        rets = schema.returns
        if not schema.returns:
            error("operator with no returns")

        assert len(rets) > 0
        is_non_mutating_view = any(
            r.annotation is not None and not r.annotation.is_write for r in rets
        )
        if is_non_mutating_view:
            error("operator that returns views")

        # We make assumptions about the schema's return types.
        allowed_return_types = {
            BaseType(BaseTy.int): "int",
            BaseType(BaseTy.SymInt): "SymInt",
            BaseType(BaseTy.bool): "bool",
            BaseType(BaseTy.float): "float",
            BaseType(BaseTy.Tensor): "Tensor",
            ListType(BaseType(BaseTy.Tensor), None): "List[Tensor]",
        }
        for ret in schema.returns:
            if ret.type in allowed_return_types:
                continue
            error(f"operator with return not in {list(allowed_return_types.values())} (got {ret.type})")

    def _check_doesnt_have_library_autograd_impl(self):
        if self._registered_autograd_kernel_indirection:
            return

        if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
            raise RuntimeError(
                f"impl_backward/impl_save_for_backward: the operator {self._qualname} "
                f"already has an implementation for this device type via a "
                f"pre-existing registration to DispatchKey::CompositeImplicitAutograd. "
                f"CompositeImplicitAutograd operators do not need an autograd formula; "
                f"instead, the operator will decompose into its constituents and those "
                f"can have autograd formulas defined on them.")

        # We can improve this by adding "all Autograd<BACKEND> keys", but
        # realistically people will just be using this API for CPU/CUDA for now.
        for key in ["Autograd", "AutogradCPU", "AutogradCUDA"]:
            if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, key):
                raise RuntimeError(
                    f"impl_backward/impl_save_for_backward: "
                    f"the operator {self._qualname} already has an Autograd kernel "
                    f"registered to DispatchKey::{key} via a pre-existing "
                    f"torch.library or TORCH_LIBRARY registration. Please either "
                    f"remove those registrations or don't use the torch._custom_ops APIs")

    def _check_doesnt_have_library_meta_impl(self):
        if self._has_impl("abstract"):
            return

        # If the user's operator is CompositeExplicitAutograd,
        # allow them to impl_abstract. This is being pragmatic
        # (existing custom ops may have CompositeExplicitAutograd
        # registration that don't work with Meta kernels, so this
        # gives them an escape hatch).
        if (
            _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeExplicitAutograd")
            and not _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta")
        ):
            return

        # Otherwise, if the user's already has a Meta kernel or their
        # op is CompositeImplicitAutograd or some other alias dispatch key,
        # raise.

        # Special case for CompositeImplicitAutograd
        if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
            raise RuntimeError(
                f"impl_abstract(...): the operator {self._qualname} "
                f"already has an implementation for this device type via a "
                f"pre-existing registration to DispatchKey::CompositeImplicitAutograd. "
                f"CompositeImplicitAutograd operators do not need an abstract impl; "
                f"instead, the operator will decompose into its constituents and those "
                f"can have abstract impls defined on them.")

        if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, "Meta"):
            raise RuntimeError(
                f"impl_abstract(...): the operator {self._qualname} "
                f"already has a DispatchKey::Meta implementation via a "
                f"pre-existing torch.library or TORCH_LIBRARY registration. "
                f"Please either remove that registration or don't call impl_abstract.")

    # NOTE ["backward", "save_for_backward", and "autograd"]
    # As a part of the explicit autograd API, a user must provide us
    # a "save_for_backward" function and a "backward" function.
    # When both of these have been provided, then we automatically
    # construct the "autograd" kernel.
    def _register_autograd_kernel(self):
        assert self._has_impl("backward")
        assert self._has_impl("save_for_backward")
        kernel = construct_autograd_kernel(
            self._schema,
            self._output_differentiability,
            self,
            get_op(self._qualname),
            self._get_impl("save_for_backward").func,
            self._get_impl("backward").func)
        self._register_impl("autograd", kernel)

    def impl_save_for_backward(self, _stacklevel=2):
        r"""Register a function that tells us what to save for backward.

        Please see impl_backward for more details.
        """
        def inner(f):
            self._check_can_register_backward()
            self._check_doesnt_have_library_autograd_impl()
            if not self._registered_autograd_kernel_indirection:
                self._register_autograd_kernel_indirection()
            self._register_impl("save_for_backward", f, stacklevel=_stacklevel)
            if self._has_impl("backward"):
                self._register_autograd_kernel()
        return inner

    def impl_backward(self, output_differentiability=None, _stacklevel=2):
        r"""Registers a backward formula.

        WARNING: if you're a user, please do not use this directly
        (instead use the torch._custom_ops APIs).
        Also please see the following for a detailed guide on custom ops.
        https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk

        In order for the CustomOp to work with autograd, you need to register
        a backward formula. There are two pieces to this:
        1. You must give us a function to specify what to save for backward.
           Call this the "save for backward" function.
        2. You must give us a function that computes gradients. Call this the
           "backward" function.

        Use `impl_save_for_backward` to define a "save for backward" function
        that specifies what gets saved for backward. The function should accept
        two arguments ``(inputs, output)`` and return the quantities to be saved
        for backward.

        During runtime, when you call the CustomOp, PyTorch will invoke the
        "save for backward" function with the inputs and output of the CustomOp.

        Use `impl_backward` to define the "backward" function. The backward
        function must accept ``(ctx, saved, *grads)``:
        - ``ctx`` is a context object where we may provide information
        - ``saved`` is exactly what gets returned from the "save for backward"
          function
        - ``grads`` is one or more gradients. The number of gradients matches
          the number of outputs of the CustomOp.

        The backward function must return a dict that maps the name of
        an input to the CustomOp to its corresponding gradient. All inputs that
        were declared to be Tensors in the CustomOp definition must be accounted
        for in the dict. The gradient may be a Tensor or None.

        """
        if output_differentiability is not None:
            def yell():
                raise RuntimeError(
                    f"impl_backward(output_differentiability): expected "
                    f"output_differentiability to be a list of bools with "
                    f"length equal to the number of outputs of this CustomOp "
                    f"got: {output_differentiability}")

            if not isinstance(output_differentiability, list):
                yell()
            for diff in output_differentiability:
                if not isinstance(diff, bool):
                    yell()
            if len(self._schema.returns) != len(output_differentiability):
                yell()

        def inner(f):
            self._check_can_register_backward()
            self._check_doesnt_have_library_autograd_impl()
            if not self._registered_autograd_kernel_indirection:
                self._register_autograd_kernel_indirection()
            self._register_impl("backward", f, stacklevel=_stacklevel)
            self._output_differentiability = output_differentiability
            if self._has_impl("save_for_backward"):
                self._register_autograd_kernel()
        return inner
631
+
632
+
633
@dataclasses.dataclass
class FuncAndLocation:
    """Pairs a user-registered impl function with the "<filename>:<lineno>"
    source location where it was registered (used in duplicate-registration
    error messages)."""
    func: typing.Callable
    location: str
637
+
638
+
639
def find_ophandle_or_throw(cpp_ns: str, operator_name: OperatorName):
    """Resolve the C++ dispatcher handle for ``cpp_ns::operator_name``.

    Raises (via the dispatcher) if no such operator schema is registered.
    """
    overload = operator_name.overload_name
    if overload is None:
        overload = ""
    return _C._dispatch_find_schema_or_throw(
        f"{cpp_ns}::{str(operator_name.name)}", overload
    )
646
+
647
+
648
def validate_namespace(ns: str) -> None:
    """Reject namespaces that contain '.' or that collide with reserved
    PyTorch namespaces; raises ValueError on either."""
    if "." in ns:
        msg = (
            f'custom_op(..., ns="{ns}"): expected ns to not contain any . (and be a '
            f"valid variable name)"
        )
        raise ValueError(msg)
    if ns in RESERVED_NS:
        msg = (
            f"custom_op(..., ns='{ns}'): '{ns}' is a reserved namespace, "
            f"please choose something else. "
        )
        raise ValueError(msg)
659
+
660
def validate_schema(schema: FunctionSchema) -> None:
    """Reject schemas custom_op cannot support: non-functional ops, view
    ops, ops with no returns, and ops with a ``self`` argument."""
    # Coming in the future. Requires us to have correct logic for
    # the ADInplaceOrView key
    if schema.kind() != SchemaKind.functional:
        raise ValueError(
            f"custom_op does not support non-functional function schema. Got: {schema}"
        )

    # A return annotated as a non-write alias means the op returns a view.
    returns_a_view = any(
        r.annotation is not None and not r.annotation.is_write
        for r in schema.returns
    )
    if returns_a_view:
        raise ValueError(f"custom_op does not support view functions. Got: {schema}")

    # Just seems weird so banning for now
    if not schema.returns:
        raise ValueError(
            f"custom_op does not support function schema with no outputs. Got: {schema}"
        )

    # For simplicity: don't allow self arguments
    if schema.arguments.self_arg is not None:
        raise ValueError(
            f"custom_op does not support arguments named 'self'. Please "
            f"rename your argument. Got: {schema}"
        )
687
+
688
+
689
def parse_qualname(qualname: str) -> typing.Tuple[str, str]:
    """Split a qualified operator name "ns::opname" into (namespace, name).

    Raises ValueError when no "::" separator is present or when the name part
    contains a '.' (overloads are not supported by these APIs).
    """
    pieces = qualname.split("::", 1)
    if len(pieces) != 2:
        raise ValueError(f"Expected there to be a namespace in {qualname}, i.e. The "
                         f"operator name should look something like ns::foo")
    namespace, opname = pieces
    if "." in opname:
        raise ValueError(f"The torch.custom_ops APIs do not handle overloads, "
                         f"i.e. operator names with '.' in them. "
                         f"Please name your operator something like ns::foo. "
                         f"Got: {qualname}")
    return namespace, opname
700
+
701
+
702
def validate_device_type(device_type: str) -> None:
    """Raise ValueError unless ``device_type`` is one of the device types
    that CustomOp.impl supports (keys of SUPPORTED_DEVICE_TYPE_TO_KEY)."""
    if device_type in SUPPORTED_DEVICE_TYPE_TO_KEY:
        return
    raise ValueError(
        f"CustomOp.impl(device_types=[{device_type}, ...]): we only support device_type "
        f"in {SUPPORTED_DEVICE_TYPE_TO_KEY.keys()}."
    )
708
+
709
+
710
def supported_param(param: inspect.Parameter) -> bool:
    """True iff the parameter is positional-or-keyword or keyword-only
    (i.e. not positional-only, *args, or **kwargs)."""
    return param.kind in {
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    }
715
+
716
+
717
def validate_function_matches_schema(
    schema: FunctionSchema, func: typing.Callable
) -> None:
    """Check that ``func``'s signature is consistent with a manually provided
    schema.

    ``func`` must use only positional-or-keyword and keyword-only parameters,
    must carry no type annotations and no default values, and its parameter
    names/order must line up with the schema's positional and keyword-only
    arguments. Raises ValueError on any mismatch.
    """
    sig = inspect.signature(func)

    if not all(supported_param(p) for p in sig.parameters.values()):
        raise ValueError(
            f"custom_op(..., manual_schema)(func): positional-only args, "
            f"varargs, and kwargs are not supported. Please rewrite `func` "
            f"to not have them. Got `func` with signature: {sig}"
        )

    # With a manual schema, type annotations on func would be a second,
    # possibly conflicting, source of truth -- forbid them entirely.
    has_any_annotation = any(
        p.annotation is not inspect.Parameter.empty
        for p in sig.parameters.values()
    ) or sig.return_annotation is not inspect.Signature.empty
    if has_any_annotation:
        raise ValueError(
            f"custom_op(..., manual_schema)(func): When passing in a manual "
            f"schema, we expect `func` to have no type annotations to avoid "
            f"ambiguity. Got `func` with signature: {sig}"
        )

    positional = [
        item for item in sig.parameters.items()
        if item[1].kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
    kwargonly = [
        item for item in sig.parameters.items()
        if item[1].kind == inspect.Parameter.KEYWORD_ONLY
    ]

    def error():
        raise ValueError(
            f"custom_op(..., manual_schema)(func): When passing in a manual "
            f"schema, we expect `func`'s signature to match `manual_schema` "
            f"(aside from type annotations). "
            f"func's signature: {sig}, manual_schema: {schema}"
        )

    def error_default_args():
        raise ValueError(
            f"custom_op(..., manual_schema)(func): "
            f"neither func nor manual_schema should have default "
            f"arguments. Got "
            f"func's signature: {sig}, manual_schema: {schema}"
        )

    def compare(sig_args, schema_args):
        # Same arity, same names in the same order, and no defaults on
        # either side.
        if len(sig_args) != len(schema_args):
            error()
        for (name, param), arg in zip(sig_args, schema_args):
            if name != arg.name:
                error()
            if param.default is not inspect.Parameter.empty or arg.default is not None:
                error_default_args()

    compare(positional, schema.arguments.flat_positional)
    compare(kwargonly, schema.arguments.flat_kwarg_only)
780
+
781
+
782
def get_none():
    """Default ctx getter: there is no active abstract-impl context."""
    return None


global_ctx_getter: typing.Callable = get_none


# NOTE [ctx inside the fake implementation]
# If a user has an operator with data-dependent output shape, then when writing
# a fake implementation they must query the current ctx and use methods on the
# ctx to construct a new unbacked symint.
#
# This is done via us setting the global_ctx_getter function every time a fake
# implementation is invoked.
def get_ctx() -> "AbstractImplCtx":
    """get_ctx() returns the current AbstractImplCtx object.

    Calling ``get_ctx()`` is only valid inside of an abstract implementation.
    """
    return global_ctx_getter()


@contextlib.contextmanager
def set_ctx_getter(ctx_getter):
    """Temporarily install ``ctx_getter`` as the global ctx getter, restoring
    the previous getter on exit (even if the body raises)."""
    global global_ctx_getter
    previous_getter = global_ctx_getter
    try:
        global_ctx_getter = ctx_getter
        yield
    finally:
        global_ctx_getter = previous_getter
813
+
814
+
815
class AbstractImplCtx:
    """
    Context object for writing abstract implementations for custom operators.
    """

    def __init__(self, _shape_env, _op):
        # ShapeEnv (or None) used to mint new unbacked symbolic ints, and the
        # operator this ctx was created for (used in error reporting).
        self._shape_env = _shape_env
        self._op = _op

    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
        """Constructs a new symint (symbolic int) representing a data-dependent value.

        This is useful for writing the abstract implementation (which is necessary
        for torch.compile) for a CustomOp where an output Tensor has a size
        that depends on the data of the input Tensors.

        Args:
            min (int): A statically known inclusive lower bound for this symint.
                min must be at least 2 due to implementation details of
                torch.compile. Default: 2.
            max (Optional[int]): A statically known inclusive upper bound for this
                symint. Default: None

        .. warning:

            It is important that the ``min`` and ``max`` (if not None) values are set
            correctly, otherwise, there will be undefined behavior under
            torch.compile. The default value of ``min`` is 2 due to torch.compile
            specializing on 0/1 sizes.

            You must also verify that your implementation on concrete Tensors
            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
            to the symint also respects these constraints.
            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
            implementation that the size follows these bounds.

        Example::

            >>> # an operator with data-dependent output shape
            >>> @custom_op("mylibrary::custom_nonzero")
            >>> def custom_nonzero(x: Tensor) -> Tensor:
            >>>     ...
            >>>
            >>> @custom_nonzero.impl_abstract()
            >>> def custom_nonzero_abstract(x):
            >>>     # Number of nonzero-elements is data-dependent
            >>>     ctx = torch._custom_op.get_ctx()
            >>>     nnz = ctx.create_unbacked_symint()
            >>>     shape = [x.dim(), nnz]
            >>>     result = x.new_empty(shape, dtype=torch.long)
            >>>     return result
            >>>
            >>> @custom_nonzero.impl(['cpu', 'cuda'])
            >>> def custom_nonzero_impl(x):
            >>>     x_np = to_numpy(x)
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     # the size associated with ctx.create_unbacked_symint()
            >>>     # must be constrained in the same way, so we add an assertion here.
            >>>     if res.shape[0] < 2 or res.shape[0] > x.numel():
            >>>         raise RuntimeError("not supported")
            >>>     return torch.tensor(res, device=x.device)

        """
        if (
            self._shape_env is None
            or not self._shape_env.allow_dynamic_output_shape_ops
        ):
            # No ShapeEnv (or dynamic output shapes disallowed): signal
            # FakeTensor to fall back / error for this op.
            raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)

        if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
            raise ValueError(
                f"ctx.create_unbacked_symint(min={min}, max={max}): expected "
                f"min and max to be statically known ints but got SymInt. "
                f"This is not supported."
            )

        if min < 2:
            raise ValueError(
                f"ctx.create_unbacked_symint(min={min}, ...): expected min to be "
                f"greater than or equal to 2. PyTorch only supports new "
                f"data-dependent sizes of >= 2"
            )

        result = self._shape_env.create_unbacked_symint()
        # Use the caller-provided bounds. (min is validated to be >= 2 above;
        # the previous code hard-coded min=2 here, silently discarding any
        # tighter lower bound the caller requested.)
        torch.fx.experimental.symbolic_shapes.constrain_range(result, min=min, max=max)
        return result
901
+
902
+
903
+ def infer_schema(prototype_function: typing.Callable) -> str:
904
+ sig = inspect.signature(prototype_function)
905
+
906
+ def error_fn(what):
907
+ raise ValueError(
908
+ f"custom_op(...)(func): {what} " f"Got func with signature {sig})"
909
+ )
910
+
911
+ params = [
912
+ parse_param(name, param, error_fn) for name, param in sig.parameters.items()
913
+ ]
914
+ ret = parse_return(sig.return_annotation, error_fn)
915
+ return f"({', '.join(params)}) -> {ret}"
916
+
917
+
918
+ def parse_param(name, param, error_fn):
919
+ if not supported_param(param):
920
+ error_fn("We do not support positional-only args, varargs, or varkwargs.")
921
+
922
+ if param.annotation is inspect.Parameter.empty:
923
+ error_fn(f"Parameter {name} must have a type annotation.")
924
+
925
+ if param.annotation not in SUPPORTED_PARAM_TYPES.keys():
926
+ error_fn(
927
+ f"Parameter {name} has unsupported type {param.annotation}. "
928
+ f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}."
929
+ )
930
+
931
+ if param.default is not inspect.Parameter.empty:
932
+ error_fn(
933
+ f"Parameter {name} has a default value; this is not supported. "
934
+ f"If you want to use default values then create a function with "
935
+ f"default values that calls the CustomOp"
936
+ )
937
+
938
+ return f"{SUPPORTED_PARAM_TYPES[param.annotation]} {name}"
939
+
940
+
941
+ def derived_types(
942
+ base_type, cpp_type, list_base, optional_base_list, optional_list_base
943
+ ):
944
+ result = [
945
+ (base_type, cpp_type),
946
+ (typing.Optional[base_type], f"{cpp_type}?"),
947
+ ]
948
+ if list_base:
949
+ result.append((typing.Sequence[base_type], f"{cpp_type}[]")) # type: ignore[valid-type]
950
+ if optional_base_list:
951
+ result.append((typing.Sequence[typing.Optional[base_type]], f"{cpp_type}?[]")) # type: ignore[valid-type]
952
+ if optional_list_base:
953
+ result.append((typing.Optional[typing.Sequence[base_type]], f"{cpp_type}[]?")) # type: ignore[valid-type]
954
+ return result
955
+
956
+
957
+ def get_supported_param_types():
958
+ data = [
959
+ # (python type, schema type, type[] variant, type?[] variant, type[]? variant
960
+ (torch.Tensor, "Tensor", True, True, False),
961
+ (int, "SymInt", True, False, True),
962
+ (float, "float", True, False, True),
963
+ (bool, "bool", True, False, True),
964
+ (str, "str", False, False, False),
965
+ (torch.types.Number, "Scalar", True, False, False),
966
+ (torch.dtype, "ScalarType", False, False, False),
967
+ (torch.device, "Device", False, False, False),
968
+ ]
969
+ result = []
970
+ for line in data:
971
+ result.extend(derived_types(*line))
972
+ return dict(result)
973
+
974
+
975
+ SUPPORTED_RETURN_TYPES = {
976
+ torch.Tensor: "Tensor",
977
+ typing.List[torch.Tensor]: "Tensor[]",
978
+ int: "SymInt",
979
+ float: "float",
980
+ bool: "bool",
981
+ torch.types.Number: "Scalar",
982
+ }
983
+
984
+
985
+ def parse_return(annotation, error_fn):
986
+ origin = typing.get_origin(annotation)
987
+ if origin is not tuple:
988
+ if annotation not in SUPPORTED_RETURN_TYPES.keys():
989
+ error_fn(
990
+ f"Return has unsupported type {annotation}. "
991
+ f"The valid types are: {SUPPORTED_RETURN_TYPES}."
992
+ )
993
+ return SUPPORTED_RETURN_TYPES[annotation]
994
+
995
+ args = typing.get_args(annotation)
996
+ for arg in args:
997
+ if arg not in SUPPORTED_RETURN_TYPES:
998
+ error_fn(
999
+ f"Return has unsupported type {annotation}. "
1000
+ f"The valid types are: {SUPPORTED_RETURN_TYPES}."
1001
+ )
1002
+
1003
+ return "(" + ", ".join([SUPPORTED_RETURN_TYPES[arg] for arg in args]) + ")"
1004
+
1005
+
1006
+ SUPPORTED_PARAM_TYPES = get_supported_param_types()
1007
+
1008
+
1009
+ def report_error_callback(custom_op: typing.Any, key: str) -> None:
1010
+ if key == "Undefined":
1011
+ raise NotImplementedError(
1012
+ f"{custom_op}: There were no Tensor inputs to this operator "
1013
+ f"(e.g. you passed an empty list of Tensors). If your operator is a "
1014
+ f"factory function (that is, it takes no Tensors and constructs "
1015
+ f"a new one), then please use CustomOp.impl_factory to register "
1016
+ f"an implementation for it"
1017
+ )
1018
+ if key == "Meta":
1019
+ raise NotImplementedError(
1020
+ f"{custom_op}: when running with device='Meta' tensors: there is no "
1021
+ f"abstract impl registered for this CustomOp. Please register one via "
1022
+ f"CustomOp.impl_abstract to get this CustomOp to work with Meta tensors"
1023
+ )
1024
+ if key in ("CPU", "CUDA"):
1025
+ device = key.lower()
1026
+ raise NotImplementedError(
1027
+ f"{custom_op}: when running with device='{device}' tensors: there is no "
1028
+ f"{device} impl registered for this CustomOp. Please register one via "
1029
+ f"CustomOp.impl(device_type='{device}')"
1030
+ )
1031
+ raise NotImplementedError(
1032
+ f"{custom_op}: No implementation for dispatch key {key}. It is likely "
1033
+ f"that we have not added this functionality yet, please either open an "
1034
+ f"issue or if you're feeling adventurous, use the low-level "
1035
+ f"torch.library API"
1036
+ )
1037
+
1038
+
1039
+ def custom_op_from_existing(op):
1040
+ ns = op.namespace
1041
+ lib = torch.library.Library(ns, "FRAGMENT")
1042
+ name = op.name().split("::")[-1]
1043
+ schema_str = str(op._schema)
1044
+ # CustomOp expects the schema string without the namespace
1045
+ schema_str = schema_str.split("::")[-1]
1046
+ schema = FunctionSchema.parse(schema_str)
1047
+ return CustomOp(lib, ns, schema, name, op, _private_access=True)
1048
+
1049
+
1050
+ def get_op(qualname):
1051
+ def error_not_found():
1052
+ raise ValueError(
1053
+ f"Could not find the operator {qualname}. Please make sure you have "
1054
+ f"already registered the operator and (if registered from C++) "
1055
+ f"loaded it via torch.ops.load_library.")
1056
+
1057
+ ns, name = parse_qualname(qualname)
1058
+ if not hasattr(torch.ops, ns):
1059
+ error_not_found()
1060
+ opnamespace = getattr(torch.ops, ns)
1061
+ if not hasattr(opnamespace, name):
1062
+ error_not_found()
1063
+ packet = getattr(opnamespace, name)
1064
+ if not hasattr(packet, 'default'):
1065
+ error_not_found()
1066
+ return packet.default
1067
+
1068
+
1069
+ def _find_custom_op(qualname, also_check_torch_library=False):
1070
+ if qualname in global_registry:
1071
+ return global_registry[qualname]
1072
+ if not also_check_torch_library:
1073
+ raise RuntimeError(
1074
+ f"Could not find custom op \"{qualname}\". Did you register it via "
1075
+ f"the torch._custom_ops API?")
1076
+ overload = get_op(qualname)
1077
+ result = custom_op_from_existing(overload)
1078
+ return result
1079
+
1080
+
1081
+ def _custom_op_with_schema(qualname, schema):
1082
+ ns, name = qualname.split("::")
1083
+ schema_str = f"{name}{schema}"
1084
+ function_schema = FunctionSchema.parse(schema_str)
1085
+ validate_schema(function_schema)
1086
+
1087
+ lib = library.Library(ns, "FRAGMENT")
1088
+ lib.define(schema_str)
1089
+ ophandle = find_ophandle_or_throw(ns, function_schema.name)
1090
+ result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
1091
+ result._register_autograd_kernel_indirection()
1092
+
1093
+ torch._C._dispatch_set_report_error_callback(
1094
+ ophandle, functools.partial(report_error_callback, weakref.proxy(result))
1095
+ )
1096
+ return get_op(qualname)
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d556406c7628577d16d86ac849149fadd71f8ca5de00586fadb0ed9ca35d4612
3
+ size 120129
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (759 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc ADDED
Binary file (7.14 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc ADDED
Binary file (42.7 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc ADDED
Binary file (502 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc ADDED
Binary file (7.63 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Export torch work functions for binary ufuncs, rename/tweak to match numpy.
2
+ This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module.
3
+ """
4
+
5
+ import torch
6
+
7
+ from torch import ( # noqa: F401
8
+ add, # noqa: F401
9
+ arctan2, # noqa: F401
10
+ bitwise_and, # noqa: F401
11
+ bitwise_left_shift as left_shift, # noqa: F401
12
+ bitwise_or, # noqa: F401
13
+ bitwise_right_shift as right_shift, # noqa: F401
14
+ bitwise_xor, # noqa: F401
15
+ copysign, # noqa: F401
16
+ divide, # noqa: F401
17
+ eq as equal, # noqa: F401
18
+ float_power, # noqa: F401
19
+ floor_divide, # noqa: F401
20
+ fmax, # noqa: F401
21
+ fmin, # noqa: F401
22
+ fmod, # noqa: F401
23
+ gcd, # noqa: F401
24
+ greater, # noqa: F401
25
+ greater_equal, # noqa: F401
26
+ heaviside, # noqa: F401
27
+ hypot, # noqa: F401
28
+ lcm, # noqa: F401
29
+ ldexp, # noqa: F401
30
+ less, # noqa: F401
31
+ less_equal, # noqa: F401
32
+ logaddexp, # noqa: F401
33
+ logaddexp2, # noqa: F401
34
+ logical_and, # noqa: F401
35
+ logical_or, # noqa: F401
36
+ logical_xor, # noqa: F401
37
+ maximum, # noqa: F401
38
+ minimum, # noqa: F401
39
+ multiply, # noqa: F401
40
+ nextafter, # noqa: F401
41
+ not_equal, # noqa: F401
42
+ pow as power, # noqa: F401
43
+ remainder, # noqa: F401
44
+ remainder as mod, # noqa: F401
45
+ subtract, # noqa: F401
46
+ true_divide, # noqa: F401
47
+ )
48
+
49
+ from . import _dtypes_impl, _util
50
+
51
+
52
+ # work around torch limitations w.r.t. numpy
53
+ def matmul(x, y):
54
+ # work around:
55
+ # - RuntimeError: expected scalar type Int but found Double
56
+ # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool'
57
+ # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
58
+ dtype = _dtypes_impl.result_type_impl(x, y)
59
+ is_bool = dtype == torch.bool
60
+ is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and (
61
+ x.is_cpu or y.is_cpu
62
+ )
63
+
64
+ work_dtype = dtype
65
+ if is_bool:
66
+ work_dtype = torch.uint8
67
+ if is_half:
68
+ work_dtype = torch.float32
69
+
70
+ x = _util.cast_if_needed(x, work_dtype)
71
+ y = _util.cast_if_needed(y, work_dtype)
72
+
73
+ result = torch.matmul(x, y)
74
+
75
+ if work_dtype != dtype:
76
+ result = result.to(dtype)
77
+
78
+ return result
79
+
80
+
81
+ # a stub implementation of divmod, should be improved after
82
+ # https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch
83
+ def divmod(x, y):
84
+ return x // y, x % y
llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes.py ADDED
@@ -0,0 +1,422 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Define analogs of numpy dtypes supported by pytorch.
2
+ Define the scalar types and supported dtypes and numpy <--> torch dtype mappings.
3
+ """
4
+ import builtins
5
+
6
+ import torch
7
+
8
+ from . import _dtypes_impl
9
+
10
+
11
+ # ### Scalar types ###
12
+
13
+
14
+ class generic:
15
+ @property
16
+ def name(self):
17
+ return self.__class__.__name__
18
+
19
+ def __new__(cls, value):
20
+ # NumPy scalars are modelled as 0-D arrays
21
+ # so a call to np.float32(4) produces a 0-D array.
22
+
23
+ from ._ndarray import asarray, ndarray
24
+
25
+ if isinstance(value, str) and value in ["inf", "nan"]:
26
+ value = {"inf": torch.inf, "nan": torch.nan}[value]
27
+
28
+ if isinstance(value, ndarray):
29
+ return value.astype(cls)
30
+ else:
31
+ return asarray(value, dtype=cls)
32
+
33
+
34
+ ##################
35
+ # abstract types #
36
+ ##################
37
+
38
+
39
+ class number(generic):
40
+ pass
41
+
42
+
43
+ class integer(number):
44
+ pass
45
+
46
+
47
+ class inexact(number):
48
+ pass
49
+
50
+
51
+ class signedinteger(integer):
52
+ pass
53
+
54
+
55
+ class unsignedinteger(integer):
56
+ pass
57
+
58
+
59
+ class floating(inexact):
60
+ pass
61
+
62
+
63
+ class complexfloating(inexact):
64
+ pass
65
+
66
+
67
+ # ##### concrete types
68
+
69
+ # signed integers
70
+
71
+
72
+ class int8(signedinteger):
73
+ name = "int8"
74
+ typecode = "b"
75
+ torch_dtype = torch.int8
76
+
77
+
78
+ class int16(signedinteger):
79
+ name = "int16"
80
+ typecode = "h"
81
+ torch_dtype = torch.int16
82
+
83
+
84
+ class int32(signedinteger):
85
+ name = "int32"
86
+ typecode = "i"
87
+ torch_dtype = torch.int32
88
+
89
+
90
+ class int64(signedinteger):
91
+ name = "int64"
92
+ typecode = "l"
93
+ torch_dtype = torch.int64
94
+
95
+
96
+ # unsigned integers
97
+
98
+
99
+ class uint8(unsignedinteger):
100
+ name = "uint8"
101
+ typecode = "B"
102
+ torch_dtype = torch.uint8
103
+
104
+
105
+ # floating point
106
+
107
+
108
+ class float16(floating):
109
+ name = "float16"
110
+ typecode = "e"
111
+ torch_dtype = torch.float16
112
+
113
+
114
+ class float32(floating):
115
+ name = "float32"
116
+ typecode = "f"
117
+ torch_dtype = torch.float32
118
+
119
+
120
+ class float64(floating):
121
+ name = "float64"
122
+ typecode = "d"
123
+ torch_dtype = torch.float64
124
+
125
+
126
+ class complex64(complexfloating):
127
+ name = "complex64"
128
+ typecode = "F"
129
+ torch_dtype = torch.complex64
130
+
131
+
132
+ class complex128(complexfloating):
133
+ name = "complex128"
134
+ typecode = "D"
135
+ torch_dtype = torch.complex128
136
+
137
+
138
+ class bool_(generic):
139
+ name = "bool_"
140
+ typecode = "?"
141
+ torch_dtype = torch.bool
142
+
143
+
144
+ # name aliases
145
+ _name_aliases = {
146
+ "intp": int64,
147
+ "int_": int64,
148
+ "intc": int32,
149
+ "byte": int8,
150
+ "short": int16,
151
+ "longlong": int64, # XXX: is this correct?
152
+ "ubyte": uint8,
153
+ "half": float16,
154
+ "single": float32,
155
+ "double": float64,
156
+ "float_": float64,
157
+ "csingle": complex64,
158
+ "singlecomplex": complex64,
159
+ "cdouble": complex128,
160
+ "cfloat": complex128,
161
+ "complex_": complex128,
162
+ }
163
+ # We register float_ = float32 and so on
164
+ for name, obj in _name_aliases.items():
165
+ vars()[name] = obj
166
+
167
+
168
+ # Replicate this NumPy-defined way of grouping scalar types,
169
+ # cf tests/core/test_scalar_methods.py
170
+ sctypes = {
171
+ "int": [int8, int16, int32, int64],
172
+ "uint": [uint8],
173
+ "float": [float16, float32, float64],
174
+ "complex": [complex64, complex128],
175
+ "others": [bool_],
176
+ }
177
+
178
+
179
+ # Support mappings/functions
180
+
181
+ _names = {st.name: st for cat in sctypes for st in sctypes[cat]}
182
+ _typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]}
183
+ _torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]}
184
+
185
+
186
+ _aliases = {
187
+ "u1": uint8,
188
+ "i1": int8,
189
+ "i2": int16,
190
+ "i4": int32,
191
+ "i8": int64,
192
+ "b": int8, # XXX: srsly?
193
+ "f2": float16,
194
+ "f4": float32,
195
+ "f8": float64,
196
+ "c8": complex64,
197
+ "c16": complex128,
198
+ # numpy-specific trailing underscore
199
+ "bool_": bool_,
200
+ }
201
+
202
+
203
+ _python_types = {
204
+ int: int64,
205
+ float: float64,
206
+ complex: complex128,
207
+ builtins.bool: bool_,
208
+ # also allow stringified names of python types
209
+ int.__name__: int64,
210
+ float.__name__: float64,
211
+ complex.__name__: complex128,
212
+ builtins.bool.__name__: bool_,
213
+ }
214
+
215
+
216
+ def sctype_from_string(s):
217
+ """Normalize a string value: a type 'name' or a typecode or a width alias."""
218
+ if s in _names:
219
+ return _names[s]
220
+ if s in _name_aliases.keys():
221
+ return _name_aliases[s]
222
+ if s in _typecodes:
223
+ return _typecodes[s]
224
+ if s in _aliases:
225
+ return _aliases[s]
226
+ if s in _python_types:
227
+ return _python_types[s]
228
+ raise TypeError(f"data type {s!r} not understood")
229
+
230
+
231
+ def sctype_from_torch_dtype(torch_dtype):
232
+ return _torch_dtypes[torch_dtype]
233
+
234
+
235
+ # ### DTypes. ###
236
+
237
+
238
+ def dtype(arg):
239
+ if arg is None:
240
+ arg = _dtypes_impl.default_dtypes().float_dtype
241
+ return DType(arg)
242
+
243
+
244
+ class DType:
245
+ def __init__(self, arg):
246
+ # a pytorch object?
247
+ if isinstance(arg, torch.dtype):
248
+ sctype = _torch_dtypes[arg]
249
+ elif isinstance(arg, torch.Tensor):
250
+ sctype = _torch_dtypes[arg.dtype]
251
+ # a scalar type?
252
+ elif issubclass_(arg, generic):
253
+ sctype = arg
254
+ # a dtype already?
255
+ elif isinstance(arg, DType):
256
+ sctype = arg._scalar_type
257
+ # a has a right attribute?
258
+ elif hasattr(arg, "dtype"):
259
+ sctype = arg.dtype._scalar_type
260
+ else:
261
+ sctype = sctype_from_string(arg)
262
+ self._scalar_type = sctype
263
+
264
+ @property
265
+ def name(self):
266
+ return self._scalar_type.name
267
+
268
+ @property
269
+ def type(self):
270
+ return self._scalar_type
271
+
272
+ @property
273
+ def kind(self):
274
+ # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
275
+ return _torch_dtypes[self.torch_dtype].name[0]
276
+
277
+ @property
278
+ def typecode(self):
279
+ return self._scalar_type.typecode
280
+
281
+ def __eq__(self, other):
282
+ if isinstance(other, DType):
283
+ return self._scalar_type == other._scalar_type
284
+ try:
285
+ other_instance = DType(other)
286
+ except TypeError:
287
+ return False
288
+ return self._scalar_type == other_instance._scalar_type
289
+
290
+ @property
291
+ def torch_dtype(self):
292
+ return self._scalar_type.torch_dtype
293
+
294
+ def __hash__(self):
295
+ return hash(self._scalar_type.name)
296
+
297
+ def __repr__(self):
298
+ return f'dtype("{self.name}")'
299
+
300
+ __str__ = __repr__
301
+
302
+ @property
303
+ def itemsize(self):
304
+ elem = self.type(1)
305
+ return elem.tensor.element_size()
306
+
307
+ def __getstate__(self):
308
+ return self._scalar_type
309
+
310
+ def __setstate__(self, value):
311
+ self._scalar_type = value
312
+
313
+
314
+ typecodes = {
315
+ "All": "efdFDBbhil?",
316
+ "AllFloat": "efdFD",
317
+ "AllInteger": "Bbhil",
318
+ "Integer": "bhil",
319
+ "UnsignedInteger": "B",
320
+ "Float": "efd",
321
+ "Complex": "FD",
322
+ }
323
+
324
+
325
+ # ### Defaults and dtype discovery
326
+
327
+
328
+ def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
329
+ """Set the (global) defaults for fp, complex, and int dtypes.
330
+
331
+ The complex dtype is inferred from the float (fp) dtype. It has
332
+ a width at least twice the width of the float dtype,
333
+ i.e., it's complex128 for float64 and complex64 for float32.
334
+
335
+ Parameters
336
+ ----------
337
+ fp_dtype
338
+ Allowed values are "numpy", "pytorch" or dtype_like things which
339
+ can be converted into a DType instance.
340
+ Default is "numpy" (i.e. float64).
341
+ int_dtype
342
+ Allowed values are "numpy", "pytorch" or dtype_like things which
343
+ can be converted into a DType instance.
344
+ Default is "numpy" (i.e. int64).
345
+
346
+ Returns
347
+ -------
348
+ The old default dtype state: a namedtuple with attributes ``float_dtype``,
349
+ ``complex_dtypes`` and ``int_dtype``. These attributes store *pytorch*
350
+ dtypes.
351
+
352
+ Notes
353
+ ------------
354
+ This functions has a side effect: it sets the global state with the provided dtypes.
355
+
356
+ The complex dtype has bit width of at least twice the width of the float
357
+ dtype, i.e. it's complex128 for float64 and complex64 for float32.
358
+
359
+ """
360
+ if fp_dtype not in ["numpy", "pytorch"]:
361
+ fp_dtype = dtype(fp_dtype).torch_dtype
362
+ if int_dtype not in ["numpy", "pytorch"]:
363
+ int_dtype = dtype(int_dtype).torch_dtype
364
+
365
+ if fp_dtype == "numpy":
366
+ float_dtype = torch.float64
367
+ elif fp_dtype == "pytorch":
368
+ float_dtype = torch.float32
369
+ else:
370
+ float_dtype = fp_dtype
371
+
372
+ complex_dtype = {
373
+ torch.float64: torch.complex128,
374
+ torch.float32: torch.complex64,
375
+ torch.float16: torch.complex64,
376
+ }[float_dtype]
377
+
378
+ if int_dtype in ["numpy", "pytorch"]:
379
+ int_dtype = torch.int64
380
+ else:
381
+ int_dtype = int_dtype
382
+
383
+ new_defaults = _dtypes_impl.DefaultDTypes(
384
+ float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
385
+ )
386
+
387
+ # set the new global state and return the old state
388
+ old_defaults = _dtypes_impl.default_dtypes
389
+ _dtypes_impl._default_dtypes = new_defaults
390
+ return old_defaults
391
+
392
+
393
+ def issubclass_(arg, klass):
394
+ try:
395
+ return issubclass(arg, klass)
396
+ except TypeError:
397
+ return False
398
+
399
+
400
+ def issubdtype(arg1, arg2):
401
+ # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420
402
+ if not issubclass_(arg1, generic):
403
+ arg1 = dtype(arg1).type
404
+ if not issubclass_(arg2, generic):
405
+ arg2 = dtype(arg2).type
406
+ return issubclass(arg1, arg2)
407
+
408
+
409
+ __all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype"]
410
+ __all__ += list(_names.keys()) # noqa: PLE0605
411
+ __all__ += list(_name_aliases.keys()) # noqa: PLE0605
412
+ __all__ += [ # noqa: PLE0605
413
+ "sctypes",
414
+ "generic",
415
+ "number",
416
+ "integer",
417
+ "signedinteger",
418
+ "unsignedinteger",
419
+ "inexact",
420
+ "floating",
421
+ "complexfloating",
422
+ ]
llava_next/lib/python3.10/site-packages/torch/_numpy/_funcs.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import itertools
3
+
4
+ from . import _funcs_impl, _reductions_impl
5
+ from ._normalizations import normalizer
6
+
7
+ # _funcs_impl.py contains functions which mimic NumPy's eponymous equivalents,
8
+ # and consume/return PyTorch tensors/dtypes.
9
+ # They are also type annotated.
10
+ # Pull these functions from _funcs_impl and decorate them with @normalizer, which
11
+ # - Converts any input `np.ndarray`, `torch._numpy.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`.
12
+ # - Maps NumPy dtypes to PyTorch dtypes
13
+ # - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple
14
+ # - Implements the semantics for the `out=` arg
15
+ # - Wraps back the outputs into `torch._numpy.ndarrays`
16
+
17
+
18
+ def _public_functions(mod):
19
+ def is_public_function(f):
20
+ return inspect.isfunction(f) and not f.__name__.startswith("_")
21
+
22
+ return inspect.getmembers(mod, is_public_function)
23
+
24
+
25
+ # We fill in __all__ in the loop below
26
+ __all__ = []
27
+
28
+ # decorate implementer functions with argument normalizers and export to the top namespace
29
+ for name, func in itertools.chain(
30
+ _public_functions(_funcs_impl), _public_functions(_reductions_impl)
31
+ ):
32
+ if name in ["percentile", "quantile", "median"]:
33
+ decorated = normalizer(func, promote_scalar_result=True)
34
+ elif name == "einsum":
35
+ # normalized manually
36
+ decorated = func
37
+ else:
38
+ decorated = normalizer(func)
39
+
40
+ decorated.__qualname__ = name
41
+ decorated.__name__ = name
42
+ vars()[name] = decorated
43
+ __all__.append(name)
44
+
45
+
46
+ """
47
+ Vendored objects from numpy.lib.index_tricks
48
+ """
49
+
50
+
51
+ class IndexExpression:
52
+ """
53
+ Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
54
+ last revision: 1999-7-23
55
+
56
+ Cosmetic changes by T. Oliphant 2001
57
+ """
58
+
59
+ def __init__(self, maketuple):
60
+ self.maketuple = maketuple
61
+
62
+ def __getitem__(self, item):
63
+ if self.maketuple and not isinstance(item, tuple):
64
+ return (item,)
65
+ else:
66
+ return item
67
+
68
+
69
+ index_exp = IndexExpression(maketuple=True)
70
+ s_ = IndexExpression(maketuple=False)
71
+
72
+
73
+ __all__ += ["index_exp", "s_"]
llava_next/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py ADDED
@@ -0,0 +1,2056 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A thin pytorch / numpy compat layer.
2
+
3
+ Things imported from here have numpy-compatible signatures but operate on
4
+ pytorch tensors.
5
+ """
6
+ # Contents of this module ends up in the main namespace via _funcs.py
7
+ # where type annotations are used in conjunction with the @normalizer decorator.
8
+ from __future__ import annotations
9
+
10
+ import builtins
11
+ import itertools
12
+ import operator
13
+ from typing import Optional, Sequence
14
+
15
+ import torch
16
+
17
+ from . import _dtypes_impl, _util
18
+ from ._normalizations import (
19
+ ArrayLike,
20
+ CastingModes,
21
+ DTypeLike,
22
+ NDArray,
23
+ NotImplementedType,
24
+ OutArray,
25
+ )
26
+
27
+
28
+ def copy(
29
+ a: ArrayLike, order: NotImplementedType = "K", subok: NotImplementedType = False
30
+ ):
31
+ return a.clone()
32
+
33
+
34
+ def copyto(
35
+ dst: NDArray,
36
+ src: ArrayLike,
37
+ casting: Optional[CastingModes] = "same_kind",
38
+ where: NotImplementedType = None,
39
+ ):
40
+ (src,) = _util.typecast_tensors((src,), dst.dtype, casting=casting)
41
+ dst.copy_(src)
42
+
43
+
44
+ def atleast_1d(*arys: ArrayLike):
45
+ res = torch.atleast_1d(*arys)
46
+ if isinstance(res, tuple):
47
+ return list(res)
48
+ else:
49
+ return res
50
+
51
+
52
+ def atleast_2d(*arys: ArrayLike):
53
+ res = torch.atleast_2d(*arys)
54
+ if isinstance(res, tuple):
55
+ return list(res)
56
+ else:
57
+ return res
58
+
59
+
60
+ def atleast_3d(*arys: ArrayLike):
61
+ res = torch.atleast_3d(*arys)
62
+ if isinstance(res, tuple):
63
+ return list(res)
64
+ else:
65
+ return res
66
+
67
+
68
+ def _concat_check(tup, dtype, out):
69
+ if tup == ():
70
+ raise ValueError("need at least one array to concatenate")
71
+
72
+ """Check inputs in concatenate et al."""
73
+ if out is not None and dtype is not None:
74
+ # mimic numpy
75
+ raise TypeError(
76
+ "concatenate() only takes `out` or `dtype` as an "
77
+ "argument, but both were provided."
78
+ )
79
+
80
+
81
def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"):
    """Figure out the target dtype and cast `tensors` to it if necessary."""
    if out is None and dtype is None:
        out_dtype = _dtypes_impl.result_type_impl(*tensors)
    else:
        # an explicit dtype wins; otherwise use the dtype of `out`
        out_dtype = dtype if dtype is not None else out.dtype.torch_dtype

    # cast input arrays if necessary; do not broadcast them against `out`
    return _util.typecast_tensors(tensors, out_dtype, casting)
94
+
95
+
96
def _concatenate(
    tensors, axis=0, out=None, dtype=None, casting: Optional[CastingModes] = "same_kind"
):
    """Pure-torch concatenation core; also reused by cov/corrcoef below."""
    tensors, axis = _util.axis_none_flatten(*tensors, axis=axis)
    cast_tensors = _concat_cast_helper(tensors, out, dtype, casting)
    return torch.cat(cast_tensors, axis)


def concatenate(
    ar_tuple: Sequence[ArrayLike],
    axis=0,
    out: Optional[OutArray] = None,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """numpy.concatenate analog on torch tensors."""
    _concat_check(ar_tuple, dtype, out=out)
    return _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting)
115
+
116
+
117
def _stacker(torch_stack_func, tup, dtype, casting):
    # Common body of {v,h,d,column_}stack: validate, cast, delegate to torch.
    _concat_check(tup, dtype, out=None)
    tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
    return torch_stack_func(tensors)


def vstack(
    tup: Sequence[ArrayLike],
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Stack arrays vertically (row-wise)."""
    return _stacker(torch.vstack, tup, dtype, casting)


row_stack = vstack


def hstack(
    tup: Sequence[ArrayLike],
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Stack arrays horizontally (column-wise)."""
    return _stacker(torch.hstack, tup, dtype, casting)


def dstack(
    tup: Sequence[ArrayLike],
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Stack arrays along the third axis.

    XXX: in numpy 1.24 dstack does not have dtype and casting keywords
    but {h,v}stack do. Hence add them here for consistency.
    """
    return _stacker(torch.dstack, tup, dtype, casting)


def column_stack(
    tup: Sequence[ArrayLike],
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Stack 1-D arrays as columns of a 2-D array.

    XXX: in numpy 1.24 column_stack does not have dtype and casting keywords
    but row_stack does (because row_stack is an alias for vstack, really).
    Hence add these keywords here for consistency.
    """
    return _stacker(torch.column_stack, tup, dtype, casting)


def stack(
    arrays: Sequence[ArrayLike],
    axis=0,
    out: Optional[OutArray] = None,
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Join arrays along a new axis."""
    _concat_check(arrays, dtype, out=out)

    tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting)
    # the new axis is indexed in the *result*, which has one extra dim
    result_ndim = tensors[0].ndim + 1
    axis = _util.normalize_axis_index(axis, result_ndim)
    return torch.stack(tensors, axis=axis)
183
+
184
+
185
def append(arr: ArrayLike, values: ArrayLike, axis=None):
    """numpy.append analog: concatenate `values` onto `arr` along `axis`.

    With axis=None both inputs are flattened first, as in numpy.
    """
    if axis is None:
        if arr.ndim != 1:
            arr = arr.flatten()
        values = values.flatten()
        axis = arr.ndim - 1
    return _concatenate((arr, values), axis=axis)
192
+
193
+
194
+ # ### split ###
195
+
196
+
197
+ def _split_helper(tensor, indices_or_sections, axis, strict=False):
198
+ if isinstance(indices_or_sections, int):
199
+ return _split_helper_int(tensor, indices_or_sections, axis, strict)
200
+ elif isinstance(indices_or_sections, (list, tuple)):
201
+ # NB: drop split=..., it only applies to split_helper_int
202
+ return _split_helper_list(tensor, list(indices_or_sections), axis)
203
+ else:
204
+ raise TypeError("split_helper: ", type(indices_or_sections))
205
+
206
+
207
+ def _split_helper_int(tensor, indices_or_sections, axis, strict=False):
208
+ if not isinstance(indices_or_sections, int):
209
+ raise NotImplementedError("split: indices_or_sections")
210
+
211
+ axis = _util.normalize_axis_index(axis, tensor.ndim)
212
+
213
+ # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n
214
+ l, n = tensor.shape[axis], indices_or_sections
215
+
216
+ if n <= 0:
217
+ raise ValueError()
218
+
219
+ if l % n == 0:
220
+ num, sz = n, l // n
221
+ lst = [sz] * num
222
+ else:
223
+ if strict:
224
+ raise ValueError("array split does not result in an equal division")
225
+
226
+ num, sz = l % n, l // n + 1
227
+ lst = [sz] * num
228
+
229
+ lst += [sz - 1] * (n - num)
230
+
231
+ return torch.split(tensor, lst, axis)
232
+
233
+
234
+ def _split_helper_list(tensor, indices_or_sections, axis):
235
+ if not isinstance(indices_or_sections, list):
236
+ raise NotImplementedError("split: indices_or_sections: list")
237
+ # numpy expects indices, while torch expects lengths of sections
238
+ # also, numpy appends zero-size arrays for indices above the shape[axis]
239
+ lst = [x for x in indices_or_sections if x <= tensor.shape[axis]]
240
+ num_extra = len(indices_or_sections) - len(lst)
241
+
242
+ lst.append(tensor.shape[axis])
243
+ lst = [
244
+ lst[0],
245
+ ] + [a - b for a, b in zip(lst[1:], lst[:-1])]
246
+ lst += [0] * num_extra
247
+
248
+ return torch.split(tensor, lst, axis)
249
+
250
+
251
def array_split(ary: ArrayLike, indices_or_sections, axis=0):
    """numpy.array_split analog: uneven divisions are allowed."""
    return _split_helper(ary, indices_or_sections, axis)


def split(ary: ArrayLike, indices_or_sections, axis=0):
    """numpy.split analog: raises if the division is not even."""
    return _split_helper(ary, indices_or_sections, axis, strict=True)


def hsplit(ary: ArrayLike, indices_or_sections):
    """Split horizontally (axis 1, or axis 0 for 1-D input)."""
    if ary.ndim == 0:
        raise ValueError("hsplit only works on arrays of 1 or more dimensions")
    axis = 1 if ary.ndim > 1 else 0
    return _split_helper(ary, indices_or_sections, axis, strict=True)


def vsplit(ary: ArrayLike, indices_or_sections):
    """Split vertically (axis 0); requires at least 2-D input."""
    if ary.ndim < 2:
        raise ValueError("vsplit only works on arrays of 2 or more dimensions")
    return _split_helper(ary, indices_or_sections, 0, strict=True)


def dsplit(ary: ArrayLike, indices_or_sections):
    """Split along the third axis; requires at least 3-D input."""
    if ary.ndim < 3:
        raise ValueError("dsplit only works on arrays of 3 or more dimensions")
    return _split_helper(ary, indices_or_sections, 2, strict=True)
276
+
277
+
278
def kron(a: ArrayLike, b: ArrayLike):
    """Kronecker product of two arrays."""
    return torch.kron(a, b)


def vander(x: ArrayLike, N=None, increasing=False):
    """Vandermonde matrix of `x`."""
    return torch.vander(x, N, increasing)
284
+
285
+
286
+ # ### linspace, geomspace, logspace and arange ###
287
+
288
+
289
def linspace(
    start: ArrayLike,
    stop: ArrayLike,
    num=50,
    endpoint=True,
    retstep=False,
    dtype: Optional[DTypeLike] = None,
    axis=0,
):
    """Evenly spaced numbers over [start, stop].

    Only the default axis=0 / retstep=False / endpoint=True combination is
    supported.
    """
    if axis != 0 or retstep or not endpoint:
        raise NotImplementedError
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    # XXX: raises TypeError if start or stop are not scalars
    return torch.linspace(start, stop, num, dtype=dtype)
304
+
305
+
306
def geomspace(
    start: ArrayLike,
    stop: ArrayLike,
    num=50,
    endpoint=True,
    dtype: Optional[DTypeLike] = None,
    axis=0,
):
    """Numbers spaced evenly on a log scale (a geometric progression)."""
    if axis != 0 or not endpoint:
        raise NotImplementedError
    # NOTE(review): `dtype` is accepted but never applied below — presumably
    # handled by the normalization layer; verify against callers.
    base = torch.pow(stop / start, 1.0 / (num - 1))
    logbase = torch.log(base)
    start_exp = torch.log(start) / logbase
    stop_exp = torch.log(stop) / logbase
    return torch.logspace(start_exp, stop_exp, num, base=base)
324
+
325
+
326
def logspace(
    start,
    stop,
    num=50,
    endpoint=True,
    base=10.0,
    dtype: Optional[DTypeLike] = None,
    axis=0,
):
    """Numbers spaced evenly on a log scale: base**start ... base**stop."""
    if axis != 0 or not endpoint:
        raise NotImplementedError
    return torch.logspace(start, stop, num, base=base, dtype=dtype)
338
+
339
+
340
def arange(
    start: Optional[ArrayLike] = None,
    stop: Optional[ArrayLike] = None,
    step: Optional[ArrayLike] = 1,
    dtype: Optional[DTypeLike] = None,
    *,
    like: NotImplementedType = None,
):
    """numpy.arange analog supporting the one/two/three-argument forms."""
    if step == 0:
        raise ZeroDivisionError
    if stop is None and start is None:
        raise TypeError
    if stop is None:
        # XXX: this breaks if start is passed as a kwarg:
        # arange(start=4) should raise (no stop) but doesn't
        start, stop = 0, start
    if start is None:
        start = 0

    # the dtype of the result
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().int_dtype
    # XXX: default values do not get normalized
    start, stop, step = (_util._coerce_to_tensor(x) for x in (start, stop, step))

    dummy = torch.empty(1, dtype=dtype)
    target_dtype = _dtypes_impl.result_type_impl(start, stop, step, dummy)

    # work around RuntimeError: "arange_cpu" not implemented for 'ComplexFloat'
    work_dtype = torch.float64 if target_dtype.is_complex else target_dtype

    if (step > 0 and start > stop) or (step < 0 and start < stop):
        # empty range
        return torch.empty(0, dtype=target_dtype)

    result = torch.arange(start, stop, step, dtype=work_dtype)
    return _util.cast_if_needed(result, target_dtype)
378
+
379
+
380
+ # ### zeros/ones/empty/full ###
381
+
382
+
383
def empty(
    shape,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """Uninitialized tensor of the given shape (numpy.empty analog)."""
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.empty(shape, dtype=dtype)


# NB: *_like functions deliberately deviate from numpy: it has subok=True
# as the default; we set subok=False and raise on anything else.


def empty_like(
    prototype: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "K",
    subok: NotImplementedType = False,
    shape=None,
):
    """Uninitialized tensor shaped like `prototype` (optionally reshaped)."""
    result = torch.empty_like(prototype, dtype=dtype)
    if shape is not None:
        result = result.reshape(shape)
    return result
410
+
411
+
412
def full(
    shape,
    fill_value: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """New tensor of `shape` filled with `fill_value` (numpy.full analog).

    If `dtype` is None, the dtype of `fill_value` is used.
    """
    # normalize a scalar shape to a 1-tuple; the former separate
    # `isinstance(shape, int)` check was redundant with this one
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)
    if dtype is None:
        dtype = fill_value.dtype
    return torch.full(shape, fill_value, dtype=dtype)


def full_like(
    a: ArrayLike,
    fill_value,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "K",
    subok: NotImplementedType = False,
    shape=None,
):
    """Tensor shaped like `a` filled with `fill_value` (optionally reshaped)."""
    # XXX: fill_value broadcasts
    result = torch.full_like(a, fill_value, dtype=dtype)
    if shape is not None:
        result = result.reshape(shape)
    return result
442
+
443
+
444
def ones(
    shape,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """Tensor of ones with the given shape (numpy.ones analog)."""
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.ones(shape, dtype=dtype)


def ones_like(
    a: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "K",
    subok: NotImplementedType = False,
    shape=None,
):
    """Tensor of ones shaped like `a` (optionally reshaped)."""
    result = torch.ones_like(a, dtype=dtype)
    if shape is not None:
        result = result.reshape(shape)
    return result


def zeros(
    shape,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """Tensor of zeros with the given shape (numpy.zeros analog)."""
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.zeros(shape, dtype=dtype)


def zeros_like(
    a: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "K",
    subok: NotImplementedType = False,
    shape=None,
):
    """Tensor of zeros shaped like `a` (optionally reshaped)."""
    result = torch.zeros_like(a, dtype=dtype)
    if shape is not None:
        result = result.reshape(shape)
    return result
492
+
493
+
494
+ # ### cov & corrcoef ###
495
+
496
+
497
+ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True):
498
+ """Prepate inputs for cov and corrcoef."""
499
+
500
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636
501
+ if y_tensor is not None:
502
+ # make sure x and y are at least 2D
503
+ ndim_extra = 2 - x_tensor.ndim
504
+ if ndim_extra > 0:
505
+ x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape)
506
+ if not rowvar and x_tensor.shape[0] != 1:
507
+ x_tensor = x_tensor.mT
508
+ x_tensor = x_tensor.clone()
509
+
510
+ ndim_extra = 2 - y_tensor.ndim
511
+ if ndim_extra > 0:
512
+ y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape)
513
+ if not rowvar and y_tensor.shape[0] != 1:
514
+ y_tensor = y_tensor.mT
515
+ y_tensor = y_tensor.clone()
516
+
517
+ x_tensor = _concatenate((x_tensor, y_tensor), axis=0)
518
+
519
+ return x_tensor
520
+
521
+
522
def corrcoef(
    x: ArrayLike,
    y: Optional[ArrayLike] = None,
    rowvar=True,
    bias=None,
    ddof=None,
    *,
    dtype: Optional[DTypeLike] = None,
):
    """numpy.corrcoef analog (`bias`/`ddof` are deprecated in numpy: rejected)."""
    if bias is not None or ddof is not None:
        # deprecated in NumPy
        raise NotImplementedError
    xy_tensor = _xy_helper_corrcoef(x, y, rowvar)

    # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
    is_half = (xy_tensor.dtype == torch.float16) and xy_tensor.is_cpu
    if is_half:
        dtype = torch.float32

    xy_tensor = _util.cast_if_needed(xy_tensor, dtype)
    result = torch.corrcoef(xy_tensor)

    if is_half:
        # cast the workaround's float32 result back to half
        result = result.to(torch.float16)

    return result
548
+
549
+
550
def cov(
    m: ArrayLike,
    y: Optional[ArrayLike] = None,
    rowvar=True,
    bias=False,
    ddof=None,
    fweights: Optional[ArrayLike] = None,
    aweights: Optional[ArrayLike] = None,
    *,
    dtype: Optional[DTypeLike] = None,
):
    """numpy.cov analog on torch tensors."""
    m = _xy_helper_corrcoef(m, y, rowvar)

    if ddof is None:
        # numpy: bias=False -> ddof=1, bias=True -> ddof=0
        ddof = 1 if bias == 0 else 0

    # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
    is_half = (m.dtype == torch.float16) and m.is_cpu
    if is_half:
        dtype = torch.float32

    m = _util.cast_if_needed(m, dtype)
    result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights)

    if is_half:
        # cast the workaround's float32 result back to half
        result = result.to(torch.float16)

    return result
578
+
579
+
580
def _conv_corr_impl(a, v, mode):
    """Shared core of convolve/correlate, built on conv1d."""
    dt = _dtypes_impl.result_type_impl(a, v)
    a = _util.cast_if_needed(a, dt)
    v = _util.cast_if_needed(v, dt)

    padding = v.shape[0] - 1 if mode == "full" else mode

    # NumPy only accepts 1D arrays; PyTorch requires 2D inputs and 3D weights
    aa = a[None, :]
    vv = v[None, None, :]

    result = torch.nn.functional.conv1d(aa, vv, padding=padding)

    # torch returns a 2D result, numpy returns a 1D array
    return result[0, :]


def convolve(a: ArrayLike, v: ArrayLike, mode="full"):
    """numpy.convolve analog."""
    # NumPy: if v is longer than a, the arrays are swapped before computation
    if a.shape[0] < v.shape[0]:
        a, v = v, a

    # flip the weights since numpy does and torch does not
    flipped = torch.flip(v, (0,))

    return _conv_corr_impl(a, flipped, mode)


def correlate(a: ArrayLike, v: ArrayLike, mode="valid"):
    """numpy.correlate analog (conjugates `v`, as numpy does)."""
    return _conv_corr_impl(a, torch.conj_physical(v), mode)
611
+
612
+
613
+ # ### logic & element selection ###
614
+
615
+
616
def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0):
    """numpy.bincount analog."""
    if x.numel() == 0:
        # edge case allowed by numpy
        x = x.new_empty(0, dtype=int)

    int_dtype = _dtypes_impl.default_dtypes().int_dtype
    (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe")

    return torch.bincount(x, weights, minlength)
625
+
626
+
627
def where(
    condition: ArrayLike,
    x: Optional[ArrayLike] = None,
    y: Optional[ArrayLike] = None,
    /,
):
    """numpy.where analog: one-argument (nonzero) and three-argument forms."""
    if (x is None) != (y is None):
        raise ValueError("either both or neither of x and y should be given")

    if condition.dtype != torch.bool:
        condition = condition.to(torch.bool)

    if x is None:
        # one-argument form: indices where condition holds
        return torch.where(condition)
    return torch.where(condition, x, y)
644
+
645
+
646
+ # ###### module-level queries of object properties
647
+
648
+
649
def ndim(a: ArrayLike):
    """Number of dimensions of `a`."""
    return a.ndim


def shape(a: ArrayLike):
    """Shape of `a` as a plain tuple (numpy returns a tuple, not torch.Size)."""
    return tuple(a.shape)


def size(a: ArrayLike, axis=None):
    """Total number of elements, or the extent along `axis`."""
    if axis is None:
        return a.numel()
    return a.shape[axis]
662
+
663
+
664
+ # ###### shape manipulations and indexing
665
+
666
+
667
def expand_dims(a: ArrayLike, axis):
    """Insert new axes at the given positions; returns a view, never a copy."""
    new_shape = _util.expand_shape(a.shape, axis)
    return a.view(new_shape)  # never copies


def flip(m: ArrayLike, axis=None):
    """Reverse element order along the given axes.

    XXX: semantic difference: np.flip returns a view, torch.flip copies.
    """
    if axis is None:
        axis = tuple(range(m.ndim))
    else:
        axis = _util.normalize_axis_tuple(axis, m.ndim)
    return torch.flip(m, axis)


def flipud(m: ArrayLike):
    """Flip rows (up/down)."""
    return torch.flipud(m)


def fliplr(m: ArrayLike):
    """Flip columns (left/right)."""
    return torch.fliplr(m)


def rot90(m: ArrayLike, k=1, axes=(0, 1)):
    """Rotate by 90 degrees `k` times in the plane given by `axes`."""
    axes = _util.normalize_axis_tuple(axes, m.ndim)
    return torch.rot90(m, k, axes)
692
+
693
+
694
+ # ### broadcasting and indices ###
695
+
696
+
697
def broadcast_to(array: ArrayLike, shape, subok: NotImplementedType = False):
    """Broadcast `array` to the given shape (view, numpy.broadcast_to analog)."""
    return torch.broadcast_to(array, size=shape)


# This is a function from tuples to tuples, so we just reuse it
from torch import broadcast_shapes


def broadcast_arrays(*args: ArrayLike, subok: NotImplementedType = False):
    """Broadcast the inputs against each other."""
    return torch.broadcast_tensors(*args)
707
+
708
+
709
def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"):
    """numpy.meshgrid analog; returns a list to match numpy."""
    ndim = len(xi)

    if indexing not in ["xy", "ij"]:
        raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")

    # reshape each input so it varies along its own axis only
    s0 = (1,) * ndim
    output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)]

    if indexing == "xy" and ndim > 1:
        # switch first and second axis
        output[0] = output[0].reshape((1, -1) + s0[2:])
        output[1] = output[1].reshape((-1, 1) + s0[2:])

    if not sparse:
        # Return the full N-D matrix (not only the 1-D vector)
        output = torch.broadcast_tensors(*output)

    if copy:
        output = [x.clone() for x in output]

    return list(output)  # match numpy, return a list
731
+
732
+
733
def indices(dimensions, dtype: Optional[DTypeLike] = int, sparse=False):
    """numpy.indices analog.

    https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791
    """
    dimensions = tuple(dimensions)
    N = len(dimensions)
    shape = (1,) * N
    # sparse: build a tuple of per-axis index grids; dense: one stacked tensor
    res = tuple() if sparse else torch.empty((N,) + dimensions, dtype=dtype)
    for i, dim in enumerate(dimensions):
        idx = torch.arange(dim, dtype=dtype).reshape(
            shape[:i] + (dim,) + shape[i + 1 :]
        )
        if sparse:
            res = res + (idx,)
        else:
            res[i] = idx
    return res
751
+
752
+
753
+ # ### tri*-something ###
754
+
755
+
756
def tril(m: ArrayLike, k=0):
    """Lower triangle of `m` at and below the k-th diagonal."""
    return torch.tril(m, k)


def triu(m: ArrayLike, k=0):
    """Upper triangle of `m` at and above the k-th diagonal."""
    return torch.triu(m, k)
762
+
763
+
764
def tril_indices(n, k=0, m=None):
    """Indices of the lower triangle of an (n, m) array (m defaults to n)."""
    return torch.tril_indices(n, n if m is None else m, offset=k)


def triu_indices(n, k=0, m=None):
    """Indices of the upper triangle of an (n, m) array (m defaults to n)."""
    return torch.triu_indices(n, n if m is None else m, offset=k)
774
+
775
+
776
def tril_indices_from(arr: ArrayLike, k=0):
    """Lower-triangle indices matched to the shape of a 2-D `arr`."""
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    # Return a tensor rather than a tuple to avoid a graphbreak
    return torch.tril_indices(arr.shape[0], arr.shape[1], offset=k)


def triu_indices_from(arr: ArrayLike, k=0):
    """Upper-triangle indices matched to the shape of a 2-D `arr`."""
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    # Return a tensor rather than a tuple to avoid a graphbreak
    return torch.triu_indices(arr.shape[0], arr.shape[1], offset=k)
788
+
789
+
790
def tri(
    N,
    M=None,
    k=0,
    dtype: Optional[DTypeLike] = None,
    *,
    like: NotImplementedType = None,
):
    """Array with ones at and below the k-th diagonal, zeros elsewhere."""
    if M is None:
        M = N
    return torch.tril(torch.ones((N, M), dtype=dtype), diagonal=k)
802
+
803
+
804
+ # ### equality, equivalence, allclose ###
805
+
806
+
807
def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
    """Element-wise closeness check; inputs are cast to a common dtype first."""
    common = _dtypes_impl.result_type_impl(a, b)
    a = _util.cast_if_needed(a, common)
    b = _util.cast_if_needed(b, common)
    return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)


def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False):
    """True if all elements are close; inputs are cast to a common dtype first."""
    common = _dtypes_impl.result_type_impl(a, b)
    a = _util.cast_if_needed(a, common)
    b = _util.cast_if_needed(b, common)
    return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
819
+
820
+
821
+ def _tensor_equal(a1, a2, equal_nan=False):
822
+ # Implementation of array_equal/array_equiv.
823
+ if a1.shape != a2.shape:
824
+ return False
825
+ cond = a1 == a2
826
+ if equal_nan:
827
+ cond = cond | (torch.isnan(a1) & torch.isnan(a2))
828
+ return cond.all().item()
829
+
830
+
831
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False):
    """True if the arrays have the same shape and equal elements."""
    return _tensor_equal(a1, a2, equal_nan=equal_nan)


def array_equiv(a1: ArrayLike, a2: ArrayLike):
    """*Almost* the same as array_equal: _equiv tries to broadcast, _equal does not."""
    try:
        a1_t, a2_t = torch.broadcast_tensors(a1, a2)
    except RuntimeError:
        # failed to broadcast => not equivalent
        return False
    return _tensor_equal(a1_t, a2_t)
843
+
844
+
845
def mintypecode():
    raise NotImplementedError


def nan_to_num(
    x: ArrayLike, copy: NotImplementedType = True, nan=0.0, posinf=None, neginf=None
):
    """Replace NaN/inf values; complex inputs are handled part-by-part."""
    # work around RuntimeError: "nan_to_num" not implemented for 'ComplexDouble'
    if not x.is_complex():
        return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
    real = torch.nan_to_num(x.real, nan=nan, posinf=posinf, neginf=neginf)
    imag = torch.nan_to_num(x.imag, nan=nan, posinf=posinf, neginf=neginf)
    return real + 1j * imag


def asfarray():
    raise NotImplementedError


def block(*args, **kwds):
    raise NotImplementedError
867
+
868
+
869
+ # ### put/take_along_axis ###
870
+
871
+
872
def take(
    a: ArrayLike,
    indices: ArrayLike,
    axis=None,
    out: Optional[OutArray] = None,
    mode: NotImplementedType = "raise",
):
    """numpy.take analog: select elements along `axis` by index."""
    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)
    selector = (slice(None),) * axis + (indices, ...)
    return a[selector]


def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
    """numpy.take_along_axis analog (torch.gather)."""
    (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
    axis = _util.normalize_axis_index(axis, arr.ndim)
    return torch.gather(arr, axis, indices)
890
+
891
+
892
def put(
    a: NDArray,
    ind: ArrayLike,
    v: ArrayLike,
    mode: NotImplementedType = "raise",
):
    """numpy.put analog: place values `v` at flat indices `ind`, in place."""
    v = v.type(a.dtype)
    # If ind is larger than v, expand v to at least the size of ind. Any
    # unnecessary trailing elements are then trimmed.
    if ind.numel() > v.numel():
        ratio = (ind.numel() + v.numel() - 1) // v.numel()
        v = v.unsqueeze(0).expand((ratio,) + v.shape)
    # Trim unnecessary elements, regardless of whether v was expanded. Note
    # np.put() trims v to match ind by default too.
    if ind.numel() < v.numel():
        v = v.flatten()
        v = v[: ind.numel()]
    a.put_(ind, v)
    return None
911
+
912
+
913
def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis):
    """numpy.put_along_axis analog: scatter `values` into `arr` in place."""
    (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
    axis = _util.normalize_axis_index(axis, arr.ndim)

    indices, values = torch.broadcast_tensors(indices, values)
    values = _util.cast_if_needed(values, arr.dtype)
    scattered = torch.scatter(arr, axis, indices, values)
    arr.copy_(scattered.reshape(arr.shape))
    return None
922
+
923
+
924
def choose(
    a: ArrayLike,
    choices: Sequence[ArrayLike],
    out: Optional[OutArray] = None,
    mode: NotImplementedType = "raise",
):
    """numpy.choose analog."""
    # First, broadcast elements of `choices`
    choices = torch.stack(torch.broadcast_tensors(*choices))

    # Use an analog of `gather(choices, 0, a)` which broadcasts `choices` vs `a`:
    # (taken from https://github.com/pytorch/pytorch/issues/9407#issuecomment-1427907939)
    idx_list = [
        torch.arange(dim).view((1,) * i + (dim,) + (1,) * (choices.ndim - i - 1))
        for i, dim in enumerate(choices.shape)
    ]

    # replace the first (selector) index grid with `a` itself
    idx_list[0] = a
    return choices[idx_list].squeeze(0)
942
+
943
+
944
+ # ### unique et al ###
945
+
946
+
947
def unique(
    ar: ArrayLike,
    return_index: NotImplementedType = False,
    return_inverse=False,
    return_counts=False,
    axis=None,
    *,
    equal_nan: NotImplementedType = True,
):
    """numpy.unique analog (`return_index` is not supported)."""
    (ar,), axis = _util.axis_none_flatten(ar, axis=axis)
    axis = _util.normalize_axis_index(axis, ar.ndim)

    # float16 workaround: compute in float32 and cast the values back
    is_half = ar.dtype == torch.float16
    if is_half:
        ar = ar.to(torch.float32)

    result = torch.unique(
        ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis
    )

    if is_half:
        if isinstance(result, tuple):
            # only the unique values need casting, not inverse/counts
            result = (result[0].to(torch.float16),) + result[1:]
        else:
            result = result.to(torch.float16)

    return result
974
+
975
+
976
def nonzero(a: ArrayLike):
    """Indices of nonzero elements, one tensor per dimension (numpy style)."""
    return torch.nonzero(a, as_tuple=True)


def argwhere(a: ArrayLike):
    """Coordinates of nonzero elements, one row per element."""
    return torch.argwhere(a)


def flatnonzero(a: ArrayLike):
    """Indices of nonzero elements in the flattened array."""
    return torch.flatten(a).nonzero(as_tuple=True)[0]
986
+
987
+
988
def clip(
    a: ArrayLike,
    min: Optional[ArrayLike] = None,
    max: Optional[ArrayLike] = None,
    out: Optional[OutArray] = None,
):
    """Limit values to the [min, max] range (numpy.clip analog)."""
    return torch.clamp(a, min, max)


def repeat(a: ArrayLike, repeats: ArrayLike, axis=None):
    """Repeat elements of `a` (numpy.repeat analog)."""
    # XXX: scalar repeats; ArrayLikeOrScalar ?
    return torch.repeat_interleave(a, repeats, axis)


def tile(A: ArrayLike, reps):
    """Tile `A` according to `reps` (numpy.tile analog)."""
    if isinstance(reps, int):
        reps = (reps,)
    return torch.tile(A, reps)
1006
+
1007
+
1008
def resize(a: ArrayLike, new_shape=None):
    """numpy.resize analog: repeat `a` cyclically to fill `new_shape`.

    Implementation vendored from
    https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497
    """
    if new_shape is None:
        return a

    if isinstance(new_shape, int):
        new_shape = (new_shape,)

    a = a.flatten()

    new_size = 1
    for dim_length in new_shape:
        new_size *= dim_length
        if dim_length < 0:
            raise ValueError("all elements of `new_shape` must be non-negative")

    if a.numel() == 0 or new_size == 0:
        # First case must zero fill. The second would have repeats == 0.
        return torch.zeros(new_shape, dtype=a.dtype)

    repeats = -(-new_size // a.numel())  # ceil division
    a = concatenate((a,) * repeats)[:new_size]

    return reshape(a, new_shape)
1033
+
1034
+
1035
+ # ### diag et al ###
1036
+
1037
+
1038
def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1):
    """Return the diagonal of `a` in the (axis1, axis2) plane."""
    axis1 = _util.normalize_axis_index(axis1, a.ndim)
    axis2 = _util.normalize_axis_index(axis2, a.ndim)
    return torch.diagonal(a, offset, axis1, axis2)


def trace(
    a: ArrayLike,
    offset=0,
    axis1=0,
    axis2=1,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    """Sum along the diagonal in the (axis1, axis2) plane."""
    return torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype)
1054
+
1055
+
1056
def eye(
    N,
    M=None,
    k=0,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """2-D tensor with ones on the k-th diagonal and zeros elsewhere."""
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    if M is None:
        M = N
    result = torch.zeros(N, M, dtype=dtype)
    result.diagonal(k).fill_(1)
    return result


def identity(n, dtype: Optional[DTypeLike] = None, *, like: NotImplementedType = None):
    """Square identity matrix of size `n`."""
    return torch.eye(n, dtype=dtype)


def diag(v: ArrayLike, k=0):
    """Extract a diagonal, or construct a diagonal matrix from 1-D input."""
    return torch.diag(v, k)


def diagflat(v: ArrayLike, k=0):
    """Diagonal matrix built from the flattened input."""
    return torch.diagflat(v, k)
1084
+
1085
+
1086
def diag_indices(n, ndim=2):
    """Indices to access the main diagonal of an `ndim`-dimensional array."""
    idx = torch.arange(n)
    return (idx,) * ndim
1089
+
1090
+
1091
def diag_indices_from(arr: ArrayLike):
    """Diagonal indices matched to the shape of `arr` (must be hypercubic)."""
    if not arr.ndim >= 2:
        raise ValueError("input array must be at least 2-d")
    # For more than d=2, the strided formula is only valid for arrays with
    # all dimensions equal, so we check first.
    s = arr.shape
    if s[1:] != s[:-1]:
        raise ValueError("All dimensions of input must be of equal length")
    return diag_indices(s[0], arr.ndim)
1100
+
1101
+
1102
def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False):
    """numpy.fill_diagonal analog: modify `a` in place and return it."""
    if a.ndim < 2:
        raise ValueError("array must be at least 2-d")
    if val.numel() == 0 and not wrap:
        a.fill_diagonal_(val)
        return a

    if val.ndim == 0:
        val = val.unsqueeze(0)

    # torch.Tensor.fill_diagonal_ only accepts scalars.
    # If the size of val is too large, then val is trimmed.
    if a.ndim == 2:
        tall = a.shape[0] > a.shape[1]
        # wrap does nothing for wide matrices...
        if not wrap or not tall:
            # Never wraps
            diag = a.diagonal()
            diag.copy_(val[: diag.numel()])
        else:
            # wraps and tall... leaving one empty line between diagonals?!
            max_, min_ = a.shape
            idx = torch.arange(max_ - max_ // (min_ + 1))
            mod = idx % min_
            div = idx // min_
            a[(div * (min_ + 1) + mod, mod)] = val[: idx.numel()]
    else:
        idx = diag_indices_from(a)
        # a.shape = (n, n, ..., n)
        a[idx] = val[: a.shape[0]]

    return a
1134
+
1135
+
1136
def vdot(a: ArrayLike, b: ArrayLike, /):
    """numpy.vdot analog.

    1. torch only accepts 1D arrays, numpy flattens
    2. torch requires matching dtype, while numpy casts (?)
    """
    t_a, t_b = torch.atleast_1d(a, b)
    if t_a.ndim > 1:
        t_a = t_a.flatten()
    if t_b.ndim > 1:
        t_b = t_b.flatten()

    dtype = _dtypes_impl.result_type_impl(t_a, t_b)
    is_half = dtype == torch.float16 and (t_a.is_cpu or t_b.is_cpu)
    is_bool = dtype == torch.bool

    # work around torch's "dot" not implemented for 'Half', 'Bool'
    if is_half:
        dtype = torch.float32
    elif is_bool:
        dtype = torch.uint8

    t_a = _util.cast_if_needed(t_a, dtype)
    t_b = _util.cast_if_needed(t_b, dtype)

    result = torch.vdot(t_a, t_b)

    # undo the workaround cast on the result
    if is_half:
        result = result.to(torch.float16)
    elif is_bool:
        result = result.to(torch.bool)

    return result
1166
+
1167
+
1168
def tensordot(a: ArrayLike, b: ArrayLike, axes=2):
    """np.tensordot on torch tensors; bare ints inside `axes` are listified
    because torch.tensordot expects sequences of dims."""
    if isinstance(axes, (list, tuple)):
        axes = [ax if not isinstance(ax, int) else [ax] for ax in axes]

    common = _dtypes_impl.result_type_impl(a, b)
    return torch.tensordot(
        _util.cast_if_needed(a, common), _util.cast_if_needed(b, common), dims=axes
    )
1177
+
1178
+
1179
def dot(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
    """np.dot: plain multiply for 0-d operands, torch.matmul otherwise.

    `out` is applied by the normalization layer, not here.
    """
    common = _dtypes_impl.result_type_impl(a, b)

    # matmul is not implemented for Bool: route through uint8 and cast back.
    via_uint8 = common == torch.bool
    if via_uint8:
        common = torch.uint8

    a = _util.cast_if_needed(a, common)
    b = _util.cast_if_needed(b, common)

    result = a * b if (a.ndim == 0 or b.ndim == 0) else torch.matmul(a, b)

    return result.to(torch.bool) if via_uint8 else result
1197
+
1198
+
1199
def inner(a: ArrayLike, b: ArrayLike, /):
    """Inner product over the last axes (np.inner), via torch.inner."""
    common = _dtypes_impl.result_type_impl(a, b)

    # torch falls over on CPU Half ("addmm_impl_cpu_" not implemented) and on
    # Bool: compute in a wider dtype and cast the result back.
    half_fixup = common == torch.float16 and (a.is_cpu or b.is_cpu)
    bool_fixup = common == torch.bool
    if half_fixup:
        common = torch.float32
    elif bool_fixup:
        common = torch.uint8

    result = torch.inner(
        _util.cast_if_needed(a, common), _util.cast_if_needed(b, common)
    )

    if half_fixup:
        return result.to(torch.float16)
    if bool_fixup:
        return result.to(torch.bool)
    return result
1220
+
1221
+
1222
def outer(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
    """Outer product of two 1D tensors via torch.outer (`out` is handled by
    the normalization layer)."""
    return torch.outer(a, b)
1224
+
1225
+
1226
def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Cross product of 2- or 3-component vectors along the given axes.

    `axis`, if given, overrides all of axisa/axisb/axisc. 2-component inputs
    are treated as 3-component with a zero third component.
    """
    # implementation vendored from
    # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1486-L1685
    if axis is not None:
        axisa, axisb, axisc = (axis,) * 3

    # Check axisa and axisb are within bounds
    axisa = _util.normalize_axis_index(axisa, a.ndim)
    axisb = _util.normalize_axis_index(axisb, b.ndim)

    # Move working axis to the end of the shape
    a = torch.moveaxis(a, axisa, -1)
    b = torch.moveaxis(b, axisb, -1)
    msg = "incompatible dimensions for cross product\n" "(dimension must be 2 or 3)"
    if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
        raise ValueError(msg)

    # Create the output array: broadcast the non-vector dims; the result has a
    # trailing 3-axis only when either input is 3-component (2x2 -> scalar).
    shape = broadcast_shapes(a[..., 0].shape, b[..., 0].shape)
    if a.shape[-1] == 3 or b.shape[-1] == 3:
        shape += (3,)
        # Check axisc is within bounds
        axisc = _util.normalize_axis_index(axisc, len(shape))
    dtype = _dtypes_impl.result_type_impl(a, b)
    cp = torch.empty(shape, dtype=dtype)

    # recast arrays as dtype
    a = _util.cast_if_needed(a, dtype)
    b = _util.cast_if_needed(b, dtype)

    # create local aliases for readability
    a0 = a[..., 0]
    a1 = a[..., 1]
    if a.shape[-1] == 3:
        a2 = a[..., 2]
    b0 = b[..., 0]
    b1 = b[..., 1]
    if b.shape[-1] == 3:
        b2 = b[..., 2]
    if cp.ndim != 0 and cp.shape[-1] == 3:
        cp0 = cp[..., 0]
        cp1 = cp[..., 1]
        cp2 = cp[..., 2]

    if a.shape[-1] == 2:
        if b.shape[-1] == 2:
            # a0 * b1 - a1 * b0
            cp[...] = a0 * b1 - a1 * b0
            return cp
        else:
            assert b.shape[-1] == 3
            # cp0 = a1 * b2 - 0  (a2 = 0)
            # cp1 = 0 - a0 * b2  (a2 = 0)
            # cp2 = a0 * b1 - a1 * b0
            cp0[...] = a1 * b2
            cp1[...] = -a0 * b2
            cp2[...] = a0 * b1 - a1 * b0
    else:
        assert a.shape[-1] == 3
        if b.shape[-1] == 3:
            # full 3D cross product
            cp0[...] = a1 * b2 - a2 * b1
            cp1[...] = a2 * b0 - a0 * b2
            cp2[...] = a0 * b1 - a1 * b0
        else:
            assert b.shape[-1] == 2
            # b2 = 0 terms drop out
            cp0[...] = -a2 * b1
            cp1[...] = a2 * b0
            cp2[...] = a0 * b1 - a1 * b0

    # move the component axis into the requested output position
    return torch.moveaxis(cp, -1, axisc)
1296
+
1297
+
1298
def einsum(*operands, out=None, dtype=None, order="K", casting="safe", optimize=False):
    """np.einsum over torch tensors, supporting both the subscripts-string and
    the interleaved sublist calling conventions.

    `optimize` is mapped onto torch's global opt_einsum strategy for the
    duration of the call; `order` other than "K" is not supported.
    """
    # Have to manually normalize *operands and **kwargs, following the NumPy signature
    # We have a local import to avoid polluting the global space, as it will be then
    # exported in funcs.py
    from ._ndarray import ndarray
    from ._normalizations import (
        maybe_copy_to,
        normalize_array_like,
        normalize_casting,
        normalize_dtype,
        wrap_tensors,
    )

    dtype = normalize_dtype(dtype)
    casting = normalize_casting(casting)
    if out is not None and not isinstance(out, ndarray):
        raise TypeError("'out' must be an array")
    if order != "K":
        raise NotImplementedError("'order' parameter is not supported.")

    # parse arrays and normalize them
    sublist_format = not isinstance(operands[0], str)
    if sublist_format:
        # op, str, op, str ... [sublistout] format: normalize every other argument
        # - if sublistout is not given, the length of operands is even, and we pick
        #   odd-numbered elements, which are arrays.
        # - if sublistout is given, the length of operands is odd, we peel off
        #   the last one, and pick odd-numbered elements, which are arrays.
        #   Without [:-1], we would have picked sublistout, too.
        array_operands = operands[:-1][::2]
    else:
        # ("ij->", arrays) format
        subscripts, array_operands = operands[0], operands[1:]

    tensors = [normalize_array_like(op) for op in array_operands]
    target_dtype = _dtypes_impl.result_type_impl(*tensors) if dtype is None else dtype

    # work around 'bmm' not implemented for 'Half' etc
    is_half = target_dtype == torch.float16 and all(t.is_cpu for t in tensors)
    if is_half:
        target_dtype = torch.float32

    is_short_int = target_dtype in [torch.uint8, torch.int8, torch.int16, torch.int32]
    if is_short_int:
        target_dtype = torch.int64

    tensors = _util.typecast_tensors(tensors, target_dtype, casting)

    from torch.backends import opt_einsum

    try:
        # set the global state to handle the optimize=... argument, restore on exit
        if opt_einsum.is_available():
            old_strategy = torch.backends.opt_einsum.strategy
            torch.backends.opt_einsum.strategy = optimize

        if sublist_format:
            # recombine operands: interleave normalized tensors with their
            # sublists, re-appending the output sublist if one was given
            sublists = operands[1::2]
            has_sublistout = len(operands) % 2 == 1
            if has_sublistout:
                sublistout = operands[-1]
            operands = list(itertools.chain(*zip(tensors, sublists)))
            if has_sublistout:
                operands.append(sublistout)

            result = torch.einsum(*operands)
        else:
            result = torch.einsum(subscripts, *tensors)

    finally:
        if opt_einsum.is_available():
            torch.backends.opt_einsum.strategy = old_strategy

    result = maybe_copy_to(out, result)
    return wrap_tensors(result)
1375
+
1376
+
1377
+ # ### sort and partition ###
1378
+
1379
+
1380
def _sort_helper(tensor, axis, kind, order):
    """Shared preprocessing for sort/argsort: flatten when axis is None,
    normalize the axis, and map numpy's kind= onto torch's stable= flag."""
    (tensor,), axis = _util.axis_none_flatten(tensor, axis=axis)
    axis = _util.normalize_axis_index(axis, tensor.ndim)
    return tensor, axis, kind == "stable"
1387
+
1388
+
1389
def sort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
    """np.sort; `order` applies to structured dtypes only and is unsupported."""
    if a.dtype.is_complex:
        # no ordering defined for complex values
        return NotImplemented
    tensor, dim, stable = _sort_helper(a, axis, kind, order)
    return torch.sort(tensor, dim=dim, stable=stable).values
1396
+
1397
+
1398
def argsort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
    """Indices that would sort `a` (np.argsort)."""
    if a.dtype.is_complex:
        # no ordering defined for complex values
        return NotImplemented
    tensor, dim, stable = _sort_helper(a, axis, kind, order)
    return torch.argsort(tensor, dim=dim, stable=stable)
1403
+
1404
+
1405
def searchsorted(
    a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None
):
    """Insertion indices of `v` into sorted `a` (np.searchsorted)."""
    if not a.dtype.is_complex:
        return torch.searchsorted(a, v, side=side, sorter=sorter)
    # no ordering defined for complex values
    return NotImplemented
1412
+
1413
+
1414
+ # ### swap/move/roll axis ###
1415
+
1416
+
1417
def moveaxis(a: ArrayLike, source, destination):
    """np.moveaxis: relocate axes; both arguments may be ints or tuples."""
    src = _util.normalize_axis_tuple(source, a.ndim, "source")
    dst = _util.normalize_axis_tuple(destination, a.ndim, "destination")
    return torch.moveaxis(a, src, dst)
1421
+
1422
+
1423
def swapaxes(a: ArrayLike, axis1, axis2):
    """Exchange two axes (np.swapaxes), normalizing negative indices first."""
    dim0 = _util.normalize_axis_index(axis1, a.ndim)
    dim1 = _util.normalize_axis_index(axis2, a.ndim)
    return torch.swapaxes(a, dim0, dim1)
1427
+
1428
+
1429
def rollaxis(a: ArrayLike, axis, start=0):
    """Roll `axis` back to position `start` (numpy's legacy rollaxis).

    Straight vendor from:
    https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259

    Also note this function in NumPy is mostly retained for backwards compat
    (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing)
    so let's not touch it unless hard pressed.
    """
    n = a.ndim
    axis = _util.normalize_axis_index(axis, n)
    if start < 0:
        start += n
    msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
    if not (0 <= start < n + 1):
        raise _util.AxisError(msg % ("start", -n, "start", n + 1, start))
    if axis < start:
        # it's been removed
        start -= 1
    if axis == start:
        # numpy returns a view, here we try returning the tensor itself
        # return tensor[...]
        return a
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    # BUGFIX: `axes` is a permutation of dimensions, so it must be applied with
    # permute (numpy's a.transpose(axes)). Tensor.view(axes) would instead try
    # to *reshape* `a` to the shape `axes`, which is wrong (and typically
    # errors out). The sibling `transpose` function here already uses permute.
    return a.permute(axes)
1454
+
1455
+
1456
def roll(a: ArrayLike, shift, axis=None):
    """Circularly shift elements (np.roll); axis=None rolls the flattened array."""
    if axis is None:
        return torch.roll(a, shift, axis)
    axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
    if not isinstance(shift, tuple):
        # one shift per rolled axis
        shift = (shift,) * len(axis)
    return torch.roll(a, shift, axis)
1462
+
1463
+
1464
+ # ### shape manipulations ###
1465
+
1466
+
1467
def squeeze(a: ArrayLike, axis=None):
    """Remove size-1 dimensions (np.squeeze).

    axis=() is a no-op; axis=None drops every size-1 dimension; an int or a
    tuple of ints drops the named dimensions (torch silently ignores non-1
    dims, matching the previous behavior of this wrapper).
    """
    if axis == ():
        result = a
    elif axis is None:
        result = a.squeeze()
    else:
        if isinstance(axis, tuple):
            # BUGFIX: the old loop did `result = a.squeeze(ax)`, so only the
            # *last* axis ever took effect. Accumulate into `result`, and
            # squeeze from the highest axis down so that removing a dimension
            # does not shift the positions of the axes still to be squeezed.
            result = a
            normalized = [ax if ax >= 0 else ax + a.ndim for ax in axis]
            for ax in sorted(normalized, reverse=True):
                result = result.squeeze(ax)
        else:
            result = a.squeeze(axis)
    return result
1480
+
1481
+
1482
def reshape(a: ArrayLike, newshape, order: NotImplementedType = "C"):
    """np.reshape; accepts both .reshape(sh) and .reshape(*sh) call styles."""
    # a single-element `newshape` means the caller passed the shape itself
    target = newshape[0] if len(newshape) == 1 else newshape
    return a.reshape(target)
1486
+
1487
+
1488
+ # NB: cannot use torch.reshape(a, newshape) above, because of
1489
+ # (Pdb) torch.reshape(torch.as_tensor([1]), 1)
1490
+ # *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int
1491
+
1492
+
1493
def transpose(a: ArrayLike, axes=None):
    """np.transpose; accepts .transpose(sh), .transpose(*sh), and list axes."""
    # empty spellings mean "reverse all dimensions"
    if axes in [(), None, (None,)]:
        perm = tuple(reversed(range(a.ndim)))
    elif len(axes) == 1:
        # a single-element `axes` holds the actual permutation sequence
        perm = axes[0]
    else:
        perm = axes
    return a.permute(perm)
1501
+
1502
+
1503
def ravel(a: ArrayLike, order: NotImplementedType = "C"):
    """Flatten to 1D (np.ravel); only C order is supported."""
    return torch.flatten(a)
1505
+
1506
+
1507
def diff(
    a: ArrayLike,
    n=1,
    axis=-1,
    prepend: Optional[ArrayLike] = None,
    append: Optional[ArrayLike] = None,
):
    """n-th discrete difference along `axis` (np.diff).

    `prepend`/`append` are broadcast to `a`'s shape (with the diff axis set to
    their own length, or 1 for scalars) before being handed to torch.diff.
    """
    axis = _util.normalize_axis_index(axis, a.ndim)

    if n < 0:
        raise ValueError(f"order must be non-negative but got {n}")

    if n == 0:
        # match numpy and return the input immediately
        return a

    if prepend is not None:
        shape = list(a.shape)
        shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1
        prepend = torch.broadcast_to(prepend, shape)

    if append is not None:
        shape = list(a.shape)
        shape[axis] = append.shape[axis] if append.ndim > 0 else 1
        append = torch.broadcast_to(append, shape)

    return torch.diff(a, n, axis=axis, prepend=prepend, append=append)
1534
+
1535
+
1536
+ # ### math functions ###
1537
+
1538
+
1539
def angle(z: ArrayLike, deg=False):
    """Phase angle of the input, in radians, or degrees when deg=True."""
    theta = torch.angle(z)
    return theta * (180 / torch.pi) if deg else theta
1544
+
1545
+
1546
def sinc(x: ArrayLike):
    """Normalized sinc, sin(pi*x)/(pi*x), delegated to torch.sinc."""
    return torch.sinc(x)
1548
+
1549
+
1550
+ # NB: have to normalize *varargs manually
1551
# NB: have to normalize *varargs manually
def gradient(f: ArrayLike, *varargs, axis=None, edge_order=1):
    """Numerical gradient of `f` (np.gradient), vendored from numpy.

    `varargs` supplies the sample spacing: nothing (spacing 1), one scalar for
    all axes, or one scalar/1d-coordinate array per axis. Central differences
    are used in the interior, one-sided (order `edge_order`) at the edges.
    Returns one tensor per differentiated axis (a bare tensor for one axis).
    """
    N = f.ndim  # number of dimensions

    varargs = _util.ndarrays_to_tensors(varargs)

    if axis is None:
        axes = tuple(range(N))
    else:
        axes = _util.normalize_axis_tuple(axis, N)

    len_axes = len(axes)
    n = len(varargs)
    if n == 0:
        # no spacing argument - use 1 in all axes
        dx = [1.0] * len_axes
    elif n == 1 and (
        type(varargs[0]) in _dtypes_impl.SCALAR_TYPES or varargs[0].ndim == 0
    ):
        # single scalar or 0D tensor for all axes (np.ndim(varargs[0]) == 0)
        dx = varargs * len_axes
    elif n == len_axes:
        # scalar or 1d array for each axis
        dx = list(varargs)
        for i, distances in enumerate(dx):
            distances = torch.as_tensor(distances)
            if distances.ndim == 0:
                continue
            elif distances.ndim != 1:
                raise ValueError("distances must be either scalars or 1d")
            if len(distances) != f.shape[axes[i]]:
                raise ValueError(
                    "when 1d, distances must match "
                    "the length of the corresponding dimension"
                )
            if not (distances.dtype.is_floating_point or distances.dtype.is_complex):
                distances = distances.double()

            diffx = torch.diff(distances)
            # if distances are constant reduce to the scalar case
            # since it brings a consistent speedup
            if (diffx == diffx[0]).all():
                diffx = diffx[0]
            dx[i] = diffx
    else:
        raise TypeError("invalid number of arguments")

    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")

    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.

    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)] * N
    slice2 = [slice(None)] * N
    slice3 = [slice(None)] * N
    slice4 = [slice(None)] * N

    otype = f.dtype
    if _dtypes_impl.python_type_for_torch(otype) in (int, bool):
        # Convert to floating point.
        # First check if f is a numpy integer type; if so, convert f to float64
        # to avoid modular arithmetic when computing the changes in f.
        f = f.double()
        otype = torch.float64

    for axis, ax_dx in zip(axes, dx):
        if f.shape[axis] < edge_order + 1:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least (edge_order + 1) elements are required."
            )
        # result allocation
        out = torch.empty_like(f, dtype=otype)

        # spacing for the current axis (NB: np.ndim(ax_dx) == 0)
        uniform_spacing = type(ax_dx) in _dtypes_impl.SCALAR_TYPES or ax_dx.ndim == 0

        # Numerical differentiation: 2nd order interior
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(None, -2)
        slice3[axis] = slice(1, -1)
        slice4[axis] = slice(2, None)

        if uniform_spacing:
            out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx)
        else:
            # non-uniform spacing: second-order finite-difference weights
            dx1 = ax_dx[0:-1]
            dx2 = ax_dx[1:]
            a = -(dx2) / (dx1 * (dx1 + dx2))
            b = (dx2 - dx1) / (dx1 * dx2)
            c = dx1 / (dx2 * (dx1 + dx2))
            # fix the shape for broadcasting
            shape = [1] * N
            shape[axis] = -1
            a = a.reshape(shape)
            b = b.reshape(shape)
            c = c.reshape(shape)
            # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
            out[tuple(slice1)] = (
                a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
            )

        # Numerical differentiation: 1st order edges
        if edge_order == 1:
            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            dx_0 = ax_dx if uniform_spacing else ax_dx[0]
            # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0

            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            dx_n = ax_dx if uniform_spacing else ax_dx[-1]
            # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n

        # Numerical differentiation: 2nd order edges
        else:
            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            if uniform_spacing:
                a = -1.5 / ax_dx
                b = 2.0 / ax_dx
                c = -0.5 / ax_dx
            else:
                dx1 = ax_dx[0]
                dx2 = ax_dx[1]
                a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
                b = (dx1 + dx2) / (dx1 * dx2)
                c = -dx1 / (dx2 * (dx1 + dx2))
            # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
            out[tuple(slice1)] = (
                a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
            )

            slice1[axis] = -1
            slice2[axis] = -3
            slice3[axis] = -2
            slice4[axis] = -1
            if uniform_spacing:
                a = 0.5 / ax_dx
                b = -2.0 / ax_dx
                c = 1.5 / ax_dx
            else:
                dx1 = ax_dx[-2]
                dx2 = ax_dx[-1]
                a = (dx2) / (dx1 * (dx1 + dx2))
                b = -(dx2 + dx1) / (dx1 * dx2)
                c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
            # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
            out[tuple(slice1)] = (
                a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
            )

        outvals.append(out)

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)

    if len_axes == 1:
        return outvals[0]
    else:
        return outvals
1724
+
1725
+
1726
+ # ### Type/shape etc queries ###
1727
+
1728
+
1729
def round(a: ArrayLike, decimals=0, out: Optional[OutArray] = None):
    """np.round: round to `decimals` places; integer input is returned as-is."""
    if a.is_floating_point():
        return torch.round(a, decimals=decimals)
    if a.is_complex():
        # RuntimeError: "round_cpu" not implemented for 'ComplexFloat';
        # round the real and imaginary parts separately.
        return torch.complex(
            torch.round(a.real, decimals=decimals),
            torch.round(a.imag, decimals=decimals),
        )
    # RuntimeError: "round_cpu" not implemented for 'int'; rounding an
    # integer tensor is a no-op anyway.
    return a
1742
+
1743
+
1744
# numpy spelling aliases: np.around and the legacy np.round_
around = round
round_ = round
1746
+
1747
+
1748
def real_if_close(a: ArrayLike, tol=100):
    """Return the real part when every imaginary part is (nearly) zero.

    Undocumented in numpy: tol <= 1 is an *absolute* tolerance, while tol > 1
    is relative, in units of the dtype epsilon
    (https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577).
    """
    if not torch.is_complex(a):
        return a
    threshold = tol * torch.finfo(a.dtype).eps if tol > 1 else tol
    return a.real if (torch.abs(a.imag) < threshold).all() else a
1759
+
1760
+
1761
def real(a: ArrayLike):
    """Real part of the input (np.real); real input passes through."""
    return torch.real(a)
1763
+
1764
+
1765
def imag(a: ArrayLike):
    """Imaginary part; same-shaped zeros for non-complex input (np.imag)."""
    return a.imag if a.is_complex() else torch.zeros_like(a)
1769
+
1770
+
1771
def iscomplex(x: ArrayLike):
    """Elementwise: True where the imaginary part is nonzero."""
    if not torch.is_complex(x):
        # real dtypes never have an imaginary component
        return torch.zeros_like(x, dtype=torch.bool)
    return x.imag != 0
1775
+
1776
+
1777
def isreal(x: ArrayLike):
    """Elementwise: True where the imaginary part is zero."""
    if not torch.is_complex(x):
        # real dtypes are real everywhere
        return torch.ones_like(x, dtype=torch.bool)
    return x.imag == 0
1781
+
1782
+
1783
def iscomplexobj(x: ArrayLike):
    """True when the dtype is complex, regardless of the values."""
    return torch.is_complex(x)
1785
+
1786
+
1787
def isrealobj(x: ArrayLike):
    """True when the dtype is not complex, regardless of the values."""
    return not torch.is_complex(x)
1789
+
1790
+
1791
def isneginf(x: ArrayLike, out: Optional[OutArray] = None):
    """Elementwise test for -inf (`out` is applied by the normalization layer)."""
    return torch.isneginf(x)
1793
+
1794
+
1795
def isposinf(x: ArrayLike, out: Optional[OutArray] = None):
    """Elementwise test for +inf (`out` is applied by the normalization layer)."""
    return torch.isposinf(x)
1797
+
1798
+
1799
+ def i0(x: ArrayLike):
1800
+ return torch.special.i0(x)
1801
+
1802
+
1803
def isscalar(a):
    """Loose np.isscalar: True iff `a` converts to a one-element array."""
    # We need to use normalize_array_like, but we don't want to export it in funcs.py
    from ._normalizations import normalize_array_like

    try:
        return normalize_array_like(a).numel() == 1
    except Exception:
        # anything that cannot be converted at all is not a scalar
        return False
1812
+
1813
+
1814
+ # ### Filter windows ###
1815
+
1816
+
1817
def hamming(M):
    """Hamming window of length M (periodic=False gives the symmetric window)."""
    dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.hamming_window(M, periodic=False, dtype=dtype)


def hanning(M):
    """Hann window of length M, matching np.hanning."""
    dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.hann_window(M, periodic=False, dtype=dtype)


def kaiser(M, beta):
    """Kaiser window of length M with shape parameter beta."""
    dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype)


def blackman(M):
    """Blackman window of length M, matching np.blackman."""
    dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.blackman_window(M, periodic=False, dtype=dtype)


def bartlett(M):
    """Bartlett (triangular) window of length M, matching np.bartlett."""
    dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.bartlett_window(M, periodic=False, dtype=dtype)
1840
+
1841
+
1842
+ # ### Dtype routines ###
1843
+
1844
+ # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666
1845
+
1846
+
1847
# Float/complex dtypes ordered by increasing precision: row 0 holds the float
# types, row 1 the matching complex types (float16 has no complex counterpart,
# hence the None).
array_type = [
    [torch.float16, torch.float32, torch.float64],
    [None, torch.complex64, torch.complex128],
]
# Precision rank of each dtype; used as a column index into `array_type`.
array_precision = {
    torch.float16: 0,
    torch.float32: 1,
    torch.float64: 2,
    torch.complex64: 1,
    torch.complex128: 2,
}
1858
+
1859
+
1860
def common_type(*tensors: ArrayLike):
    """Return a float/complex dtype common to all inputs (np.common_type).

    Non-float inputs count as double precision; a single complex input makes
    the result complex.
    """
    is_complex = False
    precision = 0
    for a in tensors:
        t = a.dtype
        if iscomplexobj(a):
            is_complex = True
        if not (t.is_floating_point or t.is_complex):
            p = 2  # array_precision[_nx.double]
        else:
            p = array_precision.get(t, None)
            if p is None:
                raise TypeError("can't get common type for non-numeric array")
        # builtins.max: avoid any module-level `max` shadowing the builtin
        precision = builtins.max(precision, p)
    if is_complex:
        return array_type[1][precision]
    else:
        return array_type[0][precision]
1878
+
1879
+
1880
+ # ### histograms ###
1881
+
1882
+
1883
def histogram(
    a: ArrayLike,
    bins: ArrayLike = 10,
    range=None,
    normed=None,
    weights: Optional[ArrayLike] = None,
    density=None,
):
    """np.histogram via torch.histogram.

    Integer inputs are computed in double and the bin edges cast back to
    integer afterwards; integer-weighted counts come back as long tensors.
    `normed` is rejected (deprecated in numpy in favor of `density`).
    """
    if normed is not None:
        raise ValueError("normed argument is deprecated, use density= instead")

    is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex)
    is_w_int = weights is None or not weights.dtype.is_floating_point
    if is_a_int:
        # torch.histogram wants floating point input
        a = a.double()

    if weights is not None:
        weights = _util.cast_if_needed(weights, a.dtype)

    if isinstance(bins, torch.Tensor):
        if bins.ndim == 0:
            # bins was a single int
            bins = operator.index(bins)
        else:
            bins = _util.cast_if_needed(bins, a.dtype)

    if range is None:
        h, b = torch.histogram(a, bins, weight=weights, density=bool(density))
    else:
        h, b = torch.histogram(
            a, bins, range=range, weight=weights, density=bool(density)
        )

    # restore integer dtypes that were widened above
    if not density and is_w_int:
        h = h.long()
    if is_a_int:
        b = b.long()

    return h, b
1922
+
1923
+
1924
def histogram2d(
    x,
    y,
    bins=10,
    range: Optional[ArrayLike] = None,
    normed=None,
    weights: Optional[ArrayLike] = None,
    density=None,
):
    """2D histogram of paired samples, delegating to histogramdd.

    Returns (hist, xedges, yedges).
    """
    # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/twodim_base.py#L655-L821
    if len(x) != len(y):
        raise ValueError("x and y must have the same length.")

    try:
        N = len(bins)
    except TypeError:
        # bins is a bare int
        N = 1

    if N != 1 and N != 2:
        # a single array of edges: reuse it for both dimensions
        bins = [bins, bins]

    h, e = histogramdd((x, y), bins, range, normed, weights, density)

    return h, e[0], e[1]
1948
+
1949
+
1950
def histogramdd(
    sample,
    bins=10,
    range: Optional[ArrayLike] = None,
    normed=None,
    weights: Optional[ArrayLike] = None,
    density=None,
):
    """N-dimensional histogram via torch.histogramdd.

    Returns (hist, bin_edges_per_dimension). `normed` is rejected in favor of
    `density`, matching numpy's deprecation.
    """
    # have to normalize manually because `sample` interpretation differs
    # for a list of lists and a 2D array
    if normed is not None:
        raise ValueError("normed argument is deprecated, use density= instead")

    from ._normalizations import normalize_array_like, normalize_seq_array_like

    if isinstance(sample, (list, tuple)):
        # sequence-of-coordinate-arrays form: transpose into (npoints, ndim)
        sample = normalize_array_like(sample).T
    else:
        sample = normalize_array_like(sample)

    sample = torch.atleast_2d(sample)

    if not (sample.dtype.is_floating_point or sample.dtype.is_complex):
        # torch.histogramdd wants floating point samples
        sample = sample.double()

    # bins is either an int, or a sequence of ints or a sequence of arrays
    bins_is_array = not (
        isinstance(bins, int) or builtins.all(isinstance(b, int) for b in bins)
    )
    if bins_is_array:
        bins = normalize_seq_array_like(bins)
        # remember the original edge dtypes so they can be restored below
        bins_dtypes = [b.dtype for b in bins]
        bins = [_util.cast_if_needed(b, sample.dtype) for b in bins]

    if range is not None:
        # torch wants a flat [min0, max0, min1, max1, ...] list
        range = range.flatten().tolist()

    if weights is not None:
        # range=... is required : interleave min and max values per dimension
        mm = sample.aminmax(dim=0)
        range = torch.cat(mm).reshape(2, -1).T.flatten()
        range = tuple(range.tolist())
        weights = _util.cast_if_needed(weights, sample.dtype)
        w_kwd = {"weight": weights}
    else:
        w_kwd = {}

    h, b = torch.histogramdd(sample, bins, range, density=bool(density), **w_kwd)

    if bins_is_array:
        # cast the computed edges back to the caller-supplied dtypes
        b = [_util.cast_if_needed(bb, dtyp) for bb, dtyp in zip(b, bins_dtypes)]

    return h, b
2003
+
2004
+
2005
+ # ### odds and ends
2006
+
2007
+
2008
def min_scalar_type(a: ArrayLike, /):
    """Smallest dtype that can hold the (scalar) value of `a` (np.min_scalar_type).

    Multi-element arrays return their own dtype unchanged. For scalars, the
    dtype kind (bool/complex/float/int) is preserved and only the size is
    minimized; unsigned integers are preferred, as in numpy.
    """
    # https://github.com/numpy/numpy/blob/maintenance/1.24.x/numpy/core/src/multiarray/convert_datatype.c#L1288

    from ._dtypes import DType

    if a.numel() > 1:
        # numpy docs: "For non-scalar array a, returns the vector's dtype unmodified."
        return DType(a.dtype)

    if a.dtype == torch.bool:
        dtype = torch.bool

    elif a.dtype.is_complex:
        fi = torch.finfo(torch.float32)
        # both real and imaginary parts must fit into single precision
        fits_in_single = a.dtype == torch.complex64 or (
            fi.min <= a.real <= fi.max and fi.min <= a.imag <= fi.max
        )
        dtype = torch.complex64 if fits_in_single else torch.complex128

    elif a.dtype.is_floating_point:
        # narrowest float whose range contains the value
        for dt in [torch.float16, torch.float32, torch.float64]:
            fi = torch.finfo(dt)
            if fi.min <= a <= fi.max:
                dtype = dt
                break
    else:
        # must be integer
        for dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
            # Prefer unsigned int where possible, as numpy does.
            ii = torch.iinfo(dt)
            if ii.min <= a <= ii.max:
                dtype = dt
                break

    return DType(dtype)
2043
+
2044
+
2045
def pad(array: ArrayLike, pad_width: ArrayLike, mode="constant", **kwargs):
    """np.pad, constant mode only; fill value comes from `constant_values`."""
    if mode != "constant":
        raise NotImplementedError
    value = kwargs.get("constant_values", 0)
    # `value` must be a python scalar for torch.nn.functional.pad
    typ = _dtypes_impl.python_type_for_torch(array.dtype)
    value = typ(value)

    # numpy gives (before, after) per axis, first axis first; torch.nn.functional.pad
    # wants a flat tuple ordered last axis first — broadcast, reverse, flatten.
    pad_width = torch.broadcast_to(pad_width, (array.ndim, 2))
    pad_width = torch.flip(pad_width, (0,)).flatten()

    return torch.nn.functional.pad(array, tuple(pad_width), value=value)
llava_next/lib/python3.10/site-packages/torch/_numpy/_getlimits.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from . import _dtypes
4
+
5
+
6
def finfo(dtyp):
    """Machine limits of a floating-point dtype spec, via torch.finfo."""
    return torch.finfo(_dtypes.dtype(dtyp).torch_dtype)
9
+
10
+
11
def iinfo(dtyp):
    """Machine limits of an integer dtype spec, via torch.iinfo."""
    return torch.iinfo(_dtypes.dtype(dtyp).torch_dtype)
llava_next/lib/python3.10/site-packages/torch/_numpy/_normalizations.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
2
+ """
3
+ from __future__ import annotations
4
+
5
+ import functools
6
+ import inspect
7
+ import operator
8
+ import typing
9
+
10
+ import torch
11
+
12
+ from . import _dtypes, _dtypes_impl, _util
13
+
14
+ ArrayLike = typing.TypeVar("ArrayLike")
15
+ Scalar = typing.Union[int, float, complex, bool]
16
+ ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar]
17
+
18
+ DTypeLike = typing.TypeVar("DTypeLike")
19
+ AxisLike = typing.TypeVar("AxisLike")
20
+ NDArray = typing.TypeVar("NDarray")
21
+ CastingModes = typing.TypeVar("CastingModes")
22
+ KeepDims = typing.TypeVar("KeepDims")
23
+
24
+ # OutArray is to annotate the out= array argument.
25
+ #
26
+ # This one is special is several respects:
27
+ # First, It needs to be an NDArray, and we need to preserve the `result is out`
28
+ # semantics. Therefore, we cannot just extract the Tensor from the out array.
29
+ # So we never pass the out array to implementer functions and handle it in the
30
+ # `normalizer` below.
31
+ # Second, the out= argument can be either keyword or positional argument, and
32
+ # as a positional arg, it can be anywhere in the signature.
33
+ # To handle all this, we define a special `OutArray` annotation and dispatch on it.
34
+ #
35
+ OutArray = typing.TypeVar("OutArray")
36
+
37
+ try:
38
+ from typing import NotImplementedType
39
+ except ImportError:
40
+ NotImplementedType = typing.TypeVar("NotImplementedType")
41
+
42
+
43
def normalize_array_like(x, parm=None):
    """Convert any array-like argument to a bare torch.Tensor."""
    from ._ndarray import asarray

    return asarray(x).tensor
47
+
48
+
49
def normalize_array_like_or_scalar(x, parm=None):
    """Pass python scalars through untouched; tensor-ify everything else."""
    if type(x) in _dtypes_impl.SCALAR_TYPES:
        return x
    return normalize_array_like(x, parm)
53
+
54
+
55
def normalize_optional_array_like(x, parm=None):
    """Like normalize_array_like, but None passes through.

    This explicit normalizer is needed because otherwise normalize_array_like
    does not run for a parameter annotated as Optional[ArrayLike].
    """
    return None if x is None else normalize_array_like(x, parm)
59
+
60
+
61
def normalize_seq_array_like(x, parm=None):
    """Normalize every element of a sequence of array-likes into a tuple."""
    return tuple(normalize_array_like(item) for item in x)
63
+
64
+
65
def normalize_dtype(dtype, parm=None):
    """Map a numpy-style dtype spec to the matching torch.dtype (None -> None)."""
    # cf _decorators.dtype_to_torch
    if dtype is None:
        return None
    return _dtypes.dtype(dtype).torch_dtype
72
+
73
+
74
def normalize_not_implemented(arg, parm):
    # Reject any value other than the parameter's declared default: the
    # argument exists for NumPy API compatibility but is not supported.
    if arg == parm.default:
        return None
    raise NotImplementedError(f"'{parm.name}' parameter is not supported.")
77
+
78
+
79
def normalize_axis_like(arg, parm=None):
    # A 0-d integer ndarray used as an axis is converted to a Python int.
    from ._ndarray import ndarray

    return operator.index(arg) if isinstance(arg, ndarray) else arg
85
+
86
+
87
def normalize_ndarray(arg, parm=None):
    # Accept None or an ndarray; hand the backing tensor to the implementer.
    if arg is None:
        return None

    from ._ndarray import ndarray

    if isinstance(arg, ndarray):
        return arg.tensor
    raise TypeError(f"'{parm.name}' must be an array")
97
+
98
+
99
def normalize_outarray(arg, parm=None):
    # Like normalize_ndarray, but keep the ndarray wrapper: the out= argument
    # must preserve the `result is out` identity, so we never unwrap it here.
    if arg is None:
        return None

    from ._ndarray import ndarray

    if isinstance(arg, ndarray):
        return arg
    raise TypeError(f"'{parm.name}' must be an array")
109
+
110
+
111
def normalize_casting(arg, parm=None):
    # Validate a NumPy casting-mode string; the value is returned unchanged.
    valid_modes = ("no", "equiv", "safe", "same_kind", "unsafe")
    if arg not in valid_modes:
        raise ValueError(
            f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')"
        )
    return arg
117
+
118
+
119
# Dispatch table: annotation *strings* (as produced under
# `from __future__ import annotations`) -> normalization function.
normalizers = {
    "ArrayLike": normalize_array_like,
    "Union[ArrayLike, Scalar]": normalize_array_like_or_scalar,
    "Optional[ArrayLike]": normalize_optional_array_like,
    "Sequence[ArrayLike]": normalize_seq_array_like,
    "Optional[NDArray]": normalize_ndarray,
    "Optional[OutArray]": normalize_outarray,
    "NDArray": normalize_ndarray,
    "Optional[DTypeLike]": normalize_dtype,
    "AxisLike": normalize_axis_like,
    "NotImplementedType": normalize_not_implemented,
    "Optional[CastingModes]": normalize_casting,
}
132
+
133
+
134
def maybe_normalize(arg, parm):
    """Normalize arg if a normalizer is registered for its annotation."""
    handler = normalizers.get(parm.annotation, None)
    if handler is None:
        return arg
    return handler(arg, parm)
138
+
139
+
140
+ # ### Return value helpers ###
141
+
142
+
143
def maybe_copy_to(out, result, promote_scalar_result=False):
    """Implement NumPy's out= semantics: copy `result` into `out` and return it.

    `out` is either an ndarray or None; tuples/lists of results recurse
    element-wise.
    """
    if out is None:
        return result

    if isinstance(result, torch.Tensor):
        if result.shape != out.shape:
            # A one-element result may be squeezed into a 0-d out array.
            can_fit = result.numel() == 1 and out.ndim == 0
            if not (promote_scalar_result and can_fit):
                raise ValueError(
                    f"Bad size of the out array: out.shape = {out.shape}"
                    f" while result.shape = {result.shape}."
                )
            result = result.squeeze()
        out.tensor.copy_(result)
        return out

    if isinstance(result, (tuple, list)):
        return type(result)(
            maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result)
        )

    raise AssertionError()  # We should never hit this path
165
+
166
+
167
def wrap_tensors(result):
    # Recursively wrap torch.Tensors into ndarrays; other values pass through.
    from ._ndarray import ndarray

    if isinstance(result, torch.Tensor):
        return ndarray(result)
    if isinstance(result, (tuple, list)):
        return type(result)(wrap_tensors(x) for x in result)
    return result
175
+
176
+
177
def array_or_scalar(values, py_type=float, return_scalar=False):
    # Either unwrap a 0-d tensor into a Python scalar or wrap it as an ndarray.
    if not return_scalar:
        from ._ndarray import ndarray

        return ndarray(values)
    return py_type(values.item())
184
+
185
+
186
+ # ### The main decorator to normalize arguments / postprocess the output ###
187
+
188
+
189
def normalizer(_func=None, *, promote_scalar_result=False):
    """Decorator: normalize annotated args, then post-process the result.

    Implements the `KeepDims` and `out=` protocols described above. Usable
    both bare (``@normalizer``) and with arguments
    (``@normalizer(promote_scalar_result=True)``).
    """

    def normalizer_inner(func):
        @functools.wraps(func)
        def wrapped(*args, **kwds):
            sig = inspect.signature(func)
            params = sig.parameters
            first_param = next(iter(params.values()))

            if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
                # NumPy's API does not have positional args before variadic
                # positional args, so normalize everything against *args.
                args = [maybe_normalize(arg, first_param) for arg in args]
            else:
                # NB: extra unknown arguments pass through untouched and will
                # raise in func(*args) below.
                normalized = tuple(
                    maybe_normalize(arg, parm)
                    for arg, parm in zip(args, params.values())
                )
                args = normalized + args[len(params.values()) :]

            kwds = {
                name: maybe_normalize(arg, params[name]) if name in params else arg
                for name, arg in kwds.items()
            }
            result = func(*args, **kwds)

            # keepdims handling: the reduction itself runs without keepdims,
            # then the reduced axes are re-inserted here.
            bound_args = None
            if "keepdims" in params and params["keepdims"].annotation == "KeepDims":
                # keepdims can be in any position so we need sig.bind
                bound_args = sig.bind(*args, **kwds).arguments
                if bound_args.get("keepdims", False):
                    # The first arg is the input tensor and the second arg is
                    # (optionally) the axis.
                    tensor = args[0]
                    axis = bound_args.get("axis")
                    result = _util.apply_keepdims(result, axis, tensor.ndim)

            # out= handling: copy into the caller's array, preserving identity.
            if "out" in params:
                # out can be in any position so we need sig.bind
                if bound_args is None:
                    bound_args = sig.bind(*args, **kwds).arguments
                result = maybe_copy_to(
                    bound_args.get("out"), result, promote_scalar_result
                )

            return wrap_tensors(result)

        return wrapped

    return normalizer_inner if _func is None else normalizer_inner(_func)
llava_next/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Implementation of reduction operations, to be wrapped into arrays, dtypes etc
2
+ in the 'public' layer.
3
+
4
+ Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance etc
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import functools
9
+ from typing import Optional
10
+
11
+ import torch
12
+
13
+ from . import _dtypes_impl, _util
14
+ from ._normalizations import (
15
+ ArrayLike,
16
+ AxisLike,
17
+ DTypeLike,
18
+ KeepDims,
19
+ NotImplementedType,
20
+ OutArray,
21
+ )
22
+
23
+
24
+ def _deco_axis_expand(func):
25
+ """
26
+ Generically handle axis arguments in reductions.
27
+ axis is *always* the 2nd arg in the funciton so no need to have a look at its signature
28
+ """
29
+
30
+ @functools.wraps(func)
31
+ def wrapped(a, axis=None, *args, **kwds):
32
+ if axis is not None:
33
+ axis = _util.normalize_axis_tuple(axis, a.ndim)
34
+
35
+ if axis == ():
36
+ # So we insert a length-one axis and run the reduction along it.
37
+ # We cannot return a.clone() as this would sidestep the checks inside the function
38
+ newshape = _util.expand_shape(a.shape, axis=0)
39
+ a = a.reshape(newshape)
40
+ axis = (0,)
41
+
42
+ return func(a, axis, *args, **kwds)
43
+
44
+ return wrapped
45
+
46
+
47
+ def _atleast_float(dtype, other_dtype):
48
+ """Return a dtype that is real or complex floating-point.
49
+
50
+ For inputs that are boolean or integer dtypes, this returns the default
51
+ float dtype; inputs that are complex get converted to the default complex
52
+ dtype; real floating-point dtypes (`float*`) get passed through unchanged
53
+ """
54
+ if dtype is None:
55
+ dtype = other_dtype
56
+ if not (dtype.is_floating_point or dtype.is_complex):
57
+ return _dtypes_impl.default_dtypes().float_dtype
58
+ return dtype
59
+
60
+
61
@_deco_axis_expand
def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False):
    # Number of non-zero entries, over the whole array or along `axis`.
    # (keepdims is applied by the `normalizer` machinery, not here.)
    return torch.count_nonzero(a, dim=axis)
64
+
65
+
66
@_deco_axis_expand
def argmax(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    *,
    keepdims: KeepDims = False,
):
    # Index of the maximum (over the flattened array when axis is None).
    axis = _util.allow_only_single_axis(axis)

    if a.dtype == torch.bool:
        # RuntimeError: "argmax_cpu" not implemented for 'Bool'
        a = a.to(torch.uint8)

    return a.argmax(axis)
81
+
82
+
83
@_deco_axis_expand
def argmin(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    *,
    keepdims: KeepDims = False,
):
    # Index of the minimum (over the flattened array when axis is None).
    axis = _util.allow_only_single_axis(axis)

    if a.dtype == torch.bool:
        # RuntimeError: "argmin_cpu" not implemented for 'Bool'
        a = a.to(torch.uint8)

    return a.argmin(axis)
98
+
99
+
100
@_deco_axis_expand
def any(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    # True if any element (along `axis`) is truthy.
    axis = _util.allow_only_single_axis(axis)
    if axis is None:
        return torch.any(a)
    return torch.any(a, dim=axis)
112
+
113
+
114
@_deco_axis_expand
def all(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    # True if every element (along `axis`) is truthy.
    axis = _util.allow_only_single_axis(axis)
    if axis is None:
        return torch.all(a)
    return torch.all(a, dim=axis)
126
+
127
+
128
@_deco_axis_expand
def amax(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    # Maximum along the given axis (all axes when axis is None).
    result = a.amax(axis)
    return result


# NumPy alias
max = amax
141
+
142
+
143
@_deco_axis_expand
def amin(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    # Minimum along the given axis (all axes when axis is None).
    result = a.amin(axis)
    return result


# NumPy alias
min = amin
156
+
157
+
158
@_deco_axis_expand
def ptp(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
):
    # Peak-to-peak: maximum minus minimum along the axis.
    lo = a.amin(axis)
    hi = a.amax(axis)
    return hi - lo
166
+
167
+
168
@_deco_axis_expand
def sum(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    # Sum along `axis`; bool sums accumulate into the default int dtype.
    assert dtype is None or isinstance(dtype, torch.dtype)

    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype

    if axis is None:
        return a.sum(dtype=dtype)
    return a.sum(dim=axis, dtype=dtype)
185
+
186
+
187
@_deco_axis_expand
def prod(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    # Product along a single axis; bool products use the default int dtype.
    axis = _util.allow_only_single_axis(axis)

    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype

    if axis is None:
        return a.prod(dtype=dtype)
    return a.prod(dim=axis, dtype=dtype)


# NumPy alias
product = prod
207
+
208
+
209
@_deco_axis_expand
def mean(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    # NumPy computes means of integer/bool arrays in floating point.
    dtype = _atleast_float(dtype, a.dtype)

    if axis is None:
        return a.mean(dtype=dtype)
    return a.mean(dim=axis, dtype=dtype)
225
+
226
+
227
@_deco_axis_expand
def std(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    ddof=0,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    # Compute in (at least) float precision, then cast back to the dtype the
    # caller explicitly requested, if any.
    requested_dtype = dtype
    work_dtype = _atleast_float(dtype, a.dtype)
    work = _util.cast_if_needed(a, work_dtype)
    result = work.std(dim=axis, correction=ddof)
    return _util.cast_if_needed(result, requested_dtype)
243
+
244
+
245
@_deco_axis_expand
def var(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    ddof=0,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    # Compute in (at least) float precision, then cast back to the dtype the
    # caller explicitly requested, if any.
    requested_dtype = dtype
    work_dtype = _atleast_float(dtype, a.dtype)
    work = _util.cast_if_needed(a, work_dtype)
    result = work.var(dim=axis, correction=ddof)
    return _util.cast_if_needed(result, requested_dtype)
261
+
262
+
263
+ # cumsum / cumprod are almost reductions:
264
+ # 1. no keepdims
265
+ # 2. axis=None flattens
266
+
267
+
268
def cumsum(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    # Almost a reduction: no keepdims; axis=None flattens.
    # Bool input accumulates into the default integer dtype (NumPy semantics).
    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype
    elif dtype is None:
        dtype = a.dtype

    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)

    return a.cumsum(axis=axis, dtype=dtype)
283
+
284
+
285
def cumprod(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    # Almost a reduction: no keepdims; axis=None flattens.
    # Bool input accumulates into the default integer dtype (NumPy semantics).
    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype
    elif dtype is None:
        dtype = a.dtype

    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)

    return a.cumprod(axis=axis, dtype=dtype)


# NumPy alias
cumproduct = cumprod
303
+
304
+
305
def average(
    a: ArrayLike,
    axis=None,
    weights: ArrayLike = None,
    returned=False,
    *,
    keepdims=False,
):
    """Weighted mean of `a`; optionally also return the sum of weights."""
    if weights is None:
        # Plain mean: the "sum of weights" is the element count per slot.
        result = mean(a, axis=axis)
        wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype)
    else:
        if not a.dtype.is_floating_point:
            a = a.double()

        # Weights must either match a's shape or be 1D along `axis`.
        if a.shape != weights.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights " "differ."
                )
            if weights.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ."
                )
            if weights.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis."
                )

            # Broadcast the 1D weights so they line up with `axis` of `a`.
            weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape)
            weights = weights.swapaxes(-1, axis)

        # Weighted mean = sum(a * w) / sum(w), in the common result dtype.
        result_dtype = _dtypes_impl.result_type_impl(a, weights)
        numerator = sum(a * weights, axis, dtype=result_dtype)
        wsum = sum(weights, axis, dtype=result_dtype)
        result = numerator / wsum

    # keepdims is processed manually because the normalizer decorator does not
    # deal with variadic returns.
    if keepdims:
        result = _util.apply_keepdims(result, axis, a.ndim)

    if not returned:
        return result
    if wsum.shape != result.shape:
        wsum = torch.broadcast_to(wsum, result.shape).clone()
    return result, wsum
355
+
356
+
357
+ # Not using deco_axis_expand as it assumes that axis is the second arg
358
def quantile(
    a: ArrayLike,
    q: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    method="linear",
    keepdims: KeepDims = False,
    *,
    interpolation: NotImplementedType = None,
):
    # NumPy documents that `overwrite_input` MAY modify inputs:
    # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile
    # We always work out-of-place, which is a valid implementation of "may".

    if not a.dtype.is_floating_point:
        a = a.to(_dtypes_impl.default_dtypes().float_dtype)

    # edge case: torch.quantile only supports float32 and float64
    if a.dtype == torch.float16:
        a = a.to(torch.float32)

    if axis is None:
        a = a.flatten()
        q = q.flatten()
        axis = (0,)
    else:
        axis = _util.normalize_axis_tuple(axis, a.ndim)

    # torch.quantile takes a single dim, not a tuple; the NumPy tuple-of-axes
    # behaviour is not implemented (low priority).
    axis = _util.allow_only_single_axis(axis)

    q = _util.cast_if_needed(q, a.dtype)

    return torch.quantile(a, q, axis=axis, interpolation=method)
399
+
400
+
401
def percentile(
    a: ArrayLike,
    q: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    method="linear",
    keepdims: KeepDims = False,
    *,
    interpolation: NotImplementedType = None,
):
    # Percentiles are just quantiles with q scaled from [0, 100] to [0, 1].
    return quantile(
        a,
        q / 100.0,
        axis=axis,
        overwrite_input=overwrite_input,
        method=method,
        keepdims=keepdims,
        interpolation=interpolation,
    )
421
+
422
+
423
def median(
    a: ArrayLike,
    axis=None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    keepdims: KeepDims = False,
):
    # The median is the 0.5 quantile.
    half = torch.as_tensor(0.5)
    return quantile(
        a,
        half,
        axis=axis,
        overwrite_input=overwrite_input,
        out=out,
        keepdims=keepdims,
    )
llava_next/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Export torch work functions for unary ufuncs, rename/tweak to match numpy.
2
+ This listing is further exported to public symbols in the `_numpy/_ufuncs.py` module.
3
+ """
4
+
5
+ import torch
6
+
7
+ from torch import ( # noqa: F401
8
+ absolute as fabs, # noqa: F401
9
+ arccos, # noqa: F401
10
+ arccosh, # noqa: F401
11
+ arcsin, # noqa: F401
12
+ arcsinh, # noqa: F401
13
+ arctan, # noqa: F401
14
+ arctanh, # noqa: F401
15
+ bitwise_not, # noqa: F401
16
+ bitwise_not as invert, # noqa: F401
17
+ ceil, # noqa: F401
18
+ conj_physical as conjugate, # noqa: F401
19
+ cos, # noqa: F401
20
+ cosh, # noqa: F401
21
+ deg2rad, # noqa: F401
22
+ deg2rad as radians, # noqa: F401
23
+ exp, # noqa: F401
24
+ exp2, # noqa: F401
25
+ expm1, # noqa: F401
26
+ floor, # noqa: F401
27
+ isfinite, # noqa: F401
28
+ isinf, # noqa: F401
29
+ isnan, # noqa: F401
30
+ log, # noqa: F401
31
+ log10, # noqa: F401
32
+ log1p, # noqa: F401
33
+ log2, # noqa: F401
34
+ logical_not, # noqa: F401
35
+ negative, # noqa: F401
36
+ rad2deg, # noqa: F401
37
+ rad2deg as degrees, # noqa: F401
38
+ reciprocal, # noqa: F401
39
+ round as fix, # noqa: F401
40
+ round as rint, # noqa: F401
41
+ sign, # noqa: F401
42
+ signbit, # noqa: F401
43
+ sin, # noqa: F401
44
+ sinh, # noqa: F401
45
+ sqrt, # noqa: F401
46
+ square, # noqa: F401
47
+ tan, # noqa: F401
48
+ tanh, # noqa: F401
49
+ trunc, # noqa: F401
50
+ )
51
+
52
+
53
+ # special cases: torch does not export these names
54
def cbrt(x):
    """Real cube root, matching NumPy: negative inputs yield negative roots."""
    # torch.pow(x, 1/3) returns NaN for negative reals, whereas np.cbrt(-8)
    # is -2; route the magnitude through pow and restore the sign.
    return torch.sign(x) * torch.pow(torch.abs(x), 1 / 3)
56
+
57
+
58
def positive(x):
    # Numerical positive: the unary + operator applied element-wise.
    return +x
60
+
61
+
62
def absolute(x):
    # torch.absolute is not implemented for bools; |bool| is the identity.
    if x.dtype == torch.bool:
        return x
    return torch.absolute(x)
67
+
68
+
69
+ # TODO set __name__ and __qualname__
70
+ abs = absolute
71
+ conj = conjugate
llava_next/lib/python3.10/site-packages/torch/_numpy/fft.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+
5
+ import torch
6
+
7
+ from . import _dtypes_impl, _util
8
+ from ._normalizations import ArrayLike, normalizer
9
+
10
+
11
def upcast(func):
    """NumPy fft casts inputs to 64 bit and *returns 64-bit results*."""

    @functools.wraps(func)
    def wrapped(tensor, *args, **kwds):
        # Complex inputs upcast to the default complex dtype, real inputs to
        # the default float dtype.
        if tensor.is_complex():
            target_dtype = _dtypes_impl.default_dtypes().complex_dtype
        else:
            target_dtype = _dtypes_impl.default_dtypes().float_dtype
        tensor = _util.cast_if_needed(tensor, target_dtype)
        return func(tensor, *args, **kwds)

    return wrapped
25
+
26
+
27
@normalizer
@upcast
def fft(a: ArrayLike, n=None, axis=-1, norm=None):
    # 1D discrete Fourier transform.
    result = torch.fft.fft(a, n, dim=axis, norm=norm)
    return result
31
+
32
+
33
@normalizer
@upcast
def ifft(a: ArrayLike, n=None, axis=-1, norm=None):
    # Inverse 1D discrete Fourier transform.
    result = torch.fft.ifft(a, n, dim=axis, norm=norm)
    return result
37
+
38
+
39
@normalizer
@upcast
def rfft(a: ArrayLike, n=None, axis=-1, norm=None):
    # 1D FFT of real input.
    result = torch.fft.rfft(a, n, dim=axis, norm=norm)
    return result
43
+
44
+
45
@normalizer
@upcast
def irfft(a: ArrayLike, n=None, axis=-1, norm=None):
    # Inverse of rfft.
    result = torch.fft.irfft(a, n, dim=axis, norm=norm)
    return result
49
+
50
+
51
@normalizer
@upcast
def fftn(a: ArrayLike, s=None, axes=None, norm=None):
    # N-dimensional discrete Fourier transform.
    result = torch.fft.fftn(a, s, dim=axes, norm=norm)
    return result
55
+
56
+
57
@normalizer
@upcast
def ifftn(a: ArrayLike, s=None, axes=None, norm=None):
    # Inverse N-dimensional discrete Fourier transform.
    result = torch.fft.ifftn(a, s, dim=axes, norm=norm)
    return result
61
+
62
+
63
@normalizer
@upcast
def rfftn(a: ArrayLike, s=None, axes=None, norm=None):
    # N-dimensional FFT of real input.
    result = torch.fft.rfftn(a, s, dim=axes, norm=norm)
    return result
67
+
68
+
69
@normalizer
@upcast
def irfftn(a: ArrayLike, s=None, axes=None, norm=None):
    # Inverse of rfftn.
    result = torch.fft.irfftn(a, s, dim=axes, norm=norm)
    return result
73
+
74
+
75
@normalizer
@upcast
def fft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    # 2D discrete Fourier transform.
    result = torch.fft.fft2(a, s, dim=axes, norm=norm)
    return result
79
+
80
+
81
@normalizer
@upcast
def ifft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    # Inverse 2D discrete Fourier transform.
    result = torch.fft.ifft2(a, s, dim=axes, norm=norm)
    return result
85
+
86
+
87
@normalizer
@upcast
def rfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    # 2D FFT of real input.
    result = torch.fft.rfft2(a, s, dim=axes, norm=norm)
    return result
91
+
92
+
93
@normalizer
@upcast
def irfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    # Inverse of rfft2.
    result = torch.fft.irfft2(a, s, dim=axes, norm=norm)
    return result
97
+
98
+
99
@normalizer
@upcast
def hfft(a: ArrayLike, n=None, axis=-1, norm=None):
    # FFT of a signal with Hermitian symmetry (real spectrum).
    result = torch.fft.hfft(a, n, dim=axis, norm=norm)
    return result
103
+
104
+
105
@normalizer
@upcast
def ihfft(a: ArrayLike, n=None, axis=-1, norm=None):
    # Inverse of hfft.
    result = torch.fft.ihfft(a, n, dim=axis, norm=norm)
    return result
109
+
110
+
111
@normalizer
def fftfreq(n, d=1.0):
    # Sample frequencies for a length-n FFT with sample spacing d.
    return torch.fft.fftfreq(n, d)
114
+
115
+
116
@normalizer
def rfftfreq(n, d=1.0):
    # Sample frequencies for rfft output of a length-n signal.
    return torch.fft.rfftfreq(n, d)
119
+
120
+
121
@normalizer
def fftshift(x: ArrayLike, axes=None):
    # Move the zero-frequency component to the center of the spectrum.
    return torch.fft.fftshift(x, axes)
124
+
125
+
126
@normalizer
def ifftshift(x: ArrayLike, axes=None):
    # Undo fftshift.
    return torch.fft.ifftshift(x, axes)
llava_next/lib/python3.10/site-packages/torch/_numpy/linalg.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+ import math
5
+ from typing import Sequence
6
+
7
+ import torch
8
+
9
+ from . import _dtypes_impl, _util
10
+ from ._normalizations import ArrayLike, KeepDims, normalizer
11
+
12
+
13
class LinAlgError(Exception):
    """NumPy-compatible exception raised when a torch.linalg routine fails."""
15
+
16
+
17
+ def _atleast_float_1(a):
18
+ if not (a.dtype.is_floating_point or a.dtype.is_complex):
19
+ a = a.to(_dtypes_impl.default_dtypes().float_dtype)
20
+ return a
21
+
22
+
23
def _atleast_float_2(a, b):
    # Cast both tensors to a common dtype that is at least floating point.
    dtyp = _dtypes_impl.result_type_impl(a, b)
    if not (dtyp.is_floating_point or dtyp.is_complex):
        dtyp = _dtypes_impl.default_dtypes().float_dtype

    return _util.cast_if_needed(a, dtyp), _util.cast_if_needed(b, dtyp)
31
+
32
+
33
def linalg_errors(func):
    """Decorator: translate torch._C._LinAlgError into this module's LinAlgError."""

    @functools.wraps(func)
    def wrapped(*args, **kwds):
        try:
            return func(*args, **kwds)
        except torch._C._LinAlgError as e:
            raise LinAlgError(*e.args)

    return wrapped
42
+
43
+
44
+ # ### Matrix and vector products ###
45
+
46
+
47
@normalizer
@linalg_errors
def matrix_power(a: ArrayLike, n):
    # Raise the (float-promoted) matrix to the integer power n.
    return torch.linalg.matrix_power(_atleast_float_1(a), n)
52
+
53
+
54
@normalizer
@linalg_errors
def multi_dot(inputs: Sequence[ArrayLike], *, out=None):
    # Chained matrix product with an optimized multiplication order.
    return torch.linalg.multi_dot(inputs)
58
+
59
+
60
+ # ### Solving equations and inverting matrices ###
61
+
62
+
63
@normalizer
@linalg_errors
def solve(a: ArrayLike, b: ArrayLike):
    # Solve a @ x = b after promoting both operands to a common float dtype.
    a, b = _atleast_float_2(a, b)
    return torch.linalg.solve(a, b)
68
+
69
+
70
@normalizer
@linalg_errors
def lstsq(a: ArrayLike, b: ArrayLike, rcond=None):
    a, b = _atleast_float_2(a, b)
    # NumPy uses gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991
    # CUDA only ships the `gels` driver, so choose by device.
    on_cuda = a.is_cuda or b.is_cuda
    driver = "gels" if on_cuda else "gelsd"
    return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
78
+
79
+
80
@normalizer
@linalg_errors
def inv(a: ArrayLike):
    # Matrix inverse, after float promotion.
    return torch.linalg.inv(_atleast_float_1(a))
86
+
87
+
88
@normalizer
@linalg_errors
def pinv(a: ArrayLike, rcond=1e-15, hermitian=False):
    # NumPy's rcond maps onto torch's rtol argument.
    return torch.linalg.pinv(_atleast_float_1(a), rtol=rcond, hermitian=hermitian)
93
+
94
+
95
@normalizer
@linalg_errors
def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None):
    # NumPy's axes= maps onto torch's dims= argument.
    a, b = _atleast_float_2(a, b)
    return torch.linalg.tensorsolve(a, b, dims=axes)
100
+
101
+
102
@normalizer
@linalg_errors
def tensorinv(a: ArrayLike, ind=2):
    # "Inverse" of an N-dimensional array w.r.t. tensordot over `ind` axes.
    return torch.linalg.tensorinv(_atleast_float_1(a), ind=ind)
107
+
108
+
109
+ # ### Norms and other numbers ###
110
+
111
+
112
@normalizer
@linalg_errors
def det(a: ArrayLike):
    # Determinant, after float promotion.
    return torch.linalg.det(_atleast_float_1(a))
117
+
118
+
119
@normalizer
@linalg_errors
def slogdet(a: ArrayLike):
    # Sign and log-magnitude of the determinant.
    return torch.linalg.slogdet(_atleast_float_1(a))
124
+
125
+
126
@normalizer
@linalg_errors
def cond(x: ArrayLike, p=None):
    x = _atleast_float_1(x)

    # Reject empty matrices, matching NumPy:
    # https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
    if x.numel() == 0 and math.prod(x.shape[-2:]) == 0:
        raise LinAlgError("cond is not defined on empty arrays")

    result = torch.linalg.cond(x, p=p)

    # NumPy converts nans to infs in a data-dependent way (depending on
    # whether the input array has nans); we do it unconditionally.
    return torch.where(torch.isnan(result), float("inf"), result)
142
+
143
+
144
@normalizer
@linalg_errors
def matrix_rank(a: ArrayLike, tol=None, hermitian=False):
    a = _atleast_float_1(a)

    if a.ndim < 2:
        # Scalars and 1D arrays: rank is 1 iff any entry is nonzero.
        return int((a != 0).any())

    if tol is None:
        # Default tolerance, following
        # https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
        atol = 0
        rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
    else:
        atol, rtol = tol, 0
    return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian)
159
+
160
+
161
@normalizer
@linalg_errors
def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False):
    # Matrix/vector norm; keepdims is applied by the normalizer machinery.
    return torch.linalg.norm(_atleast_float_1(x), ord=ord, dim=axis)
166
+
167
+
168
+ # ### Decompositions ###
169
+
170
+
171
@normalizer
@linalg_errors
def cholesky(a: ArrayLike):
    # Cholesky factorization, after float promotion.
    return torch.linalg.cholesky(_atleast_float_1(a))
176
+
177
+
178
@normalizer
@linalg_errors
def qr(a: ArrayLike, mode="reduced"):
    a = _atleast_float_1(a)
    result = torch.linalg.qr(a, mode=mode)
    # NumPy's mode="r" returns just the R factor rather than a named tuple.
    return result.R if mode == "r" else result
187
+
188
+
189
@normalizer
@linalg_errors
def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False):
    a = _atleast_float_1(a)
    if not compute_uv:
        return torch.linalg.svdvals(a)

    # NB: the hermitian= argument is ignored (no pytorch equivalent).
    return torch.linalg.svd(a, full_matrices=full_matrices)
199
+
200
+
201
+ # ### Eigenvalues and eigenvectors ###
202
+
203
+
204
@normalizer
@linalg_errors
def eig(a: ArrayLike):
    a = _atleast_float_1(a)
    w, vt = torch.linalg.eig(a)

    # Real inputs with purely real eigenvalues come back real, as in NumPy.
    if not a.is_complex() and w.is_complex() and (w.imag == 0).all():
        w = w.real
        vt = vt.real
    return w, vt
214
+
215
+
216
@normalizer
@linalg_errors
def eigh(a: ArrayLike, UPLO="L"):
    # Eigendecomposition of a Hermitian/symmetric matrix.
    return torch.linalg.eigh(_atleast_float_1(a), UPLO=UPLO)
221
+
222
+
223
@normalizer
@linalg_errors
def eigvals(a: ArrayLike):
    a = _atleast_float_1(a)
    result = torch.linalg.eigvals(a)
    # Real inputs with purely real eigenvalues come back real, as in NumPy.
    if not a.is_complex() and result.is_complex() and (result.imag == 0).all():
        result = result.real
    return result
231
+
232
+
233
@normalizer
@linalg_errors
def eigvalsh(a: ArrayLike, UPLO="L"):
    # Eigenvalues of a Hermitian/symmetric matrix.
    return torch.linalg.eigvalsh(_atleast_float_1(a), UPLO=UPLO)
llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc ADDED
Binary file (63 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/_numpy/testing/utils.py ADDED
@@ -0,0 +1,2381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility function to facilitate testing.
3
+
4
+ """
5
+ import contextlib
6
+ import gc
7
+ import operator
8
+ import os
9
+ import platform
10
+ import pprint
11
+ import re
12
+ import shutil
13
+ import sys
14
+ import warnings
15
+ from functools import wraps
16
+ from io import StringIO
17
+ from tempfile import mkdtemp, mkstemp
18
+ from warnings import WarningMessage
19
+
20
+ import torch._numpy as np
21
+ from torch._numpy import arange, asarray as asanyarray, empty, float32, intp, ndarray
22
+
23
+ __all__ = [
24
+ "assert_equal",
25
+ "assert_almost_equal",
26
+ "assert_approx_equal",
27
+ "assert_array_equal",
28
+ "assert_array_less",
29
+ "assert_string_equal",
30
+ "assert_",
31
+ "assert_array_almost_equal",
32
+ "build_err_msg",
33
+ "decorate_methods",
34
+ "print_assert_equal",
35
+ "verbose",
36
+ "assert_",
37
+ "assert_array_almost_equal_nulp",
38
+ "assert_raises_regex",
39
+ "assert_array_max_ulp",
40
+ "assert_warns",
41
+ "assert_no_warnings",
42
+ "assert_allclose",
43
+ "IgnoreException",
44
+ "clear_and_catch_warnings",
45
+ "temppath",
46
+ "tempdir",
47
+ "IS_PYPY",
48
+ "HAS_REFCOUNT",
49
+ "IS_WASM",
50
+ "suppress_warnings",
51
+ "assert_array_compare",
52
+ "assert_no_gc_cycles",
53
+ "break_cycles",
54
+ "IS_PYSTON",
55
+ ]
56
+
57
+
58
+ verbose = 0
59
+
60
+ IS_WASM = platform.machine() in ["wasm32", "wasm64"]
61
+ IS_PYPY = sys.implementation.name == "pypy"
62
+ IS_PYSTON = hasattr(sys, "pyston_version_info")
63
+ HAS_REFCOUNT = getattr(sys, "getrefcount", None) is not None and not IS_PYSTON
64
+
65
+
66
def assert_(val, msg=""):
    """
    Assert that also works in release (``-O``) mode.

    Unlike the built-in ``assert`` statement, no byte-code is elided under
    optimization, so this always raises on a falsy ``val``.  ``msg`` may be
    a callable, in which case building the message is deferred until the
    assertion actually fails.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    if val:
        return
    try:
        rendered = msg()
    except TypeError:
        # msg was not callable -- use it verbatim
        rendered = msg
    raise AssertionError(rendered)
84
+
85
+
86
+ def gisnan(x):
87
+ return np.isnan(x)
88
+
89
+
90
+ def gisfinite(x):
91
+ return np.isfinite(x)
92
+
93
+
94
+ def gisinf(x):
95
+ return np.isinf(x)
96
+
97
+
98
+ def build_err_msg(
99
+ arrays,
100
+ err_msg,
101
+ header="Items are not equal:",
102
+ verbose=True,
103
+ names=("ACTUAL", "DESIRED"),
104
+ precision=8,
105
+ ):
106
+ msg = ["\n" + header]
107
+ if err_msg:
108
+ if err_msg.find("\n") == -1 and len(err_msg) < 79 - len(header):
109
+ msg = [msg[0] + " " + err_msg]
110
+ else:
111
+ msg.append(err_msg)
112
+ if verbose:
113
+ for i, a in enumerate(arrays):
114
+ if isinstance(a, ndarray):
115
+ # precision argument is only needed if the objects are ndarrays
116
+ # r_func = partial(array_repr, precision=precision)
117
+ r_func = ndarray.__repr__
118
+ else:
119
+ r_func = repr
120
+
121
+ try:
122
+ r = r_func(a)
123
+ except Exception as exc:
124
+ r = f"[repr failed for <{type(a).__name__}>: {exc}]"
125
+ if r.count("\n") > 3:
126
+ r = "\n".join(r.splitlines()[:3])
127
+ r += "..."
128
+ msg.append(f" {names[i]}: {r}")
129
+ return "\n".join(msg)
130
+
131
+
132
def assert_equal(actual, desired, err_msg="", verbose=True):
    """
    Raises an AssertionError if two objects are not equal.

    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
    check that all elements of these objects are equal. An exception is raised
    at the first conflicting values.

    When one of `actual` and `desired` is a scalar and the other is array_like,
    the function checks that each element of the array_like object is equal to
    the scalar.

    This function handles NaN comparisons as if NaN was a "normal" number.
    That is, AssertionError is not raised if both objects have NaNs in the same
    positions. This is in contrast to the IEEE standard on NaNs, which says
    that NaN compared to anything must return False.

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal.

    Examples
    --------
    >>> np.testing.assert_equal([4,5], [4,6])
    Traceback (most recent call last):
        ...
    AssertionError:
    Items are not equal:
    item=1
     ACTUAL: 5
     DESIRED: 6

    The following comparison does not raise an exception. There are NaNs
    in the inputs, but they are in the same positions.

    >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])

    """
    __tracebackhide__ = True  # Hide traceback for py.test

    # None only compares equal to None: a single None is an immediate
    # mismatch, two Nones are equal.
    num_nones = sum([actual is None, desired is None])
    if num_nones == 1:
        raise AssertionError(f"Not equal: {actual} != {desired}")
    elif num_nones == 2:
        return True
    # else, carry on

    # dtype-like objects are compared directly with ==.
    if isinstance(actual, np.DType) or isinstance(desired, np.DType):
        result = actual == desired
        if not result:
            raise AssertionError(f"Not equal: {actual} != {desired}")
        else:
            return True

    # dicts: same length, same keys, then recurse value-by-value.
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in desired.keys():
            if k not in actual:
                raise AssertionError(repr(k))
            assert_equal(actual[k], desired[k], f"key={k!r}\n{err_msg}", verbose)
        return
    # lists/tuples: same length, then recurse item-by-item.
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in range(len(desired)):
            assert_equal(actual[k], desired[k], f"item={k!r}\n{err_msg}", verbose)
        return
    from torch._numpy import imag, iscomplexobj, isscalar, ndarray, real, signbit

    # ndarrays (on either side) are delegated to the array comparison.
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        return assert_array_equal(actual, desired, err_msg, verbose)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)

    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except (ValueError, TypeError):
        usecomplex = False

    if usecomplex:
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            assert_equal(actualr, desiredr)
            assert_equal(actuali, desiredi)
        except AssertionError:
            raise AssertionError(msg)

    # isscalar test to check cases such as [np.nan] != np.nan
    if isscalar(desired) != isscalar(actual):
        raise AssertionError(msg)

    # Inf/nan/negative zero handling
    try:
        isdesnan = gisnan(desired)
        isactnan = gisnan(actual)
        if isdesnan and isactnan:
            return  # both nan, so equal

        # handle signed zero specially for floats
        # NOTE(review): array_actual/array_desired appear unused below;
        # presumably leftover from the NumPy implementation -- confirm
        # before removing (np.asarray may raise here, which is caught).
        array_actual = np.asarray(actual)
        array_desired = np.asarray(desired)

        # +0.0 and -0.0 compare equal with ==, so distinguish them via
        # the sign bit.
        if desired == 0 and actual == 0:
            if not signbit(desired) == signbit(actual):
                raise AssertionError(msg)

    except (TypeError, ValueError, NotImplementedError):
        pass

    try:
        # Explicitly use __eq__ for comparison, gh-2552
        if not (desired == actual):
            raise AssertionError(msg)

    except (DeprecationWarning, FutureWarning) as e:
        # this handles the case when the two types are not even comparable
        if "elementwise == comparison" in e.args[0]:
            raise AssertionError(msg)
        else:
            raise
278
+
279
+
280
def print_assert_equal(test_string, actual, desired):
    """
    Check ``actual == desired`` and raise a descriptive error otherwise.

    Parameters
    ----------
    test_string : str
        Label used as the prefix of the failure message.
    actual : object
        Value produced by the code under test.
    desired : object
        Expected value.

    Raises
    ------
    AssertionError
        If the two objects compare unequal; the message contains
        pretty-printed reprs of both values.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import pprint

    if actual == desired:
        return

    report = StringIO()
    report.write(test_string)
    report.write(" failed\nACTUAL: \n")
    pprint.pprint(actual, report)
    report.write("DESIRED: \n")
    pprint.pprint(desired, report)
    raise AssertionError(report.getvalue())
319
+
320
+
321
def assert_almost_equal(actual, desired, decimal=7, err_msg="", verbose=True):
    """
    Raises an AssertionError if two items are not equal up to desired
    precision.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    The test verifies that the elements of `actual` and `desired` satisfy.

        ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``

    That is a looser test than originally documented, but agrees with what the
    actual implementation in `assert_array_almost_equal` did up to rounding
    vagaries. An exception is raised at conflicting values. For ndarrays this
    delegates to assert_array_almost_equal

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    decimal : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> from torch._numpy.testing import assert_almost_equal
    >>> assert_almost_equal(2.3333333333333, 2.33333334)
    >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 10 decimals
     ACTUAL: 2.3333333333333
     DESIRED: 2.33333334

    >>> assert_almost_equal(np.array([1.0,2.3333333333333]),
    ...                     np.array([1.0,2.33333334]), decimal=9)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 9 decimals
    <BLANKLINE>
    Mismatched elements: 1 / 2 (50%)
    Max absolute difference: 6.666699636781459e-09
    Max relative difference: 2.8571569790287484e-09
    x: torch.ndarray([1.0000, 2.3333], dtype=float64)
    y: torch.ndarray([1.0000, 2.3333], dtype=float64)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from torch._numpy import imag, iscomplexobj, ndarray, real

    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except ValueError:
        usecomplex = False

    def _build_err_msg():
        # Lazily built so the (possibly expensive) reprs only happen on failure.
        header = "Arrays are not almost equal to %d decimals" % decimal
        return build_err_msg([actual, desired], err_msg, verbose=verbose, header=header)

    if usecomplex:
        # Compare real and imaginary components independently; a missing
        # component on either side is treated as 0.
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            assert_almost_equal(actualr, desiredr, decimal=decimal)
            assert_almost_equal(actuali, desiredi, decimal=decimal)
        except AssertionError:
            raise AssertionError(_build_err_msg())

    # Array-like inputs are delegated to the element-wise variant.
    if isinstance(actual, (ndarray, tuple, list)) or isinstance(
        desired, (ndarray, tuple, list)
    ):
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(_build_err_msg())
            else:
                if not desired == actual:
                    raise AssertionError(_build_err_msg())
            return
    except (NotImplementedError, TypeError):
        pass
    # Finite scalars: absolute-difference test at the requested precision.
    if abs(desired - actual) >= np.float64(1.5 * 10.0 ** (-decimal)):
        raise AssertionError(_build_err_msg())
444
+
445
+
446
def assert_approx_equal(actual, desired, significant=7, err_msg="", verbose=True):
    """
    Raises an AssertionError if two items are not equal up to significant
    digits.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    Given two numbers, check that they are approximately equal.
    Approximately equal is defined as the number of significant digits
    that agree.

    Parameters
    ----------
    actual : scalar
        The object to check.
    desired : scalar
        The expected object.
    significant : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)  # doctest: +SKIP
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,  # doctest: +SKIP
    ...                                significant=8)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,  # doctest: +SKIP
    ...                                significant=8)
    Traceback (most recent call last):
        ...
    AssertionError:
    Items are not equal to 8 significant digits:
     ACTUAL: 1.234567e-21
     DESIRED: 1.2345672e-21

    the evaluated condition that raises the exception is

    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
    True

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # NOTE: this imports real NumPy, shadowing the module-level
    # `torch._numpy as np` alias for the duration of this function.
    import numpy as np

    (actual, desired) = map(float, (actual, desired))
    if desired == actual:
        return
    # Normalized the numbers to be in range (-10.0,10.0)
    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
    scale = 0.5 * (np.abs(desired) + np.abs(actual))
    scale = np.power(10, np.floor(np.log10(scale)))
    try:
        sc_desired = desired / scale
    except ZeroDivisionError:
        sc_desired = 0.0
    try:
        sc_actual = actual / scale
    except ZeroDivisionError:
        sc_actual = 0.0
    msg = build_err_msg(
        [actual, desired],
        err_msg,
        header="Items are not equal to %d significant digits:" % significant,
        verbose=verbose,
    )
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(msg)
            else:
                if not desired == actual:
                    raise AssertionError(msg)
            return
    except (TypeError, NotImplementedError):
        pass
    # Compare the rescaled values: they agree to `significant` digits iff
    # their difference is below 10**-(significant-1).
    if np.abs(sc_desired - sc_actual) >= np.power(10.0, -(significant - 1)):
        raise AssertionError(msg)
544
+
545
+
546
def assert_array_compare(
    comparison,
    x,
    y,
    err_msg="",
    verbose=True,
    header="",
    precision=6,
    equal_nan=True,
    equal_inf=True,
    *,
    strict=False,
):
    """
    Shared workhorse behind the assert_array_* helpers.

    Coerces ``x`` and ``y`` with ``asarray``, applies ``comparison``
    element-wise, and raises AssertionError with a detailed report on any
    mismatch.  With ``equal_nan``/``equal_inf`` true, NaN and +/-inf
    entries must occupy the same positions in both inputs and are then
    excluded from the element-wise comparison.  ``strict=True``
    additionally requires matching shape and dtype (which disables
    scalar-vs-array broadcasting).
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from torch._numpy import all, array, asarray, bool_, inf, isnan, max

    x = asarray(x)
    y = asarray(y)

    def array2string(a):
        return str(a)

    # original array for output formatting
    ox, oy = x, y

    def func_assert_same_pos(x, y, func=isnan, hasval="nan"):
        """Handling nan/inf.

        Combine results of running func on x and y, checking that they are True
        at the same locations.

        """
        __tracebackhide__ = True  # Hide traceback for py.test
        x_id = func(x)
        y_id = func(y)
        # We include work-arounds here to handle three types of slightly
        # pathological ndarray subclasses:
        # (1) all() on `masked` array scalars can return masked arrays, so we
        #     use != True
        # (2) __eq__ on some ndarray subclasses returns Python booleans
        #     instead of element-wise comparisons, so we cast to bool_() and
        #     use isinstance(..., bool) checks
        # (3) subclasses with bare-bones __array_function__ implementations may
        #     not implement np.all(), so favor using the .all() method
        # We are not committed to supporting such subclasses, but it's nice to
        # support them if possible.
        if (x_id == y_id).all().item() is not True:
            msg = build_err_msg(
                [x, y],
                err_msg + "\nx and y %s location mismatch:" % (hasval),
                verbose=verbose,
                header=header,
                names=("x", "y"),
                precision=precision,
            )
            raise AssertionError(msg)
        # If there is a scalar, then here we know the array has the same
        # flag as it everywhere, so we should return the scalar flag.
        if isinstance(x_id, bool) or x_id.ndim == 0:
            return bool_(x_id)
        elif isinstance(y_id, bool) or y_id.ndim == 0:
            return bool_(y_id)
        else:
            return y_id

    try:
        # Shape (and, in strict mode, dtype) compatibility check up front.
        if strict:
            cond = x.shape == y.shape and x.dtype == y.dtype
        else:
            cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
        if not cond:
            if x.shape != y.shape:
                reason = f"\n(shapes {x.shape}, {y.shape} mismatch)"
            else:
                reason = f"\n(dtypes {x.dtype}, {y.dtype} mismatch)"
            msg = build_err_msg(
                [x, y],
                err_msg + reason,
                verbose=verbose,
                header=header,
                names=("x", "y"),
                precision=precision,
            )
            raise AssertionError(msg)

        # `flagged` marks entries handled by the nan/inf position checks.
        flagged = bool_(False)

        if equal_nan:
            flagged = func_assert_same_pos(x, y, func=isnan, hasval="nan")

        if equal_inf:
            flagged |= func_assert_same_pos(
                x, y, func=lambda xy: xy == +inf, hasval="+inf"
            )
            flagged |= func_assert_same_pos(
                x, y, func=lambda xy: xy == -inf, hasval="-inf"
            )

        if flagged.ndim > 0:
            # Drop the already-verified nan/inf entries before comparing.
            x, y = x[~flagged], y[~flagged]
            # Only do the comparison if actual values are left
            if x.size == 0:
                return
        elif flagged:
            # no sense doing comparison if everything is flagged.
            return

        val = comparison(x, y)

        if isinstance(val, bool):
            cond = val
            reduced = array([val])
        else:
            reduced = val.ravel()
            cond = reduced.all()

        # The below comparison is a hack to ensure that fully masked
        # results, for which val.ravel().all() returns np.ma.masked,
        # do not trigger a failure (np.ma.masked != True evaluates as
        # np.ma.masked, which is falsy).
        if not cond:
            # Build the mismatch statistics for the error report.
            n_mismatch = reduced.size - int(reduced.sum(dtype=intp))
            n_elements = flagged.size if flagged.ndim != 0 else reduced.size
            percent_mismatch = 100 * n_mismatch / n_elements
            remarks = [
                f"Mismatched elements: {n_mismatch} / {n_elements} ({percent_mismatch:.3g}%)"
            ]

            # with errstate(all='ignore'):
            # ignore errors for non-numeric types
            with contextlib.suppress(TypeError, RuntimeError):
                error = abs(x - y)
                if np.issubdtype(x.dtype, np.unsignedinteger):
                    # unsigned subtraction wraps around, so take the
                    # smaller of the two one-sided differences
                    error2 = abs(y - x)
                    np.minimum(error, error2, out=error)
                max_abs_error = max(error)
                remarks.append(
                    "Max absolute difference: " + array2string(max_abs_error.item())
                )

                # note: this definition of relative error matches that one
                # used by assert_allclose (found in np.isclose)
                # Filter values where the divisor would be zero
                nonzero = bool_(y != 0)
                if all(~nonzero):
                    max_rel_error = array(inf)
                else:
                    max_rel_error = max(error[nonzero] / abs(y[nonzero]))
                remarks.append(
                    "Max relative difference: " + array2string(max_rel_error.item())
                )

            err_msg += "\n" + "\n".join(remarks)
            msg = build_err_msg(
                [ox, oy],
                err_msg,
                verbose=verbose,
                header=header,
                names=("x", "y"),
                precision=precision,
            )
            raise AssertionError(msg)
    except ValueError:
        # A ValueError raised anywhere above is re-reported with the
        # traceback folded into the header for context.
        import traceback

        efmt = traceback.format_exc()
        header = f"error during assertion:\n\n{efmt}\n\n{header}"

        msg = build_err_msg(
            [x, y],
            err_msg,
            verbose=verbose,
            header=header,
            names=("x", "y"),
            precision=precision,
        )
        raise ValueError(msg)
723
+
724
+
725
def assert_array_equal(x, y, err_msg="", verbose=True, *, strict=False):
    """
    Raise an AssertionError if two array_like objects are not equal.

    Shapes must match and every element must compare equal; a mismatch
    raises with a detailed report.  Unlike IEEE semantics, NaNs compare
    equal as long as they occupy the same positions in both inputs.  When
    one argument is a scalar and the other is array_like, each element is
    checked against the scalar -- unless ``strict=True``, which disables
    that broadcasting and also requires matching dtypes.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    err_msg : str, optional
        Message prepended to the failure report.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    strict : bool, optional
        If True, raise when either the shape or the dtype of the two
        objects does not match; scalar broadcasting is disabled.

    Raises
    ------
    AssertionError
        If the two objects compare unequal.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    assert_array_compare(
        operator.eq,
        x,
        y,
        err_msg=err_msg,
        verbose=verbose,
        header="Arrays are not equal",
        strict=strict,
    )
829
+
830
+
831
def assert_array_almost_equal(x, y, decimal=6, err_msg="", verbose=True):
    """
    Raise an AssertionError if two objects are not equal up to desired
    precision.

    The test verifies identical shapes and that the elements satisfy
    ``abs(desired - actual) < 1.5 * 10**(-decimal)`` elementwise. NaNs are
    compared like numbers: no assertion is raised if both objects have NaNs
    in the same positions. Infinities must match in sign and position.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : int, optional
        Desired precision, default is 6.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose : Compare two array_like objects for equality with
        desired relative and/or absolute precision.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from torch._numpy import any as npany, float_, issubdtype, number, result_type

    def compare(x, y):
        try:
            if npany(gisinf(x)) or npany(gisinf(y)):
                xinfid = gisinf(x)
                yinfid = gisinf(y)
                if not (xinfid == yinfid).all():
                    # infs must occur at the same positions in both arrays
                    return False
                # if one item, x and y is +- inf
                if x.size == y.size == 1:
                    return x == y
                # compare only the finite entries below
                x = x[~xinfid]
                y = y[~yinfid]
        except (TypeError, NotImplementedError):
            pass

        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
        # casting of x later.
        dtype = result_type(y, 1.0)
        y = asanyarray(y, dtype)
        z = abs(x - y)

        if not issubdtype(z.dtype, number):
            z = z.astype(float_)  # handle object arrays

        return z < 1.5 * 10.0 ** (-decimal)

    assert_array_compare(
        compare,
        x,
        y,
        err_msg=err_msg,
        verbose=verbose,
        header=("Arrays are not almost equal to %d decimals" % decimal),
        precision=decimal,
    )
946
+
947
+
948
def assert_array_less(x, y, err_msg="", verbose=True):
    """
    Raise an AssertionError if two array_like objects are not ordered by
    strict less-than.

    Given two array_like objects, check that the shapes are equal and that
    every element of the first object is strictly smaller than the
    corresponding element of the second object. An exception is raised at
    shape mismatch or incorrectly ordered values. Shape mismatch does not
    raise if an object has zero dimension. NaNs are compared: no assertion
    is raised if both objects have NaNs in the same positions.

    Parameters
    ----------
    x : array_like
        The smaller object to check.
    y : array_like
        The larger object to compare.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If x is not strictly smaller than y, elementwise.

    See Also
    --------
    assert_array_equal : tests objects for equality
    assert_array_almost_equal : test objects for equality up to precision
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    assert_array_compare(
        operator.__lt__,
        x,
        y,
        err_msg=err_msg,
        verbose=verbose,
        header="Arrays are not less-ordered",
        equal_inf=False,
    )
1034
+
1035
+
1036
def assert_string_equal(actual, desired):
    """
    Test if two strings are equal.

    If the given strings are equal, `assert_string_equal` does nothing.
    If they are not equal, an AssertionError is raised, and the diff
    between the strings is shown.

    Parameters
    ----------
    actual : str
        The string to test for equality against the expected string.
    desired : str
        The expected string.

    Raises
    ------
    AssertionError
        If `actual` and `desired` differ, or if either argument is not a
        ``str``.
    """
    # delay import of difflib to reduce startup time
    __tracebackhide__ = True  # Hide traceback for py.test
    import difflib

    if not isinstance(actual, str):
        raise AssertionError(repr(type(actual)))
    if not isinstance(desired, str):
        raise AssertionError(repr(type(desired)))
    if desired == actual:
        return

    diff = list(
        difflib.Differ().compare(actual.splitlines(True), desired.splitlines(True))
    )
    diff_list = []
    while diff:
        d1 = diff.pop(0)
        if d1.startswith("  "):
            # common line — no difference to report
            continue
        if d1.startswith("- "):
            l = [d1]
            d2 = diff.pop(0)
            if d2.startswith("? "):
                l.append(d2)
                d2 = diff.pop(0)
            if not d2.startswith("+ "):
                raise AssertionError(repr(d2))
            l.append(d2)
            if diff:
                d3 = diff.pop(0)
                if d3.startswith("? "):
                    l.append(d3)
                else:
                    diff.insert(0, d3)
            if d2[2:] == d1[2:]:
                # lines only differ in trailing markers — not a real diff
                continue
            diff_list.extend(l)
            continue
        raise AssertionError(repr(d1))
    if not diff_list:
        return
    msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
    # Equality already returned early above, so any surviving diff entries
    # mean the comparison failed.
    raise AssertionError(msg)
1106
+
1107
+
1108
import unittest


class _Dummy(unittest.TestCase):
    # Minimal TestCase used purely to borrow unittest's assertion helpers
    # (e.g. assertRaisesRegex) as module-level functions.
    def nop(self):
        pass


# Single shared instance whose bound assert* methods back the module-level
# helpers below (see assert_raises_regex).
_d = _Dummy("nop")
1117
+
1118
+
1119
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
    """
    assert_raises_regex(exception_class, expected_regexp, callable, *args,
                        **kwargs)
    assert_raises_regex(exception_class, expected_regexp)

    Fail unless an exception of class `exception_class` whose message matches
    `expected_regexp` is raised by the callable when invoked with arguments
    `args` and keyword arguments `kwargs`.

    Alternatively, can be used as a context manager like `assert_raises`.

    Notes
    -----
    .. versionadded:: 1.9.0
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to unittest's implementation via the shared dummy TestCase.
    return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
1138
+
1139
+
1140
+ def decorate_methods(cls, decorator, testmatch=None):
1141
+ """
1142
+ Apply a decorator to all methods in a class matching a regular expression.
1143
+
1144
+ The given decorator is applied to all public methods of `cls` that are
1145
+ matched by the regular expression `testmatch`
1146
+ (``testmatch.search(methodname)``). Methods that are private, i.e. start
1147
+ with an underscore, are ignored.
1148
+
1149
+ Parameters
1150
+ ----------
1151
+ cls : class
1152
+ Class whose methods to decorate.
1153
+ decorator : function
1154
+ Decorator to apply to methods
1155
+ testmatch : compiled regexp or str, optional
1156
+ The regular expression. Default value is None, in which case the
1157
+ nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
1158
+ is used.
1159
+ If `testmatch` is a string, it is compiled to a regular expression
1160
+ first.
1161
+
1162
+ """
1163
+ if testmatch is None:
1164
+ testmatch = re.compile(r"(?:^|[\\b_\\.%s-])[Tt]est" % os.sep)
1165
+ else:
1166
+ testmatch = re.compile(testmatch)
1167
+ cls_attr = cls.__dict__
1168
+
1169
+ # delayed import to reduce startup time
1170
+ from inspect import isfunction
1171
+
1172
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
1173
+ for function in methods:
1174
+ try:
1175
+ if hasattr(function, "compat_func_name"):
1176
+ funcname = function.compat_func_name
1177
+ else:
1178
+ funcname = function.__name__
1179
+ except AttributeError:
1180
+ # not a function
1181
+ continue
1182
+ if testmatch.search(funcname) and not funcname.startswith("_"):
1183
+ setattr(cls, funcname, decorator(function))
1184
+ return
1185
+
1186
+
1187
def _assert_valid_refcount(op):
    """
    Check that ufuncs don't mishandle the refcount of the object ``1``.
    Used in a few regression tests.

    Returns True immediately on interpreters without refcounting
    (e.g. PyPy); otherwise asserts that repeatedly applying `op` does not
    decrease the refcount of the small-int singleton.
    """
    if not HAS_REFCOUNT:
        return True

    import gc

    import numpy as np

    b = np.arange(100 * 100).reshape(100, 100)
    c = b
    i = 1

    # Disable the GC so refcount comparisons are not perturbed by collection.
    gc.disable()
    try:
        rc = sys.getrefcount(i)
        for j in range(15):
            d = op(b, c)
            assert_(sys.getrefcount(i) >= rc)
    finally:
        gc.enable()
    del d  # for pyflakes
1212
+
1213
+
1214
def assert_allclose(
    actual,
    desired,
    rtol=1e-7,
    atol=0,
    equal_nan=True,
    err_msg="",
    verbose=True,
    check_dtype=False,
):
    """
    Raise an AssertionError if two objects are not equal up to desired
    tolerance.

    Given two array_like objects, check that their shapes and all elements
    are equal (but see the Notes for the special handling of a scalar). An
    exception is raised if the shapes mismatch or any values conflict. NaNs
    are compared like numbers: no assertion is raised if both objects have
    NaNs in the same positions (when `equal_nan` is True).

    The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
    that ``allclose`` has different default values). It compares the
    difference between `actual` and `desired` to ``atol + rtol * abs(desired)``.

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    equal_nan : bool, optional
        If True, NaNs will compare equal.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    check_dtype : bool, optional
        If True, additionally require ``actual.dtype == desired.dtype``.
        Default is False.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    Notes
    -----
    When one of `actual` and `desired` is a scalar and the other is
    array_like, the function checks that each element of the array_like
    object is equal to the scalar.
    """
    __tracebackhide__ = True  # Hide traceback for py.test

    def compare(x, y):
        return np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)

    actual, desired = asanyarray(actual), asanyarray(desired)
    header = f"Not equal to tolerance rtol={rtol:g}, atol={atol:g}"

    if check_dtype:
        assert actual.dtype == desired.dtype

    assert_array_compare(
        compare,
        actual,
        desired,
        err_msg=str(err_msg),
        verbose=verbose,
        header=header,
        equal_nan=equal_nan,
    )
1299
+
1300
+
1301
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose amplitude
    is variable.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see
        Notes). Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is
        larger than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.

    Notes
    -----
    An assertion is raised if the following condition is not met::

        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    ax = np.abs(x)
    ay = np.abs(y)
    # Tolerance scales with the float spacing at the larger magnitude.
    ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
    if not np.all(np.abs(x - y) <= ref):
        if np.iscomplexobj(x) or np.iscomplexobj(y):
            # nulp_diff does not support complex inputs; omit the max.
            msg = "X and Y are not equal to %d ULP" % nulp
        else:
            max_nulp = np.max(nulp_diff(x, y))
            msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
        raise AssertionError(msg)
1363
+
1364
+
1365
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last
    Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a`
        and `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers
        between items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    Notes
    -----
    For computing the ULP difference, this API does not differentiate
    between various representations of NAN (the ULP difference between
    0x7fc00000 and 0xffc00000 is zero).

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    ret = nulp_diff(a, b, dtype)
    if not np.all(ret <= maxulp):
        raise AssertionError(
            f"Arrays are not almost equal up to {maxulp:g} "
            f"ULP (max difference is {np.max(ret):g} ULP)"
        )
    return ret
1417
+
1418
+
1419
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in
        x and y.

    Raises
    ------
    NotImplementedError
        If either input is complex.
    ValueError
        If `x` and `y` do not have the same shape.

    Notes
    -----
    For computing the ULP difference, this API does not differentiate
    between various representations of NAN (the ULP difference between
    0x7fc00000 and 0xffc00000 is zero).
    """
    import numpy as np

    if dtype:
        x = np.asarray(x, dtype=dtype)
        y = np.asarray(y, dtype=dtype)
    else:
        x = np.asarray(x)
        y = np.asarray(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    x = np.array([x], dtype=t)
    y = np.array([y], dtype=t)

    # Canonicalize NaN bit patterns so all NaNs compare at zero ULP distance.
    x[np.isnan(x)] = np.nan
    y[np.isnan(y)] = np.nan

    if not x.shape == y.shape:
        raise ValueError(f"x and y do not have the same shape: {x.shape} - {y.shape}")

    def _diff(rx, ry, vdt):
        diff = np.asarray(rx - ry, dtype=vdt)
        return np.abs(diff)

    # Sign-magnitude integer representations; their difference is the ULP gap.
    rx = integer_repr(x)
    ry = integer_repr(y)
    return _diff(rx, ry, t)
1480
+
1481
+
1482
def _integer_repr(x, vdt, comp):
    # Reinterpret the float bit pattern as a signed integer (`vdt`) and map
    # the two's-complement encoding of negative floats onto sign-magnitude
    # order via `comp` (the most negative integer of that width), so that
    # integer distance equals distance in ULPs.
    # See also
    # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
    rx = x.view(vdt)
    if rx.size == 1:
        # 0-d/1-element views cannot be fancy-indexed in place; rebind.
        if rx < 0:
            rx = comp - rx
    else:
        rx[rx < 0] = comp - rx[rx < 0]

    return rx
1495
+
1496
+
1497
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary
    representation of x.

    Supports float16/float32/float64 arrays; raises ValueError otherwise.
    """
    import numpy as np

    if x.dtype == np.float16:
        return _integer_repr(x, np.int16, np.int16(-(2**15)))
    elif x.dtype == np.float32:
        return _integer_repr(x, np.int32, np.int32(-(2**31)))
    elif x.dtype == np.float64:
        return _integer_repr(x, np.int64, np.int64(-(2**63)))
    else:
        raise ValueError(f"Unsupported dtype {x.dtype}")
1510
+
1511
+
1512
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
    # Context manager backing assert_warns: record warnings of the given
    # class and fail if none were raised inside the block.
    __tracebackhide__ = True  # Hide traceback for py.test
    with suppress_warnings() as sup:
        l = sup.record(warning_class)
        yield
        if not len(l) > 0:
            name_str = f" when calling {name}" if name is not None else ""
            raise AssertionError("No warning raised" + name_str)
1521
+
1522
+
1523
def assert_warns(warning_class, *args, **kwargs):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class `warning_class` should be thrown by the callable when
    invoked with arguments `args` and keyword arguments `kwargs`.
    If a different type of warning is thrown, it will not be caught.

    If called with all arguments other than the warning class omitted, may
    be used as a context manager::

        with assert_warns(SomeWarning):
            do_something()

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable, optional
        Callable to test.
    *args : Arguments
        Arguments for `func`.
    **kwargs : Kwargs
        Keyword arguments for `func`.

    Returns
    -------
    The value returned by `func`, or a context manager when no callable is
    given.
    """
    if not args:
        # context-manager form
        return _assert_warns_context(warning_class)

    func = args[0]
    args = args[1:]
    with _assert_warns_context(warning_class, name=func.__name__):
        return func(*args, **kwargs)
1575
+
1576
+
1577
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
    # Context manager backing assert_no_warnings: fail if any warning at all
    # is emitted inside the block.
    __tracebackhide__ = True  # Hide traceback for py.test
    with warnings.catch_warnings(record=True) as l:
        warnings.simplefilter("always")
        yield
        if len(l) > 0:
            name_str = f" when calling {name}" if name is not None else ""
            raise AssertionError(f"Got warnings{name_str}: {l}")
1586
+
1587
+
1588
def assert_no_warnings(*args, **kwargs):
    """
    Fail if the given callable produces any warnings.

    If called with all arguments omitted, may be used as a context manager::

        with assert_no_warnings():
            do_something()

    Parameters
    ----------
    func : callable, optional
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`, or a context manager when no callable is
    given.
    """
    if not args:
        # context-manager form
        return _assert_no_warnings_context()

    func = args[0]
    args = args[1:]
    with _assert_no_warnings_context(name=func.__name__):
        return func(*args, **kwargs)
1622
+
1623
+
1624
def _gen_alignment_data(dtype=float32, type="binary", max_size=24):
    """
    Generator producing data with different alignment and offsets to test
    simd vectorization.

    Parameters
    ----------
    dtype : dtype
        data type to produce
    type : string
        'unary': create data for unary operations; yields one output array,
        one input array and a message.
        'binary': create data for binary operations; yields one output
        array, two input arrays and a message.
    max_size : integer
        maximum size of data to produce

    Yields
    ------
    Tuples of (out, in...) arrays plus a message describing offsets, size,
    dtype and aliasing of the produced data.
    """
    ufmt = "unary offset=(%d, %d), size=%d, dtype=%r, %s"
    bfmt = "binary offset=(%d, %d, %d), size=%d, dtype=%r, %s"
    for o in range(3):
        for s in range(o + 2, max(o + 3, max_size)):
            if type == "unary":
                # fresh input each time so aliased cases are independent
                def inp():
                    return arange(s, dtype=dtype)[o:]

                out = empty((s,), dtype=dtype)[o:]
                yield out, inp(), ufmt % (o, o, s, dtype, "out of place")
                d = inp()
                yield d, d, ufmt % (o, o, s, dtype, "in place")
                yield out[1:], inp()[:-1], ufmt % (o + 1, o, s - 1, dtype, "out of place")
                yield out[:-1], inp()[1:], ufmt % (o, o + 1, s - 1, dtype, "out of place")
                yield inp()[:-1], inp()[1:], ufmt % (o, o + 1, s - 1, dtype, "aliased")
                yield inp()[1:], inp()[:-1], ufmt % (o + 1, o, s - 1, dtype, "aliased")
            if type == "binary":

                def inp1():
                    return arange(s, dtype=dtype)[o:]

                inp2 = inp1
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp1(), inp2(), bfmt % (o, o, o, s, dtype, "out of place")
                d = inp1()
                yield d, d, inp2(), bfmt % (o, o, o, s, dtype, "in place1")
                d = inp2()
                yield d, inp1(), d, bfmt % (o, o, o, s, dtype, "in place2")
                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % (
                    o + 1, o, o, s - 1, dtype, "out of place",
                )
                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % (
                    o, o + 1, o, s - 1, dtype, "out of place",
                )
                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % (
                    o, o, o + 1, s - 1, dtype, "out of place",
                )
                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % (
                    o + 1, o, o, s - 1, dtype, "aliased",
                )
                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % (
                    o, o + 1, o, s - 1, dtype, "aliased",
                )
                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % (
                    o, o, o + 1, s - 1, dtype, "aliased",
                )
1738
+
1739
+
1740
class IgnoreException(Exception):
    """Ignoring this exception due to disabled feature"""
1742
+
1743
+
1744
@contextlib.contextmanager
def tempdir(*args, **kwargs):
    """Context manager to provide a temporary test folder.

    All arguments are passed through to the underlying ``tempfile.mkdtemp``
    function. The directory and its contents are removed on exit.
    """
    tmpdir = mkdtemp(*args, **kwargs)
    try:
        yield tmpdir
    finally:
        # remove even if the body raised
        shutil.rmtree(tmpdir)
1757
+
1758
+
1759
@contextlib.contextmanager
def temppath(*args, **kwargs):
    """Context manager for temporary files.

    Context manager that returns the path to a closed temporary file. Its
    parameters are the same as for ``tempfile.mkstemp`` and are passed
    directly to that function. The underlying file is removed when the
    context is exited, so it should be closed at that time.

    Windows does not allow a temporary file to be opened if it is already
    open, so the underlying file must be closed after opening before it
    can be opened again.
    """
    fd, path = mkstemp(*args, **kwargs)
    # close immediately; callers reopen the path themselves
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)
1779
+
1780
+
1781
class clear_and_catch_warnings(warnings.catch_warnings):
    """Context manager that resets warning registry for catching warnings.

    Whenever a warning is triggered, Python adds a ``__warningregistry__``
    member to the *calling* module, which prevents the warning from being
    retriggered regardless of the warnings filters. This context manager
    accepts a sequence of `modules` as a keyword argument to its constructor
    and:

    * stores and removes any ``__warningregistry__`` entries in given
      `modules` on entry;
    * resets ``__warningregistry__`` to its previous state on exit.

    This makes it possible to trigger any warning afresh inside the context
    manager without disturbing the state of warnings outside.

    Parameters
    ----------
    record : bool, optional
        Specifies whether warnings should be captured by a custom
        implementation of ``warnings.showwarning()`` and be appended to a
        list returned by the context manager. Otherwise None is returned by
        the context manager. The objects appended to the list are arguments
        whose attributes mirror the arguments to ``showwarning()``.
    modules : sequence, optional
        Sequence of modules for which to reset warnings registry on entry
        and restore on exit. To work correctly, all 'ignore' filters should
        filter by one of these modules.
    """

    # Subclasses may pre-populate modules whose registries are always reset.
    class_modules = ()

    def __init__(self, record=False, modules=()):
        self.modules = set(modules).union(self.class_modules)
        self._warnreg_copies = {}
        super().__init__(record=record)

    def __enter__(self):
        # Save and clear each module's warning registry so previously-seen
        # warnings can fire again inside the context.
        for mod in self.modules:
            if hasattr(mod, "__warningregistry__"):
                mod_reg = mod.__warningregistry__
                self._warnreg_copies[mod] = mod_reg.copy()
                mod_reg.clear()
        return super().__enter__()

    def __exit__(self, *exc_info):
        super().__exit__(*exc_info)
        # Restore the registries saved on entry.
        for mod in self.modules:
            if hasattr(mod, "__warningregistry__"):
                mod.__warningregistry__.clear()
                if mod in self._warnreg_copies:
                    mod.__warningregistry__.update(self._warnreg_copies[mod])
1846
+
1847
+
1848
class suppress_warnings:
    """
    Context manager and decorator doing much the same as
    ``warnings.catch_warnings``.

    However, it also provides a filter mechanism to work around
    https://bugs.python.org/issue4180.

    This bug causes Python before 3.4 to not reliably show warnings again
    after they have been ignored once (even within catch_warnings). It
    means that no "ignore" filter can be used easily, since following
    tests might need to see the warning. Additionally it allows easier
    specificity for testing warnings and can be nested.

    Parameters
    ----------
    forwarding_rule : str, optional
        One of "always", "once", "module", or "location". Analogous to
        the usual warnings module filter mode, it is useful to reduce
        noise mostly on the outmost level. Unsuppressed and unrecorded
        warnings will be forwarded based on this rule. Defaults to "always".
        "location" is equivalent to the warnings "default", match by exact
        location the warning originated from.

    Notes
    -----
    Filters added inside the context manager will be discarded again
    when leaving it. Upon entering all filters defined outside a
    context will be applied automatically.

    When a recording filter is added, matching warnings are stored in the
    ``log`` attribute as well as in the list returned by ``record``.

    If filters are added and the ``module`` keyword is given, the
    warning registry of this module will additionally be cleared when
    applying it, entering the context, or exiting it. This could cause
    warnings to appear a second time after leaving the context if they
    were configured to be printed once (default) and were already
    printed before the context was entered.

    Nesting this context manager will work as expected when the
    forwarding rule is "always" (default). Unfiltered and unrecorded
    warnings will be passed out and be matched by the outer level.
    On the outmost level they will be printed (or caught by another
    warnings context). The forwarding rule argument can modify this
    behaviour.

    Like ``catch_warnings`` this context manager is not threadsafe.

    Examples
    --------

    With a context manager::

        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Some text")
            sup.filter(module=np.ma.core)
            log = sup.record(FutureWarning, "Does this occur?")
            command_giving_warnings()
            # The FutureWarning was given once, the filtered warnings were
            # ignored. All other warnings abide outside settings (may be
            # printed/error)
            assert_(len(log) == 1)
            assert_(len(sup.log) == 1)  # also stored in log attribute

    Or as a decorator::

        sup = np.testing.suppress_warnings()
        sup.filter(module=np.ma.core)  # module must match exactly
        @sup
        def some_function():
            # do something which causes a warning in np.ma.core
            pass
    """

    def __init__(self, forwarding_rule="always"):
        self._entered = False

        # Suppressions are either instance or defined inside one with block:
        self._suppressions = []

        if forwarding_rule not in {"always", "module", "once", "location"}:
            raise ValueError("unsupported forwarding rule.")
        self._forwarding_rule = forwarding_rule

    def _clear_registries(self):
        # Invalidate cached "already warned" state so suppressed warnings can
        # fire again later.
        if hasattr(warnings, "_filters_mutated"):
            # clearing the registry should not be necessary on new pythons,
            # instead the filters should be mutated.
            warnings._filters_mutated()
            return
        # Simply clear the registry, this should normally be harmless,
        # note that on new pythons it would be invalidated anyway.
        for module in self._tmp_modules:
            if hasattr(module, "__warningregistry__"):
                module.__warningregistry__.clear()

    def _filter(self, category=Warning, message="", module=None, record=False):
        # Shared implementation behind ``filter`` and ``record``. Returns the
        # list used for recording (or None when not recording).
        if record:
            record = []  # The log where to store warnings
        else:
            record = None
        if self._entered:
            # Already inside the context: apply the filter immediately and
            # remember it only for the duration of the context.
            if module is None:
                warnings.filterwarnings("always", category=category, message=message)
            else:
                module_regex = module.__name__.replace(".", r"\.") + "$"
                warnings.filterwarnings(
                    "always", category=category, message=message, module=module_regex
                )
                self._tmp_modules.add(module)
                self._clear_registries()

            self._tmp_suppressions.append(
                (category, message, re.compile(message, re.I), module, record)
            )
        else:
            # Not entered yet: store it; it is applied on __enter__.
            self._suppressions.append(
                (category, message, re.compile(message, re.I), module, record)
            )

        return record

    def filter(self, category=Warning, message="", module=None):
        """
        Add a new suppressing filter or apply it if the state is entered.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        self._filter(category=category, message=message, module=module, record=False)

    def record(self, category=Warning, message="", module=None):
        """
        Append a new recording filter or apply it if the state is entered.

        All warnings matching will be appended to the ``log`` attribute.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Returns
        -------
        log : list
            A list which will be filled with all matched warnings.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        return self._filter(
            category=category, message=message, module=module, record=True
        )

    def __enter__(self):
        if self._entered:
            raise RuntimeError("cannot enter suppress_warnings twice.")

        # Save the global warnings state; a copy of the filter list is
        # installed so additions inside the context don't leak out.
        self._orig_show = warnings.showwarning
        self._filters = warnings.filters
        warnings.filters = self._filters[:]

        self._entered = True
        self._tmp_suppressions = []
        self._tmp_modules = set()
        self._forwarded = set()

        self.log = []  # reset global log (no need to keep same list)

        # Apply all filters registered before entering.
        for cat, mess, _, mod, log in self._suppressions:
            if log is not None:
                del log[:]  # clear the log
            if mod is None:
                warnings.filterwarnings("always", category=cat, message=mess)
            else:
                module_regex = mod.__name__.replace(".", r"\.") + "$"
                warnings.filterwarnings(
                    "always", category=cat, message=mess, module=module_regex
                )
                self._tmp_modules.add(mod)
        warnings.showwarning = self._showwarning
        self._clear_registries()

        return self

    def __exit__(self, *exc_info):
        # Restore the original showwarning hook and filter list, then
        # invalidate registries so outer code sees a clean slate.
        warnings.showwarning = self._orig_show
        warnings.filters = self._filters
        self._clear_registries()
        self._entered = False
        del self._orig_show
        del self._filters

    def _showwarning(
        self, message, category, filename, lineno, *args, use_warnmsg=None, **kwargs
    ):
        # Replacement for warnings.showwarning while entered: either record,
        # swallow, or forward each warning according to the filters.
        # Later-added filters take precedence, hence the reversed scan.
        for cat, _, pattern, mod, rec in (self._suppressions + self._tmp_suppressions)[
            ::-1
        ]:
            if issubclass(category, cat) and pattern.match(message.args[0]) is not None:
                if mod is None:
                    # Message and category match, either recorded or ignored
                    if rec is not None:
                        msg = WarningMessage(
                            message, category, filename, lineno, **kwargs
                        )
                        self.log.append(msg)
                        rec.append(msg)
                    return
                # Use startswith, because warnings strips the c or o from
                # .pyc/.pyo files.
                elif mod.__file__.startswith(filename):
                    # The message and module (filename) match
                    if rec is not None:
                        msg = WarningMessage(
                            message, category, filename, lineno, **kwargs
                        )
                        self.log.append(msg)
                        rec.append(msg)
                    return

        # There is no filter in place, so pass to the outside handler
        # unless we should only pass it once
        # NOTE(review): self._orig_showmsg is never assigned anywhere in this
        # class as shown, so the use_warnmsg branches below would raise
        # AttributeError if taken — verify against the full file / callers.
        if self._forwarding_rule == "always":
            if use_warnmsg is None:
                self._orig_show(message, category, filename, lineno, *args, **kwargs)
            else:
                self._orig_showmsg(use_warnmsg)
            return

        if self._forwarding_rule == "once":
            signature = (message.args, category)
        elif self._forwarding_rule == "module":
            signature = (message.args, category, filename)
        elif self._forwarding_rule == "location":
            signature = (message.args, category, filename, lineno)

        # Forward each distinct signature only once.
        if signature in self._forwarded:
            return
        self._forwarded.add(signature)
        if use_warnmsg is None:
            self._orig_show(message, category, filename, lineno, *args, **kwargs)
        else:
            self._orig_showmsg(use_warnmsg)

    def __call__(self, func):
        """
        Function decorator to apply certain suppressions to a whole
        function.
        """

        @wraps(func)
        def new_func(*args, **kwargs):
            with self:
                return func(*args, **kwargs)

        return new_func
2127
+
2128
+
2129
@contextlib.contextmanager
def _assert_no_gc_cycles_context(name=None):
    # Context manager that raises AssertionError if the wrapped block creates
    # reference cycles; ``name`` only labels the error message.
    __tracebackhide__ = True  # Hide traceback for py.test

    # not meaningful to test if there is no refcounting
    if not HAS_REFCOUNT:
        yield
        return

    assert_(gc.isenabled())
    gc.disable()
    gc_debug = gc.get_debug()
    try:
        # Drain pre-existing garbage so only cycles created inside the block
        # are attributed to it; give up after 100 passes.
        for i in range(100):
            if gc.collect() == 0:
                break
        else:
            raise RuntimeError(
                "Unable to fully collect garbage - perhaps a __del__ method "
                "is creating more reference cycles?"
            )

        # DEBUG_SAVEALL makes collected objects land in gc.garbage for
        # inspection instead of being freed.
        gc.set_debug(gc.DEBUG_SAVEALL)
        yield
        # gc.collect returns the number of unreachable objects in cycles that
        # were found -- we are checking that no cycles were created in the context
        n_objects_in_cycles = gc.collect()
        objects_in_cycles = gc.garbage[:]
    finally:
        # Always restore the collector's prior state, even on error.
        del gc.garbage[:]
        gc.set_debug(gc_debug)
        gc.enable()

    if n_objects_in_cycles:
        name_str = f" when calling {name}" if name is not None else ""
        raise AssertionError(
            "Reference cycles were found{}: {} objects were collected, "
            "of which {} are shown below:{}".format(
                name_str,
                n_objects_in_cycles,
                len(objects_in_cycles),
                "".join(
                    "\n {} object with id={}:\n {}".format(
                        type(o).__name__,
                        id(o),
                        pprint.pformat(o).replace("\n", "\n "),
                    )
                    for o in objects_in_cycles
                ),
            )
        )
2180
+
2181
+
2182
def assert_no_gc_cycles(*args, **kwargs):
    """
    Fail if the given callable produces any reference cycles.

    If called with all arguments omitted, may be used as a context manager:

        with assert_no_gc_cycles():
            do_something()

    .. versionadded:: 1.15.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    Nothing. The result is deliberately discarded to ensure that all cycles
    are found.

    """
    # No callable supplied: behave as a plain context manager.
    if not args:
        return _assert_no_gc_cycles_context()

    # First positional argument is the callable; the rest are forwarded.
    func, *func_args = args
    with _assert_no_gc_cycles_context(name=func.__name__):
        func(*func_args, **kwargs)
2215
+
2216
+
2217
def break_cycles():
    """
    Break reference cycles by calling gc.collect
    Objects can call other objects' methods (for instance, another object's
    __del__) inside their own __del__. On PyPy, the interpreter only runs
    between calls to gc.collect, so multiple calls are needed to completely
    release all cycles.
    """
    gc.collect()
    if IS_PYPY:
        # A few extra passes, just to make sure all the finalizers are called.
        for _ in range(4):
            gc.collect()
2233
+
2234
+
2235
def requires_memory(free_bytes):
    """Decorator to skip a test if not enough memory is available"""
    import pytest

    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # Skip up-front when the free-memory check reports a problem.
            skip_msg = check_free_memory(free_bytes)
            if skip_msg is not None:
                pytest.skip(skip_msg)

            try:
                return func(*args, **kwargs)
            except MemoryError:
                # Probably ran out of memory regardless: don't regard as failure
                pytest.xfail("MemoryError raised")

        return inner

    return decorator
2255
+
2256
+
2257
def check_free_memory(free_bytes):
    """
    Check whether `free_bytes` amount of memory is currently free.
    Returns: None if enough memory available, otherwise error message
    """
    env_var = "NPY_AVAILABLE_MEM"
    env_value = os.environ.get(env_var)
    if env_value is not None:
        # An explicit override is set; trust it instead of probing the system.
        try:
            mem_free = _parse_size(env_value)
        except ValueError as exc:
            # Chain the original parse error (`from exc`) so the root cause
            # is visible in the traceback.
            raise ValueError(
                f"Invalid environment variable {env_var}: {exc}"
            ) from exc

        msg = (
            f"{free_bytes/1e9} GB memory required, but environment variable "
            f"NPY_AVAILABLE_MEM={env_value} set"
        )
    else:
        mem_free = _get_mem_available()

        if mem_free is None:
            # Could not determine free memory: report -1 so the comparison
            # below always asks the user to set the override.
            msg = (
                "Could not determine available memory; set NPY_AVAILABLE_MEM "
                "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
                "the test."
            )
            mem_free = -1
        else:
            msg = (
                f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available"
            )

    return msg if mem_free < free_bytes else None
2290
+
2291
+
2292
+ def _parse_size(size_str):
2293
+ """Convert memory size strings ('12 GB' etc.) to float"""
2294
+ suffixes = {
2295
+ "": 1,
2296
+ "b": 1,
2297
+ "k": 1000,
2298
+ "m": 1000**2,
2299
+ "g": 1000**3,
2300
+ "t": 1000**4,
2301
+ "kb": 1000,
2302
+ "mb": 1000**2,
2303
+ "gb": 1000**3,
2304
+ "tb": 1000**4,
2305
+ "kib": 1024,
2306
+ "mib": 1024**2,
2307
+ "gib": 1024**3,
2308
+ "tib": 1024**4,
2309
+ }
2310
+
2311
+ size_re = re.compile(
2312
+ r"^\s*(\d+|\d+\.\d+)\s*({})\s*$".format("|".join(suffixes.keys())), re.I
2313
+ )
2314
+
2315
+ m = size_re.match(size_str.lower())
2316
+ if not m or m.group(2) not in suffixes:
2317
+ raise ValueError(f"value {size_str!r} not a valid size")
2318
+ return int(float(m.group(1)) * suffixes[m.group(2)])
2319
+
2320
+
2321
def _get_mem_available():
    """Return available memory in bytes, or None if unknown."""
    # Prefer psutil when installed; AttributeError also guards against very
    # old psutil versions without virtual_memory().
    try:
        import psutil

        return psutil.virtual_memory().available
    except (ImportError, AttributeError):
        pass

    if sys.platform.startswith("linux"):
        # Fall back to parsing /proc/meminfo; values there are in kB,
        # hence the * 1024.
        info = {}
        with open("/proc/meminfo") as f:
            for line in f:
                p = line.split()
                info[p[0].strip(":").lower()] = int(p[1]) * 1024

        if "memavailable" in info:
            # Linux >= 3.14
            return info["memavailable"]
        else:
            # Older kernels: approximate as free + page-cache memory.
            return info["memfree"] + info["cached"]

    return None
2344
+
2345
+
2346
+ def _no_tracing(func):
2347
+ """
2348
+ Decorator to temporarily turn off tracing for the duration of a test.
2349
+ Needed in tests that check refcounting, otherwise the tracing itself
2350
+ influences the refcounts
2351
+ """
2352
+ if not hasattr(sys, "gettrace"):
2353
+ return func
2354
+ else:
2355
+
2356
+ @wraps(func)
2357
+ def wrapper(*args, **kwargs):
2358
+ original_trace = sys.gettrace()
2359
+ try:
2360
+ sys.settrace(None)
2361
+ return func(*args, **kwargs)
2362
+ finally:
2363
+ sys.settrace(original_trace)
2364
+
2365
+ return wrapper
2366
+
2367
+
2368
+ def _get_glibc_version():
2369
+ try:
2370
+ ver = os.confstr("CS_GNU_LIBC_VERSION").rsplit(" ")[1]
2371
+ except Exception as inst:
2372
+ ver = "0.0"
2373
+
2374
+ return ver
2375
+
2376
+
2377
+ _glibcver = _get_glibc_version()
2378
+
2379
+
2380
def _glibc_older_than(x):
    """True when a glibc version was detected and it sorts before ``x``."""
    if _glibcver == "0.0":
        # Unknown glibc (or not glibc at all): never report "older".
        return False
    return _glibcver < x
llava_next/lib/python3.10/site-packages/torch/ao/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # torch.ao is a package with a lot of interdependencies.
2
+ # We will use lazy import to avoid cyclic dependencies here.
3
+
4
+
5
+ __all__ = [
6
+ "nn",
7
+ "ns",
8
+ "quantization",
9
+ "pruning",
10
+ ]
11
+
12
def __getattr__(name):
    # Lazily import the submodules listed in __all__ on first attribute
    # access, avoiding the cyclic imports within torch.ao.
    if name not in __all__:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    from importlib import import_module

    return import_module("." + name, __name__)
llava_next/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (635 Bytes). View file
 
llava_next/lib/python3.10/site-packages/torch/mps/__init__.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""
2
+ This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python.
3
+ Metal is Apple's API for programming metal GPU (graphics processor unit). Using MPS means that increased
4
+ performance can be achieved, by running work on the metal GPU(s).
5
+ See https://developer.apple.com/documentation/metalperformanceshaders for more details.
6
+ """
7
+ import torch
8
+ from .. import Tensor
9
+
10
+ _is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False)
11
+ _default_mps_generator: torch._C.Generator = None # type: ignore[assignment]
12
+
13
+
14
+ # local helper function (not public or exported)
15
+ def _get_default_mps_generator() -> torch._C.Generator:
16
+ global _default_mps_generator
17
+ if _default_mps_generator is None:
18
+ _default_mps_generator = torch._C._mps_get_default_generator()
19
+ return _default_mps_generator
20
+
21
+
22
+ def synchronize() -> None:
23
+ r"""Waits for all kernels in all streams on a MPS device to complete."""
24
+ return torch._C._mps_deviceSynchronize()
25
+
26
+
27
+ def get_rng_state() -> Tensor:
28
+ r"""Returns the random number generator state as a ByteTensor."""
29
+ return _get_default_mps_generator().get_state()
30
+
31
+
32
+ def set_rng_state(new_state: Tensor) -> None:
33
+ r"""Sets the random number generator state.
34
+
35
+ Args:
36
+ new_state (torch.ByteTensor): The desired state
37
+ """
38
+ new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
39
+ _get_default_mps_generator().set_state(new_state_copy)
40
+
41
+
42
+ def manual_seed(seed: int) -> None:
43
+ r"""Sets the seed for generating random numbers.
44
+
45
+ Args:
46
+ seed (int): The desired seed.
47
+ """
48
+ # the torch.mps.manual_seed() can be called from the global
49
+ # torch.manual_seed() in torch/random.py. So we need to make
50
+ # sure mps is available (otherwise we just return without
51
+ # erroring out)
52
+ if not torch._C._has_mps:
53
+ return
54
+ seed = int(seed)
55
+ _get_default_mps_generator().manual_seed(seed)
56
+
57
+
58
+ def seed() -> None:
59
+ r"""Sets the seed for generating random numbers to a random number."""
60
+ _get_default_mps_generator().seed()
61
+
62
+
63
+ def empty_cache() -> None:
64
+ r"""Releases all unoccupied cached memory currently held by the caching
65
+ allocator so that those can be used in other GPU applications.
66
+ """
67
+ torch._C._mps_emptyCache()
68
+
69
+
70
+ def set_per_process_memory_fraction(fraction) -> None:
71
+ r"""Set memory fraction for limiting process's memory allocation on MPS device.
72
+ The allowed value equals the fraction multiplied by recommended maximum device memory
73
+ (obtained from Metal API device.recommendedMaxWorkingSetSize).
74
+ If trying to allocate more than the allowed value in a process, it will raise an out of
75
+ memory error in allocator.
76
+
77
+ Args:
78
+ fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction.
79
+
80
+ .. note::
81
+ Passing 0 to fraction means unlimited allocations
82
+ (may cause system failure if out of memory).
83
+ Passing fraction greater than 1.0 allows limits beyond the value
84
+ returned from device.recommendedMaxWorkingSetSize.
85
+ """
86
+
87
+ if not isinstance(fraction, float):
88
+ raise TypeError("Invalid type for fraction argument, must be `float`")
89
+ if fraction < 0 or fraction > 2:
90
+ raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2")
91
+
92
+ torch._C._mps_setMemoryFraction(fraction)
93
+
94
+
95
+ def current_allocated_memory() -> int:
96
+ r"""Returns the current GPU memory occupied by tensors in bytes.
97
+
98
+ .. note::
99
+ The returned size does not include cached allocations in
100
+ memory pools of MPSAllocator.
101
+ """
102
+ return torch._C._mps_currentAllocatedMemory()
103
+
104
+
105
+ def driver_allocated_memory() -> int:
106
+ r"""Returns total GPU memory allocated by Metal driver for the process in bytes.
107
+
108
+ .. note::
109
+ The returned size includes cached allocations in MPSAllocator pools
110
+ as well as allocations from MPS/MPSGraph frameworks.
111
+ """
112
+ return torch._C._mps_driverAllocatedMemory()
113
+
114
+
115
+ from . import profiler
116
+ from .event import Event
117
+
118
+ __all__ = [
119
+ "get_rng_state",
120
+ "manual_seed",
121
+ "seed",
122
+ "set_rng_state",
123
+ "synchronize",
124
+ "empty_cache",
125
+ "set_per_process_memory_fraction",
126
+ "current_allocated_memory",
127
+ "driver_allocated_memory",
128
+ "Event",
129
+ "profiler",
130
+ ]
llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.53 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc ADDED
Binary file (2.32 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (2.66 kB). View file
 
llava_next/lib/python3.10/site-packages/torch/mps/event.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
class Event:
    r"""Wrapper around an MPS event.

    MPS events are synchronization markers that can be used to monitor the
    device's progress, to accurately measure timing, and to synchronize MPS streams.

    Args:
        enable_timing (bool, optional): indicates if the event should measure time
            (default: ``False``)
    """

    def __init__(self, enable_timing=False):
        # Opaque integer handle from the MPS backend; double-underscore makes
        # it name-mangled to _Event__eventId.
        self.__eventId = torch._C._mps_acquireEvent(enable_timing)

    def __del__(self):
        # checks if torch._C is already destroyed
        if hasattr(torch._C, "_mps_releaseEvent") and self.__eventId > 0:
            torch._C._mps_releaseEvent(self.__eventId)

    def record(self):
        r"""Records the event in the default stream."""
        torch._C._mps_recordEvent(self.__eventId)

    def wait(self):
        r"""Makes all future work submitted to the default stream wait for this event."""
        torch._C._mps_waitForEvent(self.__eventId)

    def query(self):
        r"""Returns True if all work currently captured by event has completed."""
        return torch._C._mps_queryEvent(self.__eventId)

    def synchronize(self):
        r"""Waits until the completion of all work currently captured in this event.
        This prevents the CPU thread from proceeding until the event completes.
        """
        torch._C._mps_synchronizeEvent(self.__eventId)

    def elapsed_time(self, end_event):
        r"""Returns the time elapsed in milliseconds after the event was
        recorded and before the end_event was recorded.
        """
        # Accessing end_event.__eventId works because name mangling happens at
        # class scope and end_event is also an Event.
        return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)
llava_next/lib/python3.10/site-packages/torch/mps/profiler.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+
3
+ import torch
4
+
5
+ __all__ = ["start", "stop", "profile"]
6
+
7
+
8
def start(mode: str = "interval", wait_until_completed: bool = False) -> None:
    r"""Start OS Signpost tracing from MPS backend.

    The generated OS Signposts could be recorded and viewed in
    XCode Instruments Logging tool.

    Args:
        mode(str): OS Signpost tracing mode could be "interval", "event",
            or both "interval,event".
            The interval mode traces the duration of execution of the operations,
            whereas event mode marks the completion of executions.
            See document `Recording Performance Data`_ for more info.
        wait_until_completed(bool): Waits until the MPS Stream complete
            executing each encoded GPU operation. This helps generating single
            dispatches on the trace's timeline.
            Note that enabling this option would affect the performance negatively.

    .. _Recording Performance Data:
       https://developer.apple.com/documentation/os/logging/recording_performance_data
    """
    # Accept inputs like "Interval, Event" by lower-casing and removing spaces.
    mode_normalized = mode.lower().replace(" ", "")
    torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed)
30
+
31
+
32
def stop():
    r"""Stops generating OS Signpost tracing from MPS backend."""
    torch._C._mps_profilerStopTrace()
35
+
36
+
37
@contextlib.contextmanager
def profile(mode: str = "interval", wait_until_completed: bool = False):
    r"""Context Manager to enabling generating OS Signpost tracing from MPS backend.

    Args:
        mode(str): OS Signpost tracing mode could be "interval", "event",
            or both "interval,event".
            The interval mode traces the duration of execution of the operations,
            whereas event mode marks the completion of executions.
            See document `Recording Performance Data`_ for more info.
        wait_until_completed(bool): Waits until the MPS Stream complete
            executing each encoded GPU operation. This helps generating single
            dispatches on the trace's timeline.
            Note that enabling this option would affect the performance negatively.

    .. _Recording Performance Data:
       https://developer.apple.com/documentation/os/logging/recording_performance_data
    """
    try:
        start(mode, wait_until_completed)
        yield
    finally:
        # Always stop the trace, even if the wrapped block raised.
        stop()
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cuda_ln import *
7
+ from .cuda_post_ln import *
8
+ from .cuda_pre_ln import *
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (278 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc ADDED
Binary file (1.27 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ... import DSKernelBase
9
+ from ....inference_utils import elem_size
10
+ from deepspeed.ops.op_builder import InferenceCoreBuilder
11
+
12
+
13
class CUDAFPLNBase(DSKernelBase):
    """
    Base class for CUDA LN kernels. They all share the same validation logic,
    so we can share it here.
    """

    supported_dtypes = [torch.float16, torch.bfloat16, torch.float32]

    def __init__(self, channels: int, fp_dtype: torch.dtype, epsilon: float = 1e-5):
        """
        Parameters:
            channels (int): Number of channels in the input tensor. Must be divisible to align
                to 16 bytes.
            fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values
                are torch.float16, torch.bfloat16, and torch.float32.
            epsilon (float): Numerical-stability term for the normalization.

        Raises:
            ValueError: If ``fp_dtype`` is unsupported, or if a row of
                ``channels`` elements is not 16-byte aligned.
        """
        if fp_dtype not in CUDAFPLNBase.supported_dtypes:
            raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
                fp_dtype, CUDAFPLNBase.supported_dtypes))

        # A row spans channels * elem_size(fp_dtype) bytes; the alignment
        # requirement is on that byte count, so the error message names both.
        if elem_size(fp_dtype) * channels % 16 != 0:
            raise ValueError(
                "channels * element size must be divisible by 16 bytes, got {} channels of dtype {}".format(
                    channels, fp_dtype))

        # Load (or fetch the cached build of) the compiled inference-core op module.
        self.inf_module = InferenceCoreBuilder().load()
        self.epsilon = epsilon
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from .cuda_fp_ln_base import CUDAFPLNBase
9
+
10
+
11
+ class CUDAFPLN(CUDAFPLNBase):
12
+ """
13
+ Floating point layer norm kernel for CUDA/RoCM.
14
+
15
+ Performs: z = ln(x)
16
+ """
17
+
18
+ def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, gamma: torch.Tensor,
19
+ beta: torch.Tensor) -> torch.Tensor:
20
+ """
21
+ output_z may alias input_x directly. All Tensors should have the same shape.
22
+
23
+ Parameters:
24
+ output_z (torch.Tensor): Output tensor.
25
+ input_x (torch.Tensor): Input tensor.
26
+ gamma (torch.Tensor): Gamma tensor.
27
+ beta (torch.Tensor): Beta tensor.
28
+ """
29
+ self.inf_module.layer_norm(output_z, input_x, gamma, beta, self.epsilon)
30
+ return output_z
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Tuple
7
+
8
+ import torch
9
+
10
+ from .cuda_fp_ln_base import CUDAFPLNBase
11
+
12
+
13
+ class CUDAFPPreLN(CUDAFPLNBase):
14
+ """
15
+ Floating point pre-LayerNorm kernel for CUDA/RoCM.
16
+
17
+ Performs: z_res = x_res + y_hid
18
+ z_hid = ln(z_hid)
19
+ """
20
+
21
+ def __call__(self, z_res: torch.Tensor, z_hid: torch.Tensor, x_res: torch.Tensor, y_hid: torch.Tensor,
22
+ gamma: torch.Tensor, beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
23
+ """
24
+ z_res can alias x_res. All non-parameter input/output tensors
25
+ must have the same shape. z_hid can alias y_hid.
26
+
27
+ Parameters:
28
+ z_res (torch.Tensor): Output residual.
29
+ z_hid (torch.Tensor): Output hidden states.
30
+ x_res (torch.Tensor): Input residual.
31
+ y_hid (torch.Tensor): Input hidden states.
32
+ gamma (torch.Tensor): Gamma tensor.
33
+ beta (torch.Tensor): Beta tensor.
34
+
35
+ Returns:
36
+ output (torch.Tensor): Output tensor.
37
+ """
38
+ self.inf_module.pre_layer_norm(z_res, z_hid, x_res, y_hid, gamma, beta, self.epsilon)
39
+ return z_res, z_hid
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "layer_norm.h"
7
+
8
+ #define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE) \
9
+ if (input.options().dtype() == torch::T_TYPE) { \
10
+ launch_fused_ln((C_TYPE*)output.data_ptr(), \
11
+ (const C_TYPE*)input.data_ptr(), \
12
+ (const C_TYPE*)gamma.data_ptr(), \
13
+ (const C_TYPE*)beta.data_ptr(), \
14
+ epsilon, \
15
+ rows, \
16
+ elems_per_row, \
17
+ at::cuda::getCurrentCUDAStream()); \
18
+ }
19
+
20
+ void ds_layer_norm(at::Tensor& output,
21
+ at::Tensor& input,
22
+ at::Tensor& gamma,
23
+ at::Tensor& beta,
24
+ float epsilon)
25
+ {
26
+ bool ragged_input = input.dim() == 2;
27
+
28
+ const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1);
29
+ const int elems_per_row = ragged_input ? input.size(1) : input.size(2);
30
+
31
+ DISPATCH_LAYER_NORM(kFloat, float);
32
+ DISPATCH_LAYER_NORM(kHalf, __half);
33
+ #ifdef BF16_AVAILABLE
34
+ DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16);
35
+ #endif
36
+ }
37
+
38
+ #define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \
39
+ if (input.options().dtype() == torch::T_TYPE) { \
40
+ launch_fused_post_ln((C_TYPE*)output.data_ptr(), \
41
+ (const C_TYPE*)input.data_ptr(), \
42
+ (const C_TYPE*)residual.data_ptr(), \
43
+ (const C_TYPE*)gamma.data_ptr(), \
44
+ (const C_TYPE*)beta.data_ptr(), \
45
+ epsilon, \
46
+ rows, \
47
+ elems_per_row, \
48
+ at::cuda::getCurrentCUDAStream()); \
49
+ }
50
+
51
+ void ds_post_layer_norm(at::Tensor& output,
52
+ at::Tensor& input,
53
+ at::Tensor& residual,
54
+ at::Tensor& gamma,
55
+ at::Tensor& beta,
56
+ float epsilon)
57
+ {
58
+ bool ragged_input = input.dim() == 2;
59
+
60
+ const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1);
61
+ const int elems_per_row = ragged_input ? input.size(1) : input.size(2);
62
+
63
+ DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float);
64
+ DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half);
65
+ #ifdef BF16_AVAILABLE
66
+ DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16);
67
+ #endif
68
+ }
69
+
70
+ #define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \
71
+ if (input.options().dtype() == torch::T_TYPE) { \
72
+ launch_fused_pre_ln((C_TYPE*)norm_output.data_ptr(), \
73
+ (C_TYPE*)res_output.data_ptr(), \
74
+ (const C_TYPE*)input.data_ptr(), \
75
+ (const C_TYPE*)residual.data_ptr(), \
76
+ (const C_TYPE*)gamma.data_ptr(), \
77
+ (const C_TYPE*)beta.data_ptr(), \
78
+ epsilon, \
79
+ rows, \
80
+ elems_per_row, \
81
+ at::cuda::getCurrentCUDAStream()); \
82
+ }
83
+
84
+ void ds_pre_layer_norm(at::Tensor& res_output,
85
+ at::Tensor& norm_output,
86
+ at::Tensor& input,
87
+ at::Tensor& residual,
88
+ at::Tensor& gamma,
89
+ at::Tensor& beta,
90
+ float epsilon)
91
+ {
92
+ bool ragged_input = input.dim() == 2;
93
+
94
+ const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1);
95
+ const int elems_per_row = ragged_input ? input.size(1) : input.size(2);
96
+
97
+ DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float);
98
+ DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half);
99
+ #ifdef BF16_AVAILABLE
100
+ DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16);
101
+ #endif
102
+ }
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <torch/extension.h>
10
+ #include "ds_kernel_utils.h"
11
+
12
+ /*
13
+ Kernel launch methods for layer norm variants.
14
+ */
15
+
16
+ template <typename T>
17
+ void launch_fused_ln(T* output,
18
+ const T* vals,
19
+ const T* gamma,
20
+ const T* beta,
21
+ float epsilon,
22
+ int rows,
23
+ int elems_per_row,
24
+ cudaStream_t stream);
25
+
26
+ template <typename T>
27
+ void launch_fused_post_ln(T* output,
28
+ const T* vals,
29
+ const T* residual,
30
+ const T* gamma,
31
+ const T* beta,
32
+ float epsilon,
33
+ int rows,
34
+ int elems_per_row,
35
+ cudaStream_t stream);
36
+ template <typename T>
37
+ void launch_fused_pre_ln(T* norm_output,
38
+ T* res_output,
39
+ const T* vals,
40
+ const T* residual,
41
+ const T* gamma,
42
+ const T* beta,
43
+ float epsilon,
44
+ int rows,
45
+ int elems_per_row,
46
+ cudaStream_t stream);
47
+
48
+ void ds_layer_norm(at::Tensor& output,
49
+ at::Tensor& input,
50
+ at::Tensor& gamma,
51
+ at::Tensor& beta,
52
+ float epsilon);
53
+
54
+ void ds_post_layer_norm(at::Tensor& output,
55
+ at::Tensor& input,
56
+ at::Tensor& residual,
57
+ at::Tensor& gamma,
58
+ at::Tensor& beta,
59
+ float epsilon);
60
+
61
+ void ds_pre_layer_norm(at::Tensor& res_output,
62
+ at::Tensor& norm_output,
63
+ at::Tensor& input,
64
+ at::Tensor& residual,
65
+ at::Tensor& gamma,
66
+ at::Tensor& beta,
67
+ float epsilon);
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "conversion_utils.h"
7
+ #include "ds_kernel_utils.h"
8
+ #include "memory_access_utils.h"
9
+ #include "reduction_utils.h"
10
+
11
+ namespace cg = cooperative_groups;
12
+ using rop = reduce::ROpType;
13
+
14
+ namespace ln {
15
+ constexpr int granularity = 16;
16
+ } // namespace ln
17
+
18
+ /*
19
+ Regular layer norm implementation. Assumes elems_per_row % 8
20
+ is equal to 0.
21
+
22
+ Args:
23
+ output: buffer for output data
24
+ vals: buffer for input data
25
+ gamma: gain for normalization
26
+ beta: bias for normalization
27
+ epsilon: numeric stability
28
+ elems_per_row: number of elements each block will normalize
29
+ */
30
+ template <typename T, int unRoll, int threadsPerGroup, int maxThreads>
31
+ __global__ void fused_ln(T* output,
32
+ const T* vals,
33
+ const T* gamma,
34
+ const T* beta,
35
+ float epsilon,
36
+ int elems_per_row)
37
+ {
38
+ constexpr int T_per_load = ln::granularity / sizeof(T);
39
+
40
+ cg::thread_block tb = cg::this_thread_block();
41
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
42
+
43
+ // X-dimension of the block
44
+ const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
45
+ (tb.thread_index().y * elems_per_row);
46
+ const int thread_offset = tb.thread_index().x * T_per_load;
47
+ const int base_offset = block_offset + thread_offset;
48
+ const int stride = blockDim.x * T_per_load;
49
+
50
+ float sum = reduce::init<rop::Add, float>();
51
+
52
+ const T* input_base = vals + base_offset;
53
+
54
+ T local_buffer[unRoll * T_per_load];
55
+
56
+ #pragma unRoll
57
+ for (int i = 0; i < unRoll; i++) {
58
+ T* iteration_buffer = local_buffer + i * T_per_load;
59
+
60
+ mem_access::load_global<ln::granularity>(
61
+ iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row);
62
+
63
+ #pragma unRoll
64
+ for (int j = 0; j < T_per_load; j++) {
65
+ float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
66
+ sum = reduce::element<rop::Add>(sum, vals_up_cast);
67
+ }
68
+ }
69
+
70
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, sum);
71
+ const float mean = sum / elems_per_row;
72
+
73
+ float mean_diff = reduce::init<rop::Add, float>();
74
+
75
+ #pragma unRoll
76
+ for (int i = 0; i < unRoll; i++) {
77
+ #pragma unRoll
78
+ for (int j = 0; j < T_per_load; j++) {
79
+ // Using a 0 value here skews the variance, have to if-guard
80
+ if (thread_offset + i * stride < elems_per_row) {
81
+ float diff = (conversion::to<float>(local_buffer[i * T_per_load + j]) - mean);
82
+ mean_diff = reduce::element<rop::Add>(mean_diff, diff * diff);
83
+ }
84
+ }
85
+ }
86
+
87
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, mean_diff);
88
+ const float variance = mean_diff / elems_per_row;
89
+ const float denom = __frsqrt_rn(variance + epsilon);
90
+
91
+ T* block_output = output + block_offset;
92
+
93
+ #pragma unRoll
94
+ for (int i = 0; i < unRoll; i++) {
95
+ T* iteration_buffer = local_buffer + i * T_per_load;
96
+ const int iter_idx = i * stride + thread_offset;
97
+ const bool do_loads = iter_idx < elems_per_row;
98
+
99
+ T gamma_local[T_per_load], beta_local[T_per_load];
100
+
101
+ mem_access::load_global<ln::granularity>(gamma_local, gamma + iter_idx, do_loads);
102
+ mem_access::load_global<ln::granularity>(beta_local, beta + iter_idx, do_loads);
103
+
104
+ #pragma unRoll
105
+ for (int j = 0; j < T_per_load; j++) {
106
+ float val = conversion::to<float>(iteration_buffer[j]);
107
+ val = (val - mean) * denom;
108
+ val =
109
+ val * conversion::to<float>(gamma_local[j]) + conversion::to<float>(beta_local[j]);
110
+ iteration_buffer[j] = conversion::to<T>(val);
111
+ }
112
+
113
+ if (do_loads) {
114
+ mem_access::store_global<ln::granularity>(block_output + iter_idx, iteration_buffer);
115
+ }
116
+ }
117
+ }
118
+
119
+ #define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \
120
+ fused_ln<T, unRollFactor, threadsPerGroup, maxThreads> \
121
+ <<<grid, block, 0, stream>>>(output, vals, gamma, beta, epsilon, elems_per_row);
122
+
123
+ template <typename T>
124
+ void launch_fused_ln(T* output,
125
+ const T* vals,
126
+ const T* gamma,
127
+ const T* beta,
128
+ float epsilon,
129
+ int rows,
130
+ int elems_per_row,
131
+ cudaStream_t stream)
132
+ {
133
+ // 8 for __half, 4 for float
134
+ constexpr int T_per_load = ln::granularity / sizeof(T);
135
+
136
+ constexpr int maxThreads = 256;
137
+
138
+ // For Flaoat, unRoll 4, for __half, unRoll 2
139
+ constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
140
+
141
+ const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
142
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
143
+
144
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
145
+ // warp-sized blocks rather than stepping up to 64/96 threads
146
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
147
+ const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
148
+
149
+ const int groups_per_block_max =
150
+ is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
151
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
152
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
153
+
154
+ dim3 block(threadsPerGroup, groups_per_block);
155
+ dim3 grid(groups_launch);
156
+
157
+ const int elems_per_step = threadsPerGroup * h_per_step;
158
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
159
+
160
+ if (is_subblock_schedule) {
161
+ // <=128
162
+ if (threadsPerGroup == 1) {
163
+ LAUNCH_FUSED_LN(1, 1, maxThreads);
164
+ } else if (threadsPerGroup == 2) {
165
+ LAUNCH_FUSED_LN(1, 2, maxThreads);
166
+ } else if (threadsPerGroup == 4) {
167
+ LAUNCH_FUSED_LN(1, 4, maxThreads);
168
+ } else if (threadsPerGroup == 8) {
169
+ LAUNCH_FUSED_LN(1, 8, maxThreads);
170
+ } else if (threadsPerGroup == 16) {
171
+ LAUNCH_FUSED_LN(1, 16, maxThreads);
172
+ }
173
+ } else if (external_unRoll == 1) {
174
+ // 129 - 4096 elems
175
+ // (this can launch with 1-7 warps as well)
176
+ LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads);
177
+ } else if (external_unRoll == 2) {
178
+ // 4097 - 8192 elems
179
+ LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads);
180
+ } else if (external_unRoll == 3) {
181
+ // 8193 - 12288 elems
182
+ LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads);
183
+ } else if (external_unRoll == 4) {
184
+ // 12289 - 16384 elems
185
+ LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads);
186
+ }
187
+ }
188
+
189
+ #define INSTANTIATE_FUSED_LN(T) \
190
+ template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
191
+
192
+ INSTANTIATE_FUSED_LN(__half);
193
+ #ifdef BF16_AVAILABLE
194
+ INSTANTIATE_FUSED_LN(__nv_bfloat16);
195
+ #endif
196
+ INSTANTIATE_FUSED_LN(float);
197
+
198
+ /*
199
+ Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8
200
+ is equal to 0.
201
+
202
+ TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual
203
+ need to be fused into compute-bound producer operations.
204
+
205
+ Args:
206
+ output: buffer for output data
207
+ res_output: output of residual addition
208
+ vals: buffer for input data
209
+ residual: residual data
210
+ bias: bias of of input data
211
+ gamma: gain for normalization
212
+ beta: bias for normalization
213
+ epsilon: numeric stability
214
+ elems_per_row: number of elements each block will normalize
215
+ Template arg:
216
+ StoreResidual: controls whether the residual calculation is stored
217
+ or not. When set to false, the input `res_output` is unused.
218
+ */
219
+ template <typename T, int unRoll, int threadsPerGroup, int maxThreads, bool preLnResidual>
220
+ __global__ void fused_residual_ln(T* output,
221
+ T* res_output,
222
+ const T* vals,
223
+ const T* residual,
224
+ const T* gamma,
225
+ const T* beta,
226
+ float epsilon,
227
+ int elems_per_row)
228
+ {
229
+ constexpr int T_per_load = ln::granularity / sizeof(T);
230
+
231
+ cg::thread_block tb = cg::this_thread_block();
232
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
233
+
234
+ // X-dimension of the block
235
+ const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
236
+ (tb.thread_index().y * elems_per_row);
237
+ const int thread_offset = tb.thread_index().x * T_per_load;
238
+ const int base_offset = block_offset + thread_offset;
239
+ const int stride = tb.size() * T_per_load;
240
+
241
+ float sum = reduce::init<rop::Add, float>();
242
+
243
+ const T* input_base = vals + base_offset;
244
+ const T* residual_base = residual + base_offset;
245
+
246
+ T local_buffer[unRoll * T_per_load];
247
+
248
+ // Unlike a vanilla layernorm, since we're fusing the two adds as well
249
+ // an inner unRoll seems to be less valuable. If anything, a double unRoll
250
+ // makes the most sense if we find we are having performance issues.
251
+ #pragma unRoll
252
+ for (int i = 0; i < unRoll; i++) {
253
+ T* iteration_buffer = local_buffer + i * T_per_load;
254
+ T residual_buffer[T_per_load];
255
+ T bias_buffer[T_per_load];
256
+
257
+ mem_access::load_global<ln::granularity>(
258
+ iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row);
259
+ mem_access::load_global<ln::granularity>(residual_buffer,
260
+ residual_base + i * stride,
261
+ thread_offset + i * stride < elems_per_row);
262
+
263
+ #pragma unRoll
264
+ for (int j = 0; j < T_per_load; j++) {
265
+ float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
266
+ float res_up_cast = conversion::to<float>(residual_buffer[j]);
267
+ vals_up_cast += res_up_cast;
268
+ sum = reduce::element<rop::Add>(sum, vals_up_cast);
269
+ iteration_buffer[j] = conversion::to<T>(vals_up_cast);
270
+ }
271
+
272
+ if (preLnResidual && (thread_offset + i * stride < elems_per_row)) {
273
+ mem_access::store_global<ln::granularity>(res_output + base_offset + i * stride,
274
+ iteration_buffer);
275
+ }
276
+ }
277
+
278
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, sum);
279
+ const float mean = sum / elems_per_row;
280
+
281
+ float mean_diff = reduce::init<rop::Add, float>();
282
+ #pragma unRoll
283
+ for (int i = 0; i < unRoll; i++) {
284
+ #pragma unRoll
285
+ for (int j = 0; j < T_per_load; j++) {
286
+ // Using a 0 value here skews the variance, have to if-guard
287
+ if (thread_offset + i * stride < elems_per_row) {
288
+ float diff = (conversion::to<float>(local_buffer[i * T_per_load + j]) - mean);
289
+ mean_diff = reduce::element<rop::Add>(mean_diff, diff * diff);
290
+ }
291
+ }
292
+ }
293
+
294
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, mean_diff);
295
+ const float variance = mean_diff / elems_per_row;
296
+ const float denom = __frsqrt_rn(variance + epsilon);
297
+
298
+ T* block_output = output + block_offset;
299
+
300
+ #pragma unRoll
301
+ for (int i = 0; i < unRoll; i++) {
302
+ T* iteration_buffer = local_buffer + i * T_per_load;
303
+ const int iter_idx = i * stride + thread_offset;
304
+ const bool do_loads = iter_idx < elems_per_row;
305
+
306
+ T gamma_local[T_per_load], beta_local[T_per_load];
307
+
308
+ mem_access::load_global<ln::granularity>(gamma_local, gamma + iter_idx, do_loads);
309
+ mem_access::load_global<ln::granularity>(beta_local, beta + iter_idx, do_loads);
310
+
311
+ #pragma unRoll
312
+ for (int j = 0; j < T_per_load; j++) {
313
+ float val = conversion::to<float>(iteration_buffer[j]);
314
+ val = (val - mean) * denom;
315
+ val =
316
+ val * conversion::to<float>(gamma_local[j]) + conversion::to<float>(beta_local[j]);
317
+ iteration_buffer[j] = conversion::to<T>(val);
318
+ }
319
+
320
+ if (do_loads) {
321
+ mem_access::store_global<ln::granularity>(block_output + iter_idx, iteration_buffer);
322
+ }
323
+ }
324
+ }
325
+
326
+ // TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified.
327
+ #define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \
328
+ fused_residual_ln<T, unRollFactor, threadsPerGroup, maxThreads, false> \
329
+ <<<grid, block, 0, stream>>>( \
330
+ output, nullptr, vals, residual, gamma, beta, epsilon, elems_per_row);
331
+
332
+ template <typename T>
333
+ void launch_fused_post_ln(T* output,
334
+ const T* vals,
335
+ const T* residual,
336
+ const T* gamma,
337
+ const T* beta,
338
+ float epsilon,
339
+ int rows,
340
+ int elems_per_row,
341
+ cudaStream_t stream)
342
+ {
343
+ // 8 for __half, 4 for float
344
+ constexpr int T_per_load = ln::granularity / sizeof(T);
345
+
346
+ constexpr int maxThreads = 256;
347
+
348
+ // For Flaoat, unRoll 4, for __half, unRoll 2
349
+ constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
350
+
351
+ const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
352
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
353
+
354
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
355
+ // warp-sized blocks rather than stepping up to 64/96 threads
356
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
357
+ const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
358
+
359
+ const int groups_per_block_max =
360
+ is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
361
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
362
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
363
+
364
+ dim3 block(threadsPerGroup, groups_per_block);
365
+ dim3 grid(groups_launch);
366
+
367
+ const int elems_per_step = threadsPerGroup * h_per_step;
368
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
369
+
370
+ if (is_subblock_schedule) {
371
+ // <=128
372
+ if (threadsPerGroup == 1) {
373
+ LAUNCH_FUSED_RES_LN(1, 1, maxThreads);
374
+ } else if (threadsPerGroup == 2) {
375
+ LAUNCH_FUSED_RES_LN(1, 2, maxThreads);
376
+ } else if (threadsPerGroup == 4) {
377
+ LAUNCH_FUSED_RES_LN(1, 4, maxThreads);
378
+ } else if (threadsPerGroup == 8) {
379
+ LAUNCH_FUSED_RES_LN(1, 8, maxThreads);
380
+ } else if (threadsPerGroup == 16) {
381
+ LAUNCH_FUSED_RES_LN(1, 16, maxThreads);
382
+ }
383
+ } else if (external_unRoll == 1) {
384
+ // 129 - 4096 elems
385
+ // (this can launch with 1-7 warps as well)
386
+ LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads);
387
+ } else if (external_unRoll == 2) {
388
+ // 4097 - 8192 elems
389
+ LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads);
390
+ } else if (external_unRoll == 3) {
391
+ // 8193 - 12288 elems
392
+ LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads);
393
+ } else if (external_unRoll == 4) {
394
+ // 12289 - 16384 elems
395
+ LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads);
396
+ }
397
+ }
398
+
399
+ #define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \
400
+ fused_residual_ln<T, unRollFactor, threadsPerGroup, maxThreads, true> \
401
+ <<<grid, block, 0, stream>>>( \
402
+ norm_output, res_output, vals, residual, gamma, beta, epsilon, elems_per_row);
403
+
404
+ template <typename T>
405
+ void launch_fused_pre_ln(T* norm_output,
406
+ T* res_output,
407
+ const T* vals,
408
+ const T* residual,
409
+ const T* gamma,
410
+ const T* beta,
411
+ float epsilon,
412
+ int rows,
413
+ int elems_per_row,
414
+ cudaStream_t stream)
415
+ {
416
+ // 8 for __half, 4 for float
417
+ constexpr int T_per_load = ln::granularity / sizeof(T);
418
+
419
+ constexpr int maxThreads = 256;
420
+
421
+ // For Flaoat, unRoll 4, for __half, unRoll 2
422
+ constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
423
+
424
+ const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
425
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
426
+
427
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
428
+ // warp-sized blocks rather than stepping up to 64/96 threads
429
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
430
+ const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
431
+
432
+ const int groups_per_block_max =
433
+ is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
434
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
435
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
436
+
437
+ dim3 block(threadsPerGroup, groups_per_block);
438
+ dim3 grid(groups_launch);
439
+
440
+ const int elems_per_step = threadsPerGroup * h_per_step;
441
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
442
+
443
+ if (is_subblock_schedule) {
444
+ // <=128
445
+ if (threadsPerGroup == 1) {
446
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads);
447
+ } else if (threadsPerGroup == 2) {
448
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads);
449
+ } else if (threadsPerGroup == 4) {
450
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads);
451
+ } else if (threadsPerGroup == 8) {
452
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads);
453
+ } else if (threadsPerGroup == 16) {
454
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads);
455
+ }
456
+ } else if (external_unRoll == 1) {
457
+ // 129 - 4096 elems
458
+ // (this can launch with 1-7 warps as well)
459
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads);
460
+ } else if (external_unRoll == 2) {
461
+ // 4097 - 8192 elems
462
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads);
463
+ } else if (external_unRoll == 3) {
464
+ // 8193 - 12288 elems
465
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads);
466
+ } else if (external_unRoll == 4) {
467
+ // 12289 - 16384 elems
468
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads);
469
+ }
470
+ }
471
+
472
+ #define INSTANTIATE_RES_LN(T) \
473
+ template void launch_fused_post_ln<T>( \
474
+ T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
475
+
476
+ #define INSTANTIATE_PRE_LN_RES(T) \
477
+ template void launch_fused_pre_ln<T>( \
478
+ T*, T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
479
+
480
+ INSTANTIATE_RES_LN(__half);
481
+ INSTANTIATE_RES_LN(float);
482
+ #ifdef BF16_AVAILABLE
483
+ INSTANTIATE_RES_LN(__nv_bfloat16);
484
+ #endif
485
+
486
+ INSTANTIATE_PRE_LN_RES(__half);
487
+ INSTANTIATE_PRE_LN_RES(float);
488
+ #ifdef BF16_AVAILABLE
489
+ INSTANTIATE_PRE_LN_RES(__nv_bfloat16);
490
+ #endif
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc ADDED
Binary file (3.74 kB). View file