diff --git a/.gitattributes b/.gitattributes index 16dae735db40be2265bc09da0f7e1ad655b89284..12279cc32e810f0642c122a6cc78c88a719d074b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1205,3 +1205,4 @@ valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lf llava_next/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text llava_next/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/__init__.py b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..481609c05e2951ab102ff2e0adea34a3470dd134 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37a70bf0f0ed6017a8a6a53a751186ab80db2197 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc differ diff --git 
a/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf266fc547a273af3ad7e691f6896ee195748298 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48cf5171ee4da71d100d029017944fe0fa0f644c Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/autograd.py b/llava_next/lib/python3.10/site-packages/torch/_custom_op/autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..87652bc014d8c692d0b1c44f1474eef9bb2af3f5 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_custom_op/autograd.py @@ -0,0 +1,273 @@ +import torch +import torch.utils._pytree as pytree +from collections import namedtuple +import functools + + +# NOTE [CustomOp autograd kernel indirection] +# We register `inner` as the autograd kernel for this custom_op. +# `inner` either calls the autograd formula registered by the user, +# or goes into an `autograd_not_implemented` kernel. +# +# The reason why this indirection exists is +# so that we can swap out the autograd kernel (the PyTorch dispatcher +# doesn't actually allow us to do this). 
By default, we want +# the `autograd_not_implemented` behavior, but then the user may come +# and register something that is actually a backward formula +def autograd_kernel_indirection(custom_op): + autograd_fallback = autograd_not_implemented(custom_op) + + def inner(*args, **kwargs): + if custom_op._has_impl('autograd'): + kernel = custom_op._get_impl('autograd').func + return kernel(*args, **kwargs) + # As explained in NOTE ["backward", "save_for_backward", and "autograd"], + # after the user gives us "backward" and "save_for_backward", we generate + # the "autograd" impl. If the user only provided one, then we tell + # the user they've done something wrong. + if custom_op._has_impl('save_for_backward') or custom_op._has_impl('backward'): + missing = ( + 'save_for_backward' if custom_op._has_impl('backward') + else 'backward' + ) + found = 'save_for_backward' if missing == 'backward' else 'backward' + loc = custom_op._get_impl(found).location + raise RuntimeError( + f"We found a '{found}' registration for {custom_op} at " + f"{loc} but were unable to find a '{missing}' registration. " + f"To use the CustomOp API to register a backward formula, " + f"please provide us both a backward function and a " + f"'save for backward' function via `impl_backward` and " + f"`impl_save_for_backward` respectively.") + return autograd_fallback(*args, **kwargs) + return inner + + +# TODO(#101191): Use the actual C++ autograd not implemented fallback, +# or change the default autograd fallback to the autograd not implemented fallback. 
+def autograd_not_implemented(custom_op): + def kernel(*args, **kwargs): + if torch.is_grad_enabled() and pytree.tree_any( + lambda x: isinstance(x, torch.Tensor) and x.requires_grad, (args, kwargs) + ): + raise RuntimeError("Autograd has not been implemented for operator") + with torch._C._AutoDispatchBelowAutograd(): + return custom_op(*args, **kwargs) + return kernel + + +def mark_non_differentiable(ctx, output, output_differentiability): + # Output types are restricted to be: + # - Tensor + # - Tensor[] + # - int, bool, Scalar, float + # See _check_can_register_backward + if output_differentiability is not None: + if not isinstance(output, tuple): + tuple_output = (output,) + else: + tuple_output = output # type: ignore[assignment] + assert len(output_differentiability) == len(tuple_output) + non_differentiable_tensors = [] + for idx, (differentiable, out) in enumerate(zip(output_differentiability, tuple_output)): + if isinstance(out, torch.Tensor): + if not differentiable: + non_differentiable_tensors.append(out) + continue + if isinstance(out, list): + if not differentiable: + non_differentiable_tensors.extend(out) + continue + if differentiable: + raise RuntimeError( + f"With output_differentiability={output_differentiability}. 
" + f"At idx {idx}, we received an object of type {type(out)} that " + f"is not a Tensor, so it cannot have be marked as differentiable in " + f"output_differentiability.") + if non_differentiable_tensors: + ctx.mark_non_differentiable(*non_differentiable_tensors) + + +def construct_autograd_kernel( + schema, + output_differentiability, + custom_op, + op_overload, + save_for_backward_fn, + backward_fn): + + def apply(*args): + flat_args, spec = pytree.tree_flatten(args) + out_spec = None + + def forward(ctx, *flat_args): + ctx.set_materialize_grads(True) + args = pytree.tree_unflatten(list(flat_args), spec) + with torch._C._AutoDispatchBelowAutograd(): + output = op_overload(*args) + + # We use the info about args to give better error messages in backward + args_info = namedtuple_args( + schema, pytree.tree_map(lambda arg: type(arg), args)) + + save_for_backward_fn_inputs = namedtuple_args(schema, args) + to_save = save_for_backward_fn(save_for_backward_fn_inputs, output) + + save_pytree_for_backward(ctx, (to_save, args_info)) + mark_non_differentiable(ctx, output, output_differentiability) + + nonlocal out_spec + flat_output, out_spec = pytree.tree_flatten(output) + return tuple(flat_output) + + def backward(ctx, *flat_grad_output): + assert out_spec is not None + grads = pytree.tree_unflatten(list(flat_grad_output), out_spec) + saved, args_info = unpack_saved(ctx) + # There is nothing on the ctx object for now, it is just there so + # that we can add additional things in the future. + inner_ctx = object() + if not isinstance(grads, tuple): + grads = (grads,) + grad_inputs_dict = backward_fn(inner_ctx, saved, *grads) + + # Massage the grad_inputs_dict to a form acceptable by + # autograd.Function. 
+ validate_grad_inputs_dict(grad_inputs_dict, custom_op, args_info) + return grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info) + + generated_cls = gen_autograd_function( + custom_op._opname + '_customop', forward, backward) + + flat_output = generated_cls.apply(*flat_args) + assert out_spec is not None + return pytree.tree_unflatten(list(flat_output), out_spec) + return apply + + +def gen_autograd_function(name, forward, backward): + generated_cls = type( + name, + (torch.autograd.Function,), + { + 'forward': staticmethod(forward), + 'backward': staticmethod(backward), + } + ) + return generated_cls + + +@functools.lru_cache +def namedtuple_args_cls(schema): + attribs = [arg.name for arg in schema.arguments.flat_all] + name = str(schema.name) + "_args" + # mypy doesn't support dynamic namedtuple name + tuple_cls = namedtuple(name, attribs) # type: ignore[misc] + return tuple_cls + + +def namedtuple_args(schema, args): + assert isinstance(args, tuple) + tuple_cls = namedtuple_args_cls(schema) + return tuple_cls(*args) + + +def validate_grad_inputs_dict(grad_inputs_dict, forward_op, args_info): + def error(what): + backward = forward_op._get_impl('backward') + raise RuntimeError( + f"In the backward function defined for {forward_op} at " + f"{backward.location} using the CustomOp API, {what}") + + if not isinstance(grad_inputs_dict, dict): + error(f"expected the output of the backward function to be a dict but " + f"got {type(grad_inputs_dict)}") + + expected_keys = {arg.name for arg in forward_op._schema.arguments.flat_all + if arg.type.is_tensor_like()} + actual_keys = grad_inputs_dict.keys() + if expected_keys != actual_keys: + error(f"expected the returned grad_input dict to have keys " + f"{expected_keys} but got {actual_keys}. The backward " + f"function must return a gradient (can be None) for each arg " + f"to the CustomOp that may be a Tensor or Sequence[Tensor]. 
" + f"Args declared to be non-Tensor-like types should not appear " + f"in the grad_input dict") + + for name, grad in grad_inputs_dict.items(): + arg_info = getattr(args_info, name) + + if isinstance(arg_info, list): + if not isinstance(grad, (tuple, list)): + error(f"for input '{name}' expected the grad_input dict to " + f"hold a list of gradients but got object of type " + f"{type(grad)}.") + if not len(grad) == len(arg_info): + error(f"for input '{name}' expected the grad_input dict to " + f"hold a list of {len(arg_info)} gradients but got " + f"{len(grad)}") + for idx, (g, info) in enumerate(zip(grad, arg_info)): + if g is None: + continue + if not isinstance(g, torch.Tensor): + error(f"for input '{name}' expected the grad_input dict to " + f"hold a list of None or Tensor gradients but got " + f"object of {type(g)} at index {idx}") + if info != torch.Tensor: + error(f"for input '{name}', got a Tensor as the gradient " + f"for the {idx}-th value but expected None because " + f"the {idx}-th value was not a Tensor (it was " + f"type {arg_info}") + continue + + if grad is None: + continue + if not isinstance(grad, torch.Tensor): + error(f"got object of type {type(grad)} as the gradient for input " + f"'{name}', " + f"but expected the gradient to be either None or a Tensor") + if arg_info != torch.Tensor: + error(f"got a Tensor as the gradient for input '{name}' but " + f"expected None as the gradient because input '{name}' " + f"was not a Tensor (it was type {arg_info}).") + +def grad_inputs_dict_to_flat_tuple(grad_inputs_dict, args_info): + result = [] + for name, arg_info in args_info._asdict().items(): + if name not in grad_inputs_dict: + result.append(pytree.tree_map(lambda x: None, arg_info)) + continue + result.append(grad_inputs_dict[name]) + return tuple(pytree.tree_flatten(result)[0]) + +# Saves "stuff" (a pytree) onto the ctx object. Use unpack_saved to unpack it. 
+# autograd.Function prefers that users use ctx.save_for_backward to +# save Tensors (to avoid reference cycles) and for non-Tensors to go onto the +# ctx object. +def save_pytree_for_backward(ctx, stuff): + flat_stuff, spec = pytree.tree_flatten(stuff) + num_elts = len(flat_stuff) + tensor_idxs = [idx for idx, thing in enumerate(flat_stuff) + if isinstance(thing, torch.Tensor)] + non_tensor_idxs = [idx for idx, thing in enumerate(flat_stuff) + if not isinstance(thing, torch.Tensor)] + tensors = [thing for thing in flat_stuff if isinstance(thing, torch.Tensor)] + non_tensors = [thing for thing in flat_stuff if not isinstance(thing, torch.Tensor)] + + ctx.spec = spec + ctx.num_elts = num_elts + ctx.save_for_backward(*tensors) + ctx.tensor_idxs = tensor_idxs + ctx.saved_non_tensors = non_tensors + ctx.non_tensor_idxs = non_tensor_idxs + + +# Inverse operation to save_pytree_for_backward +def unpack_saved(ctx): + flat_stuff = [None] * ctx.num_elts + for tensor, idx in zip(ctx.saved_tensors, ctx.tensor_idxs): + flat_stuff[idx] = tensor + for non_tensor, idx in zip(ctx.saved_non_tensors, ctx.non_tensor_idxs): + flat_stuff[idx] = non_tensor + stuff = pytree.tree_unflatten(flat_stuff, ctx.spec) + return stuff diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/functional.py b/llava_next/lib/python3.10/site-packages/torch/_custom_op/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..c1ffd16e2ae0e81bbc48f0d3b6ea5092c9fc19ea --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_custom_op/functional.py @@ -0,0 +1,173 @@ +import torch +from torch.library import Library +from torch._ops import OpOverload +from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseTy, BaseType +from torch._C import _ExcludeDispatchKeyGuard, DispatchKeySet, DispatchKey +from .autograd import autograd_not_implemented +import torch.utils._pytree as pytree +import weakref + + +def register_functional_op( + lib: Library, + 
new_op_name: str, + mutable_op: OpOverload, +) -> None: + """Given a mutable operator, registers the functional variant. + + This API also correctly links the functional variant with the mutable + operator for the purposes of functionalization. + + All of the new registrations are performed on the ``lib`` passed in. + + Arguments: + lib (Library): Should be a torch.library.Library object that has + the same namespace as ``mutable_op``'s namespace. + lib will be used to register the new functional op as well + as a functionalization kernel for the ``mutable_op`` + If you don't have a library handy, use + ``torch.library.Library(ns, 'FRAGMENT')`` to construct one. + new_op_name (str): The name of the functional operator (without the + namespace). If no namespace, the new functional variant will be + accessible under ``torch.ops.{lib.ns}.new_op_name``. + mutable_op (OpOverload): The mutable custom operator. Note + that you may need to add a `.default` to it, like + `torch.ops.aten.abs_.default`. + + """ + validate(mutable_op) + schema = functional_schema(new_op_name, mutable_op) + lib.define(schema) + + functional_impl = construct_functional_impl(mutable_op) + lib.impl(new_op_name, functional_impl, 'CompositeExplicitAutograd') + + functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default + + # There's no easy way for us to generate the autograd kernel, so we + # use autograd_not_implemented. Also, this makes it so that the user + # is unable to register an autograd formula themselves. This shouldn't + # be a problem if the user doesn't use the functional op direclty + # in their program, but we may need to revist this in the future. 
+ lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd') + + f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op) + + lib.impl(mutable_op, f_kernel, 'Functionalize') + + +def construct_functional_impl(mutable_op): + def functional_impl(*args): + # Strategy: + # - clone args that would have been mutated + # - run mutable_op + # - return the cloned args as additional outputs + new_args = [] + extra_rets = [] + for is_write, arg in zip(mutable_args(mutable_op), args): + if is_write: + cloned = arg.clone() + new_args.append(cloned) + extra_rets.append(cloned) + else: + new_args.append(arg) + result = mutable_op(*new_args) + if result is None: + return tuple(extra_rets) + if isinstance(result, tuple): + return (*result, *extra_rets) + return (result, *extra_rets) + return functional_impl + + +def construct_functionalization_kernel(mutable_op, functional_op): + def kernel(*args): + # There's nothing to be functionalized! + # We can still end up here because DispatchKey::Functionalize is a mode key + if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args): + with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)): + return mutable_op(*args) + + # NB: This differs from the codegen -- codegen handles cases where there + # are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper. + # This only really matters for XLA (mixed CPU-XLA tensors) and + # running functionalization without the PT2 stack (which guarantees to us that + # all tensors are FunctionalTensorWrapper). 
+ if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args): + raise RuntimeError("{mutable_op}: expected all args to be FunctionalTensorWrapper") + + unwrapped_args = [] + for arg in args: + if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg): + torch._sync(arg) + unwrapped = torch._from_functional_tensor(arg) + unwrapped_args.append(unwrapped) + else: + unwrapped_args.append(arg) + + with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)): + output = functional_op(*unwrapped_args) + + num_actual_output = len(mutable_op._schema.returns) + actual_output = pytree.tree_map( + torch._to_functional_tensor, output[:num_actual_output]) + + new_values_to_propagate = output[num_actual_output:] + inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args) + if is_write] + assert len(new_values_to_propagate) == len(inputs_to_replace) + for new_value, arg in zip(new_values_to_propagate, inputs_to_replace): + torch._C._propagate_xla_data(arg, new_value) + torch._C._replace_(arg, new_value) + torch._C._commit_update(arg) + torch._sync(arg) + + if len(actual_output) == 1: + return actual_output[0] + elif len(actual_output) == 0: + return None + return actual_output + + return kernel + + +def validate(mutable_op: OpOverload): + if not isinstance(mutable_op, OpOverload): + raise TypeError( + f"register_functional_op(mutable_op): expected mutable_op to be instance of " + f"OpOverload but got {type(mutable_op)}") + + # There are generally three types of "in-place" or "mutable" ops. + # Each of them have their own conventions: + # - inplace (first input modified in-place and returned as only output) + # - out= (some args modified in-place and returned as outputs) + # - mutable (some args modified in-place but none of those returned as outputs) + # In theory we can support all three, but we'll just support the last + # option right now for simplicity. 
+ schema = FunctionSchema.parse(str(mutable_op._schema)) + if not schema.kind() == SchemaKind.mutable: + raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)") + for ret in schema.returns: + # construct_functionalization_kernel assumes this for simplicity + if ret.annotation is not None: + raise NotImplementedError( + "NYI: register_functional_op(op) where op returns a mutated or aliased value. " + "Please file an issue (and as a workaround, modify your operator to " + "not return the mutated value or aliases)") + for arg in schema.arguments.flat_all: + # construct_functionalization_kernel assumes this for simplicity + if arg.type.is_tensor_like() and arg.type != BaseType(BaseTy.Tensor): + raise NotImplementedError( + "NYI: register_functional_op(op) where op accepts Optional or List of tensors." + "Please file an issue.") + + +def functional_schema(new_op_name, op: OpOverload): + schema = FunctionSchema.parse(str(op._schema)) + schema = schema.signature().with_name(OperatorName.parse(new_op_name)) + return str(schema) + + +def mutable_args(op: OpOverload): + return tuple(False if arg.alias_info is None else arg.alias_info.is_write + for arg in op._schema.arguments) diff --git a/llava_next/lib/python3.10/site-packages/torch/_custom_op/impl.py b/llava_next/lib/python3.10/site-packages/torch/_custom_op/impl.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c5bd3cf8fd2eb5faffba73939a69af9b46208b --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_custom_op/impl.py @@ -0,0 +1,1096 @@ +import contextlib +import dataclasses +import functools +import inspect +import sys +import typing +import weakref + +from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseType, ListType, BaseTy + +import torch +import torch._C as _C +import torch.library as library + +from .autograd import autograd_kernel_indirection, construct_autograd_kernel + +""" +For a detailed guide on custom ops, please 
see +https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + +This file includes pieces of the implementation of our custom operator API. +""" + +__all__ = ["custom_op", "CustomOp", "get_ctx", "AbstractImplCtx"] + + +SUPPORTED_DEVICE_TYPE_TO_KEY = { + "cpu": "CPU", + "cuda": "CUDA", +} + +# We will not let users register CustomOps with anything that could look like +# PyTorch internals to avoid confusion. +RESERVED_NS = { + "prim", + "prims", + "aten", + "at", + "torch", + "pytorch", +} + + +def custom_op( + qualname: str, manual_schema: typing.Optional[str] = None +) -> typing.Callable: + r"""Creates a new CustomOp object. + + WARNING: if you're a user, please do not use this directly + (instead use the torch._custom_ops APIs). + Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + In PyTorch, defining an op (short for "operator") is a two step-process: + - we need to define (create) the op + - we need to implement behavior for how the operator interacts with + various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc. + + This entrypoint defines the CustomOp object (the first step); + you must then perform the second step by calling various methods on + the CustomOp object. + + This API is used as a decorator (see examples). + + Arguments: + qualname (str): Should be a string that looks like + "namespace::operator_name". Operators in PyTorch need a namespace to + avoid name collisions; a given operator may only be created once. + If you are writing a Python library, we recommend the namespace to + be the name of your top-level module. The operator_name must be + the same as the name of the function you pass to custom_op + (see examples). + manual_schema (Optional[str]): Each PyTorch operator needs a schema that + tells PyTorch the types of the inputs/outputs. 
If None (default), + we will infer the schema from the type annotations on the function + (see examples). Otherwise, if you don't want to use type annotations, + you may provide us the schema string. + + Example:: + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Step 1: define the CustomOp. + >>> # We need to provide the decorator a "prototype function" + >>> # (a function with Python ellipses as the body). + >>> @custom_op("mylibrary::numpy_sin") + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> ... + >>> + >>> # numpy_sin is now an instance of class CustomOp + >>> print(type(numpy_sin)) + >>> + >>> # Step 2: Register an implementation for various PyTorch subsystems + >>> + >>> # Register an implementation for CPU tensors + >>> @numpy_sin.impl('cpu'): + >>> def numpy_sin_impl_cpu(x): + >>> return torch.from_numpy(np.sin(x.numpy())) + >>> + >>> # Register an implementation for CUDA tensors + >>> @numpy_sin.impl('cuda'): + >>> def numpy_sin_impl_cuda(x): + >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device) + >>> + >>> x = torch.randn(3) + >>> numpy_sin(x) # calls numpy_sin_impl_cpu + >>> + >>> x_cuda = x.cuda() + >>> numpy_sin(x) # calls numpy_sin_impl_cuda + + """ + + def inner(func): + if not inspect.isfunction(func): + raise ValueError( + f"custom_op(...)(func): Expected `func` to be a Python " + f"function, got: {type(func)}" + ) + + ns, name = parse_qualname(qualname) + validate_namespace(ns) + if func.__name__ != name: + raise ValueError( + f"custom_op(qualname='{qualname}', ...)(func): expected `func` " + f"to have name '{name}' but got '{func.__name__}'. 
" + f"Please either change the name of `func` or the qualname that " + f"is passed to `custom_op`" + ) + + schema = infer_schema(func) if manual_schema is None else manual_schema + schema_str = f"{name}{schema}" + function_schema = FunctionSchema.parse(schema_str) + validate_schema(function_schema) + if manual_schema is not None: + validate_function_matches_schema(function_schema, func) + + lib = library.Library(ns, "FRAGMENT") + lib.define(schema_str) + ophandle = find_ophandle_or_throw(ns, function_schema.name) + result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True) + + result.__name__ = func.__name__ + result.__module__ = func.__module__ + result.__doc__ = func.__doc__ + + library.impl(lib, result._opname, "Autograd")( + autograd_kernel_indirection(weakref.proxy(result)) + ) + + torch._C._dispatch_set_report_error_callback( + ophandle, functools.partial(report_error_callback, weakref.proxy(result)) + ) + + return result + + return inner + + +# Global dictionary holding references to all CustomOp objects +# Yes, it keeps all CustomOps alive (see NOTE [CustomOp lifetime]) +# Used to query the CustomOp associated with a specific C++ dispatcher operator. +# An example usage is FakeTensor: FakeTensor checks if a specific operator +# has an implementation registered via the CustomOp API. +# Indexed by qualname (e.g. aten::foo) +global_registry: typing.Dict[str, "CustomOp"] = {} + + +class CustomOp: + r"""Class for custom operators in PyTorch. + + Use the CustomOp API to create user-defined custom operators that behave + just like regular PyTorch operators (e.g. torch.sin, torch.mm) when it + comes to various PyTorch subsystems (like torch.compile). + + To construct a `CustomOp`, use `custom_op`. + """ + + def __init__(self, lib, cpp_ns, schema, operator_name, ophandle, *, _private_access=False): + super().__init__() + if not _private_access: + raise RuntimeError( + "The CustomOp constructor is private and we do not guarantee " + "BC for it. 
Please use custom_op(...) to create a CustomOp object" + ) + name = f"{cpp_ns}::{operator_name}" + self._schema = schema + self._cpp_ns = cpp_ns + self._lib: library.Library = lib + self._ophandle: _C._DispatchOperatorHandle = ophandle + # Has the name of the op, e.g. "foo". We cache here for convenience. + self._opname: str = operator_name + # this is _opname but with namespace. e.g. "custom::foo" + self._qualname: str = name + self.__name__ = None # mypy requires this + # NB: Some of these impls are registered as kernels to DispatchKeys. + # Modifying the _impls dict directly won't do anything in that case. + self._impls: typing.Dict[str, typing.Optional[FuncAndLocation]] = {} + # See NOTE [CustomOp autograd kernel indirection] + self._registered_autograd_kernel_indirection = False + + global_registry[self._qualname] = self + + def _register_autograd_kernel_indirection(self): + assert not self._registered_autograd_kernel_indirection + self._lib.impl(self._opname, autograd_kernel_indirection(weakref.proxy(self)), "Autograd") + self._registered_autograd_kernel_indirection = True + + # Records the impl and the source location in self._impls + # Note that this doesn't cause torch.library to use the impl, that + # needs to be done in a separate self._lib.impl call. + def _register_impl(self, kind, func, stacklevel=2): + if self._has_impl(kind): + func_and_location = self._impls[kind] + assert func_and_location is not None # Pacify mypy + location = func_and_location.location + raise RuntimeError( + f"Attempting to register a {kind} impl for operator {self._qualname} " + f"that already has a {kind} impl registered from Python at " + f"{location}. This is not supported." 
+ ) + frame = inspect.getframeinfo(sys._getframe(stacklevel)) + location = f"{frame.filename}:{frame.lineno}" + self._impls[kind] = FuncAndLocation(func, location) + + def _get_impl(self, kind): + return self._impls[kind] + + def _has_impl(self, kind): + return kind in self._impls + + def _destroy(self): + # NOTE: [CustomOp lifetime] + # A CustomOp, once created, lives forever. The mechanism is that the + # global registry holds a reference to it. However, to make testing + # easier, we want to be able to destroy CustomOp objects. + # CustomOp._destroy does the job, though it leaves the CustomOp + # in a garbage state. + del self._lib + + opnamespace = getattr(torch.ops, self._cpp_ns) + if hasattr(opnamespace, self._opname): + delattr(opnamespace, self._opname) + + del global_registry[self._qualname] + + def __repr__(self): + return f'' + + def __call__(self, *args, **kwargs): + # Bypass torch.ops.* and directly do OperatorHandle::callBoxed. + # Using torch.ops.* is a bit of a pain (it can be slow and it has lifetime + # issues from caching operators that make testing CustomOp difficult). + result = _C._dispatch_call_boxed(self._ophandle, *args, **kwargs) + return result + + def impl( + self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2, + ) -> typing.Callable: + r"""Register an implementation for a device type for this CustomOp object. + + WARNING: if you're a user, please do not use this directly + (instead use the torch._custom_ops APIs). + Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + If the CustomOp is passed multiple Tensor inputs with different device + types, it will dispatch to the registered implementation for the highest + priority device type among those present. + The supported device types, in order of priority, are {'cuda', 'cpu'}. + + This API is used as a decorator (see examples). 
+ + Arguments: + device_types (str or Iterable[str]): the device type(s) to register the function for. + + Examples:: + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> @custom_op("mylibrary::numpy_sin") + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> ... + >>> + >>> # Register an implementation for CPU Tensors + >>> @numpy_sin.impl('cpu'): + >>> def numpy_sin_impl_cpu(x): + >>> return torch.from_numpy(np.sin(x.numpy())) + >>> + >>> # Register an implementation for CUDA Tensors + >>> @numpy_sin.impl('cuda'): + >>> def numpy_sin_impl_cuda(x): + >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device) + >>> + >>> x = torch.randn(3) + >>> numpy_sin(x) # calls numpy_sin_impl_cpu + >>> + >>> x_cuda = x.cuda() + >>> numpy_sin(x) # calls numpy_sin_impl_cuda + + """ + if isinstance(device_types, str): + device_types = [device_types] + for device_type in device_types: + validate_device_type(device_type) + + def inner(f): + for device_type in set(device_types): + self._check_doesnt_have_library_impl(device_type) + self._register_impl(device_type, f, stacklevel=_stacklevel) + dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type] + library.impl(self._lib, self._opname, dispatch_key)(f) + return f + + return inner + + def _check_doesnt_have_library_impl(self, device_type): + if self._has_impl(device_type): + return + key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type] + if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, key): + raise RuntimeError( + f"impl(..., device_types={device_type}): the operator {self._qualname} " + f"already has an implementation for this device type via a " + f"pre-existing torch.library or TORCH_LIBRARY registration.") + + def impl_factory(self) -> typing.Callable: + r"""Register an implementation for a factory function.""" + + def inner(f): + self._register_impl("factory", f) + library.impl(self._lib, self._opname, "BackendSelect")(f) + return f + + return inner + + def impl_abstract(self, _stacklevel=2) -> 
typing.Callable: + r"""Register an abstract implementation for this operator. + + WARNING: please do not use this directly (and instead use the torch._custom_ops + APIs). Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + An "abstract implementation" specifies the behavior of this operator on + Tensors that carry no data. Given some input Tensors with certain properties + (sizes/strides/storage_offset/device), it specifies what the properties of + the output Tensors are. + + The abstract implementation has the same signature as the operator. + It is run for both FakeTensors and meta tensors. To write an abstract + implementation, assume that all Tensor inputs to the operator are + regular CPU/CUDA/Meta tensors, but they do not have storage, and + you are trying to return regular CPU/CUDA/Meta tensor(s) as output. + The abstract implementation must consist of only PyTorch operations + (and may not directly access the storage or data of any input or + intermediate Tensors). + + This API is used as a decorator (see examples). + + Examples:: + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Example 1: an operator without data-dependent output shape + >>> @custom_op('mylibrary::custom_linear') + >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor): + >>> ... + >>> + >>> @custom_linear.impl_abstract(): + >>> def custom_linear_abstract(x, weight): + >>> assert x.dim() == 2 + >>> assert weight.dim() == 2 + >>> assert bias.dim() == 1 + >>> assert x.shape[1] == weight.shape[1] + >>> assert weight.shape[0] == bias.shape[0] + >>> assert x.device == weight.device + >>> + >>> return (x @ weight.t()) + bias + >>> + >>> # Example 2: an operator with data-dependent output shape + >>> @custom_op('mylibrary::custom_nonzero') + >>> def custom_nonzero(x: Tensor) -> Tensor: + >>> ... 
+ >>> + >>> @custom_nonzero.impl_abstract(): + >>> def custom_nonzero_abstract(x): + >>> # Number of nonzero-elements is data-dependent. + >>> # Since we cannot peek at the data in an abstract impl, + >>> # we use the ctx object to construct a new symint that + >>> # represents the data-dependent size. + >>> ctx = torch._custom_op.get_ctx() + >>> nnz = ctx.create_unbacked_symint() + >>> shape = [x.dim(), nnz] + >>> result = x.new_empty(shape, dtype=torch.long) + >>> return result + >>> + >>> @numpy_nonzero.impl(['cpu', 'cuda']) + >>> def custom_nonzero_impl(x): + >>> x_np = to_numpy(x) + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> # unbacked symbolic ints in PyTorch must be >= 2, so we + >>> # constrain the range to at least 2 + >>> if res.shape[0] <= 1: + >>> raise RuntimeError("not supported") + >>> return torch.tensor(res, device=x.device) + + """ + + def inner(f): + frame = inspect.stack()[1] + self._check_doesnt_have_library_meta_impl() + self._register_impl("abstract", f, stacklevel=_stacklevel) + location = self._get_impl("abstract").location + + qualname = self._qualname + + # Handle DispatchKey.Meta registration + @functools.wraps(f) + def f_with_ctx(*args, **kwargs): + def error_on_ctx(): + raise RuntimeError( + f"Attempted to call get_ctx() for the meta implementation " + f"for {qualname}." + f"You have presumably called get_ctx() because the operator " + f"has a data-dependent output shape; if so, there is no " + f"such meta implementation and this error is the correct " + f"behavior. Otherwise, please remove the call to get_ctx() " + f"in the implementation registered with impl_abstract " + f"at {location}" + ) + + with set_ctx_getter(error_on_ctx): + return f(*args, **kwargs) + + self._lib.impl(self._opname, f_with_ctx, "Meta") + return f + + return inner + + def _check_can_register_backward(self): + def error(detail): + raise RuntimeError( + f"Cannot use torch._custom_ops APIs to register backward " + f"formula for {detail}. 
Got operator " + f"{self._qualname} with schema: {schema}" + ) + + schema = self._schema + if schema.kind() != SchemaKind.functional: + error("non-functional operator") + + rets = schema.returns + if not schema.returns: + error("operator with no returns") + + assert len(rets) > 0 + is_non_mutating_view = any( + r.annotation is not None and not r.annotation.is_write for r in rets + ) + if is_non_mutating_view: + error("operator that returns views") + + # We make assumptions about the schema's return types. + allowed_return_types = { + BaseType(BaseTy.int): "int", + BaseType(BaseTy.SymInt): "SymInt", + BaseType(BaseTy.bool): "bool", + BaseType(BaseTy.float): "float", + BaseType(BaseTy.Tensor): "Tensor", + ListType(BaseType(BaseTy.Tensor), None): "List[Tensor]", + } + for ret in schema.returns: + if ret.type in allowed_return_types: + continue + error(f"operator with return not in {list(allowed_return_types.values())} (got {ret.type})") + + def _check_doesnt_have_library_autograd_impl(self): + if self._registered_autograd_kernel_indirection: + return + + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"): + raise RuntimeError( + f"impl_backward/impl_save_for_backward: the operator {self._qualname} " + f"already has an implementation for this device type via a " + f"pre-existing registration to DispatchKey::CompositeImplicitAutograd." + f"CompositeImplicitAutograd operators do not need an autograd formula; " + f"instead, the operator will decompose into its constituents and those " + f"can have autograd formulas defined on them.") + + # We can improve this by adding "all Autograd keys", but + # realistically people will just be using this API for CPU/CUDA for now. 
+ for key in ["Autograd", "AutogradCPU", "AutogradCUDA"]: + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, key): + raise RuntimeError( + f"impl_backward/impl_save_for_backward: " + f"the operator {self._qualname} already has an Autograd kernel " + f"registered to DispatchKey::{key} vi a pre-existing " + f"torch.library or TORCH_LIBRARY registration. Please either " + f"remove those registrations or don't use the torch._custom_ops APIs") + + def _check_doesnt_have_library_meta_impl(self): + if self._has_impl("abstract"): + return + + # If the user's operator is CompositeExplicitAutograd, + # allow them to impl_abstract. This is being pragmatic + # (existing custom ops may have CompositeExplicitAutograd + # registration that don't work with Meta kernels, so this + # gives them an escape hatch). + if ( + _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeExplicitAutograd") + and not _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta") + ): + return + + # Otherwise, if the user's already has a Meta kernel or their + # op is CompositeImplicitAutograd or some other alias dispatch key, + # raise. + + # Special case for CompositeImplicitAutograd + if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"): + raise RuntimeError( + f"impl_abstract(...): the operator {self._qualname} " + f"already has an implementation for this device type via a " + f"pre-existing registration to DispatchKey::CompositeImplicitAutograd." + f"CompositeImplicitAutograd operators do not need an abstract impl; " + f"instead, the operator will decompose into its constituents and those " + f"can have abstract impls defined on them.") + + if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, "Meta"): + raise RuntimeError( + f"impl_abstract(...): the operator {self._qualname} " + f"already has an DispatchKey::Meta implementation via a " + f"pre-existing torch.library or TORCH_LIBRARY registration. 
" + f"Please either remove that registration or don't call impl_abstract.") + + # NOTE ["backward", "save_for_backward", and "autograd"] + # As a part of the explicit autograd API, a user must provide us + # a "save_for_backward" function and a "backward" function. + # When both of these have been provided, then we automatically + # construct the "autograd" kernel. + def _register_autograd_kernel(self): + assert self._has_impl("backward") + assert self._has_impl("save_for_backward") + kernel = construct_autograd_kernel( + self._schema, + self._output_differentiability, + self, + get_op(self._qualname), + self._get_impl("save_for_backward").func, + self._get_impl("backward").func) + self._register_impl("autograd", kernel) + + def impl_save_for_backward(self, _stacklevel=2): + r"""Register a function that tells us what to save for backward. + + Please see impl_backward for more details. + """ + def inner(f): + self._check_can_register_backward() + self._check_doesnt_have_library_autograd_impl() + if not self._registered_autograd_kernel_indirection: + self._register_autograd_kernel_indirection() + self._register_impl("save_for_backward", f, stacklevel=_stacklevel) + if self._has_impl("backward"): + self._register_autograd_kernel() + return inner + + def impl_backward(self, output_differentiability=None, _stacklevel=2): + r"""Registers a backward formula. + + WARNING: if you're a user, please do not use this directly + (instead use the torch._custom_ops APIs). + Also please see the following for a detailed guide on custom ops. + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + In order for the CustomOp to work with autograd, you need to register + a backward formula. There are two pieces to this: + 1. You must give us a function to specify what to save for backward. + Call this the "save for backward" function. + 2. You must give us a function that computes gradients. Call this the + "backward" function. 
+ + Use `impl_save_for_backward` to define a "save for backward" function + that specifies what gets saved for backward. The function should accept + two arguments ``(inputs, output)`` and return the quantities to be saved + for backward. + + During runtime, when you call the CustomOp, PyTorch will invoke the + "save for backward" function with the inputs and output of the CustomOp. + + Use `impl_backward` to define the "backward" function. The backward + function must accept ``(ctx, saved, *grads)``: + - ``ctx`` is a context object where we may provide information + - ``saved`` is exactly what gets returned from the "save for backward" + function + - ``grads`` is one or more gradients. The number of gradients matches + the number of outputs of the CustomOp. + + The backward function must return a dict that maps the name of + an input to the CustomOp to its corresponding gradient. All inputs that + were declared to be Tensors in the CustomOp definition must be accounted + for in the dict. The gradient may be a Tensor or None. 
+ + """ + if output_differentiability is not None: + def yell(): + raise RuntimeError( + f"impl_backward(output_differentiability): expected " + f"output_differentiability to be a list of bools with " + f"length equal to the number of outputs of this CustomOp " + f"got: {output_differentiability}") + + if not isinstance(output_differentiability, list): + yell() + for diff in output_differentiability: + if not isinstance(diff, bool): + yell() + if len(self._schema.returns) != len(output_differentiability): + yell() + + def inner(f): + self._check_can_register_backward() + self._check_doesnt_have_library_autograd_impl() + if not self._registered_autograd_kernel_indirection: + self._register_autograd_kernel_indirection() + self._register_impl("backward", f, stacklevel=_stacklevel) + self._output_differentiability = output_differentiability + if self._has_impl("save_for_backward"): + self._register_autograd_kernel() + return inner + + +@dataclasses.dataclass +class FuncAndLocation: + func: typing.Callable + location: str + + +def find_ophandle_or_throw(cpp_ns: str, operator_name: OperatorName): + overload_name = ( + "" if operator_name.overload_name is None else operator_name.overload_name + ) + return _C._dispatch_find_schema_or_throw( + f"{cpp_ns}::{str(operator_name.name)}", overload_name + ) + + +def validate_namespace(ns: str) -> None: + if "." in ns: + raise ValueError( + f'custom_op(..., ns="{ns}"): expected ns to not contain any . (and be a ' + f"valid variable name)" + ) + if ns in RESERVED_NS: + raise ValueError( + f"custom_op(..., ns='{ns}'): '{ns}' is a reserved namespace, " + f"please choose something else. " + ) + +def validate_schema(schema: FunctionSchema) -> None: + # Coming in the future. Requires us to have correct logic for + # the ADInplaceOrView key + if schema.kind() != SchemaKind.functional: + raise ValueError( + f"custom_op does not support non-functional function schema. 
Got: {schema}" + ) + + rets = schema.returns + is_non_mutating_view = len(rets) > 0 and any( + r.annotation is not None and not r.annotation.is_write for r in rets + ) + if is_non_mutating_view: + raise ValueError(f"custom_op does not support view functions. Got: {schema}") + + # Just seems weird so banning for now + if not schema.returns: + raise ValueError( + f"custom_op does not support function schema with no outputs. Got: {schema}" + ) + + # For simplicity: don't allow self arguments + if schema.arguments.self_arg is not None: + raise ValueError( + f"custom_op does not support arguments named 'self'. Please " + f"rename your argument. Got: {schema}" + ) + + +def parse_qualname(qualname: str) -> typing.Tuple[str, str]: + names = qualname.split("::", 1) + if len(names) != 2: + raise ValueError(f"Expected there to be a namespace in {qualname}, i.e. The " + f"operator name should look something like ns::foo") + if '.' in names[1]: + raise ValueError(f"The torch.custom_ops APIs do not handle overloads, " + f"i.e. operator names with '.' in them. " + f"Please name your operator something like ns::foo. " + f"Got: {qualname}") + return names[0], names[1] + + +def validate_device_type(device_type: str) -> None: + if device_type not in SUPPORTED_DEVICE_TYPE_TO_KEY: + raise ValueError( + f"CustomOp.impl(device_types=[{device_type}, ...]): we only support device_type " + f"in {SUPPORTED_DEVICE_TYPE_TO_KEY.keys()}." + ) + + +def supported_param(param: inspect.Parameter) -> bool: + return param.kind in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + ) + + +def validate_function_matches_schema( + schema: FunctionSchema, func: typing.Callable +) -> None: + sig = inspect.signature(func) + + if not all(supported_param(p) for _, p in sig.parameters.items()): + raise ValueError( + f"custom_op(..., manual_schema)(func): positional-only args, " + f"varargs, and kwargs are not supported. Please rewrite `func` " + f"to not have them. 
Got `func` with signature: {sig}" + ) + + if ( + any( + p.annotation is not inspect.Parameter.empty + for _, p in sig.parameters.items() + ) + or sig.return_annotation is not inspect.Signature.empty + ): + raise ValueError( + f"custom_op(..., manual_schema)(func): When passing in a manual " + f"schema, we expect `func` to have no type annotations to avoid " + f"ambiguity. Got `func` with signature: {sig}" + ) + + positional = [ + (name, param) + for name, param in sig.parameters.items() + if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD + ] + kwargonly = [ + (name, param) + for name, param in sig.parameters.items() + if param.kind == inspect.Parameter.KEYWORD_ONLY + ] + + def error(): + raise ValueError( + f"custom_op(..., manual_schema)(func): When passing in a manual " + f"schema, we expect `func`'s signature to match `manual_schema` " + f"(aside from type annotations). " + f"func's signature: {sig}, manual_schema: {schema}" + ) + + def error_default_args(): + raise ValueError( + f"custom_op(..., manual_schema)(func): " + f"neither func nor manual_schema should have default " + f"arguments. Got " + f"func's signature: {sig}, manual_schema: {schema}" + ) + + def compare(sig_args, schema_args): + if len(sig_args) != len(schema_args): + error() + for (name, param), arg in zip(sig_args, schema_args): + if name != arg.name: + error() + if param.default is not inspect.Parameter.empty or arg.default is not None: + error_default_args() + + compare(positional, schema.arguments.flat_positional) + compare(kwargonly, schema.arguments.flat_kwarg_only) + + +def get_none(): + return None + + +global_ctx_getter: typing.Callable = get_none + + +# NOTE [ctx inside the fake implementation] +# If a user has an operator with data-dependent output shape, then when writing +# a fake implementation they must query the current ctx and use methods on the +# ctx to construct a new unbacked symint. 
+# +# This is done via us setting the global_ctx_getter function every time a fake +# implementation is invoked. +def get_ctx() -> "AbstractImplCtx": + """get_ctx() returns the current AbstractImplCtx object. + + Calling ``get_ctx()`` is only valid inside of an abstract implementation. + """ + return global_ctx_getter() + + +@contextlib.contextmanager +def set_ctx_getter(ctx_getter): + global global_ctx_getter + prev = global_ctx_getter + try: + global_ctx_getter = ctx_getter + yield + finally: + global_ctx_getter = prev + + +class AbstractImplCtx: + """ + Context object for writing abstract implementations for custom operators. + """ + + def __init__(self, _shape_env, _op): + self._shape_env = _shape_env + self._op = _op + + def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt: + """Constructs a new symint (symbolic int) representing a data-dependent value. + + This is useful for writing the abstract implementation (which is necessary + for torch.compile) for a CustomOp where an output Tensor has a size + that depends on the data of the input Tensors. + + Args: + min (int): A statically known inclusive lower bound for this symint. + min must be at least 2 due to implementation details of + torch.compile. Default: 2. + max (Optional[int]): A statically known inclusive upper bound for this + symint. Default: None + + .. warning: + + It is important that the ``min`` and ``max`` (if not None) values are set + correctly, otherwise, there will be undefined behavior under + torch.compile. The default value of ``min`` is 2 due to torch.compile + specializing on 0/1 sizes. + + You must also verify that your implementation on concrete Tensors + (e.g. CPU/CUDA) only returns Tensors where the size that corresponds + to the symint also has respects these constraint. + The easiest way to do this is to add an assertion in the CPU/CUDA/etc + implementation that the size follows these bounds. 
+ + Example:: + + >>> # an operator with data-dependent output shape + >>> @custom_op("mylibrary::custom_nonzero") + >>> def custom_nonzero(x: Tensor) -> Tensor: + >>> ... + >>> + >>> @custom_nonzero.impl_abstract(): + >>> def custom_nonzero_abstract(x): + >>> # Number of nonzero-elements is data-dependent + >>> ctx = torch._custom_op.get_ctx() + >>> nnz = ctx.create_unbacked_symint() + >>> shape = [x.dim(), nnz] + >>> result = x.new_empty(shape, dtype=torch.long) + >>> return result + >>> + >>> @numpy_nonzero.impl(['cpu', 'cuda']) + >>> def custom_nonzero_impl(x): + >>> x_np = to_numpy(x) + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> # the size associated with ctx.create_unbacked_symint() + >>> # must be constrained in the same way, so we add an assertion here. + >>> if res.shape[0] < 2 or res.shape[0] > x.numel(): + >>> raise RuntimeError("not supported") + >>> return torch.tensor(res, device=x.device) + + """ + if ( + self._shape_env is None + or not self._shape_env.allow_dynamic_output_shape_ops + ): + raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op) + + if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt): + raise ValueError( + f"ctx.create_unbacked_symint(min={min}, max={max}): expected " + f"min and max to be statically known ints but got SymInt. " + f"This is not supported." + ) + + if min < 2: + raise ValueError( + f"ctx.create_unbacked_symint(min={min}, ...): expected min to be " + f"greater than or equal to 2. 
PyTorch only supports new " + f"data-dependent sizes of >= 2" + ) + + result = self._shape_env.create_unbacked_symint() + torch.fx.experimental.symbolic_shapes.constrain_range(result, min=2, max=max) + return result + + +def infer_schema(prototype_function: typing.Callable) -> str: + sig = inspect.signature(prototype_function) + + def error_fn(what): + raise ValueError( + f"custom_op(...)(func): {what} " f"Got func with signature {sig})" + ) + + params = [ + parse_param(name, param, error_fn) for name, param in sig.parameters.items() + ] + ret = parse_return(sig.return_annotation, error_fn) + return f"({', '.join(params)}) -> {ret}" + + +def parse_param(name, param, error_fn): + if not supported_param(param): + error_fn("We do not support positional-only args, varargs, or varkwargs.") + + if param.annotation is inspect.Parameter.empty: + error_fn(f"Parameter {name} must have a type annotation.") + + if param.annotation not in SUPPORTED_PARAM_TYPES.keys(): + error_fn( + f"Parameter {name} has unsupported type {param.annotation}. " + f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}." + ) + + if param.default is not inspect.Parameter.empty: + error_fn( + f"Parameter {name} has a default value; this is not supported. 
" + f"If you want to use default values then create a function with " + f"default values that calls the CustomOp" + ) + + return f"{SUPPORTED_PARAM_TYPES[param.annotation]} {name}" + + +def derived_types( + base_type, cpp_type, list_base, optional_base_list, optional_list_base +): + result = [ + (base_type, cpp_type), + (typing.Optional[base_type], f"{cpp_type}?"), + ] + if list_base: + result.append((typing.Sequence[base_type], f"{cpp_type}[]")) # type: ignore[valid-type] + if optional_base_list: + result.append((typing.Sequence[typing.Optional[base_type]], f"{cpp_type}?[]")) # type: ignore[valid-type] + if optional_list_base: + result.append((typing.Optional[typing.Sequence[base_type]], f"{cpp_type}[]?")) # type: ignore[valid-type] + return result + + +def get_supported_param_types(): + data = [ + # (python type, schema type, type[] variant, type?[] variant, type[]? variant + (torch.Tensor, "Tensor", True, True, False), + (int, "SymInt", True, False, True), + (float, "float", True, False, True), + (bool, "bool", True, False, True), + (str, "str", False, False, False), + (torch.types.Number, "Scalar", True, False, False), + (torch.dtype, "ScalarType", False, False, False), + (torch.device, "Device", False, False, False), + ] + result = [] + for line in data: + result.extend(derived_types(*line)) + return dict(result) + + +SUPPORTED_RETURN_TYPES = { + torch.Tensor: "Tensor", + typing.List[torch.Tensor]: "Tensor[]", + int: "SymInt", + float: "float", + bool: "bool", + torch.types.Number: "Scalar", +} + + +def parse_return(annotation, error_fn): + origin = typing.get_origin(annotation) + if origin is not tuple: + if annotation not in SUPPORTED_RETURN_TYPES.keys(): + error_fn( + f"Return has unsupported type {annotation}. " + f"The valid types are: {SUPPORTED_RETURN_TYPES}." 
+ ) + return SUPPORTED_RETURN_TYPES[annotation] + + args = typing.get_args(annotation) + for arg in args: + if arg not in SUPPORTED_RETURN_TYPES: + error_fn( + f"Return has unsupported type {annotation}. " + f"The valid types are: {SUPPORTED_RETURN_TYPES}." + ) + + return "(" + ", ".join([SUPPORTED_RETURN_TYPES[arg] for arg in args]) + ")" + + +SUPPORTED_PARAM_TYPES = get_supported_param_types() + + +def report_error_callback(custom_op: typing.Any, key: str) -> None: + if key == "Undefined": + raise NotImplementedError( + f"{custom_op}: There were no Tensor inputs to this operator " + f"(e.g. you passed an empty list of Tensors). If your operator is a " + f"factory function (that is, it takes no Tensors and constructs " + f"a new one), then please use CustomOp.impl_factory to register " + f"an implementation for it" + ) + if key == "Meta": + raise NotImplementedError( + f"{custom_op}: when running with device='Meta' tensors: there is no " + f"abstract impl registered for this CustomOp. Please register one via " + f"CustomOp.impl_abstract to get this CustomOp to work with Meta tensors" + ) + if key in ("CPU", "CUDA"): + device = key.lower() + raise NotImplementedError( + f"{custom_op}: when running with device='{device}' tensors: there is no " + f"{device} impl registered for this CustomOp. Please register one via " + f"CustomOp.impl(device_type='{device}')" + ) + raise NotImplementedError( + f"{custom_op}: No implementation for dispatch key {key}. 
It is likely " + f"that we have not added this functionality yet, please either open an " + f"issue or if you're feeling adventurous, use the low-level " + f"torch.library API" + ) + + +def custom_op_from_existing(op): + ns = op.namespace + lib = torch.library.Library(ns, "FRAGMENT") + name = op.name().split("::")[-1] + schema_str = str(op._schema) + # CustomOp expects the schema string without the namespace + schema_str = schema_str.split("::")[-1] + schema = FunctionSchema.parse(schema_str) + return CustomOp(lib, ns, schema, name, op, _private_access=True) + + +def get_op(qualname): + def error_not_found(): + raise ValueError( + f"Could not find the operator {qualname}. Please make sure you have " + f"already registered the operator and (if registered from C++) " + f"loaded it via torch.ops.load_library.") + + ns, name = parse_qualname(qualname) + if not hasattr(torch.ops, ns): + error_not_found() + opnamespace = getattr(torch.ops, ns) + if not hasattr(opnamespace, name): + error_not_found() + packet = getattr(opnamespace, name) + if not hasattr(packet, 'default'): + error_not_found() + return packet.default + + +def _find_custom_op(qualname, also_check_torch_library=False): + if qualname in global_registry: + return global_registry[qualname] + if not also_check_torch_library: + raise RuntimeError( + f"Could not find custom op \"{qualname}\". 
Did you register it via " + f"the torch._custom_ops API?") + overload = get_op(qualname) + result = custom_op_from_existing(overload) + return result + + +def _custom_op_with_schema(qualname, schema): + ns, name = qualname.split("::") + schema_str = f"{name}{schema}" + function_schema = FunctionSchema.parse(schema_str) + validate_schema(function_schema) + + lib = library.Library(ns, "FRAGMENT") + lib.define(schema_str) + ophandle = find_ophandle_or_throw(ns, function_schema.name) + result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True) + result._register_autograd_kernel_indirection() + + torch._C._dispatch_set_report_error_callback( + ophandle, functools.partial(report_error_callback, weakref.proxy(result)) + ) + return get_op(qualname) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a801763f07282baac65aa283c452a9d2a00b6965 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d556406c7628577d16d86ac849149fadd71f8ca5de00586fadb0ed9ca35d4612 +size 120129 diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10d27223b87b5b97039a2bc367882ee97f2eba2c Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..f85f66a16b4b22d5ac516731fb190952abd692f1 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f5a4594a28a9cf534ca7f5b9bf85cce4043d6c6 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c9fbb6a4067a89b796d868fa00c8604415a9111 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..750fa63708e5153e51283832b7d66069d0f01bae Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..009d79f24aea32ecc8ddd6f6ffb1e80862fdffea Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc differ diff --git 
a/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4eb0a6ecd0cd10b7708118037f53af56a1be2849 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..a103dcc03c9a1d67291395c4b298e0352a694b41 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py @@ -0,0 +1,84 @@ +"""Export torch work functions for binary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module. 
+""" + +import torch + +from torch import ( # noqa: F401 + add, # noqa: F401 + arctan2, # noqa: F401 + bitwise_and, # noqa: F401 + bitwise_left_shift as left_shift, # noqa: F401 + bitwise_or, # noqa: F401 + bitwise_right_shift as right_shift, # noqa: F401 + bitwise_xor, # noqa: F401 + copysign, # noqa: F401 + divide, # noqa: F401 + eq as equal, # noqa: F401 + float_power, # noqa: F401 + floor_divide, # noqa: F401 + fmax, # noqa: F401 + fmin, # noqa: F401 + fmod, # noqa: F401 + gcd, # noqa: F401 + greater, # noqa: F401 + greater_equal, # noqa: F401 + heaviside, # noqa: F401 + hypot, # noqa: F401 + lcm, # noqa: F401 + ldexp, # noqa: F401 + less, # noqa: F401 + less_equal, # noqa: F401 + logaddexp, # noqa: F401 + logaddexp2, # noqa: F401 + logical_and, # noqa: F401 + logical_or, # noqa: F401 + logical_xor, # noqa: F401 + maximum, # noqa: F401 + minimum, # noqa: F401 + multiply, # noqa: F401 + nextafter, # noqa: F401 + not_equal, # noqa: F401 + pow as power, # noqa: F401 + remainder, # noqa: F401 + remainder as mod, # noqa: F401 + subtract, # noqa: F401 + true_divide, # noqa: F401 +) + +from . import _dtypes_impl, _util + + +# work around torch limitations w.r.t. 
numpy +def matmul(x, y): + # work around: + # - RuntimeError: expected scalar type Int but found Double + # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool' + # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half' + dtype = _dtypes_impl.result_type_impl(x, y) + is_bool = dtype == torch.bool + is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and ( + x.is_cpu or y.is_cpu + ) + + work_dtype = dtype + if is_bool: + work_dtype = torch.uint8 + if is_half: + work_dtype = torch.float32 + + x = _util.cast_if_needed(x, work_dtype) + y = _util.cast_if_needed(y, work_dtype) + + result = torch.matmul(x, y) + + if work_dtype != dtype: + result = result.to(dtype) + + return result + + +# a stub implementation of divmod, should be improved after +# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch +def divmod(x, y): + return x // y, x % y diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..e002db2c0a49b5720e4a8ecd82b6ac59a35b93d6 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes.py @@ -0,0 +1,422 @@ +""" Define analogs of numpy dtypes supported by pytorch. +Define the scalar types and supported dtypes and numpy <--> torch dtype mappings. +""" +import builtins + +import torch + +from . import _dtypes_impl + + +# ### Scalar types ### + + +class generic: + @property + def name(self): + return self.__class__.__name__ + + def __new__(cls, value): + # NumPy scalars are modelled as 0-D arrays + # so a call to np.float32(4) produces a 0-D array. 
##################
# abstract types #
##################


class number(generic):
    pass


class integer(number):
    pass


class inexact(number):
    pass


class signedinteger(integer):
    pass


class unsignedinteger(integer):
    pass


class floating(inexact):
    pass


class complexfloating(inexact):
    pass


# ##### concrete types

# signed integers


class int8(signedinteger):
    name = "int8"
    typecode = "b"
    torch_dtype = torch.int8


class int16(signedinteger):
    name = "int16"
    typecode = "h"
    torch_dtype = torch.int16


class int32(signedinteger):
    name = "int32"
    typecode = "i"
    torch_dtype = torch.int32


class int64(signedinteger):
    name = "int64"
    typecode = "l"
    torch_dtype = torch.int64


# unsigned integers


class uint8(unsignedinteger):
    name = "uint8"
    typecode = "B"
    torch_dtype = torch.uint8


# floating point


class float16(floating):
    name = "float16"
    typecode = "e"
    torch_dtype = torch.float16


class float32(floating):
    name = "float32"
    typecode = "f"
    torch_dtype = torch.float32


class float64(floating):
    name = "float64"
    typecode = "d"
    torch_dtype = torch.float64


class complex64(complexfloating):
    name = "complex64"
    typecode = "F"
    torch_dtype = torch.complex64


class complex128(complexfloating):
    name = "complex128"
    typecode = "D"
    torch_dtype = torch.complex128


class bool_(generic):
    name = "bool_"
    typecode = "?"
    torch_dtype = torch.bool


# name aliases
_name_aliases = {
    "intp": int64,
    "int_": int64,
    "intc": int32,
    "byte": int8,
    "short": int16,
    "longlong": int64,  # XXX: is this correct?
    "ubyte": uint8,
    "half": float16,
    "single": float32,
    "double": float64,
    "float_": float64,
    "csingle": complex64,
    "singlecomplex": complex64,
    "cdouble": complex128,
    "cfloat": complex128,
    "complex_": complex128,
}
# We register float_ = float32 and so on
for name, obj in _name_aliases.items():
    vars()[name] = obj


# Replicate this NumPy-defined way of grouping scalar types,
# cf tests/core/test_scalar_methods.py
sctypes = {
    "int": [int8, int16, int32, int64],
    "uint": [uint8],
    "float": [float16, float32, float64],
    "complex": [complex64, complex128],
    "others": [bool_],
}


# Support mappings/functions

# reverse lookups: name / typecode / torch dtype -> scalar type
_names = {st.name: st for cat in sctypes for st in sctypes[cat]}
_typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]}
_torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]}


_aliases = {
    "u1": uint8,
    "i1": int8,
    "i2": int16,
    "i4": int32,
    "i8": int64,
    "b": int8,  # XXX: srsly?
    "f2": float16,
    "f4": float32,
    "f8": float64,
    "c8": complex64,
    "c16": complex128,
    # numpy-specific trailing underscore
    "bool_": bool_,
}


_python_types = {
    int: int64,
    float: float64,
    complex: complex128,
    builtins.bool: bool_,
    # also allow stringified names of python types
    int.__name__: int64,
    float.__name__: float64,
    complex.__name__: complex128,
    builtins.bool.__name__: bool_,
}


def sctype_from_string(s):
    """Normalize a string value: a type 'name' or a typecode or a width alias.

    Raises
    ------
    TypeError
        If ``s`` matches none of the known name/typecode/alias tables.
    """
    if s in _names:
        return _names[s]
    # NOTE: was `s in _name_aliases.keys()` -- the redundant .keys() is dropped
    if s in _name_aliases:
        return _name_aliases[s]
    if s in _typecodes:
        return _typecodes[s]
    if s in _aliases:
        return _aliases[s]
    if s in _python_types:
        return _python_types[s]
    raise TypeError(f"data type {s!r} not understood")


def sctype_from_torch_dtype(torch_dtype):
    """Map a ``torch.dtype`` to the corresponding scalar type class."""
    return _torch_dtypes[torch_dtype]
def dtype(arg):
    """Create a DType from *arg*; ``None`` selects the default float dtype."""
    if arg is None:
        arg = _dtypes_impl.default_dtypes().float_dtype
    return DType(arg)


class DType:
    """A ``numpy.dtype`` work-alike wrapping one of our scalar type classes."""

    def __init__(self, arg):
        # a pytorch object?
        if isinstance(arg, torch.dtype):
            sctype = _torch_dtypes[arg]
        elif isinstance(arg, torch.Tensor):
            sctype = _torch_dtypes[arg.dtype]
        # a scalar type?
        elif issubclass_(arg, generic):
            sctype = arg
        # a dtype already?
        elif isinstance(arg, DType):
            sctype = arg._scalar_type
        # has a dtype attribute?
        elif hasattr(arg, "dtype"):
            sctype = arg.dtype._scalar_type
        else:
            sctype = sctype_from_string(arg)
        self._scalar_type = sctype

    @property
    def name(self):
        return self._scalar_type.name

    @property
    def type(self):
        return self._scalar_type

    @property
    def kind(self):
        # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
        # NOTE: this is the first letter of the scalar-type name, which only
        # approximates numpy's kind codes.
        return _torch_dtypes[self.torch_dtype].name[0]

    @property
    def typecode(self):
        return self._scalar_type.typecode

    def __eq__(self, other):
        if isinstance(other, DType):
            return self._scalar_type == other._scalar_type
        # compare by coercing `other` to a DType; incompatible args are unequal
        try:
            other_instance = DType(other)
        except TypeError:
            return False
        return self._scalar_type == other_instance._scalar_type

    @property
    def torch_dtype(self):
        return self._scalar_type.torch_dtype

    def __hash__(self):
        return hash(self._scalar_type.name)

    def __repr__(self):
        return f'dtype("{self.name}")'

    __str__ = __repr__

    @property
    def itemsize(self):
        # size in bytes of one element of this dtype
        elem = self.type(1)
        return elem.tensor.element_size()

    def __getstate__(self):
        return self._scalar_type

    def __setstate__(self, value):
        self._scalar_type = value


typecodes = {
    "All": "efdFDBbhil?",
    "AllFloat": "efdFD",
    "AllInteger": "Bbhil",
    "Integer": "bhil",
    "UnsignedInteger": "B",
    "Float": "efd",
    "Complex": "FD",
}


# ### Defaults and dtype discovery


def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
    """Set the (global) defaults for fp, complex, and int dtypes.

    The complex dtype is inferred from the float (fp) dtype. It has
    a width at least twice the width of the float dtype,
    i.e., it's complex128 for float64 and complex64 for float32.

    Parameters
    ----------
    fp_dtype
        Allowed values are "numpy", "pytorch" or dtype_like things which
        can be converted into a DType instance.
        Default is "numpy" (i.e. float64).
    int_dtype
        Allowed values are "numpy", "pytorch" or dtype_like things which
        can be converted into a DType instance.
        Default is "numpy" (i.e. int64).

    Returns
    -------
    The old default dtype state: a namedtuple with attributes ``float_dtype``,
    ``complex_dtype`` and ``int_dtype``. These attributes store *pytorch*
    dtypes.

    Notes
    ------------
    This function has a side effect: it sets the global state with the provided dtypes.

    The complex dtype has bit width of at least twice the width of the float
    dtype, i.e. it's complex128 for float64 and complex64 for float32.

    """
    if fp_dtype not in ["numpy", "pytorch"]:
        fp_dtype = dtype(fp_dtype).torch_dtype
    if int_dtype not in ["numpy", "pytorch"]:
        int_dtype = dtype(int_dtype).torch_dtype

    if fp_dtype == "numpy":
        float_dtype = torch.float64
    elif fp_dtype == "pytorch":
        float_dtype = torch.float32
    else:
        float_dtype = fp_dtype

    complex_dtype = {
        torch.float64: torch.complex128,
        torch.float32: torch.complex64,
        torch.float16: torch.complex64,
    }[float_dtype]

    # both "numpy" and "pytorch" map to int64; anything else was already
    # converted to a torch dtype above (the old `else: int_dtype = int_dtype`
    # branch was a no-op and has been removed)
    if int_dtype in ["numpy", "pytorch"]:
        int_dtype = torch.int64

    new_defaults = _dtypes_impl.DefaultDTypes(
        float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
    )

    # Capture the old *state* before installing the new one. Previously this
    # returned `_dtypes_impl.default_dtypes` (the function object) rather than
    # the namedtuple the docstring promises.
    old_defaults = _dtypes_impl.default_dtypes()
    _dtypes_impl._default_dtypes = new_defaults
    return old_defaults


def issubclass_(arg, klass):
    """issubclass that returns False (instead of raising) for non-classes."""
    try:
        return issubclass(arg, klass)
    except TypeError:
        return False


def issubdtype(arg1, arg2):
    # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420
    if not issubclass_(arg1, generic):
        arg1 = dtype(arg1).type
    if not issubclass_(arg2, generic):
        arg2 = dtype(arg2).type
    return issubclass(arg1, arg2)


__all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype"]
__all__ += list(_names.keys())  # noqa: PLE0605
__all__ += list(_name_aliases.keys())  # noqa: PLE0605
__all__ += [  # noqa: PLE0605
    "sctypes",
    "generic",
    "number",
    "integer",
    "signedinteger",
    "unsignedinteger",
    "inexact",
    "floating",
    "complexfloating",
]
def _public_functions(mod):
    """Return ``(name, function)`` pairs for the public functions of *mod*."""

    def _is_public(obj):
        return inspect.isfunction(obj) and not obj.__name__.startswith("_")

    return inspect.getmembers(mod, _is_public)


# We fill in __all__ in the loop below
__all__ = []

# Decorate implementer functions with argument normalizers and export them
# to the top namespace.
for fn_name, fn in itertools.chain(
    _public_functions(_funcs_impl), _public_functions(_reductions_impl)
):
    if fn_name in ["percentile", "quantile", "median"]:
        wrapped = normalizer(fn, promote_scalar_result=True)
    elif fn_name == "einsum":
        # normalized manually
        wrapped = fn
    else:
        wrapped = normalizer(fn)

    wrapped.__qualname__ = fn_name
    wrapped.__name__ = fn_name
    vars()[fn_name] = wrapped
    __all__.append(fn_name)


"""
Vendored objects from numpy.lib.index_tricks
"""


class IndexExpression:
    """
    Written by Konrad Hinsen
    last revision: 1999-7-23

    Cosmetic changes by T. Oliphant 2001
    """

    def __init__(self, maketuple):
        self.maketuple = maketuple

    def __getitem__(self, item):
        # np.index_exp always yields a tuple; np.s_ returns the item verbatim
        if not self.maketuple:
            return item
        return item if isinstance(item, tuple) else (item,)


index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)


__all__ += ["index_exp", "s_"]
+ +Things imported from here have numpy-compatible signatures but operate on +pytorch tensors. +""" +# Contents of this module ends up in the main namespace via _funcs.py +# where type annotations are used in conjunction with the @normalizer decorator. +from __future__ import annotations + +import builtins +import itertools +import operator +from typing import Optional, Sequence + +import torch + +from . import _dtypes_impl, _util +from ._normalizations import ( + ArrayLike, + CastingModes, + DTypeLike, + NDArray, + NotImplementedType, + OutArray, +) + + +def copy( + a: ArrayLike, order: NotImplementedType = "K", subok: NotImplementedType = False +): + return a.clone() + + +def copyto( + dst: NDArray, + src: ArrayLike, + casting: Optional[CastingModes] = "same_kind", + where: NotImplementedType = None, +): + (src,) = _util.typecast_tensors((src,), dst.dtype, casting=casting) + dst.copy_(src) + + +def atleast_1d(*arys: ArrayLike): + res = torch.atleast_1d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def atleast_2d(*arys: ArrayLike): + res = torch.atleast_2d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def atleast_3d(*arys: ArrayLike): + res = torch.atleast_3d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def _concat_check(tup, dtype, out): + if tup == (): + raise ValueError("need at least one array to concatenate") + + """Check inputs in concatenate et al.""" + if out is not None and dtype is not None: + # mimic numpy + raise TypeError( + "concatenate() only takes `out` or `dtype` as an " + "argument, but both were provided." 
+ ) + + +def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"): + """Figure out dtypes, cast if necessary.""" + + if out is not None or dtype is not None: + # figure out the type of the inputs and outputs + out_dtype = out.dtype.torch_dtype if dtype is None else dtype + else: + out_dtype = _dtypes_impl.result_type_impl(*tensors) + + # cast input arrays if necessary; do not broadcast them agains `out` + tensors = _util.typecast_tensors(tensors, out_dtype, casting) + + return tensors + + +def _concatenate( + tensors, axis=0, out=None, dtype=None, casting: Optional[CastingModes] = "same_kind" +): + # pure torch implementation, used below and in cov/corrcoef below + tensors, axis = _util.axis_none_flatten(*tensors, axis=axis) + tensors = _concat_cast_helper(tensors, out, dtype, casting) + return torch.cat(tensors, axis) + + +def concatenate( + ar_tuple: Sequence[ArrayLike], + axis=0, + out: Optional[OutArray] = None, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(ar_tuple, dtype, out=out) + result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting) + return result + + +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.vstack(tensors) + + +row_stack = vstack + + +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.hstack(tensors) + + +def dstack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + # XXX: in numpy 1.24 dstack does not have dtype and casting keywords + # but {h,v}stack do. 
def column_stack(
    tup: Sequence[ArrayLike],
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Stack 1-D arrays as columns into a 2-D array."""
    # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords
    # but row_stack does. (because row_stack is an alias for vstack, really).
    # Hence add these keywords here for consistency.
    _concat_check(tup, dtype, out=None)
    tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
    return torch.column_stack(tensors)


def stack(
    arrays: Sequence[ArrayLike],
    axis=0,
    out: Optional[OutArray] = None,
    *,
    dtype: Optional[DTypeLike] = None,
    casting: Optional[CastingModes] = "same_kind",
):
    """Join arrays along a *new* axis (unlike concatenate)."""
    _concat_check(arrays, dtype, out=out)

    tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting)
    # the new axis is validated against the *result* rank (input rank + 1)
    result_ndim = tensors[0].ndim + 1
    axis = _util.normalize_axis_index(axis, result_ndim)
    return torch.stack(tensors, axis=axis)


def append(arr: ArrayLike, values: ArrayLike, axis=None):
    """Append *values* to the end of *arr* (flattening both when axis is None)."""
    if axis is None:
        if arr.ndim != 1:
            arr = arr.flatten()
        # values is always flattened in the axis=None case
        values = values.flatten()
        axis = arr.ndim - 1
    return _concatenate((arr, values), axis=axis)


# ### split ###


def _split_helper(tensor, indices_or_sections, axis, strict=False):
    """Dispatch split/array_split on the type of `indices_or_sections`."""
    if isinstance(indices_or_sections, int):
        return _split_helper_int(tensor, indices_or_sections, axis, strict)
    elif isinstance(indices_or_sections, (list, tuple)):
        # NB: drop split=..., it only applies to split_helper_int
        return _split_helper_list(tensor, list(indices_or_sections), axis)
    else:
        raise TypeError("split_helper: ", type(indices_or_sections))


def _split_helper_int(tensor, indices_or_sections, axis, strict=False):
    """Split `tensor` into `indices_or_sections` chunks along `axis`.

    With strict=True, raises unless the axis length divides evenly
    (np.split semantics); otherwise pads with smaller chunks (np.array_split).
    """
    if not isinstance(indices_or_sections, int):
        raise NotImplementedError("split: indices_or_sections")

    axis = _util.normalize_axis_index(axis, tensor.ndim)

    # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n
    l, n = tensor.shape[axis], indices_or_sections

    if n <= 0:
        raise ValueError()

    if l % n == 0:
        num, sz = n, l // n
        lst = [sz] * num
    else:
        if strict:
            raise ValueError("array split does not result in an equal division")

        num, sz = l % n, l // n + 1
        lst = [sz] * num

    # remaining chunks are one element shorter (no-op in the even case,
    # where num == n already)
    lst += [sz - 1] * (n - num)

    return torch.split(tensor, lst, axis)


def _split_helper_list(tensor, indices_or_sections, axis):
    """Split `tensor` at the given indices along `axis`."""
    if not isinstance(indices_or_sections, list):
        raise NotImplementedError("split: indices_or_sections: list")
    # numpy expects indices, while torch expects lengths of sections
    # also, numpy appends zero-size arrays for indices above the shape[axis]
    lst = [x for x in indices_or_sections if x <= tensor.shape[axis]]
    num_extra = len(indices_or_sections) - len(lst)

    # convert split points into section lengths for torch.split
    lst.append(tensor.shape[axis])
    lst = [
        lst[0],
    ] + [a - b for a, b in zip(lst[1:], lst[:-1])]
    lst += [0] * num_extra

    return torch.split(tensor, lst, axis)


def array_split(ary: ArrayLike, indices_or_sections, axis=0):
    """np.array_split: like split, but allows an uneven division."""
    return _split_helper(ary, indices_or_sections, axis)


def split(ary: ArrayLike, indices_or_sections, axis=0):
    """np.split: raises when an integer section count does not divide evenly."""
    return _split_helper(ary, indices_or_sections, axis, strict=True)


def hsplit(ary: ArrayLike, indices_or_sections):
    """Split horizontally: axis 1 for 2-D+ inputs, axis 0 for 1-D."""
    if ary.ndim == 0:
        raise ValueError("hsplit only works on arrays of 1 or more dimensions")
    axis = 1 if ary.ndim > 1 else 0
    return _split_helper(ary, indices_or_sections, axis, strict=True)


def vsplit(ary: ArrayLike, indices_or_sections):
    """Split vertically (along axis 0); requires at least 2 dimensions."""
    if ary.ndim < 2:
        raise ValueError("vsplit only works on arrays of 2 or more dimensions")
    return _split_helper(ary, indices_or_sections, 0, strict=True)
def kron(a: ArrayLike, b: ArrayLike):
    """Kronecker product of two arrays."""
    return torch.kron(a, b)


def vander(x: ArrayLike, N=None, increasing=False):
    """Generate a Vandermonde matrix."""
    return torch.vander(x, N, increasing)


# ### linspace, geomspace, logspace and arange ###


def linspace(
    start: ArrayLike,
    stop: ArrayLike,
    num=50,
    endpoint=True,
    retstep=False,
    dtype: Optional[DTypeLike] = None,
    axis=0,
):
    """Evenly spaced numbers over [start, stop]."""
    # only the numpy defaults are supported
    if axis != 0 or retstep or not endpoint:
        raise NotImplementedError
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    # XXX: raises TypeError if start or stop are not scalars
    return torch.linspace(start, stop, num, dtype=dtype)


def geomspace(
    start: ArrayLike,
    stop: ArrayLike,
    num=50,
    endpoint=True,
    dtype: Optional[DTypeLike] = None,
    axis=0,
):
    """Numbers spaced evenly on a log scale (geometric progression)."""
    if axis != 0 or not endpoint:
        raise NotImplementedError
    # NOTE(review): the `dtype` argument is accepted but currently ignored.
    base = torch.pow(stop / start, 1.0 / (num - 1))
    logbase = torch.log(base)
    return torch.logspace(
        torch.log(start) / logbase,
        torch.log(stop) / logbase,
        num,
        base=base,
    )


def logspace(
    start,
    stop,
    num=50,
    endpoint=True,
    base=10.0,
    dtype: Optional[DTypeLike] = None,
    axis=0,
):
    """Numbers spaced evenly on a log scale: base**start ... base**stop."""
    if axis != 0 or not endpoint:
        raise NotImplementedError
    return torch.logspace(start, stop, num, base=base, dtype=dtype)


def arange(
    start: Optional[ArrayLike] = None,
    stop: Optional[ArrayLike] = None,
    step: Optional[ArrayLike] = 1,
    dtype: Optional[DTypeLike] = None,
    *,
    like: NotImplementedType = None,
):
    """Evenly spaced values within [start, stop) with the given step."""
    if step == 0:
        raise ZeroDivisionError
    if stop is None and start is None:
        raise TypeError
    if stop is None:
        # XXX: this breaks if start is passed as a kwarg:
        # arange(start=4) should raise (no stop) but doesn't
        start, stop = 0, start
    if start is None:
        start = 0

    # the dtype of the result
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().int_dtype
    # XXX: default values do not get normalized
    start, stop, step = (_util._coerce_to_tensor(x) for x in (start, stop, step))

    # fold the requested dtype into the usual promotion via a dummy tensor
    dummy = torch.empty(1, dtype=dtype)
    target_dtype = _dtypes_impl.result_type_impl(start, stop, step, dummy)

    # work around RuntimeError: "arange_cpu" not implemented for 'ComplexFloat'
    work_dtype = torch.float64 if target_dtype.is_complex else target_dtype

    if (step > 0 and start > stop) or (step < 0 and start < stop):
        # empty range
        return torch.empty(0, dtype=target_dtype)

    result = torch.arange(start, stop, step, dtype=work_dtype)
    result = _util.cast_if_needed(result, target_dtype)
    return result


# ### zeros/ones/empty/full ###


def empty(
    shape,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """Uninitialized tensor of the given shape."""
    if dtype is None:
        dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.empty(shape, dtype=dtype)


# NB: *_like functions deliberately deviate from numpy: it has subok=True
# as the default; we set subok=False and raise on anything else.


def empty_like(
    prototype: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "K",
    subok: NotImplementedType = False,
    shape=None,
):
    """Uninitialized tensor with the shape/dtype of *prototype*."""
    result = torch.empty_like(prototype, dtype=dtype)
    if shape is not None:
        result = result.reshape(shape)
    return result


def full(
    shape,
    fill_value: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """Tensor of the given shape filled with *fill_value*.

    When *dtype* is None it is inferred from *fill_value*.
    """
    # normalize a scalar shape to a 1-tuple once (this used to be done twice:
    # an `isinstance(shape, int)` check followed by a tuple/list check)
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)
    if dtype is None:
        dtype = fill_value.dtype
    return torch.full(shape, fill_value, dtype=dtype)


def full_like(
    a: ArrayLike,
    fill_value,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "K",
    subok: NotImplementedType = False,
    shape=None,
):
    """Tensor with the shape/dtype of *a* filled with *fill_value*."""
    # XXX: fill_value broadcasts
    result = torch.full_like(a, fill_value, dtype=dtype)
    if shape is not None:
        result = result.reshape(shape)
    return result
Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.ones(shape, dtype=dtype) + + +def ones_like( + a: ArrayLike, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "K", + subok: NotImplementedType = False, + shape=None, +): + result = torch.ones_like(a, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def zeros( + shape, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.zeros(shape, dtype=dtype) + + +def zeros_like( + a: ArrayLike, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "K", + subok: NotImplementedType = False, + shape=None, +): + result = torch.zeros_like(a, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +# ### cov & corrcoef ### + + +def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): + """Prepate inputs for cov and corrcoef.""" + + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636 + if y_tensor is not None: + # make sure x and y are at least 2D + ndim_extra = 2 - x_tensor.ndim + if ndim_extra > 0: + x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape) + if not rowvar and x_tensor.shape[0] != 1: + x_tensor = x_tensor.mT + x_tensor = x_tensor.clone() + + ndim_extra = 2 - y_tensor.ndim + if ndim_extra > 0: + y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape) + if not rowvar and y_tensor.shape[0] != 1: + y_tensor = y_tensor.mT + y_tensor = y_tensor.clone() + + x_tensor = _concatenate((x_tensor, y_tensor), axis=0) + + return x_tensor + + +def corrcoef( + x: ArrayLike, + y: Optional[ArrayLike] = None, + rowvar=True, + bias=None, + ddof=None, + *, + dtype: Optional[DTypeLike] = None, +): + if 
def cov(
    m: ArrayLike,
    y: Optional[ArrayLike] = None,
    rowvar=True,
    bias=False,
    ddof=None,
    fweights: Optional[ArrayLike] = None,
    aweights: Optional[ArrayLike] = None,
    *,
    dtype: Optional[DTypeLike] = None,
):
    """Estimate the covariance matrix, mirroring np.cov via torch.cov."""
    # combine m and y (if given) into a single 2-D observations tensor
    m = _xy_helper_corrcoef(m, y, rowvar)

    # numpy semantics: bias=False (falsy) means ddof=1, bias=True means ddof=0
    if ddof is None:
        ddof = 1 if bias == 0 else 0

    is_half = (m.dtype == torch.float16) and m.is_cpu
    if is_half:
        # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
        dtype = torch.float32

    m = _util.cast_if_needed(m, dtype)
    result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights)

    if is_half:
        # cast back to the original half precision
        result = result.to(torch.float16)

    return result


def _conv_corr_impl(a, v, mode):
    """Shared core of convolve/correlate, built on 1-D conv with padding."""
    # compute in the common promoted dtype of both inputs
    dt = _dtypes_impl.result_type_impl(a, v)
    a = _util.cast_if_needed(a, dt)
    v = _util.cast_if_needed(v, dt)

    # "full" mode pads by the kernel length minus one; otherwise `mode` is
    # forwarded to conv1d as-is (e.g. "valid"/"same")
    padding = v.shape[0] - 1 if mode == "full" else mode

    # NumPy only accepts 1D arrays; PyTorch requires 2D inputs and 3D weights
    aa = a[None, :]
    vv = v[None, None, :]

    result = torch.nn.functional.conv1d(aa, vv, padding=padding)

    # torch returns a 2D result, numpy returns a 1D array
    return result[0, :]


def convolve(a: ArrayLike, v: ArrayLike, mode="full"):
    """Discrete, linear convolution of two one-dimensional sequences."""
    # NumPy: if v is longer than a, the arrays are swapped before computation
    if a.shape[0] < v.shape[0]:
        a, v = v, a

    # flip the weights since numpy does and torch does not
    v = torch.flip(v, (0,))

    return _conv_corr_impl(a, v, mode)
# ### logic & element selection ###


def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0):
    """Count occurrences of each value in a tensor of non-negative ints."""
    if x.numel() == 0:
        # edge case allowed by numpy: accept an empty input
        x = x.new_empty(0, dtype=int)

    counts_dtype = _dtypes_impl.default_dtypes().int_dtype
    (x,) = _util.typecast_tensors((x,), counts_dtype, casting="safe")

    return torch.bincount(x, weights, minlength)


def where(
    condition: ArrayLike,
    x: Optional[ArrayLike] = None,
    y: Optional[ArrayLike] = None,
    /,
):
    """np.where: nonzero indices with one argument, elementwise select with three."""
    x_missing = x is None
    y_missing = y is None
    if x_missing != y_missing:
        raise ValueError("either both or neither of x and y should be given")

    cond = condition if condition.dtype == torch.bool else condition.to(torch.bool)

    if x_missing:
        return torch.where(cond)
    return torch.where(cond, x, y)


# ###### module-level queries of object properties


def ndim(a: ArrayLike):
    """Number of dimensions of *a*."""
    return a.ndim


def shape(a: ArrayLike):
    """Shape of *a*, as a plain tuple."""
    return tuple(a.shape)


def size(a: ArrayLike, axis=None):
    """Total element count, or the extent along *axis*."""
    return a.numel() if axis is None else a.shape[axis]


# ###### shape manipulations and indexing


def expand_dims(a: ArrayLike, axis):
    """Insert new length-1 axes at the given positions (returns a view)."""
    expanded = _util.expand_shape(a.shape, axis)
    return a.view(expanded)  # never copies


def flip(m: ArrayLike, axis=None):
    """Reverse element order along the given axes (all axes when None)."""
    # XXX: semantic difference: np.flip returns a view, torch.flip copies
    if axis is None:
        axes = tuple(range(m.ndim))
    else:
        axes = _util.normalize_axis_tuple(axis, m.ndim)
    return torch.flip(m, axes)


def flipud(m: ArrayLike):
    """Reverse rows (flip along axis 0)."""
    return torch.flipud(m)


def fliplr(m: ArrayLike):
    """Reverse columns (flip along axis 1)."""
    return torch.fliplr(m)


def rot90(m: ArrayLike, k=1, axes=(0, 1)):
    """Rotate by 90 degrees k times in the plane given by *axes*."""
    return torch.rot90(m, k, _util.normalize_axis_tuple(axes, m.ndim))


# ### broadcasting and indices ###


def broadcast_to(array: ArrayLike, shape, subok: NotImplementedType = False):
    """Broadcast *array* to the given shape (returns a view)."""
    return torch.broadcast_to(array, size=shape)
# This is a function from tuples to tuples, so we just reuse it
from torch import broadcast_shapes


def broadcast_arrays(*args: ArrayLike, subok: NotImplementedType = False):
    """Broadcast the inputs against each other (torch returns a tuple)."""
    return torch.broadcast_tensors(*args)


def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"):
    """Coordinate matrices from coordinate vectors (np.meshgrid analog)."""
    ndim = len(xi)

    if indexing not in ["xy", "ij"]:
        raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")

    unit = (1,) * ndim
    grids = [
        coords.reshape(unit[:pos] + (-1,) + unit[pos + 1 :])
        for pos, coords in enumerate(xi)
    ]

    if indexing == "xy" and ndim > 1:
        # switch first and second axis for Cartesian ("xy") indexing
        grids[0] = grids[0].reshape((1, -1) + unit[2:])
        grids[1] = grids[1].reshape((-1, 1) + unit[2:])

    if not sparse:
        # Return the full N-D matrix (not only the 1-D vector)
        grids = torch.broadcast_tensors(*grids)

    if copy:
        grids = [g.clone() for g in grids]

    return list(grids)  # match numpy, return a list


def indices(dimensions, dtype: Optional[DTypeLike] = int, sparse=False):
    """Grid of indices for the given shape (np.indices analog)."""
    # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791
    dimensions = tuple(dimensions)
    rank = len(dimensions)
    unit = (1,) * rank

    def _ramp(axis, extent):
        # arange along one axis, shaped for broadcasting
        return torch.arange(extent, dtype=dtype).reshape(
            unit[:axis] + (extent,) + unit[axis + 1 :]
        )

    if sparse:
        out = tuple()
        for axis, extent in enumerate(dimensions):
            out = out + (_ramp(axis, extent),)
        return out

    out = torch.empty((rank,) + dimensions, dtype=dtype)
    for axis, extent in enumerate(dimensions):
        out[axis] = _ramp(axis, extent)
    return out


# ### tri*-something ###


def tril(m: ArrayLike, k=0):
    """Lower triangle of *m* at and below the k-th diagonal."""
    return torch.tril(m, k)


def triu(m: ArrayLike, k=0):
    """Upper triangle of *m* at and above the k-th diagonal."""
    return torch.triu(m, k)


def tril_indices(n, k=0, m=None):
    """Indices of the lower triangle of an (n, m) array (m defaults to n)."""
    cols = n if m is None else m
    return torch.tril_indices(n, cols, offset=k)


def triu_indices(n, k=0, m=None):
    """Indices of the upper triangle of an (n, m) array (m defaults to n)."""
    cols = n if m is None else m
    return torch.triu_indices(n, cols, offset=k)
def triu_indices_from(arr: ArrayLike, k=0):
    """Upper-triangle indices for the 2-D array *arr*."""
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    # Return a tensor rather than a tuple to avoid a graphbreak
    rows, cols = arr.shape
    return torch.triu_indices(rows, cols, offset=k)


def tri(
    N,
    M=None,
    k=0,
    dtype: Optional[DTypeLike] = None,
    *,
    like: NotImplementedType = None,
):
    """Array with ones at and below the k-th diagonal, zeros elsewhere."""
    cols = N if M is None else M
    return torch.tril(torch.ones((N, cols), dtype=dtype), diagonal=k)


# ### equality, equivalence, allclose ###


def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
    """Elementwise closeness test in the common dtype of the inputs."""
    common = _dtypes_impl.result_type_impl(a, b)
    a = _util.cast_if_needed(a, common)
    b = _util.cast_if_needed(b, common)
    return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)


def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False):
    """True when all elements of a and b are close."""
    common = _dtypes_impl.result_type_impl(a, b)
    a = _util.cast_if_needed(a, common)
    b = _util.cast_if_needed(b, common)
    return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)


def _tensor_equal(a1, a2, equal_nan=False):
    # Implementation of array_equal/array_equiv.
    if a1.shape != a2.shape:
        return False
    eq = a1 == a2
    if equal_nan:
        # treat matching NaNs as equal
        eq = eq | (torch.isnan(a1) & torch.isnan(a2))
    return eq.all().item()


def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False):
    """True when a1 and a2 have the same shape and equal elements."""
    return _tensor_equal(a1, a2, equal_nan=equal_nan)


def array_equiv(a1: ArrayLike, a2: ArrayLike):
    """Like array_equal, but attempts to broadcast the inputs first."""
    # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not
    try:
        b1, b2 = torch.broadcast_tensors(a1, a2)
    except RuntimeError:
        # failed to broadcast => not equivalent
        return False
    return _tensor_equal(b1, b2)


def mintypecode():
    raise NotImplementedError


def nan_to_num(
    x: ArrayLike, copy: NotImplementedType = True, nan=0.0, posinf=None, neginf=None
):
    """Replace NaN/inf entries with finite numbers."""
    if not x.is_complex():
        return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
    # work around RuntimeError: "nan_to_num" not implemented for 'ComplexDouble':
    # fix real and imaginary parts separately and recombine
    re = torch.nan_to_num(x.real, nan=nan, posinf=posinf, neginf=neginf)
    im = torch.nan_to_num(x.imag, nan=nan, posinf=posinf, neginf=neginf)
    return re + 1j * im


def asfarray():
    raise NotImplementedError


def block(*args, **kwds):
    raise NotImplementedError


# ### put/take_along_axis ###


def take(
    a: ArrayLike,
    indices: ArrayLike,
    axis=None,
    out: Optional[OutArray] = None,
    mode: NotImplementedType = "raise",
):
    """Take elements along an axis (flattens when axis is None)."""
    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)
    selector = (slice(None),) * axis + (indices, ...)
    return a[selector]


def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
    """Gather values from *arr* by matching 1-to-1 with *indices* along *axis*."""
    (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
    axis = _util.normalize_axis_index(axis, arr.ndim)
    return torch.gather(arr, axis, indices)
Any + # unnecessary trailing elements are then trimmed. + if ind.numel() > v.numel(): + ratio = (ind.numel() + v.numel() - 1) // v.numel() + v = v.unsqueeze(0).expand((ratio,) + v.shape) + # Trim unnecessary elements, regarldess if v was expanded or not. Note + # np.put() trims v to match ind by default too. + if ind.numel() < v.numel(): + v = v.flatten() + v = v[: ind.numel()] + a.put_(ind, v) + return None + + +def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis): + (arr,), axis = _util.axis_none_flatten(arr, axis=axis) + axis = _util.normalize_axis_index(axis, arr.ndim) + + indices, values = torch.broadcast_tensors(indices, values) + values = _util.cast_if_needed(values, arr.dtype) + result = torch.scatter(arr, axis, indices, values) + arr.copy_(result.reshape(arr.shape)) + return None + + +def choose( + a: ArrayLike, + choices: Sequence[ArrayLike], + out: Optional[OutArray] = None, + mode: NotImplementedType = "raise", +): + # First, broadcast elements of `choices` + choices = torch.stack(torch.broadcast_tensors(*choices)) + + # Use an analog of `gather(choices, 0, a)` which broadcasts `choices` vs `a`: + # (taken from https://github.com/pytorch/pytorch/issues/9407#issuecomment-1427907939) + idx_list = [ + torch.arange(dim).view((1,) * i + (dim,) + (1,) * (choices.ndim - i - 1)) + for i, dim in enumerate(choices.shape) + ] + + idx_list[0] = a + return choices[idx_list].squeeze(0) + + +# ### unique et al ### + + +def unique( + ar: ArrayLike, + return_index: NotImplementedType = False, + return_inverse=False, + return_counts=False, + axis=None, + *, + equal_nan: NotImplementedType = True, +): + (ar,), axis = _util.axis_none_flatten(ar, axis=axis) + axis = _util.normalize_axis_index(axis, ar.ndim) + + is_half = ar.dtype == torch.float16 + if is_half: + ar = ar.to(torch.float32) + + result = torch.unique( + ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis + ) + + if is_half: + if isinstance(result, tuple): + result 
= (result[0].to(torch.float16),) + result[1:] + else: + result = result.to(torch.float16) + + return result + + +def nonzero(a: ArrayLike): + return torch.nonzero(a, as_tuple=True) + + +def argwhere(a: ArrayLike): + return torch.argwhere(a) + + +def flatnonzero(a: ArrayLike): + return torch.flatten(a).nonzero(as_tuple=True)[0] + + +def clip( + a: ArrayLike, + min: Optional[ArrayLike] = None, + max: Optional[ArrayLike] = None, + out: Optional[OutArray] = None, +): + return torch.clamp(a, min, max) + + +def repeat(a: ArrayLike, repeats: ArrayLike, axis=None): + # XXX: scalar repeats; ArrayLikeOrScalar ? + return torch.repeat_interleave(a, repeats, axis) + + +def tile(A: ArrayLike, reps): + if isinstance(reps, int): + reps = (reps,) + return torch.tile(A, reps) + + +def resize(a: ArrayLike, new_shape=None): + # implementation vendored from + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497 + if new_shape is None: + return a + + if isinstance(new_shape, int): + new_shape = (new_shape,) + + a = a.flatten() + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError("all elements of `new_shape` must be non-negative") + + if a.numel() == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. 
+ return torch.zeros(new_shape, dtype=a.dtype) + + repeats = -(-new_size // a.numel()) # ceil division + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +# ### diag et al ### + + +def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1): + axis1 = _util.normalize_axis_index(axis1, a.ndim) + axis2 = _util.normalize_axis_index(axis2, a.ndim) + return torch.diagonal(a, offset, axis1, axis2) + + +def trace( + a: ArrayLike, + offset=0, + axis1=0, + axis2=1, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, +): + result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype) + return result + + +def eye( + N, + M=None, + k=0, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + if M is None: + M = N + z = torch.zeros(N, M, dtype=dtype) + z.diagonal(k).fill_(1) + return z + + +def identity(n, dtype: Optional[DTypeLike] = None, *, like: NotImplementedType = None): + return torch.eye(n, dtype=dtype) + + +def diag(v: ArrayLike, k=0): + return torch.diag(v, k) + + +def diagflat(v: ArrayLike, k=0): + return torch.diagflat(v, k) + + +def diag_indices(n, ndim=2): + idx = torch.arange(n) + return (idx,) * ndim + + +def diag_indices_from(arr: ArrayLike): + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. 
+ s = arr.shape + if s[1:] != s[:-1]: + raise ValueError("All dimensions of input must be of equal length") + return diag_indices(s[0], arr.ndim) + + +def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False): + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + if val.numel() == 0 and not wrap: + a.fill_diagonal_(val) + return a + + if val.ndim == 0: + val = val.unsqueeze(0) + + # torch.Tensor.fill_diagonal_ only accepts scalars + # If the size of val is too large, then val is trimmed + if a.ndim == 2: + tall = a.shape[0] > a.shape[1] + # wrap does nothing for wide matrices... + if not wrap or not tall: + # Never wraps + diag = a.diagonal() + diag.copy_(val[: diag.numel()]) + else: + # wraps and tall... leaving one empty line between diagonals?! + max_, min_ = a.shape + idx = torch.arange(max_ - max_ // (min_ + 1)) + mod = idx % min_ + div = idx // min_ + a[(div * (min_ + 1) + mod, mod)] = val[: idx.numel()] + else: + idx = diag_indices_from(a) + # a.shape = (n, n, ..., n) + a[idx] = val[: a.shape[0]] + + return a + + +def vdot(a: ArrayLike, b: ArrayLike, /): + # 1. torch only accepts 1D arrays, numpy flattens + # 2. torch requires matching dtype, while numpy casts (?) 
+ t_a, t_b = torch.atleast_1d(a, b) + if t_a.ndim > 1: + t_a = t_a.flatten() + if t_b.ndim > 1: + t_b = t_b.flatten() + + dtype = _dtypes_impl.result_type_impl(t_a, t_b) + is_half = dtype == torch.float16 and (t_a.is_cpu or t_b.is_cpu) + is_bool = dtype == torch.bool + + # work around torch's "dot" not implemented for 'Half', 'Bool' + if is_half: + dtype = torch.float32 + elif is_bool: + dtype = torch.uint8 + + t_a = _util.cast_if_needed(t_a, dtype) + t_b = _util.cast_if_needed(t_b, dtype) + + result = torch.vdot(t_a, t_b) + + if is_half: + result = result.to(torch.float16) + elif is_bool: + result = result.to(torch.bool) + + return result + + +def tensordot(a: ArrayLike, b: ArrayLike, axes=2): + if isinstance(axes, (list, tuple)): + axes = [[ax] if isinstance(ax, int) else ax for ax in axes] + + target_dtype = _dtypes_impl.result_type_impl(a, b) + a = _util.cast_if_needed(a, target_dtype) + b = _util.cast_if_needed(b, target_dtype) + + return torch.tensordot(a, b, dims=axes) + + +def dot(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None): + dtype = _dtypes_impl.result_type_impl(a, b) + is_bool = dtype == torch.bool + if is_bool: + dtype = torch.uint8 + + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + if a.ndim == 0 or b.ndim == 0: + result = a * b + else: + result = torch.matmul(a, b) + + if is_bool: + result = result.to(torch.bool) + + return result + + +def inner(a: ArrayLike, b: ArrayLike, /): + dtype = _dtypes_impl.result_type_impl(a, b) + is_half = dtype == torch.float16 and (a.is_cpu or b.is_cpu) + is_bool = dtype == torch.bool + + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + elif is_bool: + dtype = torch.uint8 + + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + result = torch.inner(a, b) + + if is_half: + result = result.to(torch.float16) + elif is_bool: + result = result.to(torch.bool) + return result + + +def outer(a: ArrayLike, 
b: ArrayLike, out: Optional[OutArray] = None): + return torch.outer(a, b) + + +def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None): + # implementation vendored from + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1486-L1685 + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + + # Check axisa and axisb are within bounds + axisa = _util.normalize_axis_index(axisa, a.ndim) + axisb = _util.normalize_axis_index(axisb, b.ndim) + + # Move working axis to the end of the shape + a = torch.moveaxis(a, axisa, -1) + b = torch.moveaxis(b, axisb, -1) + msg = "incompatible dimensions for cross product\n" "(dimension must be 2 or 3)" + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + + # Create the output array + shape = broadcast_shapes(a[..., 0].shape, b[..., 0].shape) + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + # Check axisc is within bounds + axisc = _util.normalize_axis_index(axisc, len(shape)) + dtype = _dtypes_impl.result_type_impl(a, b) + cp = torch.empty(shape, dtype=dtype) + + # recast arrays as dtype + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + cp[...] = a0 * b1 - a1 * b0 + return cp + else: + assert b.shape[-1] == 3 + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + cp0[...] = a1 * b2 + cp1[...] = -a0 * b2 + cp2[...] = a0 * b1 - a1 * b0 + else: + assert a.shape[-1] == 3 + if b.shape[-1] == 3: + cp0[...] = a1 * b2 - a2 * b1 + cp1[...] = a2 * b0 - a0 * b2 + cp2[...] = a0 * b1 - a1 * b0 + else: + assert b.shape[-1] == 2 + cp0[...] 
= -a2 * b1 + cp1[...] = a2 * b0 + cp2[...] = a0 * b1 - a1 * b0 + + return torch.moveaxis(cp, -1, axisc) + + +def einsum(*operands, out=None, dtype=None, order="K", casting="safe", optimize=False): + # Have to manually normalize *operands and **kwargs, following the NumPy signature + # We have a local import to avoid poluting the global space, as it will be then + # exported in funcs.py + from ._ndarray import ndarray + from ._normalizations import ( + maybe_copy_to, + normalize_array_like, + normalize_casting, + normalize_dtype, + wrap_tensors, + ) + + dtype = normalize_dtype(dtype) + casting = normalize_casting(casting) + if out is not None and not isinstance(out, ndarray): + raise TypeError("'out' must be an array") + if order != "K": + raise NotImplementedError("'order' parameter is not supported.") + + # parse arrays and normalize them + sublist_format = not isinstance(operands[0], str) + if sublist_format: + # op, str, op, str ... [sublistout] format: normalize every other argument + + # - if sublistout is not given, the length of operands is even, and we pick + # odd-numbered elements, which are arrays. + # - if sublistout is given, the length of operands is odd, we peel off + # the last one, and pick odd-numbered elements, which are arrays. + # Without [:-1], we would have picked sublistout, too. 
+ array_operands = operands[:-1][::2] + else: + # ("ij->", arrays) format + subscripts, array_operands = operands[0], operands[1:] + + tensors = [normalize_array_like(op) for op in array_operands] + target_dtype = _dtypes_impl.result_type_impl(*tensors) if dtype is None else dtype + + # work around 'bmm' not implemented for 'Half' etc + is_half = target_dtype == torch.float16 and all(t.is_cpu for t in tensors) + if is_half: + target_dtype = torch.float32 + + is_short_int = target_dtype in [torch.uint8, torch.int8, torch.int16, torch.int32] + if is_short_int: + target_dtype = torch.int64 + + tensors = _util.typecast_tensors(tensors, target_dtype, casting) + + from torch.backends import opt_einsum + + try: + # set the global state to handle the optimize=... argument, restore on exit + if opt_einsum.is_available(): + old_strategy = torch.backends.opt_einsum.strategy + torch.backends.opt_einsum.strategy = optimize + + if sublist_format: + # recombine operands + sublists = operands[1::2] + has_sublistout = len(operands) % 2 == 1 + if has_sublistout: + sublistout = operands[-1] + operands = list(itertools.chain(*zip(tensors, sublists))) + if has_sublistout: + operands.append(sublistout) + + result = torch.einsum(*operands) + else: + result = torch.einsum(subscripts, *tensors) + + finally: + if opt_einsum.is_available(): + torch.backends.opt_einsum.strategy = old_strategy + + result = maybe_copy_to(out, result) + return wrap_tensors(result) + + +# ### sort and partition ### + + +def _sort_helper(tensor, axis, kind, order): + (tensor,), axis = _util.axis_none_flatten(tensor, axis=axis) + axis = _util.normalize_axis_index(axis, tensor.ndim) + + stable = kind == "stable" + + return tensor, axis, stable + + +def sort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None): + if a.dtype.is_complex: + return NotImplemented + # `order` keyword arg is only relevant for structured dtypes; so not supported here. 
+ a, axis, stable = _sort_helper(a, axis, kind, order) + result = torch.sort(a, dim=axis, stable=stable) + return result.values + + +def argsort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None): + if a.dtype.is_complex: + return NotImplemented + a, axis, stable = _sort_helper(a, axis, kind, order) + return torch.argsort(a, dim=axis, stable=stable) + + +def searchsorted( + a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None +): + if a.dtype.is_complex: + return NotImplemented + + return torch.searchsorted(a, v, side=side, sorter=sorter) + + +# ### swap/move/roll axis ### + + +def moveaxis(a: ArrayLike, source, destination): + source = _util.normalize_axis_tuple(source, a.ndim, "source") + destination = _util.normalize_axis_tuple(destination, a.ndim, "destination") + return torch.moveaxis(a, source, destination) + + +def swapaxes(a: ArrayLike, axis1, axis2): + axis1 = _util.normalize_axis_index(axis1, a.ndim) + axis2 = _util.normalize_axis_index(axis2, a.ndim) + return torch.swapaxes(a, axis1, axis2) + + +def rollaxis(a: ArrayLike, axis, start=0): + # Straight vendor from: + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259 + # + # Also note this function in NumPy is mostly retained for backwards compat + # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing) + # so let's not touch it unless hard pressed. + n = a.ndim + axis = _util.normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise _util.AxisError(msg % ("start", -n, "start", n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + # numpy returns a view, here we try returning the tensor itself + # return tensor[...] 
+ return a + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.view(axes) + + +def roll(a: ArrayLike, shift, axis=None): + if axis is not None: + axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + if not isinstance(shift, tuple): + shift = (shift,) * len(axis) + return torch.roll(a, shift, axis) + + +# ### shape manipulations ### + + +def squeeze(a: ArrayLike, axis=None): + if axis == (): + result = a + elif axis is None: + result = a.squeeze() + else: + if isinstance(axis, tuple): + result = a + for ax in axis: + result = a.squeeze(ax) + else: + result = a.squeeze(axis) + return result + + +def reshape(a: ArrayLike, newshape, order: NotImplementedType = "C"): + # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh) + newshape = newshape[0] if len(newshape) == 1 else newshape + return a.reshape(newshape) + + +# NB: cannot use torch.reshape(a, newshape) above, because of +# (Pdb) torch.reshape(torch.as_tensor([1]), 1) +# *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int + + +def transpose(a: ArrayLike, axes=None): + # numpy allows both .tranpose(sh) and .transpose(*sh) + # also older code uses axes being a list + if axes in [(), None, (None,)]: + axes = tuple(reversed(range(a.ndim))) + elif len(axes) == 1: + axes = axes[0] + return a.permute(axes) + + +def ravel(a: ArrayLike, order: NotImplementedType = "C"): + return torch.flatten(a) + + +def diff( + a: ArrayLike, + n=1, + axis=-1, + prepend: Optional[ArrayLike] = None, + append: Optional[ArrayLike] = None, +): + axis = _util.normalize_axis_index(axis, a.ndim) + + if n < 0: + raise ValueError(f"order must be non-negative but got {n}") + + if n == 0: + # match numpy and return the input immediately + return a + + if prepend is not None: + shape = list(a.shape) + shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1 + prepend = torch.broadcast_to(prepend, shape) + + if append is not None: + shape = 
list(a.shape) + shape[axis] = append.shape[axis] if append.ndim > 0 else 1 + append = torch.broadcast_to(append, shape) + + return torch.diff(a, n, axis=axis, prepend=prepend, append=append) + + +# ### math functions ### + + +def angle(z: ArrayLike, deg=False): + result = torch.angle(z) + if deg: + result = result * (180 / torch.pi) + return result + + +def sinc(x: ArrayLike): + return torch.sinc(x) + + +# NB: have to normalize *varargs manually +def gradient(f: ArrayLike, *varargs, axis=None, edge_order=1): + N = f.ndim # number of dimensions + + varargs = _util.ndarrays_to_tensors(varargs) + + if axis is None: + axes = tuple(range(N)) + else: + axes = _util.normalize_axis_tuple(axis, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and ( + type(varargs[0]) in _dtypes_impl.SCALAR_TYPES or varargs[0].ndim == 0 + ): + # single scalar or 0D tensor for all axes (np.ndim(varargs[0]) == 0) + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + distances = torch.as_tensor(distances) + if distances.ndim == 0: + continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError( + "when 1d, distances must match " + "the length of the corresponding dimension" + ) + if not (distances.dtype.is_floating_point or distances.dtype.is_complex): + distances = distances.double() + + diffx = torch.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. 
This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)] * N + slice2 = [slice(None)] * N + slice3 = [slice(None)] * N + slice4 = [slice(None)] * N + + otype = f.dtype + if _dtypes_impl.python_type_for_torch(otype) in (int, bool): + # Convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + f = f.double() + otype = torch.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required." + ) + # result allocation + out = torch.empty_like(f, dtype=otype) + + # spacing for the current axis (NB: np.ndim(ax_dx) == 0) + uniform_spacing = type(ax_dx) in _dtypes_impl.SCALAR_TYPES or ax_dx.ndim == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2) / (dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = [1] * N + shape[axis] = -1 + a = a.reshape(shape) + b = b.reshape(shape) + c = c.reshape(shape) + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = ( + a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + ) + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) 
/ dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2.0 / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = -dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = ( + a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + ) + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2.0 / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = -(dx2 + dx1) / (dx1 * dx2) + c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = ( + a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + ) + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + else: + return outvals + + +# ### Type/shape etc queries ### + + +def round(a: ArrayLike, decimals=0, out: Optional[OutArray] = None): + if a.is_floating_point(): + result = torch.round(a, decimals=decimals) + elif a.is_complex(): + # RuntimeError: "round_cpu" not implemented for 'ComplexFloat' + result = torch.complex( + torch.round(a.real, decimals=decimals), + torch.round(a.imag, decimals=decimals), + ) + else: + # RuntimeError: "round_cpu" not implemented for 'int' + result = a + return result + + 
+around = round +round_ = round + + +def real_if_close(a: ArrayLike, tol=100): + if not torch.is_complex(a): + return a + if tol > 1: + # Undocumented in numpy: if tol < 1, it's an absolute tolerance! + # Otherwise, tol > 1 is relative tolerance, in units of the dtype epsilon + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577 + tol = tol * torch.finfo(a.dtype).eps + + mask = torch.abs(a.imag) < tol + return a.real if mask.all() else a + + +def real(a: ArrayLike): + return torch.real(a) + + +def imag(a: ArrayLike): + if a.is_complex(): + return a.imag + return torch.zeros_like(a) + + +def iscomplex(x: ArrayLike): + if torch.is_complex(x): + return x.imag != 0 + return torch.zeros_like(x, dtype=torch.bool) + + +def isreal(x: ArrayLike): + if torch.is_complex(x): + return x.imag == 0 + return torch.ones_like(x, dtype=torch.bool) + + +def iscomplexobj(x: ArrayLike): + return torch.is_complex(x) + + +def isrealobj(x: ArrayLike): + return not torch.is_complex(x) + + +def isneginf(x: ArrayLike, out: Optional[OutArray] = None): + return torch.isneginf(x) + + +def isposinf(x: ArrayLike, out: Optional[OutArray] = None): + return torch.isposinf(x) + + +def i0(x: ArrayLike): + return torch.special.i0(x) + + +def isscalar(a): + # We need to use normalize_array_like, but we don't want to export it in funcs.py + from ._normalizations import normalize_array_like + + try: + t = normalize_array_like(a) + return t.numel() == 1 + except Exception: + return False + + +# ### Filter windows ### + + +def hamming(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.hamming_window(M, periodic=False, dtype=dtype) + + +def hanning(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.hann_window(M, periodic=False, dtype=dtype) + + +def kaiser(M, beta): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype) + + +def blackman(M): + dtype = 
_dtypes_impl.default_dtypes().float_dtype + return torch.blackman_window(M, periodic=False, dtype=dtype) + + +def bartlett(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.bartlett_window(M, periodic=False, dtype=dtype) + + +# ### Dtype routines ### + +# vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666 + + +array_type = [ + [torch.float16, torch.float32, torch.float64], + [None, torch.complex64, torch.complex128], +] +array_precision = { + torch.float16: 0, + torch.float32: 1, + torch.float64: 2, + torch.complex64: 1, + torch.complex128: 2, +} + + +def common_type(*tensors: ArrayLike): + is_complex = False + precision = 0 + for a in tensors: + t = a.dtype + if iscomplexobj(a): + is_complex = True + if not (t.is_floating_point or t.is_complex): + p = 2 # array_precision[_nx.double] + else: + p = array_precision.get(t, None) + if p is None: + raise TypeError("can't get common type for non-numeric array") + precision = builtins.max(precision, p) + if is_complex: + return array_type[1][precision] + else: + return array_type[0][precision] + + +# ### histograms ### + + +def histogram( + a: ArrayLike, + bins: ArrayLike = 10, + range=None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + if normed is not None: + raise ValueError("normed argument is deprecated, use density= instead") + + is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex) + is_w_int = weights is None or not weights.dtype.is_floating_point + if is_a_int: + a = a.double() + + if weights is not None: + weights = _util.cast_if_needed(weights, a.dtype) + + if isinstance(bins, torch.Tensor): + if bins.ndim == 0: + # bins was a single int + bins = operator.index(bins) + else: + bins = _util.cast_if_needed(bins, a.dtype) + + if range is None: + h, b = torch.histogram(a, bins, weight=weights, density=bool(density)) + else: + h, b = torch.histogram( + a, bins, range=range, weight=weights, density=bool(density) + ) + + 
if not density and is_w_int: + h = h.long() + if is_a_int: + b = b.long() + + return h, b + + +def histogram2d( + x, + y, + bins=10, + range: Optional[ArrayLike] = None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/twodim_base.py#L655-L821 + if len(x) != len(y): + raise ValueError("x and y must have the same length.") + + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + bins = [bins, bins] + + h, e = histogramdd((x, y), bins, range, normed, weights, density) + + return h, e[0], e[1] + + +def histogramdd( + sample, + bins=10, + range: Optional[ArrayLike] = None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + # have to normalize manually because `sample` interpretation differs + # for a list of lists and a 2D array + if normed is not None: + raise ValueError("normed argument is deprecated, use density= instead") + + from ._normalizations import normalize_array_like, normalize_seq_array_like + + if isinstance(sample, (list, tuple)): + sample = normalize_array_like(sample).T + else: + sample = normalize_array_like(sample) + + sample = torch.atleast_2d(sample) + + if not (sample.dtype.is_floating_point or sample.dtype.is_complex): + sample = sample.double() + + # bins is either an int, or a sequence of ints or a sequence of arrays + bins_is_array = not ( + isinstance(bins, int) or builtins.all(isinstance(b, int) for b in bins) + ) + if bins_is_array: + bins = normalize_seq_array_like(bins) + bins_dtypes = [b.dtype for b in bins] + bins = [_util.cast_if_needed(b, sample.dtype) for b in bins] + + if range is not None: + range = range.flatten().tolist() + + if weights is not None: + # range=... 
is required : interleave min and max values per dimension + mm = sample.aminmax(dim=0) + range = torch.cat(mm).reshape(2, -1).T.flatten() + range = tuple(range.tolist()) + weights = _util.cast_if_needed(weights, sample.dtype) + w_kwd = {"weight": weights} + else: + w_kwd = {} + + h, b = torch.histogramdd(sample, bins, range, density=bool(density), **w_kwd) + + if bins_is_array: + b = [_util.cast_if_needed(bb, dtyp) for bb, dtyp in zip(b, bins_dtypes)] + + return h, b + + +# ### odds and ends + + +def min_scalar_type(a: ArrayLike, /): + # https://github.com/numpy/numpy/blob/maintenance/1.24.x/numpy/core/src/multiarray/convert_datatype.c#L1288 + + from ._dtypes import DType + + if a.numel() > 1: + # numpy docs: "For non-scalar array a, returns the vector’s dtype unmodified." + return DType(a.dtype) + + if a.dtype == torch.bool: + dtype = torch.bool + + elif a.dtype.is_complex: + fi = torch.finfo(torch.float32) + fits_in_single = a.dtype == torch.complex64 or ( + fi.min <= a.real <= fi.max and fi.min <= a.imag <= fi.max + ) + dtype = torch.complex64 if fits_in_single else torch.complex128 + + elif a.dtype.is_floating_point: + for dt in [torch.float16, torch.float32, torch.float64]: + fi = torch.finfo(dt) + if fi.min <= a <= fi.max: + dtype = dt + break + else: + # must be integer + for dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]: + # Prefer unsigned int where possible, as numpy does. 
+ ii = torch.iinfo(dt) + if ii.min <= a <= ii.max: + dtype = dt + break + + return DType(dtype) + + +def pad(array: ArrayLike, pad_width: ArrayLike, mode="constant", **kwargs): + if mode != "constant": + raise NotImplementedError + value = kwargs.get("constant_values", 0) + # `value` must be a python scalar for torch.nn.functional.pad + typ = _dtypes_impl.python_type_for_torch(array.dtype) + value = typ(value) + + pad_width = torch.broadcast_to(pad_width, (array.ndim, 2)) + pad_width = torch.flip(pad_width, (0,)).flatten() + + return torch.nn.functional.pad(array, tuple(pad_width), value=value) diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/_getlimits.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/_getlimits.py new file mode 100644 index 0000000000000000000000000000000000000000..dc2cc845f752917caf2d8817f4f28be2b56f75e3 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/_getlimits.py @@ -0,0 +1,13 @@ +import torch + +from . import _dtypes + + +def finfo(dtyp): + torch_dtype = _dtypes.dtype(dtyp).torch_dtype + return torch.finfo(torch_dtype) + + +def iinfo(dtyp): + torch_dtype = _dtypes.dtype(dtyp).torch_dtype + return torch.iinfo(torch_dtype) diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/_normalizations.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/_normalizations.py new file mode 100644 index 0000000000000000000000000000000000000000..9b36ad37663c58cddaf77ff61aa88fab1d22f66c --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/_normalizations.py @@ -0,0 +1,243 @@ +""" "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on. +""" +from __future__ import annotations + +import functools +import inspect +import operator +import typing + +import torch + +from . 
import _dtypes, _dtypes_impl, _util + +ArrayLike = typing.TypeVar("ArrayLike") +Scalar = typing.Union[int, float, complex, bool] +ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar] + +DTypeLike = typing.TypeVar("DTypeLike") +AxisLike = typing.TypeVar("AxisLike") +NDArray = typing.TypeVar("NDarray") +CastingModes = typing.TypeVar("CastingModes") +KeepDims = typing.TypeVar("KeepDims") + +# OutArray is to annotate the out= array argument. +# +# This one is special is several respects: +# First, It needs to be an NDArray, and we need to preserve the `result is out` +# semantics. Therefore, we cannot just extract the Tensor from the out array. +# So we never pass the out array to implementer functions and handle it in the +# `normalizer` below. +# Second, the out= argument can be either keyword or positional argument, and +# as a positional arg, it can be anywhere in the signature. +# To handle all this, we define a special `OutArray` annotation and dispatch on it. +# +OutArray = typing.TypeVar("OutArray") + +try: + from typing import NotImplementedType +except ImportError: + NotImplementedType = typing.TypeVar("NotImplementedType") + + +def normalize_array_like(x, parm=None): + from ._ndarray import asarray + + return asarray(x).tensor + + +def normalize_array_like_or_scalar(x, parm=None): + if type(x) in _dtypes_impl.SCALAR_TYPES: + return x + return normalize_array_like(x, parm) + + +def normalize_optional_array_like(x, parm=None): + # This explicit normalizer is needed because otherwise normalize_array_like + # does not run for a parameter annotated as Optional[ArrayLike] + return None if x is None else normalize_array_like(x, parm) + + +def normalize_seq_array_like(x, parm=None): + return tuple(normalize_array_like(value) for value in x) + + +def normalize_dtype(dtype, parm=None): + # cf _decorators.dtype_to_torch + torch_dtype = None + if dtype is not None: + dtype = _dtypes.dtype(dtype) + torch_dtype = dtype.torch_dtype + return torch_dtype + + +def 
normalize_not_implemented(arg, parm): + if arg != parm.default: + raise NotImplementedError(f"'{parm.name}' parameter is not supported.") + + +def normalize_axis_like(arg, parm=None): + from ._ndarray import ndarray + + if isinstance(arg, ndarray): + arg = operator.index(arg) + return arg + + +def normalize_ndarray(arg, parm=None): + # check the arg is an ndarray, extract its tensor attribute + if arg is None: + return arg + + from ._ndarray import ndarray + + if not isinstance(arg, ndarray): + raise TypeError(f"'{parm.name}' must be an array") + return arg.tensor + + +def normalize_outarray(arg, parm=None): + # almost normalize_ndarray, only return the array, not its tensor + if arg is None: + return arg + + from ._ndarray import ndarray + + if not isinstance(arg, ndarray): + raise TypeError(f"'{parm.name}' must be an array") + return arg + + +def normalize_casting(arg, parm=None): + if arg not in ["no", "equiv", "safe", "same_kind", "unsafe"]: + raise ValueError( + f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')" + ) + return arg + + +normalizers = { + "ArrayLike": normalize_array_like, + "Union[ArrayLike, Scalar]": normalize_array_like_or_scalar, + "Optional[ArrayLike]": normalize_optional_array_like, + "Sequence[ArrayLike]": normalize_seq_array_like, + "Optional[NDArray]": normalize_ndarray, + "Optional[OutArray]": normalize_outarray, + "NDArray": normalize_ndarray, + "Optional[DTypeLike]": normalize_dtype, + "AxisLike": normalize_axis_like, + "NotImplementedType": normalize_not_implemented, + "Optional[CastingModes]": normalize_casting, +} + + +def maybe_normalize(arg, parm): + """Normalize arg if a normalizer is registred.""" + normalizer = normalizers.get(parm.annotation, None) + return normalizer(arg, parm) if normalizer else arg + + +# ### Return value helpers ### + + +def maybe_copy_to(out, result, promote_scalar_result=False): + # NB: here out is either an ndarray or None + if out is None: + return result + elif 
isinstance(result, torch.Tensor): + if result.shape != out.shape: + can_fit = result.numel() == 1 and out.ndim == 0 + if promote_scalar_result and can_fit: + result = result.squeeze() + else: + raise ValueError( + f"Bad size of the out array: out.shape = {out.shape}" + f" while result.shape = {result.shape}." + ) + out.tensor.copy_(result) + return out + elif isinstance(result, (tuple, list)): + return type(result)( + maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result) + ) + else: + raise AssertionError() # We should never hit this path + + +def wrap_tensors(result): + from ._ndarray import ndarray + + if isinstance(result, torch.Tensor): + return ndarray(result) + elif isinstance(result, (tuple, list)): + result = type(result)(wrap_tensors(x) for x in result) + return result + + +def array_or_scalar(values, py_type=float, return_scalar=False): + if return_scalar: + return py_type(values.item()) + else: + from ._ndarray import ndarray + + return ndarray(values) + + +# ### The main decorator to normalize arguments / postprocess the output ### + + +def normalizer(_func=None, *, promote_scalar_result=False): + def normalizer_inner(func): + @functools.wraps(func) + def wrapped(*args, **kwds): + sig = inspect.signature(func) + params = sig.parameters + first_param = next(iter(params.values())) + # NumPy's API does not have positional args before variadic positional args + if first_param.kind == inspect.Parameter.VAR_POSITIONAL: + args = [maybe_normalize(arg, first_param) for arg in args] + else: + # NB: extra unknown arguments: pass through, will raise in func(*args) below + args = ( + tuple( + maybe_normalize(arg, parm) + for arg, parm in zip(args, params.values()) + ) + + args[len(params.values()) :] + ) + + kwds = { + name: maybe_normalize(arg, params[name]) if name in params else arg + for name, arg in kwds.items() + } + result = func(*args, **kwds) + + # keepdims + bound_args = None + if "keepdims" in params and params["keepdims"].annotation == 
"KeepDims": + # keepdims can be in any position so we need sig.bind + bound_args = sig.bind(*args, **kwds).arguments + if bound_args.get("keepdims", False): + # In this case the first arg is the initial tensor and + # the second arg is (optionally) the axis + tensor = args[0] + axis = bound_args.get("axis") + result = _util.apply_keepdims(result, axis, tensor.ndim) + + # out + if "out" in params: + # out can be in any position so we need sig.bind + if bound_args is None: + bound_args = sig.bind(*args, **kwds).arguments + out = bound_args.get("out") + result = maybe_copy_to(out, result, promote_scalar_result) + result = wrap_tensors(result) + + return result + + return wrapped + + if _func is None: + return normalizer_inner + else: + return normalizer_inner(_func) diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..2efe2b3cb59ced8be3ed3c6bb3539c4c82fe574d --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py @@ -0,0 +1,437 @@ +""" Implementation of reduction operations, to be wrapped into arrays, dtypes etc +in the 'public' layer. + +Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance etc +""" +from __future__ import annotations + +import functools +from typing import Optional + +import torch + +from . import _dtypes_impl, _util +from ._normalizations import ( + ArrayLike, + AxisLike, + DTypeLike, + KeepDims, + NotImplementedType, + OutArray, +) + + +def _deco_axis_expand(func): + """ + Generically handle axis arguments in reductions. 
+ axis is *always* the 2nd arg in the funciton so no need to have a look at its signature + """ + + @functools.wraps(func) + def wrapped(a, axis=None, *args, **kwds): + if axis is not None: + axis = _util.normalize_axis_tuple(axis, a.ndim) + + if axis == (): + # So we insert a length-one axis and run the reduction along it. + # We cannot return a.clone() as this would sidestep the checks inside the function + newshape = _util.expand_shape(a.shape, axis=0) + a = a.reshape(newshape) + axis = (0,) + + return func(a, axis, *args, **kwds) + + return wrapped + + +def _atleast_float(dtype, other_dtype): + """Return a dtype that is real or complex floating-point. + + For inputs that are boolean or integer dtypes, this returns the default + float dtype; inputs that are complex get converted to the default complex + dtype; real floating-point dtypes (`float*`) get passed through unchanged + """ + if dtype is None: + dtype = other_dtype + if not (dtype.is_floating_point or dtype.is_complex): + return _dtypes_impl.default_dtypes().float_dtype + return dtype + + +@_deco_axis_expand +def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False): + return a.count_nonzero(axis) + + +@_deco_axis_expand +def argmax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + *, + keepdims: KeepDims = False, +): + axis = _util.allow_only_single_axis(axis) + + if a.dtype == torch.bool: + # RuntimeError: "argmax_cpu" not implemented for 'Bool' + a = a.to(torch.uint8) + + return torch.argmax(a, axis) + + +@_deco_axis_expand +def argmin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + *, + keepdims: KeepDims = False, +): + axis = _util.allow_only_single_axis(axis) + + if a.dtype == torch.bool: + # RuntimeError: "argmin_cpu" not implemented for 'Bool' + a = a.to(torch.uint8) + + return torch.argmin(a, axis) + + +@_deco_axis_expand +def any( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + 
keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + axis_kw = {} if axis is None else {"dim": axis} + return torch.any(a, **axis_kw) + + +@_deco_axis_expand +def all( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + axis_kw = {} if axis is None else {"dim": axis} + return torch.all(a, **axis_kw) + + +@_deco_axis_expand +def amax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + return a.amax(axis) + + +max = amax + + +@_deco_axis_expand +def amin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + return a.amin(axis) + + +min = amin + + +@_deco_axis_expand +def ptp( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, +): + return a.amax(axis) - a.amin(axis) + + +@_deco_axis_expand +def sum( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + assert dtype is None or isinstance(dtype, torch.dtype) + + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + + axis_kw = {} if axis is None else {"dim": axis} + return a.sum(dtype=dtype, **axis_kw) + + +@_deco_axis_expand +def prod( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + + if dtype == 
torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + + axis_kw = {} if axis is None else {"dim": axis} + return a.prod(dtype=dtype, **axis_kw) + + +product = prod + + +@_deco_axis_expand +def mean( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + dtype = _atleast_float(dtype, a.dtype) + + axis_kw = {} if axis is None else {"dim": axis} + result = a.mean(dtype=dtype, **axis_kw) + + return result + + +@_deco_axis_expand +def std( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + ddof=0, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + in_dtype = dtype + dtype = _atleast_float(dtype, a.dtype) + tensor = _util.cast_if_needed(a, dtype) + result = tensor.std(dim=axis, correction=ddof) + return _util.cast_if_needed(result, in_dtype) + + +@_deco_axis_expand +def var( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + ddof=0, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + in_dtype = dtype + dtype = _atleast_float(dtype, a.dtype) + tensor = _util.cast_if_needed(a, dtype) + result = tensor.var(dim=axis, correction=ddof) + return _util.cast_if_needed(result, in_dtype) + + +# cumsum / cumprod are almost reductions: +# 1. no keepdims +# 2. 
axis=None flattens + + +def cumsum( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, +): + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + if dtype is None: + dtype = a.dtype + + (a,), axis = _util.axis_none_flatten(a, axis=axis) + axis = _util.normalize_axis_index(axis, a.ndim) + + return a.cumsum(axis=axis, dtype=dtype) + + +def cumprod( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, +): + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + if dtype is None: + dtype = a.dtype + + (a,), axis = _util.axis_none_flatten(a, axis=axis) + axis = _util.normalize_axis_index(axis, a.ndim) + + return a.cumprod(axis=axis, dtype=dtype) + + +cumproduct = cumprod + + +def average( + a: ArrayLike, + axis=None, + weights: ArrayLike = None, + returned=False, + *, + keepdims=False, +): + if weights is None: + result = mean(a, axis=axis) + wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype) + else: + if not a.dtype.is_floating_point: + a = a.double() + + # axis & weights + if a.shape != weights.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " "differ." + ) + if weights.ndim != 1: + raise TypeError( + "1D weights expected when shapes of a and weights differ." + ) + if weights.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis." 
+ ) + + # setup weight to broadcast along axis + weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape) + weights = weights.swapaxes(-1, axis) + + # do the work + result_dtype = _dtypes_impl.result_type_impl(a, weights) + numerator = sum(a * weights, axis, dtype=result_dtype) + wsum = sum(weights, axis, dtype=result_dtype) + result = numerator / wsum + + # We process keepdims manually because the decorator does not deal with variadic returns + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + if returned: + if wsum.shape != result.shape: + wsum = torch.broadcast_to(wsum, result.shape).clone() + return result, wsum + else: + return result + + +# Not using deco_axis_expand as it assumes that axis is the second arg +def quantile( + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + overwrite_input=False, + method="linear", + keepdims: KeepDims = False, + *, + interpolation: NotImplementedType = None, +): + if overwrite_input: + # raise NotImplementedError("overwrite_input in quantile not implemented.") + # NumPy documents that `overwrite_input` MAY modify inputs: + # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile + # Here we choose to work out-of-place because why not. + pass + + if not a.dtype.is_floating_point: + dtype = _dtypes_impl.default_dtypes().float_dtype + a = a.to(dtype) + + # edge case: torch.quantile only supports float32 and float64 + if a.dtype == torch.float16: + a = a.to(torch.float32) + + if axis is None: + a = a.flatten() + q = q.flatten() + axis = (0,) + else: + axis = _util.normalize_axis_tuple(axis, a.ndim) + + # FIXME(Mario) Doesn't np.quantile accept a tuple? + # torch.quantile does accept a number. If we don't want to implement the tuple behaviour + # (it's deffo low prio) change `normalize_axis_tuple` into a normalize_axis index above. 
+ axis = _util.allow_only_single_axis(axis) + + q = _util.cast_if_needed(q, a.dtype) + + return torch.quantile(a, q, axis=axis, interpolation=method) + + +def percentile( + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + overwrite_input=False, + method="linear", + keepdims: KeepDims = False, + *, + interpolation: NotImplementedType = None, +): + return quantile( + a, + q / 100.0, + axis=axis, + overwrite_input=overwrite_input, + method=method, + keepdims=keepdims, + interpolation=interpolation, + ) + + +def median( + a: ArrayLike, + axis=None, + out: Optional[OutArray] = None, + overwrite_input=False, + keepdims: KeepDims = False, +): + return quantile( + a, + torch.as_tensor(0.5), + axis=axis, + overwrite_input=overwrite_input, + out=out, + keepdims=keepdims, + ) diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc5340127542078e96196ce067fc4680d2046be --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py @@ -0,0 +1,71 @@ +"""Export torch work functions for unary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `_numpy/_ufuncs.py` module. 
+""" + +import torch + +from torch import ( # noqa: F401 + absolute as fabs, # noqa: F401 + arccos, # noqa: F401 + arccosh, # noqa: F401 + arcsin, # noqa: F401 + arcsinh, # noqa: F401 + arctan, # noqa: F401 + arctanh, # noqa: F401 + bitwise_not, # noqa: F401 + bitwise_not as invert, # noqa: F401 + ceil, # noqa: F401 + conj_physical as conjugate, # noqa: F401 + cos, # noqa: F401 + cosh, # noqa: F401 + deg2rad, # noqa: F401 + deg2rad as radians, # noqa: F401 + exp, # noqa: F401 + exp2, # noqa: F401 + expm1, # noqa: F401 + floor, # noqa: F401 + isfinite, # noqa: F401 + isinf, # noqa: F401 + isnan, # noqa: F401 + log, # noqa: F401 + log10, # noqa: F401 + log1p, # noqa: F401 + log2, # noqa: F401 + logical_not, # noqa: F401 + negative, # noqa: F401 + rad2deg, # noqa: F401 + rad2deg as degrees, # noqa: F401 + reciprocal, # noqa: F401 + round as fix, # noqa: F401 + round as rint, # noqa: F401 + sign, # noqa: F401 + signbit, # noqa: F401 + sin, # noqa: F401 + sinh, # noqa: F401 + sqrt, # noqa: F401 + square, # noqa: F401 + tan, # noqa: F401 + tanh, # noqa: F401 + trunc, # noqa: F401 +) + + +# special cases: torch does not export these names +def cbrt(x): + return torch.pow(x, 1 / 3) + + +def positive(x): + return +x + + +def absolute(x): + # work around torch.absolute not impl for bools + if x.dtype == torch.bool: + return x + return torch.absolute(x) + + +# TODO set __name__ and __qualname__ +abs = absolute +conj = conjugate diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/fft.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..63f45a7b499825bc126be07739e69dde7a1edbfc --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/fft.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +import functools + +import torch + +from . 
import _dtypes_impl, _util +from ._normalizations import ArrayLike, normalizer + + +def upcast(func): + """NumPy fft casts inputs to 64 bit and *returns 64-bit results*.""" + + @functools.wraps(func) + def wrapped(tensor, *args, **kwds): + target_dtype = ( + _dtypes_impl.default_dtypes().complex_dtype + if tensor.is_complex() + else _dtypes_impl.default_dtypes().float_dtype + ) + tensor = _util.cast_if_needed(tensor, target_dtype) + return func(tensor, *args, **kwds) + + return wrapped + + +@normalizer +@upcast +def fft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.fft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def ifft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.ifft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def rfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.rfft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def irfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.irfft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def fftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.fftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def ifftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.ifftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def rfftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.rfftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def irfftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.irfftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def fft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.fft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def ifft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.ifft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def rfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.rfft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def irfft2(a: 
ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.irfft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def hfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.hfft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def ihfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.ihfft(a, n, dim=axis, norm=norm) + + +@normalizer +def fftfreq(n, d=1.0): + return torch.fft.fftfreq(n, d) + + +@normalizer +def rfftfreq(n, d=1.0): + return torch.fft.rfftfreq(n, d) + + +@normalizer +def fftshift(x: ArrayLike, axes=None): + return torch.fft.fftshift(x, axes) + + +@normalizer +def ifftshift(x: ArrayLike, axes=None): + return torch.fft.ifftshift(x, axes) diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/linalg.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..f008b5d7d2b3871637f23b96c783b42354fe4c05 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/linalg.py @@ -0,0 +1,237 @@ +from __future__ import annotations + +import functools +import math +from typing import Sequence + +import torch + +from . 
import _dtypes_impl, _util +from ._normalizations import ArrayLike, KeepDims, normalizer + + +class LinAlgError(Exception): + pass + + +def _atleast_float_1(a): + if not (a.dtype.is_floating_point or a.dtype.is_complex): + a = a.to(_dtypes_impl.default_dtypes().float_dtype) + return a + + +def _atleast_float_2(a, b): + dtyp = _dtypes_impl.result_type_impl(a, b) + if not (dtyp.is_floating_point or dtyp.is_complex): + dtyp = _dtypes_impl.default_dtypes().float_dtype + + a = _util.cast_if_needed(a, dtyp) + b = _util.cast_if_needed(b, dtyp) + return a, b + + +def linalg_errors(func): + @functools.wraps(func) + def wrapped(*args, **kwds): + try: + return func(*args, **kwds) + except torch._C._LinAlgError as e: + raise LinAlgError(*e.args) + + return wrapped + + +# ### Matrix and vector products ### + + +@normalizer +@linalg_errors +def matrix_power(a: ArrayLike, n): + a = _atleast_float_1(a) + return torch.linalg.matrix_power(a, n) + + +@normalizer +@linalg_errors +def multi_dot(inputs: Sequence[ArrayLike], *, out=None): + return torch.linalg.multi_dot(inputs) + + +# ### Solving equations and inverting matrices ### + + +@normalizer +@linalg_errors +def solve(a: ArrayLike, b: ArrayLike): + a, b = _atleast_float_2(a, b) + return torch.linalg.solve(a, b) + + +@normalizer +@linalg_errors +def lstsq(a: ArrayLike, b: ArrayLike, rcond=None): + a, b = _atleast_float_2(a, b) + # NumPy is using gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991 + # on CUDA, only `gels` is available though, so use it instead + driver = "gels" if a.is_cuda or b.is_cuda else "gelsd" + return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver) + + +@normalizer +@linalg_errors +def inv(a: ArrayLike): + a = _atleast_float_1(a) + result = torch.linalg.inv(a) + return result + + +@normalizer +@linalg_errors +def pinv(a: ArrayLike, rcond=1e-15, hermitian=False): + a = _atleast_float_1(a) + return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian) + + +@normalizer 
+@linalg_errors +def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None): + a, b = _atleast_float_2(a, b) + return torch.linalg.tensorsolve(a, b, dims=axes) + + +@normalizer +@linalg_errors +def tensorinv(a: ArrayLike, ind=2): + a = _atleast_float_1(a) + return torch.linalg.tensorinv(a, ind=ind) + + +# ### Norms and other numbers ### + + +@normalizer +@linalg_errors +def det(a: ArrayLike): + a = _atleast_float_1(a) + return torch.linalg.det(a) + + +@normalizer +@linalg_errors +def slogdet(a: ArrayLike): + a = _atleast_float_1(a) + return torch.linalg.slogdet(a) + + +@normalizer +@linalg_errors +def cond(x: ArrayLike, p=None): + x = _atleast_float_1(x) + + # check if empty + # cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744 + if x.numel() == 0 and math.prod(x.shape[-2:]) == 0: + raise LinAlgError("cond is not defined on empty arrays") + + result = torch.linalg.cond(x, p=p) + + # Convert nans to infs (numpy does it in a data-dependent way, depending on + # whether the input array has nans or not) + # XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744 + return torch.where(torch.isnan(result), float("inf"), result) + + +@normalizer +@linalg_errors +def matrix_rank(a: ArrayLike, tol=None, hermitian=False): + a = _atleast_float_1(a) + + if a.ndim < 2: + return int((a != 0).any()) + + if tol is None: + # follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885 + atol = 0 + rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps + else: + atol, rtol = tol, 0 + return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian) + + +@normalizer +@linalg_errors +def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False): + x = _atleast_float_1(x) + return torch.linalg.norm(x, ord=ord, dim=axis) + + +# ### Decompositions ### + + +@normalizer +@linalg_errors +def cholesky(a: ArrayLike): + a = _atleast_float_1(a) + return torch.linalg.cholesky(a) + + +@normalizer 
+@linalg_errors +def qr(a: ArrayLike, mode="reduced"): + a = _atleast_float_1(a) + result = torch.linalg.qr(a, mode=mode) + if mode == "r": + # match NumPy + result = result.R + return result + + +@normalizer +@linalg_errors +def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False): + a = _atleast_float_1(a) + if not compute_uv: + return torch.linalg.svdvals(a) + + # NB: ignore the hermitian= argument (no pytorch equivalent) + result = torch.linalg.svd(a, full_matrices=full_matrices) + return result + + +# ### Eigenvalues and eigenvectors ### + + +@normalizer +@linalg_errors +def eig(a: ArrayLike): + a = _atleast_float_1(a) + w, vt = torch.linalg.eig(a) + + if not a.is_complex() and w.is_complex() and (w.imag == 0).all(): + w = w.real + vt = vt.real + return w, vt + + +@normalizer +@linalg_errors +def eigh(a: ArrayLike, UPLO="L"): + a = _atleast_float_1(a) + return torch.linalg.eigh(a, UPLO=UPLO) + + +@normalizer +@linalg_errors +def eigvals(a: ArrayLike): + a = _atleast_float_1(a) + result = torch.linalg.eigvals(a) + if not a.is_complex() and result.is_complex() and (result.imag == 0).all(): + result = result.real + return result + + +@normalizer +@linalg_errors +def eigvalsh(a: ArrayLike, UPLO="L"): + a = _atleast_float_1(a) + return torch.linalg.eigvalsh(a, UPLO=UPLO) diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b62bdf2eefd16961a71843944c9dbc972eccf3a Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_numpy/testing/utils.py b/llava_next/lib/python3.10/site-packages/torch/_numpy/testing/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..03f0e99b93d470bcae342e62dc7dde22603b2f95 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_numpy/testing/utils.py @@ -0,0 +1,2381 @@ +""" +Utility function to facilitate testing. + +""" +import contextlib +import gc +import operator +import os +import platform +import pprint +import re +import shutil +import sys +import warnings +from functools import wraps +from io import StringIO +from tempfile import mkdtemp, mkstemp +from warnings import WarningMessage + +import torch._numpy as np +from torch._numpy import arange, asarray as asanyarray, empty, float32, intp, ndarray + +__all__ = [ + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_", + "assert_array_almost_equal", + "build_err_msg", + "decorate_methods", + "print_assert_equal", + "verbose", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + "assert_no_warnings", + "assert_allclose", + "IgnoreException", + "clear_and_catch_warnings", + "temppath", + "tempdir", + "IS_PYPY", + "HAS_REFCOUNT", + "IS_WASM", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "IS_PYSTON", +] + + +verbose = 0 + +IS_WASM = platform.machine() in ["wasm32", "wasm64"] +IS_PYPY = sys.implementation.name == "pypy" +IS_PYSTON = hasattr(sys, "pyston_version_info") +HAS_REFCOUNT = getattr(sys, "getrefcount", None) is not None and not IS_PYSTON + + +def assert_(val, msg=""): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. 
+ + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +def gisnan(x): + return np.isnan(x) + + +def gisfinite(x): + return np.isfinite(x) + + +def gisinf(x): + return np.isinf(x) + + +def build_err_msg( + arrays, + err_msg, + header="Items are not equal:", + verbose=True, + names=("ACTUAL", "DESIRED"), + precision=8, +): + msg = ["\n" + header] + if err_msg: + if err_msg.find("\n") == -1 and len(err_msg) < 79 - len(header): + msg = [msg[0] + " " + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + # r_func = partial(array_repr, precision=precision) + r_func = ndarray.__repr__ + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = f"[repr failed for <{type(a).__name__}>: {exc}]" + if r.count("\n") > 3: + r = "\n".join(r.splitlines()[:3]) + r += "..." + msg.append(f" {names[i]}: {r}") + return "\n".join(msg) + + +def assert_equal(actual, desired, err_msg="", verbose=True): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + When one of `actual` and `desired` is a scalar and the other is array_like, + the function checks that each element of the array_like object is equal to + the scalar. + + This function handles NaN comparisons as if NaN was a "normal" number. + That is, AssertionError is not raised if both objects have NaNs in the same + positions. This is in contrast to the IEEE standard on NaNs, which says + that NaN compared to anything must return False. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. 
+ err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal. + + Examples + -------- + >>> np.testing.assert_equal([4,5], [4,6]) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + The following comparison does not raise an exception. There are NaNs + in the inputs, but they are in the same positions. + + >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + + num_nones = sum([actual is None, desired is None]) + if num_nones == 1: + raise AssertionError(f"Not equal: {actual} != {desired}") + elif num_nones == 2: + return True + # else, carry on + + if isinstance(actual, np.DType) or isinstance(desired, np.DType): + result = actual == desired + if not result: + raise AssertionError(f"Not equal: {actual} != {desired}") + else: + return True + + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in desired.keys(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], f"key={k!r}\n{err_msg}", verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f"item={k!r}\n{err_msg}", verbose) + return + from torch._numpy import imag, iscomplexobj, isscalar, ndarray, real, signbit + + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate 
into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + # Inf/nan/negative zero handling + try: + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) + + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if "elementwise == comparison" in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. 
+ + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) # doctest: +SKIP + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) # doctest: +SKIP + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(" failed\nACTUAL: \n") + pprint.pprint(actual, msg) + msg.write("DESIRED: \n") + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual, desired, decimal=7, err_msg="", verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of `actual` and `desired` satisfy. + + ``abs(desired-actual) < float64(1.5 * 10**(-decimal))`` + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. 
+ + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> from torch._numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + + Mismatched elements: 1 / 2 (50%) + Max absolute difference: 6.666699636781459e-09 + Max relative difference: 2.8571569790287484e-09 + x: torch.ndarray([1.0000, 2.3333], dtype=float64) + y: torch.ndarray([1.0000, 2.3333], dtype=float64) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from torch._numpy import imag, iscomplexobj, ndarray, real + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = "Arrays are not almost equal to %d decimals" % decimal + return build_err_msg([actual, desired], err_msg, verbose=verbose, header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except 
AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) or isinstance( + desired, (ndarray, tuple, list) + ): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= np.float64(1.5 * 10.0 ** (-decimal)): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual, desired, significant=7, err_msg="", verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. 
def assert_approx_equal(actual, desired, significant=7, err_msg="", verbose=True):
    """
    Raises an AssertionError if two items are not equal up to significant
    digits.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    Given two numbers, check that they are approximately equal.
    Approximately equal is defined as the number of significant digits
    that agree.

    Parameters
    ----------
    actual : scalar
        The object to check.
    desired : scalar
        The expected object.
    significant : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)  # doctest: +SKIP
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,  # doctest: +SKIP
    ...                                significant=8)
    Traceback (most recent call last):
        ...
    AssertionError:
    Items are not equal to 8 significant digits:
     ACTUAL: 1.234567e-21
     DESIRED: 1.2345672e-21

    the evaluated condition that raises the exception is

    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
    True

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # NOTE(review): this helper deliberately uses real numpy (not
    # torch._numpy) for plain scalar math — inputs are coerced to float below.
    import numpy as np

    (actual, desired) = map(float, (actual, desired))
    if desired == actual:
        return
    # Normalized the numbers to be in range (-10.0,10.0)
    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
    scale = 0.5 * (np.abs(desired) + np.abs(actual))
    scale = np.power(10, np.floor(np.log10(scale)))
    try:
        sc_desired = desired / scale
    except ZeroDivisionError:
        sc_desired = 0.0
    try:
        sc_actual = actual / scale
    except ZeroDivisionError:
        sc_actual = 0.0
    msg = build_err_msg(
        [actual, desired],
        err_msg,
        header="Items are not equal to %d significant digits:" % significant,
        verbose=verbose,
    )
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(msg)
            else:
                if not desired == actual:
                    raise AssertionError(msg)
            return
    except (TypeError, NotImplementedError):
        pass
    # Compare the scale-normalized values digit-wise.
    if np.abs(sc_desired - sc_actual) >= np.power(10.0, -(significant - 1)):
        raise AssertionError(msg)


def assert_array_compare(
    comparison,
    x,
    y,
    err_msg="",
    verbose=True,
    header="",
    precision=6,
    equal_nan=True,
    equal_inf=True,
    *,
    strict=False,
):
    # Core engine behind assert_array_equal / assert_array_less /
    # assert_array_almost_equal / assert_allclose: checks shapes (and dtypes
    # when `strict`), masks out positions where both arrays hold nan/+inf/-inf,
    # applies `comparison` element-wise to the rest, and raises AssertionError
    # with a detailed report on any mismatch.
    __tracebackhide__ = True  # Hide traceback for py.test
    from torch._numpy import all, array, asarray, bool_, inf, isnan, max

    x = asarray(x)
    y = asarray(y)

    def array2string(a):
        return str(a)

    # original array for output formatting
    ox, oy = x, y

    def func_assert_same_pos(x, y, func=isnan, hasval="nan"):
        """Handling nan/inf.

        Combine results of running func on x and y, checking that they are True
        at the same locations.

        """
        __tracebackhide__ = True  # Hide traceback for py.test
        x_id = func(x)
        y_id = func(y)
        # We include work-arounds here to handle three types of slightly
        # pathological ndarray subclasses:
        # (1) all() on `masked` array scalars can return masked arrays, so we
        #     use != True
        # (2) __eq__ on some ndarray subclasses returns Python booleans
        #     instead of element-wise comparisons, so we cast to bool_() and
        #     use isinstance(..., bool) checks
        # (3) subclasses with bare-bones __array_function__ implementations may
        #     not implement np.all(), so favor using the .all() method
        # We are not committed to supporting such subclasses, but it's nice to
        # support them if possible.
        if (x_id == y_id).all().item() is not True:
            msg = build_err_msg(
                [x, y],
                err_msg + "\nx and y %s location mismatch:" % (hasval),
                verbose=verbose,
                header=header,
                names=("x", "y"),
                precision=precision,
            )
            raise AssertionError(msg)
        # If there is a scalar, then here we know the array has the same
        # flag as it everywhere, so we should return the scalar flag.
        if isinstance(x_id, bool) or x_id.ndim == 0:
            return bool_(x_id)
        elif isinstance(y_id, bool) or y_id.ndim == 0:
            return bool_(y_id)
        else:
            return y_id

    try:
        if strict:
            cond = x.shape == y.shape and x.dtype == y.dtype
        else:
            # Scalar (0-d) operands broadcast against anything.
            cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
        if not cond:
            if x.shape != y.shape:
                reason = f"\n(shapes {x.shape}, {y.shape} mismatch)"
            else:
                reason = f"\n(dtypes {x.dtype}, {y.dtype} mismatch)"
            msg = build_err_msg(
                [x, y],
                err_msg + reason,
                verbose=verbose,
                header=header,
                names=("x", "y"),
                precision=precision,
            )
            raise AssertionError(msg)

        # `flagged` marks positions excluded from the element-wise comparison
        # because both sides agree on a special value (nan / +-inf).
        flagged = bool_(False)

        if equal_nan:
            flagged = func_assert_same_pos(x, y, func=isnan, hasval="nan")

        if equal_inf:
            flagged |= func_assert_same_pos(
                x, y, func=lambda xy: xy == +inf, hasval="+inf"
            )
            flagged |= func_assert_same_pos(
                x, y, func=lambda xy: xy == -inf, hasval="-inf"
            )

        if flagged.ndim > 0:
            x, y = x[~flagged], y[~flagged]
            # Only do the comparison if actual values are left
            if x.size == 0:
                return
        elif flagged:
            # no sense doing comparison if everything is flagged.
            return

        val = comparison(x, y)

        if isinstance(val, bool):
            cond = val
            reduced = array([val])
        else:
            reduced = val.ravel()
            cond = reduced.all()

        # The below comparison is a hack to ensure that fully masked
        # results, for which val.ravel().all() returns np.ma.masked, do not
        # trigger a failure (np.ma.masked != True evaluates as np.ma.masked,
        # which is falsy).
        if not cond:
            n_mismatch = reduced.size - int(reduced.sum(dtype=intp))
            n_elements = flagged.size if flagged.ndim != 0 else reduced.size
            percent_mismatch = 100 * n_mismatch / n_elements
            remarks = [
                f"Mismatched elements: {n_mismatch} / {n_elements} ({percent_mismatch:.3g}%)"
            ]

            # with errstate(all='ignore'):
            # ignore errors for non-numeric types
            with contextlib.suppress(TypeError, RuntimeError):
                error = abs(x - y)
                if np.issubdtype(x.dtype, np.unsignedinteger):
                    # Unsigned subtraction wraps; take the smaller of the two
                    # one-sided differences.
                    error2 = abs(y - x)
                    np.minimum(error, error2, out=error)
                max_abs_error = max(error)
                remarks.append(
                    "Max absolute difference: " + array2string(max_abs_error.item())
                )

                # note: this definition of relative error matches that one
                # used by assert_allclose (found in np.isclose)
                # Filter values where the divisor would be zero
                nonzero = bool_(y != 0)
                if all(~nonzero):
                    max_rel_error = array(inf)
                else:
                    max_rel_error = max(error[nonzero] / abs(y[nonzero]))
                remarks.append(
                    "Max relative difference: " + array2string(max_rel_error.item())
                )

            err_msg += "\n" + "\n".join(remarks)
            msg = build_err_msg(
                [ox, oy],
                err_msg,
                verbose=verbose,
                header=header,
                names=("x", "y"),
                precision=precision,
            )
            raise AssertionError(msg)
    except ValueError:
        # A ValueError inside the machinery itself (not a mismatch) is
        # re-raised with the traceback embedded in the report header.
        import traceback

        efmt = traceback.format_exc()
        header = f"error during assertion:\n\n{efmt}\n\n{header}"

        msg = build_err_msg(
            [x, y],
            err_msg,
            verbose=verbose,
            header=header,
            names=("x", "y"),
            precision=precision,
        )
        raise ValueError(msg)
def assert_array_equal(x, y, err_msg="", verbose=True, *, strict=False):
    """
    Raise an AssertionError unless two array_like objects are equal.

    Shapes must match (a 0-d/scalar operand is compared against every
    element of the other, unless `strict`), and all elements must compare
    equal.  Unlike IEEE semantics, NaNs in matching positions are treated
    as equal.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    strict : bool, optional
        If True, an AssertionError is also raised when the shapes or the
        dtypes differ; the scalar broadcasting described above is disabled.

    Raises
    ------
    AssertionError
        If the two objects are not equal.

    See Also
    --------
    assert_allclose : Equality with relative/absolute tolerances.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> np.testing.assert_array_equal([1.0, 2.33333, np.nan],
    ...                               [np.exp(0), 2.33333, np.nan])

    >>> x = np.full((2, 5), fill_value=3)
    >>> np.testing.assert_array_equal(x, 3)
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to the shared comparison engine with plain equality.
    assert_array_compare(
        operator.eq,
        x,
        y,
        err_msg=err_msg,
        verbose=verbose,
        header="Arrays are not equal",
        strict=strict,
    )
def assert_array_almost_equal(x, y, decimal=6, err_msg="", verbose=True):
    """
    Raises an AssertionError if two objects are not equal up to desired
    precision.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    The test verifies identical shapes and that the elements of ``actual`` and
    ``desired`` satisfy.

        ``abs(desired-actual) < 1.5 * 10**(-decimal)``

    That is a looser test than originally documented, but agrees with what the
    actual implementation did up to rounding vagaries. An exception is raised
    at shape mismatch or conflicting values. In contrast to the standard usage
    in numpy, NaNs are compared like numbers, no assertion is raised if both
    objects have NaNs in the same positions.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : int, optional
        Desired precision, default is 6.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    the first assert does not raise an exception

    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
    ...                                      [1.0,2.333,np.nan])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33339,np.nan], decimal=5)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 5 decimals

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from torch._numpy import any as npany, float_, issubdtype, number, result_type

    def compare(x, y):
        # Element-wise predicate handed to assert_array_compare: strip out
        # positions where both sides are infinite (their sign agreement was
        # checked here), then test the absolute difference against the
        # decimal threshold.
        try:
            if npany(gisinf(x)) or npany(gisinf(y)):
                xinfid = gisinf(x)
                yinfid = gisinf(y)
                if not (xinfid == yinfid).all():
                    return False
                # if one item, x and y is +- inf
                if x.size == y.size == 1:
                    return x == y
                x = x[~xinfid]
                y = y[~yinfid]
        except (TypeError, NotImplementedError):
            pass

        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
        # casting of x later.
        dtype = result_type(y, 1.0)
        y = asanyarray(y, dtype)
        z = abs(x - y)

        if not issubdtype(z.dtype, number):
            z = z.astype(float_)  # handle object arrays

        return z < 1.5 * 10.0 ** (-decimal)

    assert_array_compare(
        compare,
        x,
        y,
        err_msg=err_msg,
        verbose=verbose,
        header=("Arrays are not almost equal to %d decimals" % decimal),
        precision=decimal,
    )
def assert_array_less(x, y, err_msg="", verbose=True):
    """
    Raises an AssertionError if two array_like objects are not ordered by less
    than.

    Given two array_like objects, check that the shape is equal and all
    elements of the first object are strictly smaller than those of the
    second object. An exception is raised at shape mismatch or incorrectly
    ordered values. Shape mismatch does not raise if an object has zero
    dimension. In contrast to the standard usage in numpy, NaNs are
    compared, no assertion is raised if both objects have NaNs in the same
    positions.

    Parameters
    ----------
    x : array_like
        The smaller object to check.
    y : array_like
        The larger object to compare.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_array_equal: tests objects for equality
    assert_array_almost_equal: test objects for equality up to precision

    Examples
    --------
    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
    >>> np.testing.assert_array_less([1.0, 4.0], 3)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not less-ordered
    ...

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # equal_inf=False: matching infinities must still satisfy strict "<".
    assert_array_compare(
        operator.__lt__,
        x,
        y,
        err_msg=err_msg,
        verbose=verbose,
        header="Arrays are not less-ordered",
        equal_inf=False,
    )


def assert_string_equal(actual, desired):
    """
    Test if two strings are equal.

    If the given strings are equal, `assert_string_equal` does nothing.
    If they are not equal, an AssertionError is raised, and the diff
    between the strings is shown.

    Parameters
    ----------
    actual : str
        The string to test for equality against the expected string.
    desired : str
        The expected string.

    Examples
    --------
    >>> np.testing.assert_string_equal('abc', 'abc')  # doctest: +SKIP
    >>> np.testing.assert_string_equal('abc', 'abcd')  # doctest: +SKIP
    Traceback (most recent call last):
      ...
    AssertionError: Differences in strings:
    - abc+ abcd?    +

    """
    # delay import of difflib to reduce startup time
    __tracebackhide__ = True  # Hide traceback for py.test
    import difflib

    if not isinstance(actual, str):
        raise AssertionError(repr(type(actual)))
    if not isinstance(desired, str):
        raise AssertionError(repr(type(desired)))
    if desired == actual:
        return

    diff = list(
        difflib.Differ().compare(actual.splitlines(True), desired.splitlines(True))
    )
    diff_list = []
    while diff:
        d1 = diff.pop(0)
        # Differ prefixes lines with exactly one of "  ", "- ", "+ ", "? ";
        # common ("  ") lines carry no difference.
        if d1.startswith("  "):
            continue
        if d1.startswith("- "):
            # Collect the removed line, the added replacement, and any
            # adjacent "?" annotation lines into one diff chunk.
            chunk = [d1]
            d2 = diff.pop(0)
            if d2.startswith("? "):
                chunk.append(d2)
                d2 = diff.pop(0)
            if not d2.startswith("+ "):
                raise AssertionError(repr(d2))
            chunk.append(d2)
            if diff:
                d3 = diff.pop(0)
                if d3.startswith("? "):
                    chunk.append(d3)
                else:
                    diff.insert(0, d3)
            if d2[2:] == d1[2:]:
                # Removed and added text are identical — not a real change.
                continue
            diff_list.extend(chunk)
            continue
        raise AssertionError(repr(d1))
    if not diff_list:
        return
    # `desired == actual` was ruled out above, so any collected difference is
    # fatal (the original re-checked inequality here redundantly).
    raise AssertionError(f"Differences in strings:\n{''.join(diff_list).rstrip()}")


import unittest


class _Dummy(unittest.TestCase):
    # Minimal TestCase instance whose bound assert* helpers back the
    # module-level assert_raises_regex wrapper.
    def nop(self):
        pass


_d = _Dummy("nop")
versionadded:: 1.9.0 + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r"(?:^|[\\b_\\.%s-])[Tt]est" % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, "compat_func_name"): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith("_"): + setattr(cls, funcname, decorator(function)) + return + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. 
+ """ + if not HAS_REFCOUNT: + return True + + import gc + + import numpy as np + + b = np.arange(100 * 100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + del d # for pyflakes + + +def assert_allclose( + actual, + desired, + rtol=1e-7, + atol=0, + equal_nan=True, + err_msg="", + verbose=True, + check_dtype=False, +): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + Given two array_like objects, check that their shapes and all elements + are equal (but see the Notes for the special handling of a scalar). An + exception is raised if the shapes mismatch or any values conflict. In + contrast to the standard usage in numpy, NaNs are compared like numbers, + no assertion is raised if both objects have NaNs in the same positions. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note + that ``allclose`` has different default values). It compares the difference + between `actual` and `desired` to ``atol + rtol * abs(desired)``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is + array_like, the function checks that each element of the array_like + object is equal to the scalar. 
+ + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + + def compare(x, y): + return np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan) + + actual, desired = asanyarray(actual), asanyarray(desired) + header = f"Not equal to tolerance rtol={rtol:g}, atol={atol:g}" + + if check_dtype: + assert actual.dtype == desired.dtype + + assert_array_compare( + compare, + actual, + desired, + err_msg=str(err_msg), + verbose=verbose, + header=header, + equal_nan=equal_nan, + ) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) # doctest: +SKIP + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) # doctest: +SKIP + Traceback (most recent call last): + ... 
+ AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x - y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. 
+ + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) # doctest: +SKIP + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError( + f"Arrays are not almost equal up to {maxulp:g} " + f"ULP (max difference is {np.max(ret):g} ULP)" + ) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). 
+ + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) # doctest: +SKIP + 1.0 + """ + import numpy as np + + if dtype: + x = np.asarray(x, dtype=dtype) + y = np.asarray(y, dtype=dtype) + else: + x = np.asarray(x) + y = np.asarray(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array([x], dtype=t) + y = np.array([y], dtype=t) + + x[np.isnan(x)] = np.nan + y[np.isnan(y)] = np.nan + + if not x.shape == y.shape: + raise ValueError(f"x and y do not have the same shape: {x.shape} - {y.shape}") + + def _diff(rx, ry, vdt): + diff = np.asarray(rx - ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + else: + if rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation + of x.""" + import numpy as np + + if x.dtype == np.float16: + return _integer_repr(x, np.int16, np.int16(-(2**15))) + elif x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-(2**31))) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-(2**63))) + else: + raise ValueError(f"Unsupported dtype {x.dtype}") + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not 
len(l) > 0: + name_str = f" when calling {name}" if name is not None else "" + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable, optional + Callable to test + *args : Arguments + Arguments for `func`. + **kwargs : Kwargs + Keyword arguments for `func`. + + Returns + ------- + The value returned by `func`. + + Examples + -------- + >>> import warnings + >>> def deprecated_func(num): + ... warnings.warn("Please upgrade", DeprecationWarning) + ... return num*num + >>> with np.testing.assert_warns(DeprecationWarning): + ... 
assert deprecated_func(4) == 16 + >>> # or passing a func + >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) + >>> assert ret == 16 + """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter("always") + yield + if len(l) > 0: + name_str = f" when calling {name}" if name is not None else "" + raise AssertionError(f"Got warnings{name_str}: {l}") + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
+ + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type="binary", max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = "unary offset=(%d, %d), size=%d, dtype=%r, %s" + bfmt = "binary offset=(%d, %d, %d), size=%d, dtype=%r, %s" + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == "unary": + + def inp(): + return arange(s, dtype=dtype)[o:] + + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, "out of place") + d = inp() + yield d, d, ufmt % (o, o, s, dtype, "in place") + yield out[1:], inp()[:-1], ufmt % ( + o + 1, + o, + s - 1, + dtype, + "out of place", + ) + yield out[:-1], inp()[1:], ufmt % ( + o, + o + 1, + s - 1, + dtype, + "out of place", + ) + yield inp()[:-1], inp()[1:], ufmt % (o, o + 1, s - 1, dtype, "aliased") + yield inp()[1:], inp()[:-1], ufmt % (o + 1, o, s - 1, dtype, "aliased") + if type == "binary": + + def inp1(): + return arange(s, dtype=dtype)[o:] + + inp2 = inp1 + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % (o, o, o, s, dtype, "out of place") + d = inp1() + yield d, d, inp2(), bfmt % (o, o, o, s, dtype, "in place1") + d = inp2() + yield d, inp1(), d, bfmt % (o, o, o, s, dtype, 
"in place2") + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % ( + o + 1, + o, + o, + s - 1, + dtype, + "out of place", + ) + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % ( + o, + o + 1, + o, + s - 1, + dtype, + "out of place", + ) + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % ( + o, + o, + o + 1, + s - 1, + dtype, + "out of place", + ) + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % ( + o + 1, + o, + o, + s - 1, + dtype, + "aliased", + ) + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % ( + o, + o + 1, + o, + s - 1, + dtype, + "aliased", + ) + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % ( + o, + o, + o + 1, + s - 1, + dtype, + "aliased", + ) + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. + + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. 
This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( # doctest: +SKIP + ... modules=[np.core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... 
# np.core.fromnumeric + """ + + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super().__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, "__warningregistry__"): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super().__enter__() + + def __exit__(self, *exc_info): + super().__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, "__warningregistry__"): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings: + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + https://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". + "location" is equivalent to the warnings "default", match by exact + location the warning warning originated from. + + Notes + ----- + Filters added inside the context manager will be discarded again + when leaving it. Upon entering all filters defined outside a + context will be applied automatically. 
+ + When a recording filter is added, matching warnings are stored in the + ``log`` attribute as well as in the list returned by ``record``. + + If filters are added and the ``module`` keyword is given, the + warning registry of this module will additionally be cleared when + applying it, entering the context, or exiting it. This could cause + warnings to appear a second time after leaving the context if they + were configured to be printed once (default) and were already + printed before the context was entered. + + Nesting this context manager will work as expected when the + forwarding rule is "always" (default). Unfiltered and unrecorded + warnings will be passed out and be matched by the outer level. + On the outmost level they will be printed (or caught by another + warnings context). The forwarding rule argument can modify this + behaviour. + + Like ``catch_warnings`` this context manager is not threadsafe. + + Examples + -------- + + With a context manager:: + + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Some text") + sup.filter(module=np.ma.core) + log = sup.record(FutureWarning, "Does this occur?") + command_giving_warnings() + # The FutureWarning was given once, the filtered warnings were + # ignored. 
All other warnings abide outside settings (may be + # printed/error) + assert_(len(log) == 1) + assert_(len(sup.log) == 1) # also stored in log attribute + + Or as a decorator:: + + sup = np.testing.suppress_warnings() + sup.filter(module=np.ma.core) # module must match exactly + @sup + def some_function(): + # do something which causes a warning in np.ma.core + pass + """ + + def __init__(self, forwarding_rule="always"): + self._entered = False + + # Suppressions are either instance or defined inside one with block: + self._suppressions = [] + + if forwarding_rule not in {"always", "module", "once", "location"}: + raise ValueError("unsupported forwarding rule.") + self._forwarding_rule = forwarding_rule + + def _clear_registries(self): + if hasattr(warnings, "_filters_mutated"): + # clearing the registry should not be necessary on new pythons, + # instead the filters should be mutated. + warnings._filters_mutated() + return + # Simply clear the registry, this should normally be harmless, + # note that on new pythons it would be invalidated anyway. 
+ for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings("always", category=category, message=message) + else: + module_regex = module.__name__.replace(".", r"\.") + "$" + warnings.filterwarnings( + "always", category=category, message=message, module=module_regex + ) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record) + ) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record) + ) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. 
Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + return self._filter( + category=category, message=message, module=module, record=True + ) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings("always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace(".", r"\.") + "$" + warnings.filterwarnings( + "always", category=cat, message=mess, module=module_regex + ) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning( + self, message, category, filename, lineno, *args, use_warnmsg=None, **kwargs + ): + for cat, _, pattern, mod, rec in (self._suppressions + self._tmp_suppressions)[ + ::-1 + ]: + if issubclass(category, cat) and pattern.match(message.args[0]) is not None: + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage( + message, category, filename, lineno, **kwargs + ) + 
self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage( + message, category, filename, lineno, **kwargs + ) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. + """ + + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + yield + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method " + "is creating more reference cycles?" 
+ ) + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = f" when calling {name}" if name is not None else "" + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}".format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + "".join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace("\n", "\n "), + ) + for o in objects_in_cycles + ), + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_gc_cycles(): + do_something() + + .. versionadded:: 1.15.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) + + +def break_cycles(): + """ + Break reference cycles by calling gc.collect + Objects can call other objects' methods (for instance, another object's + __del__) inside their own __del__. On PyPy, the interpreter only runs + between calls to gc.collect, so multiple calls are needed to completely + release all cycles. 
+ """ + + gc.collect() + if IS_PYPY: + # a few more, just to make sure all the finalizers are called + gc.collect() + gc.collect() + gc.collect() + gc.collect() + + +def requires_memory(free_bytes): + """Decorator to skip a test if not enough memory is available""" + import pytest + + def decorator(func): + @wraps(func) + def wrapper(*a, **kw): + msg = check_free_memory(free_bytes) + if msg is not None: + pytest.skip(msg) + + try: + return func(*a, **kw) + except MemoryError: + # Probably ran out of memory regardless: don't regard as failure + pytest.xfail("MemoryError raised") + + return wrapper + + return decorator + + +def check_free_memory(free_bytes): + """ + Check whether `free_bytes` amount of memory is currently free. + Returns: None if enough memory available, otherwise error message + """ + env_var = "NPY_AVAILABLE_MEM" + env_value = os.environ.get(env_var) + if env_value is not None: + try: + mem_free = _parse_size(env_value) + except ValueError as exc: + raise ValueError(f"Invalid environment variable {env_var}: {exc}") + + msg = ( + f"{free_bytes/1e9} GB memory required, but environment variable " + f"NPY_AVAILABLE_MEM={env_value} set" + ) + else: + mem_free = _get_mem_available() + + if mem_free is None: + msg = ( + "Could not determine available memory; set NPY_AVAILABLE_MEM " + "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run " + "the test." + ) + mem_free = -1 + else: + msg = ( + f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available" + ) + + return msg if mem_free < free_bytes else None + + +def _parse_size(size_str): + """Convert memory size strings ('12 GB' etc.) 
to float""" + suffixes = { + "": 1, + "b": 1, + "k": 1000, + "m": 1000**2, + "g": 1000**3, + "t": 1000**4, + "kb": 1000, + "mb": 1000**2, + "gb": 1000**3, + "tb": 1000**4, + "kib": 1024, + "mib": 1024**2, + "gib": 1024**3, + "tib": 1024**4, + } + + size_re = re.compile( + r"^\s*(\d+|\d+\.\d+)\s*({})\s*$".format("|".join(suffixes.keys())), re.I + ) + + m = size_re.match(size_str.lower()) + if not m or m.group(2) not in suffixes: + raise ValueError(f"value {size_str!r} not a valid size") + return int(float(m.group(1)) * suffixes[m.group(2)]) + + +def _get_mem_available(): + """Return available memory in bytes, or None if unknown.""" + try: + import psutil + + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith("linux"): + info = {} + with open("/proc/meminfo") as f: + for line in f: + p = line.split() + info[p[0].strip(":").lower()] = int(p[1]) * 1024 + + if "memavailable" in info: + # Linux >= 3.14 + return info["memavailable"] + else: + return info["memfree"] + info["cached"] + + return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. 
+ Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, "gettrace"): + return func + else: + + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + + return wrapper + + +def _get_glibc_version(): + try: + ver = os.confstr("CS_GNU_LIBC_VERSION").rsplit(" ")[1] + except Exception as inst: + ver = "0.0" + + return ver + + +_glibcver = _get_glibc_version() + + +def _glibc_older_than(x): + return _glibcver != "0.0" and _glibcver < x diff --git a/llava_next/lib/python3.10/site-packages/torch/ao/__init__.py b/llava_next/lib/python3.10/site-packages/torch/ao/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6f3a4603169f622e953a793f606f7fa74a9cbb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/ao/__init__.py @@ -0,0 +1,16 @@ +# torch.ao is a package with a lot of interdependencies. +# We will use lazy import to avoid cyclic dependencies here. + + +__all__ = [ + "nn", + "ns", + "quantization", + "pruning", +] + +def __getattr__(name): + if name in __all__: + import importlib + return importlib.import_module("." 
+ name, __name__) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/llava_next/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63e47cc39bc582bba76cf5efbf0b83376df033ec Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/mps/__init__.py b/llava_next/lib/python3.10/site-packages/torch/mps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52cda4fb0c06c0b56c41cd031dea53dcaafc87ed --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/mps/__init__.py @@ -0,0 +1,130 @@ +r""" +This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python. +Metal is Apple's API for programming metal GPU (graphics processor unit). Using MPS means that increased +performance can be achieved, by running work on the metal GPU(s). +See https://developer.apple.com/documentation/metalperformanceshaders for more details. +""" +import torch +from .. 
import Tensor + +_is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False) +_default_mps_generator: torch._C.Generator = None # type: ignore[assignment] + + +# local helper function (not public or exported) +def _get_default_mps_generator() -> torch._C.Generator: + global _default_mps_generator + if _default_mps_generator is None: + _default_mps_generator = torch._C._mps_get_default_generator() + return _default_mps_generator + + +def synchronize() -> None: + r"""Waits for all kernels in all streams on a MPS device to complete.""" + return torch._C._mps_deviceSynchronize() + + +def get_rng_state() -> Tensor: + r"""Returns the random number generator state as a ByteTensor.""" + return _get_default_mps_generator().get_state() + + +def set_rng_state(new_state: Tensor) -> None: + r"""Sets the random number generator state. + + Args: + new_state (torch.ByteTensor): The desired state + """ + new_state_copy = new_state.clone(memory_format=torch.contiguous_format) + _get_default_mps_generator().set_state(new_state_copy) + + +def manual_seed(seed: int) -> None: + r"""Sets the seed for generating random numbers. + + Args: + seed (int): The desired seed. + """ + # the torch.mps.manual_seed() can be called from the global + # torch.manual_seed() in torch/random.py. So we need to make + # sure mps is available (otherwise we just return without + # erroring out) + if not torch._C._has_mps: + return + seed = int(seed) + _get_default_mps_generator().manual_seed(seed) + + +def seed() -> None: + r"""Sets the seed for generating random numbers to a random number.""" + _get_default_mps_generator().seed() + + +def empty_cache() -> None: + r"""Releases all unoccupied cached memory currently held by the caching + allocator so that those can be used in other GPU applications. + """ + torch._C._mps_emptyCache() + + +def set_per_process_memory_fraction(fraction) -> None: + r"""Set memory fraction for limiting process's memory allocation on MPS device. 
+ The allowed value equals the fraction multiplied by recommended maximum device memory + (obtained from Metal API device.recommendedMaxWorkingSetSize). + If trying to allocate more than the allowed value in a process, it will raise an out of + memory error in allocator. + + Args: + fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction. + + .. note:: + Passing 0 to fraction means unlimited allocations + (may cause system failure if out of memory). + Passing fraction greater than 1.0 allows limits beyond the value + returned from device.recommendedMaxWorkingSetSize. + """ + + if not isinstance(fraction, float): + raise TypeError("Invalid type for fraction argument, must be `float`") + if fraction < 0 or fraction > 2: + raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2") + + torch._C._mps_setMemoryFraction(fraction) + + +def current_allocated_memory() -> int: + r"""Returns the current GPU memory occupied by tensors in bytes. + + .. note:: + The returned size does not include cached allocations in + memory pools of MPSAllocator. + """ + return torch._C._mps_currentAllocatedMemory() + + +def driver_allocated_memory() -> int: + r"""Returns total GPU memory allocated by Metal driver for the process in bytes. + + .. note:: + The returned size includes cached allocations in MPSAllocator pools + as well as allocations from MPS/MPSGraph frameworks. + """ + return torch._C._mps_driverAllocatedMemory() + + +from . 
import profiler +from .event import Event + +__all__ = [ + "get_rng_state", + "manual_seed", + "seed", + "set_rng_state", + "synchronize", + "empty_cache", + "set_per_process_memory_fraction", + "current_allocated_memory", + "driver_allocated_memory", + "Event", + "profiler", +] diff --git a/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d14c0862644e0c6cd15684163250767149f6bd7 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b8fa883aef2fa85bdc7fcc8ee6c02012729872f Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b98b8c1844a336cc9f298dc41a441fda65c36f8 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/mps/event.py b/llava_next/lib/python3.10/site-packages/torch/mps/event.py new file mode 100644 index 0000000000000000000000000000000000000000..a206b640ef4ad41c546564d3fa91ba257762c9c6 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/mps/event.py @@ -0,0 +1,45 @@ +import torch + + +class Event: + r"""Wrapper around an MPS event. 
+ + MPS events are synchronization markers that can be used to monitor the + device's progress, to accurately measure timing, and to synchronize MPS streams. + + Args: + enable_timing (bool, optional): indicates if the event should measure time + (default: ``False``) + """ + + def __init__(self, enable_timing=False): + self.__eventId = torch._C._mps_acquireEvent(enable_timing) + + def __del__(self): + # checks if torch._C is already destroyed + if hasattr(torch._C, "_mps_releaseEvent") and self.__eventId > 0: + torch._C._mps_releaseEvent(self.__eventId) + + def record(self): + r"""Records the event in the default stream.""" + torch._C._mps_recordEvent(self.__eventId) + + def wait(self): + r"""Makes all future work submitted to the default stream wait for this event.""" + torch._C._mps_waitForEvent(self.__eventId) + + def query(self): + r"""Returns True if all work currently captured by event has completed.""" + return torch._C._mps_queryEvent(self.__eventId) + + def synchronize(self): + r"""Waits until the completion of all work currently captured in this event. + This prevents the CPU thread from proceeding until the event completes. + """ + torch._C._mps_synchronizeEvent(self.__eventId) + + def elapsed_time(self, end_event): + r"""Returns the time elapsed in milliseconds after the event was + recorded and before the end_event was recorded. 
+ """ + return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId) diff --git a/llava_next/lib/python3.10/site-packages/torch/mps/profiler.py b/llava_next/lib/python3.10/site-packages/torch/mps/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..9094a275136c2120a635e97f130fde3d8beb34cc --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/mps/profiler.py @@ -0,0 +1,59 @@ +import contextlib + +import torch + +__all__ = ["start", "stop", "profile"] + + +def start(mode: str = "interval", wait_until_completed: bool = False) -> None: + r"""Start OS Signpost tracing from MPS backend. + + The generated OS Signposts could be recorded and viewed in + XCode Instruments Logging tool. + + Args: + mode(str): OS Signpost tracing mode could be "interval", "event", + or both "interval,event". + The interval mode traces the duration of execution of the operations, + whereas event mode marks the completion of executions. + See document `Recording Performance Data`_ for more info. + wait_until_completed(bool): Waits until the MPS Stream complete + executing each encoded GPU operation. This helps generating single + dispatches on the trace's timeline. + Note that enabling this option would affect the performance negatively. + + .. _Recording Performance Data: + https://developer.apple.com/documentation/os/logging/recording_performance_data + """ + mode_normalized = mode.lower().replace(" ", "") + torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed) + + +def stop(): + r"""Stops generating OS Signpost tracing from MPS backend.""" + torch._C._mps_profilerStopTrace() + + +@contextlib.contextmanager +def profile(mode: str = "interval", wait_until_completed: bool = False): + r"""Context Manager to enabling generating OS Signpost tracing from MPS backend. + + Args: + mode(str): OS Signpost tracing mode could be "interval", "event", + or both "interval,event". 
+ The interval mode traces the duration of execution of the operations, + whereas event mode marks the completion of executions. + See document `Recording Performance Data`_ for more info. + wait_until_completed(bool): Waits until the MPS Stream complete + executing each encoded GPU operation. This helps generating single + dispatches on the trace's timeline. + Note that enabling this option would affect the performance negatively. + + .. _Recording Performance Data: + https://developer.apple.com/documentation/os/logging/recording_performance_data + """ + try: + start(mode, wait_until_completed) + yield + finally: + stop() diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bed7688b15d2674d65cf653b4375db97a3851cbe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cuda_ln import * +from .cuda_post_ln import * +from .cuda_pre_ln import * diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e55c5612c773b641ad4920c31b97074d71b777a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3537598b40b1227cfdb17c5252163b18dbf9d844 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95bdfecbbcab2d87ebae34e959bff49f570ce677 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41c9f26574930362175c6f3ba7218107dd51ca76 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..123dafd7cae98fdcafc7dedc2c1c80eef1014774 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py new file mode 100644 index 0000000000000000000000000000000000000000..3c2aa5cb5eb4553010bede7c777268757bd897d4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... import DSKernelBase +from ....inference_utils import elem_size +from deepspeed.ops.op_builder import InferenceCoreBuilder + + +class CUDAFPLNBase(DSKernelBase): + """ + Base class for CUDA LN kernels. They all same the same validation logic, + so we can share it here. 
+ """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + def __init__(self, channels: int, fp_dtype: torch.dtype, epsilon: float = 1e-5): + """ + Parameters: + channels (int): Number of channels in the input tensor. Must be divisible to align + to 16 bytes. + fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values + are torch.float16, torch.bfloat16, and torch.float32. + """ + if fp_dtype not in CUDAFPLNBase.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, CUDAFPLNBase.supported_dtypes)) + + if elem_size(fp_dtype) * channels % 16 != 0: + raise ValueError("channels must be divisible by 16 bytes") + + self.inf_module = InferenceCoreBuilder().load() + self.epsilon = epsilon diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..583736fb8bbc0ebcd07a63452ce029b002d6d937 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .cuda_fp_ln_base import CUDAFPLNBase + + +class CUDAFPLN(CUDAFPLNBase): + """ + Floating point layer norm kernel for CUDA/RoCM. + + Performs: z = ln(x) + """ + + def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, gamma: torch.Tensor, + beta: torch.Tensor) -> torch.Tensor: + """ + output_z may alias input_x directly. All Tensors should have the same shape. + + Parameters: + output_z (torch.Tensor): Output tensor. + input_x (torch.Tensor): Input tensor. + gamma (torch.Tensor): Gamma tensor. + beta (torch.Tensor): Beta tensor. 
+ """ + self.inf_module.layer_norm(output_z, input_x, gamma, beta, self.epsilon) + return output_z diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..74b2d9cf58801c6273ce1647091752ebae3e147d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Tuple + +import torch + +from .cuda_fp_ln_base import CUDAFPLNBase + + +class CUDAFPPreLN(CUDAFPLNBase): + """ + Floating point pre-LayerNorm kernel for CUDA/RoCM. + + Performs: z_res = x_res + y_hid + z_hid = ln(z_hid) + """ + + def __call__(self, z_res: torch.Tensor, z_hid: torch.Tensor, x_res: torch.Tensor, y_hid: torch.Tensor, + gamma: torch.Tensor, beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + z_res can alias x_res. All non-parameter input/output tensors + must have the same shape. z_hid can alias y_hid. + + Parameters: + z_res (torch.Tensor): Output residual. + z_hid (torch.Tensor): Output hidden states. + x_res (torch.Tensor): Input residual. + y_hid (torch.Tensor): Input hidden states. + gamma (torch.Tensor): Gamma tensor. + beta (torch.Tensor): Beta tensor. + + Returns: + output (torch.Tensor): Output tensor. 
+ """ + self.inf_module.pre_layer_norm(z_res, z_hid, x_res, y_hid, gamma, beta, self.epsilon) + return z_res, z_hid diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b2c95d410a1f2354a45724b44064ff26e2321ccf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "layer_norm.h" + +#define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + at::cuda::getCurrentCUDAStream()); \ + } + +void ds_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + bool ragged_input = input.dim() == 2; + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = ragged_input ? 
input.size(1) : input.size(2); + + DISPATCH_LAYER_NORM(kFloat, float); + DISPATCH_LAYER_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16); +#endif +} + +#define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_post_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + at::cuda::getCurrentCUDAStream()); \ + } + +void ds_post_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + bool ragged_input = input.dim() == 2; + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = ragged_input ? input.size(1) : input.size(2); + + DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif +} + +#define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_pre_ln((C_TYPE*)norm_output.data_ptr(), \ + (C_TYPE*)res_output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + at::cuda::getCurrentCUDAStream()); \ + } + +void ds_pre_layer_norm(at::Tensor& res_output, + at::Tensor& norm_output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + bool ragged_input = input.dim() == 2; + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = ragged_input ? 
input.size(1) : input.size(2); + + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea3a8c4252415953b1f30208257fdc52075063a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "ds_kernel_utils.h" + +/* +Kernel launch methods for layer norm variants. +*/ + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_post_ln(T* output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); +template +void launch_fused_pre_ln(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +void ds_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon); + +void ds_post_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon); + +void ds_pre_layer_norm(at::Tensor& res_output, + at::Tensor& norm_output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon); diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..15f52c46622b5f4aca534dc8e6cb749819846d92 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu @@ -0,0 +1,490 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace ln { +constexpr int granularity = 16; +} // namespace ln + +/* +Regular layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +Args: + output: buffer for output data + vals: buffer for input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +*/ +template +__global__ void fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[unRoll * T_per_load]; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* 
iteration_buffer = local_buffer + i * T_per_load; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + sum = reduce::element(sum, vals_up_cast); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_ln \ + <<>>(output, vals, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_ln(T* output, 
+ const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? 
rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_FUSED_LN(T) \ + template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_FUSED_LN(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_FUSED_LN(__nv_bfloat16); +#endif +INSTANTIATE_FUSED_LN(float); + +/* +Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual +need to be fused into compute-bound producer operations. 
+ +Args: + output: buffer for output data + res_output: output of residual addition + vals: buffer for input data + residual: residual data + bias: bias of of input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +Template arg: + StoreResidual: controls whether the residual calculation is stored + or not. When set to false, the input `res_output` is unused. +*/ +template +__global__ void fused_residual_ln(T* output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + + T local_buffer[unRoll * T_per_load]; + + // Unlike a vanilla layernorm, since we're fusing the two adds as well + // an inner unRoll seems to be less valuable. If anything, a double unRoll + // makes the most sense if we find we are having performance issues. 
+#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + mem_access::load_global(residual_buffer, + residual_base + i * stride, + thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + float res_up_cast = conversion::to(residual_buffer[j]); + vals_up_cast += res_up_cast; + sum = reduce::element(sum, vals_up_cast); + iteration_buffer[j] = conversion::to(vals_up_cast); + } + + if (preLnResidual && (thread_offset + i * stride < elems_per_row)) { + mem_access::store_global(res_output + base_offset + i * stride, + iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma 
unRoll + for (int j = 0; j < T_per_load; j++) { + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified. +#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + output, nullptr, vals, residual, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_post_ln(T* output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? 
rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + norm_output, res_output, vals, residual, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_pre_ln(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 
4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * 
internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_RES_LN(T) \ + template void launch_fused_post_ln( \ + T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +#define INSTANTIATE_PRE_LN_RES(T) \ + template void launch_fused_pre_ln( \ + T*, T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_RES_LN(__half); +INSTANTIATE_RES_LN(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_RES_LN(__nv_bfloat16); +#endif + +INSTANTIATE_PRE_LN_RES(__half); +INSTANTIATE_PRE_LN_RES(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_PRE_LN_RES(__nv_bfloat16); +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab6f8d8dc73397e93e6a70838dce90c132d103b6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear_kernels.cpp b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear_kernels.cpp new file mode 100644 index 0000000000000000000000000000000000000000..677bec22ded846462156201acc6b3eacb447e822 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear_kernels.cpp @@ -0,0 +1,224 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include "cuda_linear_kernels.h" + +namespace { + +// For bit-level debugging. +template +void print_bits(T num) +{ + char bits[sizeof(T) * 8 + 1] = {'\0'}; + for (int bit = 0; bit < (sizeof(T) * 8); bit++) { + bits[sizeof(T) * 8 - 1 - bit] = '0' + (num & 0x01); + num = num >> 1; + } + printf("%s\n", bits); +} + +void print_bits(half num) +{ + char bits[sizeof(half) * 8 + 1] = {'\0'}; + auto int_num = *reinterpret_cast(&num); + for (int bit = 0; bit < (sizeof(half) * 8); bit++) { + bits[sizeof(half) * 8 - 1 - bit] = '0' + (int_num & 0x01); + int_num = int_num >> 1; + } + printf("%s\n", bits); +} + +/* + * Function to pack 4 fake quantized FP16 value into continuously stored 4 FP6 values. + */ +void cast_fp16_fp6(uint16_t* FP16x4, uint8_t* FP6x4) +{ + // Constants for FP6 + constexpr int exponent_nbits_fp6 = 3; + constexpr int mantissa_nbits_fp6 = 2; + constexpr int exp_bias_fp6 = (1 << (exponent_nbits_fp6 - 1)) - 1; + // Constants for FP16 + constexpr int exponent_nbits_fp16 = 5; + constexpr int mantissa_nbits_fp16 = 10; + constexpr int exp_bias_fp16 = (1 << (exponent_nbits_fp16 - 1)) - 1; + + int fp6_temp[4]; + + float absmin_nonzero_fp6 = 0.0625; + // Note that we regard the exponent of '111' as a regular value rather than NaN or inf. This is + // the same with that in qtorch. + float absmax_fp6 = 28; + + for (int i = 0; i < 4; ++i) { + uint16_t source = FP16x4[i]; + float fp6_value_abs = std::abs(__half2float(*((half*)(&source)))); + if ((fp6_value_abs != 0 && fp6_value_abs < absmin_nonzero_fp6) || + fp6_value_abs > absmax_fp6) { + // TODO(zhen): a better way may be rounding it to the nearest FP6 value. + throw std::invalid_argument("Input value out of range for FP6."); + } + + // It is not safe to do shift operation on uint16_t. So we promote it to int. + int source_promote = int(source); + + int sign_bit = (source_promote >> 15); + // Extracting exponent represented in FP16. 
The sign mask 0x7FFF is '0111 1111 1111 1111' + int exp_bit = (source_promote & 0x7FFF) >> mantissa_nbits_fp16; + // Extracting mantissa represented in FP16 + int mant_bit = source_promote & ((1 << mantissa_nbits_fp16) - 1); + + int new_exp_bit; + int new_mant_bit; + + if (exp_bit == 0) { + // Subnormal FP16 number. Too small for FP6. + new_exp_bit = 0; + new_mant_bit = 0; + } else { + new_mant_bit = mant_bit >> (mantissa_nbits_fp16 - mantissa_nbits_fp6); + new_exp_bit = exp_bit - exp_bias_fp16 + exp_bias_fp6; + + // Deal with subnormal FP6 values. + int target_exp_val = exp_bit - exp_bias_fp16; + int min_fp6_exp_val = -exp_bias_fp6 + 1; + bool subnormal_fp6 = target_exp_val < min_fp6_exp_val; + if (subnormal_fp6) { + // TODO(zhen): add the rounding logic. + new_exp_bit = 0; + // The implicit 1 in the mantissa of FP16 is not present in subnormal FP6. Thus we + // need to add it + new_mant_bit = (new_mant_bit | (1 << mantissa_nbits_fp6)) >> + (min_fp6_exp_val - target_exp_val); + } + } + + fp6_temp[i] = (sign_bit << (exponent_nbits_fp6 + mantissa_nbits_fp6)) | + (new_exp_bit << mantissa_nbits_fp6) | new_mant_bit; + } + // Pack the values + FP6x4[0] = fp6_temp[0] << 2 | (fp6_temp[1] >> 4); + FP6x4[1] = (fp6_temp[1] & 0x0F) << 4 | (fp6_temp[2] >> 2); + FP6x4[2] = (fp6_temp[2] & 0x03) << 6 | fp6_temp[3]; +} + +/* + * Function to prepack FP16 weights into continuous FP6 values. + * + * Parameters: + * weight_16bit: input weight in FP16, size M*K + * weight_6bit: output weight in packed FP6, continuously stored, size M*K*6/8 + * M, K: the shape of the weight + */ +void weight_prepacking_fp16_to_fp6(uint16_t* weight_16bit, + uint8_t* weight_6bit_packed, + size_t M, + size_t K) +{ + // Every four 16-bit elements are packed into three 6-bit values (4*6bit == 3*8bit). 
+ if (K * 6 % 8 != 0) { throw std::invalid_argument("(K * 6 % 8) should be 0"); } + size_t K_fp6_packed = K * 6 / 8; + // #pragma omp parallel for + for (auto m = 0; m < M; m++) { + uint8_t* ptr_6bit = weight_6bit_packed + m * K_fp6_packed; + uint16_t* ptr_16bit = weight_16bit + m * K; + for (auto k = 0; k < K; k += 4) { + cast_fp16_fp6(ptr_16bit, ptr_6bit); + ptr_16bit += 4; + ptr_6bit += 3; + } + } +} + +} // namespace + +/* + * Function to execute the FP6 linear kernel. + * + * Parameters: + * output: output tensor, size M*N + * hidden_states: input activation tensor, size N*K + * weights_2bit: packed 2bit weights, size M*K*2/8 + * weights_4bit: packed 4bit weights, size M*K*4/8 + * scales: scale tensor, size M + * workspace: workspace tensor, size M*N*split_k + * M: the output channel number of the weight + * N: the token number of the activation + * K: the input channel number of the weight + * split_k: the split size of the GEMM calculation + */ +void cuda_wf6af16_linear(torch::Tensor& output, + torch::Tensor& hidden_states, + torch::Tensor& weights_2bit, + torch::Tensor& weights_4bit, + torch::Tensor& scales, + torch::Tensor& workspace, + int M, + int N, + int K, + int split_k) +{ + TORCH_CHECK(weights_2bit.device().type() == torch::kCUDA, "weight_2bit must be on CUDA"); + TORCH_CHECK(weights_4bit.device().type() == torch::kCUDA, "weight_4bit must be on CUDA"); + TORCH_CHECK(hidden_states.device().type() == torch::kCUDA, "X must be on CUDA"); + TORCH_CHECK(scales.device().type() == torch::kCUDA, "scales must be on CUDA"); + + auto status = fp6_linear_kernel(at::cuda::getCurrentCUDAStream(), + (uint4*)(weights_2bit.data_ptr()), + (uint4*)(weights_4bit.data_ptr()), + (half*)(scales.data_ptr()), + (half*)(hidden_states.data_ptr()), + (half*)(output.data_ptr()), + M, + N, + K, + workspace.data_ptr(), + split_k); + if (status != cudaSuccess) { + AT_ERROR("fp6_linear_kernel failed with error: ", cudaGetErrorString(status)); + } +} + +/* + * Function to prepack the 
fake 6-bit-quantized FP16 weights into 2bit and 4bit. + * + * Parameters: + * weight: input weight in FP16 (containing the quantized FP6-ranged value), size M*K + * Returns: + * weight_2bit: output weight in 2bit, size M*K*2/8 + * weight_4bit: output weight in 4bit, size M*K*4/8 + */ +std::vector preprocess_weight(torch::Tensor& weight) +{ + TORCH_CHECK(weight.dim() == 2, "weight must be 2-dimensional"); + TORCH_CHECK(weight.scalar_type() == torch::kFloat16, "weight must be FP16"); + TORCH_CHECK(weight.is_contiguous(), "weight must be contiguous"); + TORCH_CHECK(weight.device().type() == torch::kCPU, "weight must be on CPU"); + auto M = weight.size(0); + auto K = weight.size(1); + TORCH_CHECK(K % 4 == 0, "K must be multiple of 4"); + + // Pack weight from FP16 to FP6. + uint16_t* weight_16bit_ptr = reinterpret_cast(weight.data_ptr()); + std::vector weight_6bit_packed(M * K * 6 / 8); + uint8_t* weight_6bit_ptr = weight_6bit_packed.data(); + weight_prepacking_fp16_to_fp6(weight_16bit_ptr, weight_6bit_ptr, M, K); + + // Split weight into 2bit and 4bit. + weight_matrix_prepacking(reinterpret_cast(weight_6bit_ptr), M, K); + uint8_t* weight_2bit_ptr = weight_6bit_ptr; + + // Make sure that the new split tensor does not share the underlying memory with the original + // one. Otherwise it will incur some problems when the original tensor is deleted. It also + // makes the memory flattern risky. 
+ auto weight_2bit = + torch::from_blob(weight_2bit_ptr, {M * K * 2 / 8}, torch::kUInt8).clone().detach(); + uint8_t* weight_4bit_ptr = weight_2bit_ptr + M * K * 2 / 8; + auto weight_4bit = + torch::from_blob(weight_4bit_ptr, {M * K * 4 / 8}, torch::kUInt8).clone().detach(); + + return {weight_2bit, weight_4bit}; +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/fp6_linear.cuh b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/fp6_linear.cuh new file mode 100644 index 0000000000000000000000000000000000000000..95f7f6050c15c84f41450e8ac60b6aa208088f93 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/fp6_linear.cuh @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#include +#include +#include + +#include + +/* + * Computes FP6-FP16 GEMM (C++ interface). + */ +cudaError_t fp6_linear_kernel(cudaStream_t stream, + const uint4* Weight1, + const uint4* Weight2, + const half* Scales, + const half* B, + half* C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + float* Reduction_Workspace, // Reduction_Workspace_Size = Split_K * + // M_Global * N_Global * sizeof(fp32) + int Split_K); + +/* + * Computes FP6-FP16 GEMM (PyTorch interface). + */ +torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats, + torch::Tensor _weights, + torch::Tensor _scales, + int splitK = 1); + +/* + * In-place weight prepacking (C++ interface). + */ +void weight_matrix_prepacking(int* FP6Weights, size_t M, size_t K); + +/* + * Weight prepacking (Pytorch interface). 
+ */ +torch::Tensor weight_matrix_prepacking_cpu(torch::Tensor fp6_tensor, size_t M, size_t K); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh new file mode 100644 index 0000000000000000000000000000000000000000..aa6ea6c4b1c2bb7af0b6d878b9b9bf10817b1fff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh @@ -0,0 +1,261 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#include "configs.h" +#include "utils_core.cuh" +#include "utils_gmem.cuh" + +/* + * C = A*B + * A: row major with ahead-of-time layout transformation, FP6 + * B: col major, FP16 + * C: col major, FP16 + */ +template +__global__ void QUANT_GEMM_Kernel(const uint4* Weight1, + const uint4* Weight2, + const half* Scales, + const half* B, + OutputDataType* C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + int Split_K) +{ +#ifdef DEBUG_MODE + assert(K_Global % TilingConfig::TILE_K == 0); + assert(M_Global % TilingConfig::TILE_M == 0); + assert(gridDim.y == Split_K * (M_Global / TilingConfig::TILE_M)); +#endif + extern __shared__ __align__(128) + half smem[]; // Dynamic shared memory for FP16 A tiles, 128 Bytes aligned + half(*smem_array)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + reinterpret_cast( + smem + + (SMEM_SIZE_A1_TILE + SMEM_SIZE_A2_TILE) / 2); // Dynamic shared memory for FP16 B tiles + __shared__ half QuantScales[64 * TilingConfig::BLOCK_WARPS]; // static shared memory for + // quantization scales, 64 row per + // warp * 4 warps = 512 Bytes + // Thread Block Mapping, considering SplitK + const size_t BatchID = blockIdx.y / (M_Global / TilingConfig::TILE_M); + 
const size_t x = blockIdx.x; // Output Block ID: (BlockID_Row = y; BlockID_Col = x ) + const size_t y = + blockIdx.y % + (M_Global / TilingConfig::TILE_M); // Output Block ID: (BlockID_Row = y; BlockID_Col = x ) + const size_t Tile_Start_M = y * TilingConfig::TILE_M; + const size_t Tile_Start_N = x * TilingConfig::TILE_N; + const size_t NumColumnToCopy = (N_Global - Tile_Start_N) < TilingConfig::TILE_N + ? (N_Global - Tile_Start_N) + : TilingConfig::TILE_N; + const size_t NumBlock_K = K_Global / TilingConfig::TILE_K; + const size_t AverageNumBlock_K = NumBlock_K / Split_K; + const size_t ExtraNumBlock_K = NumBlock_K - AverageNumBlock_K * Split_K; + size_t NumIter = AverageNumBlock_K; + if (BatchID < ExtraNumBlock_K) NumIter++; + size_t StartBlockID_K = AverageNumBlock_K * BatchID; + if (BatchID < ExtraNumBlock_K) + StartBlockID_K += BatchID; + else + StartBlockID_K += ExtraNumBlock_K; + // Warp ID. + const int warpId = threadIdx.x / WARP_SIZE; + int WARP_i = + warpId / TilingConfig::BLOCK_COL_WARPS; // WARP_i: row number; WARP_j: column number + // int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + // Global Memory Address for Matrix A (Weight) + // ///////////////////////////////////////////////////////////////////////// StartPTR for each + // ThreadBlock(TB) + const uint4* TB_StartGPTR_A1 = + Weight1 + (y * TilingConfig::BLOCK_ROW_WARPS) * NumBlock_K * NUM_INT4_PER_UNIT_2BIT_FRAG; + const uint4* TB_StartGPTR_A2 = + Weight2 + (y * TilingConfig::BLOCK_ROW_WARPS) * NumBlock_K * NUM_INT4_PER_UNIT_4BIT_FRAG; + // StartPTR for each WARP. 
+ const uint4* WARP_StartGPTR_A1 = + TB_StartGPTR_A1 + WARP_i * NumBlock_K * NUM_INT4_PER_UNIT_2BIT_FRAG; + const uint4* WARP_StartGPTR_A2 = + TB_StartGPTR_A2 + WARP_i * NumBlock_K * NUM_INT4_PER_UNIT_4BIT_FRAG; + // StartPTR for each WARP, considering SplitK + const size_t WARP_Start_UnitID_K = StartBlockID_K; + WARP_StartGPTR_A1 += WARP_Start_UnitID_K * NUM_INT4_PER_UNIT_2BIT_FRAG; + WARP_StartGPTR_A2 += WARP_Start_UnitID_K * NUM_INT4_PER_UNIT_4BIT_FRAG; + // Copying A tile from Global to Shared, using double-buffer + // ////////////////////////////////////////////////////////// StartSPTR for each ThreadBlock + uint32_t* AFrag_2BIT_SPTR = reinterpret_cast(smem); + uint32_t* AFrag_4BIT_SPTR = + AFrag_2BIT_SPTR + + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * TilingConfig::BLOCK_WARPS * + PIPELINE_LEVEL_GMEM; // 8 buffers including double buffers, 12 for trible buffers + // StartSPTR for each WARP + AFrag_2BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4; + AFrag_4BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4; + // Pre-fetch of A tile + for (int i = 0; i < PIPELINE_LEVEL_GMEM - 1; i++) { + CopyFromGlobalToShared_A( + AFrag_2BIT_SPTR + i * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * 4, WARP_StartGPTR_A1); + CopyFromGlobalToShared_A( + AFrag_4BIT_SPTR + i * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * 4, WARP_StartGPTR_A2); + WARP_StartGPTR_A1 += SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; + WARP_StartGPTR_A2 += SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; + } + // Global Memory Address for Matrix A (QuantScale) + // ///////////////////////////////////////////////////////////////////// + const half* TB_StartGPTR_A_Scale = Scales + (y * TilingConfig::BLOCK_ROW_WARPS) * 64; + const half* WARP_StartGPTR_A_Scales = TB_StartGPTR_A_Scale + WARP_i * 64; + CopyFromGlobalToShared_Scales(QuantScales + WARP_i * 64, WARP_StartGPTR_A_Scales); + // Copying B tile from Global to Shared, considering SplitK + // ///////////////////////////////////////////////////////////// + const half* BTile_GPTR = B + 
Tile_Start_N * K_Global + StartBlockID_K * TilingConfig::TILE_K; + for (int i = 0; i < PIPELINE_LEVEL_GMEM - 1; i++) { + CopyFromGlobalToShared( + smem_array + i * TilingConfig::TILE_N, BTile_GPTR, K_Global, NumColumnToCopy); + BTile_GPTR += TilingConfig::TILE_K; + } + // Register Allocation for A,B, and C, Initilazed to Zeros + // ///////////////////////////////////////////////////////////////////// + constexpr int NumRegSets_a = + WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + constexpr int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1) + ? 1 + : TilingConfig::WARP_COL_MMA_TENSORS / + 2; // 1 set = 4 registers, containing a 16*16 MMA block +#ifdef PIPELINE_LEVEL_SMEM + uint32_t a[NumRegSets_a * PIPELINE_LEVEL_SMEM] + [4]; // double/Trible buffer is used // Registers to store decompressed FP6 + uint32_t b[NumRegSets_b * PIPELINE_LEVEL_SMEM] + [4]; // double/Triple buffer is used // Register to store FP16 B matrix (a slice) +#endif + float c[NumRegSets_a * NumRegSets_b][REG_PER_THREAD_C_TENSOR_16_16]; + for (int i = 0; i < NumRegSets_a * NumRegSets_b; i++) + for (int j = 0; j < REG_PER_THREAD_C_TENSOR_16_16; j++) c[i][j] = 0.0f; + // + cp_async_wait_all(); + __syncthreads(); + + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + uint32_t Scales_RPTR[4]; // 4 Registers per thread for Quantization Scales + ExtractFromSharedToReg_Scales(Scales_RPTR, QuantScales + WARP_i * 64); +#ifdef PIPELINE_LEVEL_SMEM + // Initializing the Software Pipeline: writing registers. + // //////////////////////////////////////////////////////////////////////////////////////////////// + initialize_mma_slice( + a, b, AFrag_2BIT_SPTR, AFrag_4BIT_SPTR, smem_array, Scales_RPTR); +#endif +// The outer loop. 
+// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#pragma unroll(1) + for (size_t tile_id_k = 0; tile_id_k < NumIter; tile_id_k++) { + // Trible-Buffer for A Tile + uint32_t* __restrict__ read_SPTR_Frag1 = + AFrag_2BIT_SPTR + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * + 4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + uint32_t* __restrict__ read_SPTR_Frag2 = + AFrag_4BIT_SPTR + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * + 4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 +#ifdef PIPELINE_LEVEL_SMEM + uint32_t* __restrict__ read2_SPTR_Frag1 = + AFrag_2BIT_SPTR + + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * 4; + uint32_t* __restrict__ read2_SPTR_Frag2 = + AFrag_4BIT_SPTR + + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * 4; +#endif + uint32_t* __restrict__ write_SPTR_Frag1 = + AFrag_2BIT_SPTR + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * + 4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + uint32_t* __restrict__ write_SPTR_Frag2 = + AFrag_4BIT_SPTR + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * + 4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + // Trible-Buffer for B Tile + half __restrict__(*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + smem_array + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; +#ifdef PIPELINE_LEVEL_SMEM + half __restrict__(*read2_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + smem_array + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; +#endif + half __restrict__(*write_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + smem_array + + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; + // + bool GlobalCopy = (tile_id_k + 
PIPELINE_LEVEL_GMEM - 1) < NumIter; + // Copying A tile from Global to Register, Bypassing L1, using double-buffer + CopyFromGlobalToShared_A( + write_SPTR_Frag1, WARP_StartGPTR_A1, GlobalCopy); + CopyFromGlobalToShared_A( + write_SPTR_Frag2, WARP_StartGPTR_A2, GlobalCopy); + // copying B tile from GlobalMemory to SharedMemory + CopyFromGlobalToShared( + write_SPTR, BTile_GPTR, K_Global, NumColumnToCopy, GlobalCopy); + cp_async_group_commit(); +#ifdef PIPELINE_LEVEL_SMEM + core_mma_slice(c, + a, + b, + read_SPTR_Frag1, + read_SPTR_Frag2, + read_SPTR, + Scales_RPTR, + 1); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each + // WARP; read_SPTR is shared among WARPs + core_mma_slice( + c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 2); + core_mma_slice( + c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 3); + // Barriers and Synchronizations + cp_async_wait_group(); + __syncthreads(); + core_mma_slice( + c, a, b, read2_SPTR_Frag1, read2_SPTR_Frag2, read2_SPTR, Scales_RPTR, 0); + // Updating global PTRs + WARP_StartGPTR_A1 += + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; // 4KB/16=256 (1)/16: int4*+1 = char*+16 + WARP_StartGPTR_A2 += + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; // 8KB/16=512 (1)/16: int4*+1 = char*+16 + BTile_GPTR += TilingConfig::TILE_K; +#else + PipelinedCoreLoop( + c, + read_SPTR, + read_SPTR_Frag1, + read_SPTR_Frag2, + Scales_RPTR); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each WARP; + // read_SPTR is shared among WARPs + // Updating global PTRs + WARP_StartGPTR_A1 += + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; // 4KB/16=256 (1)/16: int4*+1 = char*+16 + WARP_StartGPTR_A2 += + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; // 8KB/16=512 (1)/16: int4*+1 = char*+16 + BTile_GPTR += TilingConfig::TILE_K; + // Barriers and Synchronizations + cp_async_wait_group(); + __syncthreads(); +#endif + } + 
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Store the C fragments to shared memory. + float(*smem_CFrag)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4] = + reinterpret_cast(smem); + StoreToSharedMemoryFromRegister(smem_CFrag, c); + __syncthreads(); + // Now that shared memory contains all the D tiles, stream them to global memory. + OutputDataType* BlockGlobalPTR = + C + BatchID * (M_Global * N_Global) + Tile_Start_M + Tile_Start_N * M_Global; + for (size_t i = warpId; i < NumColumnToCopy; i += TilingConfig::BLOCK_WARPS) // i-th column +#pragma unroll + for (size_t j = threadIdx.x % WARP_SIZE; j < TilingConfig::TILE_M; + j += WARP_SIZE) // j-th row + { + if constexpr (std::is_same::value) + BlockGlobalPTR[j + i * M_Global] = __float2half_rn(smem_CFrag[i][j]); + else + BlockGlobalPTR[j + i * M_Global] = smem_CFrag[i][j]; + } +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f13abe0362796d848ca17c03af389da2e7c28ad6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef PTX_MMA_CUH +#define PTX_MMA_CUH + +#include +#include +#include + +#include +#include "configs.h" + +#ifdef PIPELINE_LEVEL_SMEM +template +__device__ __forceinline__ void B_FromSharedToReg( + uint32_t __restrict__ Reg[][4], + half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + int slice_id) +{ +#ifdef DEBUG_MODE + static_assert((TilingConfig::WARP_COL_MMA_TENSORS == 1) || + (TilingConfig::WARP_COL_MMA_TENSORS % 2 == 0)); +#endif + + const int warpId = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 * + WARP_j; // each warp may start from reading warp_start_col'th column of + // the B tile in shared memory +#ifdef DEBUG_MODE + assert(warp_start_col == 0); +#endif + + int col = (lane_id % 8) + (lane_id / 16) * 8; + int row = (lane_id % 16) / 8 * 8; + uint32_t smem_local_ptr = static_cast( + __cvta_generic_to_shared(&read_SPTR[warp_start_col + col][slice_id * MMA_16 + row])); + if (TilingConfig::WARP_COL_MMA_TENSORS == 1) { + asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" + : "=r"(Reg[0][0]), "=r"(Reg[0][1]) + : "r"(smem_local_ptr)); + } else { +#pragma unroll + for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS / 2; i++) { + asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" + : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3]) + : "r"(smem_local_ptr)); + smem_local_ptr += 16 * (WARP_K + PADDING_SHARED_MEM_FOR_B_8) * sizeof(half); + } + } +} +#else +// Debug: Whether ldmatrix.trans is required??? 
+// B is in column-major +template +__device__ __forceinline__ void B_FromSharedToReg( + uint32_t __restrict__ Reg[][4], + half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + int k_offset) +{ +#ifdef DEBUG_MODE + static_assert((TilingConfig::WARP_COL_MMA_TENSORS == 1) || + (TilingConfig::WARP_COL_MMA_TENSORS % 2 == 0)); +#endif + + const int warpId = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 * + WARP_j; // each warp may start from reading warp_start_col'th column of + // the B tile in shared memory +#ifdef DEBUG_MODE + assert(warp_start_col == 0); +#endif + + int col = (lane_id % 8) + (lane_id / 16) * 8; + int row = (lane_id % 16) / 8 * 8; + uint32_t smem_local_ptr = static_cast( + __cvta_generic_to_shared(&read_SPTR[warp_start_col + col][k_offset + row])); + if (TilingConfig::WARP_COL_MMA_TENSORS == 1) { + asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" + : "=r"(Reg[0][0]), "=r"(Reg[0][1]) + : "r"(smem_local_ptr)); + } else { +#pragma unroll + for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS / 2; i++) { + asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" + : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3]) + : "r"(smem_local_ptr)); + smem_local_ptr += 16 * (WARP_K + PADDING_SHARED_MEM_FOR_B_8) * sizeof(half); + } + } +} +#endif + +__device__ __forceinline__ void MMA_FP16_M16N8K16(uint32_t __restrict__ c[], + uint32_t __restrict__* a, + uint32_t __restrict__* b) +{ + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" + "{ %0, %1, %2, %3}," + "{ %4, %5, %6, %7 }," + "{ %8, %9 }," + "{ %10, %11, %12, %13 };" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(a[0]), + "r"(a[1]), + "r"(a[2]), + "r"(a[3]), + "r"(b[0]), + "r"(b[1]), + "r"(c[0]), + "r"(c[1]), + "r"(c[2]), + "r"(c[3])); +} + +#endif diff 
--git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..87b2b3d68777a38cfccadaa55740cc681bf45cf1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .blocked_flash import * diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6db6c2febb13c571ef8035a284e4ce0290403b34 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h new file mode 100644 index 0000000000000000000000000000000000000000..ed8eb9e19b3dafaa8ab4d5add697c53d95ea1c54 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include "cuda.h" + +struct AttentionAtom { + /* + The attention atom describes the workload of a particular query. The attention + kernel will execute each ``AttentionAtom`` for each head of the model. 
+ */ + + // Pointer to a list of KV block indices. + int32_t* block_idx_list; + + // Index of first token in the ragged batch associated with this atom. + int32_t q_start_idx; + + // Number of tokens in the ragged batch associated with this atom. + int32_t q_len; + + // Number of key/value blocks associated with this atom. All but the last are + // assumed to be fully dense. + int32_t kv_blocks; + + // Number of tokens in the last key/value block. + int32_t total_extent; + + // Global index of the first token in the atom. For example, in a prompt continuation + // in which we have already processed 768 tokens, this would be 768. + int32_t global_q_idx; + + // Unused + int32_t unused; +}; diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6cb60ad00f9b2b9322bc3a23b08b4d562292c991 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include + +#include "blocked_flash.h" +#include "flash.h" + +#define CHECK_SHAPE(x, ...) 
\ + TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), \ + #x " must have shape (" #__VA_ARGS__ ")") + +void flash_attn_by_atoms(at::Tensor& out, + at::Tensor& q, + at::Tensor& k, + at::Tensor& v, + at::Tensor& attention_atoms, + const float softmax_scale, + const bool is_causal) +{ + auto dprops = at::cuda::getCurrentDeviceProperties(); + + bool is_sm8x = dprops->major == 8 && dprops->minor >= 0; + bool is_sm90 = dprops->major == 9 && dprops->minor == 0; + TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer."); + + auto q_dtype = q.dtype(); + TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16, + "FlashAttention only support fp16 and bf16 data type"); + if (q_dtype == torch::kBFloat16) { + TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer"); + } + TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype"); + TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype"); + + TORCH_CHECK(q.is_cuda(), "Input tensor must be on CUDA device"); + TORCH_CHECK(k.is_cuda(), "Input tensor must be on CUDA device"); + TORCH_CHECK(v.is_cuda(), "Input tensor must be on CUDA device"); + + TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension"); + TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension"); + TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension"); + + const int total_q = q.size(0); + const int head_size = k.size(-1); + const int num_heads_kv = k.size(-2); + const int num_heads_q = q.size(-1) / head_size; + + TORCH_CHECK(head_size <= 256, "head_size must be <= 256"); + TORCH_CHECK(head_size % 8 == 0, "head_size must be divisible by 8"); + TORCH_CHECK(num_heads_q % num_heads_kv == 0, "num_heads_q must be divisible by num_heads_kv"); + + Flash_fwd_params params; + + params.is_bf16 = q.dtype() == torch::kBFloat16; + + // Set the pointers and strides. 
+ params.q_ptr = q.data_ptr(); + params.k_ptr = k.data_ptr(); + params.v_ptr = v.data_ptr(); + params.o_ptr = out.data_ptr(); + params.atoms = reinterpret_cast(attention_atoms.data_ptr()); + + // All stride are in elements, not bytes. + params.q_row_stride = q.stride(0); + params.k_row_stride = k.stride(1); + params.v_row_stride = v.stride(1); + params.o_row_stride = out.stride(0); + + // Assume heads are contiguous. + params.q_head_stride = head_size; + params.k_head_stride = head_size; + params.v_head_stride = head_size; + params.o_head_stride = head_size; + + // Head params + params.h = num_heads_q; + params.h_k = num_heads_kv; + params.h_h_k_ratio = num_heads_q / num_heads_kv; + params.d = head_size; + auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; }; + params.d_rounded = round_multiple(head_size, 32); + params.num_atoms = attention_atoms.size(0); + + // Set the different scale values. + params.scale_softmax = softmax_scale; + params.scale_softmax_log2 = softmax_scale * M_LOG2E; + + params.is_causal = is_causal; + + auto stream = at::cuda::getCurrentCUDAStream().stream(); + run_mha_fwd(params, stream); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h new file mode 100644 index 0000000000000000000000000000000000000000..68037b4251131747479ce93d6ac5df5a8af0b028 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include + +void flash_attn_by_atoms(at::Tensor& out, + at::Tensor& q, + at::Tensor& k, + at::Tensor& v, + at::Tensor& attention_atoms, + const float softmax_scale, + const bool is_causal); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py new file mode 100644 index 0000000000000000000000000000000000000000..54d465698b4edb5f9eddf66f711d2eba8f24b522 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py @@ -0,0 +1,107 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from deepspeed.accelerator import get_accelerator +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import RaggedOpsBuilder + +from ... import DSKernelBase + + +def get_q_block_size(head_size: int) -> int: + """ + Returns the query block size required by the kernel given a head size. + """ + cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device()) #ignore-cuda + + if cc_major < 8: + raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0") + + if head_size <= 64: + return 128 + elif head_size <= 160: + if cc_minor != 0: + return 64 + else: + return 128 + elif head_size == 192: + return 128 + elif head_size == 224: + if cc_minor != 0: + return 64 + else: + return 128 + else: + if cc_major == 8 and cc_minor == 0: + return 128 + else: + return 64 + + +def get_kv_block_size(head_size: int) -> int: + """ + Return preferred granulatity for blocked KV-cache implementation. 
+ """ + cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device()) #ignore-cuda + + if cc_major < 8: + raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0") + + if head_size <= 64: + return 128 + elif head_size != 160 or cc_minor != 0: + return 64 + else: + return 32 + + +class BlockedFlashAttn(DSKernelBase): + """ + Modified implementation of flash-attn-2 tuned for inference on blocked KV-cache and wider + range of input sequence lengths. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + + def __init__(self, head_size: int, dtype: DtypeEnum) -> None: + """ + Triggers any compilation of the kernels. + """ + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in BlockedFlashAttn.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported data types are {}".format( + dtype, BlockedFlashAttn.supported_dtypes)) + + # For testing, need to revert to 32 + if head_size % 16 != 0: + raise ValueError("Head size must be divisible by 32 (configured with {})".format(head_size)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.flash_attn_by_atoms + + def __call__(self, out: torch.Tensor, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, atoms: torch.Tensor, + softmax_scale: float) -> torch.Tensor: + """ + Flash attention implementation atop a blocked KV-cache. Atoms should be pre-populated. + See attention_atom.h for further details on the structure of the information. + + Arguments: + out (torch.Tensor): Output tensor of shape [tokens, hidden_size] + q (torch.Tensor): Query tensor of shape [tokens, hidden_size] + k (torch.Tensor): Key cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This Tensor only needs to be contiguous on the final dimension. + v (torch.Tensor): Value cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This Tensor only needs to be contiguous on the final dimension. 
+ atoms (torch.Tensor): Atom information tensor of shape [num_atoms, 8] and type int32. + Not all data is readable in this format. See attention_atom.h for further details. + softmax_scale (float): Softmax scale factor. + + Returns: + out (torch.Tensor): Output tensor of shape [tokens, hidden_size] + """ + self.kernel(out, q, k, v, atoms, softmax_scale, True) + return out diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2fe4397f7d272e8f67632d26ba22e63ffa9d2f6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..656dc783c22b3a92a45aeea4db02260c156e6a41 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp new file mode 100644 index 0000000000000000000000000000000000000000..04b72bf948db6b2e86c74d19264def509ed26c70 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "embed.h" +#include "ragged_kernel_helpers.h" + +#ifdef BF16_AVAILABLE +#define DISPATCH_FOR_FLOAT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using float_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using float_t = __half; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kBFloat16) { \ + using float_t = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dispatch type"); \ + } \ + }() +#else +#define DISPATCH_FOR_FLOAT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using float_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using float_t = __half; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dispatch type"); \ + } \ + }() +#endif + +#define DISPATCH_FOR_INT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kInt32) { \ + using int_t = int32_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kInt64) { \ + using int_t = int64_t; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dispatch type"); \ + } \ + }() + +/* +Embeddings kernel aware of ragged batch structure. 
+*/ +void ragged_embed(torch::Tensor& embedded_tokens, + torch::Tensor& input_ids, + torch::Tensor& embedding_weight, + c10::optional& position_embedding_weight, + int32_t pos_embed_offset, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs) +{ + // We don't care about KV cache here, so just hardcoding 0s for block_size/num_blocks + BatchWrapperCPP batch_wrapper = + make_cpp_batch_wrapper(batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, 0, 0); + + const int32_t n_tokens = input_ids.numel(); + const int32_t embed_dim = embedding_weight.size(1); + const int32_t vocab_size = embedding_weight.size(0); + + DISPATCH_FOR_INT(input_ids.scalar_type(), [&] { + DISPATCH_FOR_FLOAT(embedding_weight.scalar_type(), [&] { + float_t* pos_embed_ptr = nullptr; + int32_t max_position_embed_idx = 0; + if (position_embedding_weight.has_value()) { + TORCH_CHECK( + position_embedding_weight.value().options().dtype() == + embedding_weight.options().dtype(), + "position_embedding_weight and embedding_weight must have the same dtype"); + pos_embed_ptr = + reinterpret_cast(position_embedding_weight.value().data_ptr()); + max_position_embed_idx = position_embedding_weight.value().size(0) - 1; + } + + launch_ragged_embed_kernel((float_t*)embedded_tokens.data_ptr(), + (const int_t*)input_ids.data_ptr(), + (const float_t*)embedding_weight.data_ptr(), + pos_embed_ptr, + batch_wrapper, + n_tokens, + embed_dim, + vocab_size, + max_position_embed_idx, + pos_embed_offset, + at::cuda::getCurrentCUDAStream()); + }); + }); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.h b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.h new file mode 100644 index 0000000000000000000000000000000000000000..7897c1362669695133cf051c7a3dc319ed2a0783 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.h @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "embed.cuh" + +/* +Embeddings kernel aware of ragged batch structure. +*/ +void ragged_embed(torch::Tensor& embedded_tokens, + torch::Tensor& input_ids, + torch::Tensor& embedding_weight, + c10::optional& position_weight, + int32_t position_embed_offset, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py new file mode 100644 index 0000000000000000000000000000000000000000..0443ce3fdd8eee3787b16c3baa1604f3f63d211b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from ... import DSKernelBase +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ....inference_utils import elem_size +from ....ragged import RaggedBatchWrapper + + +class RaggedEmbeddingKernel(DSKernelBase): + """ + Ragged-aware CUDA kernel implementation for an embedding lookup. This will only lookup + the necessary tokens for a padded batch (i.e. if we are CGed and running with a slightly + larger batch size than the actual tokens). + """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + supported_token_dtypes = [torch.int32, torch.int64] + + def __init__(self, embed_dtype: torch.dtype, token_dtype: torch.dtype, embed_dim: int) -> None: + """ + Args: + fp_dtype (torch.dtype): Data type of the embedding table and output dtype. 
+ Supported values are torch.float16, torch.bfloat16, and torch.float32. + token_dtype (torch.dtype): Data type of the token ids. Supported values are + torch.int32 and torch.int64. + embed_dim (int): Embedding dimension. Must be aligned to 16 bytes. + """ + if embed_dtype not in RaggedEmbeddingKernel.supported_dtypes: + raise ValueError("Unsupported embedding data type: {}, supported_dtypes are {}".format( + embed_dtype, RaggedEmbeddingKernel.supported_dtypes)) + + if token_dtype not in RaggedEmbeddingKernel.supported_token_dtypes: + raise ValueError("Unsupported token data type: {}, supported_dtypes are {}".format( + token_dtype, RaggedEmbeddingKernel.supported_token_dtypes)) + + if elem_size(embed_dtype) * embed_dim % 16 != 0: + raise ValueError("Embedding dimension must be aligned to 16 bytes, got {}".format(embed_dim)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.ragged_embed + + def __call__(self, + embedded_tokens: torch.Tensor, + ragged_wrapper: RaggedBatchWrapper, + embedding_weight: torch.Tensor, + position_embed_weight: Optional[torch.Tensor] = None, + position_embed_offset: int = 0) -> torch.Tensor: + """ + Ragged aware embedding lookup. + + Args: + embedded_tokens (torch.Tensor): Output tensor of shape [num_tokens, embed_dim] + ragged_wrapper (RaggedBatchWrapper): Wrapper for the ragged batch. 
+ embedding_weight (torch.Tensor): Embedding table of shape [vocab_size, embed_dim] + """ + self.kernel(embedded_tokens, ragged_wrapper.input_ids(), + embedding_weight, position_embed_weight, position_embed_offset, + ragged_wrapper.batch_metadata_buffer(), ragged_wrapper.inflight_seq_descriptors(), + ragged_wrapper.tokens_to_seq(), ragged_wrapper.kv_ptrs()) + return embedded_tokens diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..81d6d534ddf5b4c9415786ec22df17c834fb8a37 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "ds_kernel_utils.h" +#include "embed.cuh" +#include "memory_access_utils.h" +#include "ragged_dtypes.h" + +namespace embed { + +constexpr int granularity = 16; +constexpr int threads = 512; + +} // namespace embed + +template +__global__ void ragged_embed_kernel(EmbedType* embedded_tokens, + const TokenType* input_ids, + const EmbedType* embedding_weight, + const EmbedType* position_weight, + const BatchWrapperCPP batch_desc, + const int32_t embed_dim, + const int32_t vocab_size, + const int32_t max_position_embed_idx, + const int32_t position_embed_offset) +{ + constexpr int T_vector = embed::granularity / sizeof(EmbedType); + + const int32_t token_idx = blockIdx.y; + + // It's possible our batch is padded (under CG conditions typically) + if (token_idx >= batch_desc.batch_metadata->n_tokens) return; + + TokenType token_value = input_ids[token_idx]; + + if (token_value >= vocab_size || token_value < 0) { + // TODO(cmikeh2): This is invalid, but not sure how we want to handle it being invalid + // yet. 
+ return; + } + + const EmbedType* embedding_row = embedding_weight + token_value * embed_dim; + EmbedType* dest_row = embedded_tokens + token_idx * embed_dim; + + const int channel_offset = (threadIdx.x + embed::threads * blockIdx.x) * T_vector; + + if (channel_offset < embed_dim) { + EmbedType reg_buf[T_vector]; + + mem_access::load_global(reg_buf, embedding_row + channel_offset); + + if (position_weight != nullptr) { + // Map the token to its global idx (indirect memory accesses aren't great but whatever) + const int32_t seq_idx = batch_desc.tokens_to_seq[token_idx]; + const InflightSeqDescriptor seq_desc = batch_desc.seq_metadata[seq_idx]; + int32_t pos_emb_idx = seq_desc.seen_tokens + (token_idx - seq_desc.start_idx); + + // Position embed offset is an OPT-specific feature I think? + pos_emb_idx = pos_emb_idx + position_embed_offset; + + // This clamping is technically + pos_emb_idx = (pos_emb_idx < 0) ? 0 : pos_emb_idx; + pos_emb_idx = (pos_emb_idx >= max_position_embed_idx) ? max_position_embed_idx + : pos_emb_idx; + + const EmbedType* position_embedding_row = position_weight + pos_emb_idx * embed_dim; + + EmbedType pos_buf[T_vector]; + mem_access::load_global(pos_buf, + position_embedding_row + channel_offset); + +#pragma unroll + for (int i = 0; i < T_vector; i++) { reg_buf[i] += pos_buf[i]; } + } + + mem_access::store_global(dest_row + channel_offset, reg_buf); + } +} + +template +void launch_ragged_embed_kernel(EmbedType* embedded_tokens, + const TokenType* input_ids, + const EmbedType* embedding_weight, + const EmbedType* position_weight, + const BatchWrapperCPP batch_desc, + const int32_t n_tokens, + const int32_t embed_dim, + const int32_t vocab_size, + const int32_t max_position_embed_idx, + const int32_t position_embed_offset, + cudaStream_t stream) +{ + constexpr int T_vector = embed::granularity / sizeof(EmbedType); + constexpr int elems_per_block = embed::threads * T_vector; + const int parallel_blocks = (embed_dim + elems_per_block - 1) / 
elems_per_block; + + const dim3 grid_dim(parallel_blocks, n_tokens, 1); + const dim3 block_dim(embed::threads, 1, 1); + + ragged_embed_kernel + <<>>(embedded_tokens, + input_ids, + embedding_weight, + position_weight, + batch_desc, + embed_dim, + vocab_size, + max_position_embed_idx, + position_embed_offset); +} + +#define INSTANTIATE_EMBED_FOR_TYPES(TOKEN_TYPE, EMBED_TYPE) \ + template void launch_ragged_embed_kernel( \ + EMBED_TYPE * embedded_tokens, \ + const TOKEN_TYPE* input_ids, \ + const EMBED_TYPE* embedding_weight, \ + const EMBED_TYPE* position_weight, \ + const BatchWrapperCPP batch_descriptor, \ + const int32_t n_tokens, \ + const int32_t embed_dim, \ + const int32_t vocab_size, \ + const int32_t max_position_embed_idx, \ + const int32_t position_embed_offset, \ + cudaStream_t stream); + +INSTANTIATE_EMBED_FOR_TYPES(int32_t, float) +INSTANTIATE_EMBED_FOR_TYPES(int64_t, float) + +INSTANTIATE_EMBED_FOR_TYPES(int32_t, __half) +INSTANTIATE_EMBED_FOR_TYPES(int64_t, __half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_EMBED_FOR_TYPES(int32_t, __nv_bfloat16) +INSTANTIATE_EMBED_FOR_TYPES(int64_t, __nv_bfloat16) +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15277dab06aee40a34be7c44b088bf408398d6d7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py new file mode 100644 index 
0000000000000000000000000000000000000000..72ba2b6019bb9e9f1ce58bd7ae36671a07a5bda2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from typing import Tuple + +from ... import DSKernelBase +from ....inference_utils import DtypeEnum +from ....ragged import RaggedBatchWrapper +from deepspeed.ops.op_builder import RaggedOpsBuilder + + +class RaggedTopKGating(DSKernelBase): + """ + CUDA implementation of top-1 gating. This will perform a softmax on the logits, + and return the scale as well as its idx within that expert's allocation. + """ + + supported_logit_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32] + + def __init__(self, logit_dtype: DtypeEnum) -> None: + + if not isinstance(logit_dtype, DtypeEnum): + logit_dtype = DtypeEnum(logit_dtype) + + if logit_dtype not in RaggedTopKGating.supported_logit_dtypes: + raise RuntimeError(f"Unsupported logit dtype {logit_dtype}") + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.top_k_gating + + def __call__(self, expert_counts: torch.Tensor, scores: torch.Tensor, assignments: torch.Tensor, + offsets: torch.Tensor, logits: torch.Tensor, + batch: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Perform the ragged top_k_gating. + + Arguments: + expert_counts (torch.Tensor): Tensor of 0s of shape [n_experts] to be filled with + number of tokens assigned to each expert. This must be filled with 0s else + the copy kernel will buffer overflow. In order to minimize the zero-fill cost, + it is recommended to write to 0 during the MoE output remapping. + scores (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place expert scaling + value. 
+ expert_assignment (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place + which expert a token has been assigned to. + expert_offset (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place which + offset within an experts group a token is. + logits (torch.Tensor): Raw logits of gating function. + batch (RaggedBatchWrapper): Batch information for ragged tensor. + + Returns: + tuple of (expert_counts, scores, expert_assignment, expert_offset) + """ + self.kernel(expert_counts, scores, assignments, offsets, logits, batch.batch_metadata_buffer()) + return expert_counts, scores, assignments, offsets diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/_orb_descriptor_positions.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/_orb_descriptor_positions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..453868d77ecf8e703bbbc74cc81e943fad489918 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/_orb_descriptor_positions.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/blob.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/blob.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b786a1507cb804a6a9aa97a5d5a773c2b2f2f9a3 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/blob.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/haar.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/haar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2eeb7e9fb85f29861fbbae7563c104782e5a8e98 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/haar.cpython-310.pyc differ diff --git 
a/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/match.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/match.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c51467adbd9a7e7bf497f8e140f0de99392a4498 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/match.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/sift.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/sift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f77ff2361c476c56050a529354e1e8fa40cfdb7d Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/sift.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/texture.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/texture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfb2c3d66f758c9ef533f193f43e972d610f85af Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/__pycache__/texture.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..204714892ffd29e509807708a1ceddb74a3b4f9b Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/test_sift.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/test_sift.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5bc7502c15854fec1b1cd5a29f503ad2fdf260fa Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/test_sift.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/test_util.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be569c7bdd7574822b06616f63fcd0a972ea0b2d Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/feature/tests/__pycache__/test_util.cpython-310.pyc differ