Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- evalkit_cambrian/lib/python3.10/site-packages/dotenv/__main__.py +6 -0
- evalkit_cambrian/lib/python3.10/site-packages/fonttools-4.55.3.dist-info/METADATA +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/fonttools-4.55.3.dist-info/top_level.txt +1 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/__init__.py +38 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/__init__.py +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py +8 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/eager_transforms/__init__.py +7 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/vmap/__init__.py +16 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/batch_tensor.py +25 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py +77 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/dim.py +110 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/magic_trace.py +42 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/op_properties.py +311 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/tree_map.py +14 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__init__.py +3 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__pycache__/_parsing.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__pycache__/rearrange.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/_parsing.py +302 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/rearrange.py +207 -0
- evalkit_cambrian/lib/python3.10/site-packages/functorch/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/propcache/__pycache__/api.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/propcache/api.py +8 -0
- evalkit_cambrian/lib/python3.10/site-packages/starlette/__pycache__/status.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/starlette/__pycache__/templating.cpython-310.pyc +0 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_no_update.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h +28 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_native.h +35 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_native.h +21 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h +28 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_copy.h +91 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_ops.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_stack_cpu_dispatch.h +25 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h +28 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_ops.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint_compositeimplicitautograd_dispatch.h +23 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_ops.h +39 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta.h +114 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cuda_dispatch.h +26 -0
- infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_cuda_dispatch.h +24 -0
evalkit_cambrian/lib/python3.10/site-packages/dotenv/__main__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Entry point for cli, enables execution with `python -m dotenv`"""
|
| 2 |
+
|
| 3 |
+
from .cli import cli
|
| 4 |
+
|
| 5 |
+
if __name__ == "__main__":
|
| 6 |
+
cli()
|
evalkit_cambrian/lib/python3.10/site-packages/fonttools-4.55.3.dist-info/METADATA
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/fonttools-4.55.3.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
fontTools
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/__init__.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from torch._functorch.deprecated import (
|
| 9 |
+
combine_state_for_ensemble,
|
| 10 |
+
functionalize,
|
| 11 |
+
grad,
|
| 12 |
+
grad_and_value,
|
| 13 |
+
hessian,
|
| 14 |
+
jacfwd,
|
| 15 |
+
jacrev,
|
| 16 |
+
jvp,
|
| 17 |
+
make_functional,
|
| 18 |
+
make_functional_with_buffers,
|
| 19 |
+
vjp,
|
| 20 |
+
vmap,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
# utilities. Maybe these should go in their own namespace in the future?
|
| 24 |
+
from torch._functorch.make_functional import (
|
| 25 |
+
FunctionalModule,
|
| 26 |
+
FunctionalModuleWithBuffers,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
# Top-level APIs. Please think carefully before adding something to the
|
| 30 |
+
# top-level namespace:
|
| 31 |
+
# - private helper functions should go into torch._functorch
|
| 32 |
+
# - very experimental things should go into functorch.experimental
|
| 33 |
+
# - compilation related things should go into functorch.compile
|
| 34 |
+
|
| 35 |
+
# Was never documented
|
| 36 |
+
from torch._functorch.python_key import make_fx
|
| 37 |
+
|
| 38 |
+
__version__ = torch.__version__
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (681 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/__init__.py
ADDED
|
File without changes
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file has moved to under torch/_functorch. It is not public API.
|
| 2 |
+
# If you are not a PyTorch developer and you are relying on the following
|
| 3 |
+
# imports, please file an issue.
|
| 4 |
+
from torch._functorch.aot_autograd import (
|
| 5 |
+
aot_autograd_decompositions,
|
| 6 |
+
KNOWN_TYPES,
|
| 7 |
+
PytreeThunk,
|
| 8 |
+
)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (314 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/eager_transforms/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file has moved to under torch/_functorch. It is not public API.
|
| 2 |
+
# If you are not a PyTorch developer and you are relying on the following
|
| 3 |
+
# imports, please file an issue.
|
| 4 |
+
from torch._functorch.eager_transforms import (
|
| 5 |
+
_assert_wrapped_functional,
|
| 6 |
+
_unwrap_functional_tensor,
|
| 7 |
+
)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (313 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (260 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/vmap/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file has moved to under torch/_functorch. It is not public API.
|
| 2 |
+
# If you are not a PyTorch developer and you are relying on the following
|
| 3 |
+
# imports, please file an issue.
|
| 4 |
+
from torch._functorch.vmap import (
|
| 5 |
+
_add_batch_dim,
|
| 6 |
+
_broadcast_to_and_flatten,
|
| 7 |
+
_create_batched_inputs,
|
| 8 |
+
_get_name,
|
| 9 |
+
_process_batched_inputs,
|
| 10 |
+
_remove_batch_dim,
|
| 11 |
+
_unwrap_batched,
|
| 12 |
+
_validate_and_get_batch_size,
|
| 13 |
+
Tensor,
|
| 14 |
+
tree_flatten,
|
| 15 |
+
tree_unflatten,
|
| 16 |
+
)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (522 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.09 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc
ADDED
|
Binary file (783 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc
ADDED
|
Binary file (3.95 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc
ADDED
|
Binary file (1.2 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/batch_tensor.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
|
| 8 |
+
from torch._C._functorch import _vmap_add_layers, _vmap_remove_layers
|
| 9 |
+
|
| 10 |
+
_enabled = False
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@contextmanager
|
| 14 |
+
def _enable_layers(dims):
|
| 15 |
+
global _enabled
|
| 16 |
+
assert not _enabled
|
| 17 |
+
input = sorted((d._level, d.size) for d in dims if not isinstance(d, int))
|
| 18 |
+
n = len(input)
|
| 19 |
+
try:
|
| 20 |
+
_vmap_add_layers(input)
|
| 21 |
+
_enabled = True
|
| 22 |
+
yield
|
| 23 |
+
finally:
|
| 24 |
+
_enabled = False
|
| 25 |
+
_vmap_remove_layers(n)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/delayed_mul_tensor.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from . import _Tensor, Tensor
|
| 9 |
+
from .reference import _dims, _enable_layers, llist, ltuple
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class DelayedMulTensor(_Tensor):
|
| 13 |
+
def __init__(self, lhs, rhs):
|
| 14 |
+
self._lhs, self._rhs = lhs, rhs
|
| 15 |
+
self._data = None
|
| 16 |
+
self._levels_data = None
|
| 17 |
+
self._has_device = lhs._has_device or rhs._has_device
|
| 18 |
+
self._batchtensor_data = None
|
| 19 |
+
self._tensor_data = None
|
| 20 |
+
|
| 21 |
+
@property
|
| 22 |
+
def _levels(self):
|
| 23 |
+
if self._levels_data is None:
|
| 24 |
+
levels = llist(self._lhs._levels)
|
| 25 |
+
for l in self._rhs._levels:
|
| 26 |
+
if l not in levels:
|
| 27 |
+
levels.append(l)
|
| 28 |
+
self._levels_data = ltuple(levels)
|
| 29 |
+
return self._levels_data
|
| 30 |
+
|
| 31 |
+
@property
|
| 32 |
+
def _batchtensor(self):
|
| 33 |
+
if self._batchtensor_data is None:
|
| 34 |
+
with _enable_layers(self._levels):
|
| 35 |
+
print("bt multiply fallback")
|
| 36 |
+
self._batchtensor_data = self._lhs._batchtensor * self._rhs._batchtensor
|
| 37 |
+
return self._batchtensor_data
|
| 38 |
+
|
| 39 |
+
@property
|
| 40 |
+
def _tensor(self):
|
| 41 |
+
if self._tensor_data is None:
|
| 42 |
+
self._tensor_data = Tensor.from_batched(
|
| 43 |
+
self._batchtensor, self._has_device
|
| 44 |
+
)._tensor
|
| 45 |
+
return self._tensor_data
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def ndim(self):
|
| 49 |
+
return self._batchtensor.ndim
|
| 50 |
+
|
| 51 |
+
@property
|
| 52 |
+
def dims(self):
|
| 53 |
+
return ltuple(super().dims)
|
| 54 |
+
|
| 55 |
+
def sum(self, dim):
|
| 56 |
+
dims = _dims(dim, 0, False, False)
|
| 57 |
+
n = ord("a")
|
| 58 |
+
all_levels = self._levels
|
| 59 |
+
|
| 60 |
+
def to_char(d):
|
| 61 |
+
return chr(n + all_levels.index(d))
|
| 62 |
+
|
| 63 |
+
plhs, levelslhs = self._lhs._tensor, self._lhs._levels
|
| 64 |
+
prhs, levelsrhs = self._rhs._tensor, self._rhs._levels
|
| 65 |
+
new_dims = tuple(d for d in self.dims if d not in dims)
|
| 66 |
+
new_levels = [l for l in self._levels if l not in dims]
|
| 67 |
+
fmt = "".join(
|
| 68 |
+
[
|
| 69 |
+
*(to_char(d) for d in levelslhs),
|
| 70 |
+
",",
|
| 71 |
+
*(to_char(d) for d in levelsrhs),
|
| 72 |
+
"->",
|
| 73 |
+
*(to_char(d) for d in new_levels),
|
| 74 |
+
]
|
| 75 |
+
)
|
| 76 |
+
result_data = torch.einsum(fmt, (plhs, prhs))
|
| 77 |
+
return Tensor.from_positional(result_data, new_levels, True)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/dim.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
_vmap_levels = []
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
|
| 10 |
+
class LevelInfo:
|
| 11 |
+
level: int
|
| 12 |
+
alive: bool = True
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class Dim:
|
| 16 |
+
def __init__(self, name: str, size: Union[None, int] = None):
|
| 17 |
+
self.name = name
|
| 18 |
+
self._size = None
|
| 19 |
+
self._vmap_level = None
|
| 20 |
+
if size is not None:
|
| 21 |
+
self.size = size
|
| 22 |
+
|
| 23 |
+
def __del__(self):
|
| 24 |
+
if self._vmap_level is not None:
|
| 25 |
+
_vmap_active_levels[self._vmap_stack].alive = False
|
| 26 |
+
while (
|
| 27 |
+
not _vmap_levels[-1].alive and current_level() == _vmap_levels[-1].level
|
| 28 |
+
):
|
| 29 |
+
_vmap_decrement_nesting()
|
| 30 |
+
_vmap_levels.pop()
|
| 31 |
+
|
| 32 |
+
@property
|
| 33 |
+
def size(self):
|
| 34 |
+
assert self.is_bound
|
| 35 |
+
return self._size
|
| 36 |
+
|
| 37 |
+
@size.setter
|
| 38 |
+
def size(self, size: int):
|
| 39 |
+
if self._size is None:
|
| 40 |
+
self._size = size
|
| 41 |
+
self._vmap_level = _vmap_increment_nesting(size, "same")
|
| 42 |
+
self._vmap_stack = len(_vmap_levels)
|
| 43 |
+
_vmap_levels.append(LevelInfo(self._vmap_level))
|
| 44 |
+
|
| 45 |
+
elif self._size != size:
|
| 46 |
+
raise DimensionBindError(
|
| 47 |
+
f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}"
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
@property
|
| 51 |
+
def is_bound(self):
|
| 52 |
+
return self._size is not None
|
| 53 |
+
|
| 54 |
+
def __repr__(self):
|
| 55 |
+
return self.name
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def extract_name(inst):
|
| 59 |
+
assert inst.opname == "STORE_FAST" or inst.opname == "STORE_NAME"
|
| 60 |
+
return inst.argval
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
_cache = {}
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def dims(lists=0):
|
| 67 |
+
frame = inspect.currentframe()
|
| 68 |
+
assert frame is not None
|
| 69 |
+
calling_frame = frame.f_back
|
| 70 |
+
assert calling_frame is not None
|
| 71 |
+
code, lasti = calling_frame.f_code, calling_frame.f_lasti
|
| 72 |
+
key = (code, lasti)
|
| 73 |
+
if key not in _cache:
|
| 74 |
+
first = lasti // 2 + 1
|
| 75 |
+
instructions = list(dis.get_instructions(calling_frame.f_code))
|
| 76 |
+
unpack = instructions[first]
|
| 77 |
+
|
| 78 |
+
if unpack.opname == "STORE_FAST" or unpack.opname == "STORE_NAME":
|
| 79 |
+
# just a single dim, not a list
|
| 80 |
+
name = unpack.argval
|
| 81 |
+
ctor = Dim if lists == 0 else DimList
|
| 82 |
+
_cache[key] = lambda: ctor(name=name)
|
| 83 |
+
else:
|
| 84 |
+
assert unpack.opname == "UNPACK_SEQUENCE"
|
| 85 |
+
ndims = unpack.argval
|
| 86 |
+
names = tuple(
|
| 87 |
+
extract_name(instructions[first + 1 + i]) for i in range(ndims)
|
| 88 |
+
)
|
| 89 |
+
first_list = len(names) - lists
|
| 90 |
+
_cache[key] = lambda: tuple(
|
| 91 |
+
Dim(n) if i < first_list else DimList(name=n)
|
| 92 |
+
for i, n in enumerate(names)
|
| 93 |
+
)
|
| 94 |
+
return _cache[key]()
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _dim_set(positional, arg):
|
| 98 |
+
def convert(a):
|
| 99 |
+
if isinstance(a, Dim):
|
| 100 |
+
return a
|
| 101 |
+
else:
|
| 102 |
+
assert isinstance(a, int)
|
| 103 |
+
return positional[a]
|
| 104 |
+
|
| 105 |
+
if arg is None:
|
| 106 |
+
return positional
|
| 107 |
+
elif not isinstance(arg, (Dim, int)):
|
| 108 |
+
return tuple(convert(a) for a in arg)
|
| 109 |
+
else:
|
| 110 |
+
return (convert(arg),)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/magic_trace.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
import os
|
| 7 |
+
import signal
|
| 8 |
+
import subprocess
|
| 9 |
+
from contextlib import contextmanager
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@contextmanager
|
| 13 |
+
def magic_trace(output="trace.fxt", magic_trace_cache="/tmp/magic-trace"):
|
| 14 |
+
pid = os.getpid()
|
| 15 |
+
if not os.path.exists(magic_trace_cache):
|
| 16 |
+
print(f"Downloading magic_trace to: {magic_trace_cache}")
|
| 17 |
+
subprocess.run(
|
| 18 |
+
[
|
| 19 |
+
"wget",
|
| 20 |
+
"-O",
|
| 21 |
+
magic_trace_cache,
|
| 22 |
+
"-q",
|
| 23 |
+
"https://github.com/janestreet/magic-trace/releases/download/v1.0.2/magic-trace",
|
| 24 |
+
]
|
| 25 |
+
)
|
| 26 |
+
subprocess.run(["chmod", "+x", magic_trace_cache])
|
| 27 |
+
args = [magic_trace_cache, "attach", "-pid", str(pid), "-o", output]
|
| 28 |
+
p = subprocess.Popen(args, stderr=subprocess.PIPE, encoding="utf-8")
|
| 29 |
+
while True:
|
| 30 |
+
x = p.stderr.readline()
|
| 31 |
+
print(x)
|
| 32 |
+
if "Attached" in x:
|
| 33 |
+
break
|
| 34 |
+
try:
|
| 35 |
+
yield
|
| 36 |
+
finally:
|
| 37 |
+
p.send_signal(signal.SIGINT)
|
| 38 |
+
r = p.wait()
|
| 39 |
+
print(p.stderr.read())
|
| 40 |
+
p.stderr.close()
|
| 41 |
+
if r != 0:
|
| 42 |
+
raise ValueError(f"magic_trace exited abnormally: {r}")
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/op_properties.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
# pointwise operators can go through a faster pathway
|
| 9 |
+
|
| 10 |
+
tensor_magic_methods = ["add", ""]
|
| 11 |
+
pointwise_magic_methods_with_reverse = (
|
| 12 |
+
"add",
|
| 13 |
+
"sub",
|
| 14 |
+
"mul",
|
| 15 |
+
"floordiv",
|
| 16 |
+
"div",
|
| 17 |
+
"truediv",
|
| 18 |
+
"mod",
|
| 19 |
+
"pow",
|
| 20 |
+
"lshift",
|
| 21 |
+
"rshift",
|
| 22 |
+
"and",
|
| 23 |
+
"or",
|
| 24 |
+
"xor",
|
| 25 |
+
)
|
| 26 |
+
pointwise_magic_methods = (
|
| 27 |
+
*(x for m in pointwise_magic_methods_with_reverse for x in (m, "r" + m)),
|
| 28 |
+
"eq",
|
| 29 |
+
"gt",
|
| 30 |
+
"le",
|
| 31 |
+
"lt",
|
| 32 |
+
"ge",
|
| 33 |
+
"gt",
|
| 34 |
+
"ne",
|
| 35 |
+
"neg",
|
| 36 |
+
"pos",
|
| 37 |
+
"abs",
|
| 38 |
+
"invert",
|
| 39 |
+
"iadd",
|
| 40 |
+
"isub",
|
| 41 |
+
"imul",
|
| 42 |
+
"ifloordiv",
|
| 43 |
+
"idiv",
|
| 44 |
+
"itruediv",
|
| 45 |
+
"imod",
|
| 46 |
+
"ipow",
|
| 47 |
+
"ilshift",
|
| 48 |
+
"irshift",
|
| 49 |
+
"iand",
|
| 50 |
+
"ior",
|
| 51 |
+
"ixor",
|
| 52 |
+
"int",
|
| 53 |
+
"long",
|
| 54 |
+
"float",
|
| 55 |
+
"complex",
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
pointwise_methods = (*(f"__{m}__" for m in pointwise_magic_methods),)
|
| 59 |
+
|
| 60 |
+
pointwise = (
|
| 61 |
+
*(getattr(torch.Tensor, m) for m in pointwise_methods),
|
| 62 |
+
torch.nn.functional.dropout,
|
| 63 |
+
torch.where,
|
| 64 |
+
torch.Tensor.abs,
|
| 65 |
+
torch.abs,
|
| 66 |
+
torch.Tensor.acos,
|
| 67 |
+
torch.acos,
|
| 68 |
+
torch.Tensor.acosh,
|
| 69 |
+
torch.acosh,
|
| 70 |
+
torch.Tensor.add,
|
| 71 |
+
torch.add,
|
| 72 |
+
torch.Tensor.addcdiv,
|
| 73 |
+
torch.addcdiv,
|
| 74 |
+
torch.Tensor.addcmul,
|
| 75 |
+
torch.addcmul,
|
| 76 |
+
torch.Tensor.addr,
|
| 77 |
+
torch.addr,
|
| 78 |
+
torch.Tensor.angle,
|
| 79 |
+
torch.angle,
|
| 80 |
+
torch.Tensor.asin,
|
| 81 |
+
torch.asin,
|
| 82 |
+
torch.Tensor.asinh,
|
| 83 |
+
torch.asinh,
|
| 84 |
+
torch.Tensor.atan,
|
| 85 |
+
torch.atan,
|
| 86 |
+
torch.Tensor.atan2,
|
| 87 |
+
torch.atan2,
|
| 88 |
+
torch.Tensor.atanh,
|
| 89 |
+
torch.atanh,
|
| 90 |
+
torch.Tensor.bitwise_and,
|
| 91 |
+
torch.bitwise_and,
|
| 92 |
+
torch.Tensor.bitwise_left_shift,
|
| 93 |
+
torch.bitwise_left_shift,
|
| 94 |
+
torch.Tensor.bitwise_not,
|
| 95 |
+
torch.bitwise_not,
|
| 96 |
+
torch.Tensor.bitwise_or,
|
| 97 |
+
torch.bitwise_or,
|
| 98 |
+
torch.Tensor.bitwise_right_shift,
|
| 99 |
+
torch.bitwise_right_shift,
|
| 100 |
+
torch.Tensor.bitwise_xor,
|
| 101 |
+
torch.bitwise_xor,
|
| 102 |
+
torch.Tensor.ceil,
|
| 103 |
+
torch.ceil,
|
| 104 |
+
torch.celu,
|
| 105 |
+
torch.nn.functional.celu,
|
| 106 |
+
torch.Tensor.clamp,
|
| 107 |
+
torch.clamp,
|
| 108 |
+
torch.Tensor.clamp_max,
|
| 109 |
+
torch.clamp_max,
|
| 110 |
+
torch.Tensor.clamp_min,
|
| 111 |
+
torch.clamp_min,
|
| 112 |
+
torch.Tensor.copysign,
|
| 113 |
+
torch.copysign,
|
| 114 |
+
torch.Tensor.cos,
|
| 115 |
+
torch.cos,
|
| 116 |
+
torch.Tensor.cosh,
|
| 117 |
+
torch.cosh,
|
| 118 |
+
torch.Tensor.deg2rad,
|
| 119 |
+
torch.deg2rad,
|
| 120 |
+
torch.Tensor.digamma,
|
| 121 |
+
torch.digamma,
|
| 122 |
+
torch.Tensor.div,
|
| 123 |
+
torch.div,
|
| 124 |
+
torch.dropout,
|
| 125 |
+
torch.nn.functional.dropout,
|
| 126 |
+
torch.nn.functional.elu,
|
| 127 |
+
torch.Tensor.eq,
|
| 128 |
+
torch.eq,
|
| 129 |
+
torch.Tensor.erf,
|
| 130 |
+
torch.erf,
|
| 131 |
+
torch.Tensor.erfc,
|
| 132 |
+
torch.erfc,
|
| 133 |
+
torch.Tensor.erfinv,
|
| 134 |
+
torch.erfinv,
|
| 135 |
+
torch.Tensor.exp,
|
| 136 |
+
torch.exp,
|
| 137 |
+
torch.Tensor.exp2,
|
| 138 |
+
torch.exp2,
|
| 139 |
+
torch.Tensor.expm1,
|
| 140 |
+
torch.expm1,
|
| 141 |
+
torch.feature_dropout,
|
| 142 |
+
torch.Tensor.float_power,
|
| 143 |
+
torch.float_power,
|
| 144 |
+
torch.Tensor.floor,
|
| 145 |
+
torch.floor,
|
| 146 |
+
torch.Tensor.floor_divide,
|
| 147 |
+
torch.floor_divide,
|
| 148 |
+
torch.Tensor.fmod,
|
| 149 |
+
torch.fmod,
|
| 150 |
+
torch.Tensor.frac,
|
| 151 |
+
torch.frac,
|
| 152 |
+
torch.Tensor.frexp,
|
| 153 |
+
torch.frexp,
|
| 154 |
+
torch.Tensor.gcd,
|
| 155 |
+
torch.gcd,
|
| 156 |
+
torch.Tensor.ge,
|
| 157 |
+
torch.ge,
|
| 158 |
+
torch.nn.functional.gelu,
|
| 159 |
+
torch.nn.functional.glu,
|
| 160 |
+
torch.Tensor.gt,
|
| 161 |
+
torch.gt,
|
| 162 |
+
torch.Tensor.hardshrink,
|
| 163 |
+
torch.hardshrink,
|
| 164 |
+
torch.nn.functional.hardshrink,
|
| 165 |
+
torch.nn.functional.hardsigmoid,
|
| 166 |
+
torch.nn.functional.hardswish,
|
| 167 |
+
torch.nn.functional.hardtanh,
|
| 168 |
+
torch.Tensor.heaviside,
|
| 169 |
+
torch.heaviside,
|
| 170 |
+
torch.Tensor.hypot,
|
| 171 |
+
torch.hypot,
|
| 172 |
+
torch.Tensor.i0,
|
| 173 |
+
torch.i0,
|
| 174 |
+
torch.Tensor.igamma,
|
| 175 |
+
torch.igamma,
|
| 176 |
+
torch.Tensor.igammac,
|
| 177 |
+
torch.igammac,
|
| 178 |
+
torch.Tensor.isclose,
|
| 179 |
+
torch.isclose,
|
| 180 |
+
torch.Tensor.isfinite,
|
| 181 |
+
torch.isfinite,
|
| 182 |
+
torch.Tensor.isinf,
|
| 183 |
+
torch.isinf,
|
| 184 |
+
torch.Tensor.isnan,
|
| 185 |
+
torch.isnan,
|
| 186 |
+
torch.Tensor.isneginf,
|
| 187 |
+
torch.isneginf,
|
| 188 |
+
torch.Tensor.isposinf,
|
| 189 |
+
torch.isposinf,
|
| 190 |
+
torch.Tensor.isreal,
|
| 191 |
+
torch.isreal,
|
| 192 |
+
torch.Tensor.kron,
|
| 193 |
+
torch.kron,
|
| 194 |
+
torch.Tensor.lcm,
|
| 195 |
+
torch.lcm,
|
| 196 |
+
torch.Tensor.ldexp,
|
| 197 |
+
torch.ldexp,
|
| 198 |
+
torch.Tensor.le,
|
| 199 |
+
torch.le,
|
| 200 |
+
torch.nn.functional.leaky_relu,
|
| 201 |
+
torch.Tensor.lerp,
|
| 202 |
+
torch.lerp,
|
| 203 |
+
torch.Tensor.lgamma,
|
| 204 |
+
torch.lgamma,
|
| 205 |
+
torch.Tensor.log,
|
| 206 |
+
torch.log,
|
| 207 |
+
torch.Tensor.log10,
|
| 208 |
+
torch.log10,
|
| 209 |
+
torch.Tensor.log1p,
|
| 210 |
+
torch.log1p,
|
| 211 |
+
torch.Tensor.log2,
|
| 212 |
+
torch.log2,
|
| 213 |
+
torch.nn.functional.logsigmoid,
|
| 214 |
+
torch.Tensor.logical_and,
|
| 215 |
+
torch.logical_and,
|
| 216 |
+
torch.Tensor.logical_not,
|
| 217 |
+
torch.logical_not,
|
| 218 |
+
torch.Tensor.logical_or,
|
| 219 |
+
torch.logical_or,
|
| 220 |
+
torch.Tensor.logical_xor,
|
| 221 |
+
torch.logical_xor,
|
| 222 |
+
torch.Tensor.logit,
|
| 223 |
+
torch.logit,
|
| 224 |
+
torch.Tensor.lt,
|
| 225 |
+
torch.lt,
|
| 226 |
+
torch.Tensor.maximum,
|
| 227 |
+
torch.maximum,
|
| 228 |
+
torch.Tensor.minimum,
|
| 229 |
+
torch.minimum,
|
| 230 |
+
torch.nn.functional.mish,
|
| 231 |
+
torch.Tensor.mvlgamma,
|
| 232 |
+
torch.mvlgamma,
|
| 233 |
+
torch.Tensor.nan_to_num,
|
| 234 |
+
torch.nan_to_num,
|
| 235 |
+
torch.Tensor.ne,
|
| 236 |
+
torch.ne,
|
| 237 |
+
torch.Tensor.neg,
|
| 238 |
+
torch.neg,
|
| 239 |
+
torch.Tensor.nextafter,
|
| 240 |
+
torch.nextafter,
|
| 241 |
+
torch.Tensor.outer,
|
| 242 |
+
torch.outer,
|
| 243 |
+
torch.polar,
|
| 244 |
+
torch.Tensor.polygamma,
|
| 245 |
+
torch.polygamma,
|
| 246 |
+
torch.Tensor.positive,
|
| 247 |
+
torch.positive,
|
| 248 |
+
torch.Tensor.pow,
|
| 249 |
+
torch.pow,
|
| 250 |
+
torch.Tensor.prelu,
|
| 251 |
+
torch.prelu,
|
| 252 |
+
torch.nn.functional.prelu,
|
| 253 |
+
torch.Tensor.rad2deg,
|
| 254 |
+
torch.rad2deg,
|
| 255 |
+
torch.Tensor.reciprocal,
|
| 256 |
+
torch.reciprocal,
|
| 257 |
+
torch.Tensor.relu,
|
| 258 |
+
torch.relu,
|
| 259 |
+
torch.nn.functional.relu,
|
| 260 |
+
torch.nn.functional.relu6,
|
| 261 |
+
torch.Tensor.remainder,
|
| 262 |
+
torch.remainder,
|
| 263 |
+
torch.Tensor.round,
|
| 264 |
+
torch.round,
|
| 265 |
+
torch.rrelu,
|
| 266 |
+
torch.nn.functional.rrelu,
|
| 267 |
+
torch.Tensor.rsqrt,
|
| 268 |
+
torch.rsqrt,
|
| 269 |
+
torch.rsub,
|
| 270 |
+
torch.selu,
|
| 271 |
+
torch.nn.functional.selu,
|
| 272 |
+
torch.Tensor.sgn,
|
| 273 |
+
torch.sgn,
|
| 274 |
+
torch.Tensor.sigmoid,
|
| 275 |
+
torch.sigmoid,
|
| 276 |
+
torch.nn.functional.sigmoid,
|
| 277 |
+
torch.Tensor.sign,
|
| 278 |
+
torch.sign,
|
| 279 |
+
torch.Tensor.signbit,
|
| 280 |
+
torch.signbit,
|
| 281 |
+
torch.nn.functional.silu,
|
| 282 |
+
torch.Tensor.sin,
|
| 283 |
+
torch.sin,
|
| 284 |
+
torch.Tensor.sinc,
|
| 285 |
+
torch.sinc,
|
| 286 |
+
torch.Tensor.sinh,
|
| 287 |
+
torch.sinh,
|
| 288 |
+
torch.nn.functional.softplus,
|
| 289 |
+
torch.nn.functional.softshrink,
|
| 290 |
+
torch.Tensor.sqrt,
|
| 291 |
+
torch.sqrt,
|
| 292 |
+
torch.Tensor.square,
|
| 293 |
+
torch.square,
|
| 294 |
+
torch.Tensor.sub,
|
| 295 |
+
torch.sub,
|
| 296 |
+
torch.Tensor.tan,
|
| 297 |
+
torch.tan,
|
| 298 |
+
torch.Tensor.tanh,
|
| 299 |
+
torch.tanh,
|
| 300 |
+
torch.nn.functional.tanh,
|
| 301 |
+
torch.threshold,
|
| 302 |
+
torch.nn.functional.threshold,
|
| 303 |
+
torch.trapz,
|
| 304 |
+
torch.Tensor.true_divide,
|
| 305 |
+
torch.true_divide,
|
| 306 |
+
torch.Tensor.trunc,
|
| 307 |
+
torch.trunc,
|
| 308 |
+
torch.Tensor.xlogy,
|
| 309 |
+
torch.xlogy,
|
| 310 |
+
torch.rand_like,
|
| 311 |
+
)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/dim/tree_map.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
from functorch._C import dim
|
| 8 |
+
|
| 9 |
+
tree_flatten = dim.tree_flatten
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def tree_map(fn, tree):
    """Apply `fn` to every leaf of `tree`, rebuilding the same pytree structure."""
    # tree_flatten returns the leaves plus a callable that reconstructs the
    # original structure from an iterable of (transformed) leaves.
    leaves, rebuild = tree_flatten(tree)
    return rebuild(fn(leaf) for leaf in leaves)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .rearrange import rearrange
|
| 2 |
+
|
| 3 |
+
__all__ = ["rearrange"]
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (232 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__pycache__/_parsing.cpython-310.pyc
ADDED
|
Binary file (9.93 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/__pycache__/rearrange.cpython-310.pyc
ADDED
|
Binary file (7.2 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/_parsing.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Adapted from https://github.com/arogozhnikov/einops/blob/36c7bb16e57d6e57f8f3050f9e07abdf3f00469f/einops/parsing.py.
|
| 2 |
+
|
| 3 |
+
MIT License
|
| 4 |
+
|
| 5 |
+
Copyright (c) 2018 Alex Rogozhnikov
|
| 6 |
+
|
| 7 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 8 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 9 |
+
in the Software without restriction, including without limitation the rights
|
| 10 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 11 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 12 |
+
furnished to do so, subject to the following conditions:
|
| 13 |
+
|
| 14 |
+
The above copyright notice and this permission notice shall be included in all
|
| 15 |
+
copies or substantial portions of the Software.
|
| 16 |
+
|
| 17 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 18 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 19 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 20 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 21 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 22 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 23 |
+
SOFTWARE.
|
| 24 |
+
"""
|
| 25 |
+
from __future__ import annotations
|
| 26 |
+
|
| 27 |
+
import keyword
|
| 28 |
+
import warnings
|
| 29 |
+
from typing import Collection, List, Mapping, Optional, Set, Tuple, Union
|
| 30 |
+
|
| 31 |
+
_ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class AnonymousAxis:
    """Represents an axis with a known size (> 1) but no user-supplied identifier.

    Note: Different instances of this class are not equal to each other, even if they have the same value.
    """

    def __init__(self, value: str) -> None:
        # Parse eagerly so an invalid size is rejected at construction time.
        parsed = int(value)
        if parsed < 1:
            raise ValueError(
                f"Anonymous axis should have positive length, not {parsed}"
            )
        self.value = parsed

    def __repr__(self) -> str:
        return f"{self.value}-axis"
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class ParsedExpression:
    """Structure containing information about one side of an `einops`-style pattern (e.g. 'b c (h w)')."""

    def __init__(
        self,
        expression: str,
        *,
        allow_underscore: bool = False,
        allow_duplicates: bool = False,
    ) -> None:
        """Parse the expression and store relevant metadata.

        Args:
            expression (str): the `einops`-pattern to parse
            allow_underscore (bool): whether to allow axis identifier names to begin with an underscore
            allow_duplicates (bool): whether to allow an identifier to appear more than once in the expression
        """
        self.has_ellipsis: bool = False
        self.has_ellipsis_parenthesized: Optional[bool] = None
        self.identifiers: Set[Union[str, AnonymousAxis]] = set()
        # that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition
        self.has_non_unitary_anonymous_axes: bool = False
        # composition keeps structure of composite axes, see how different corner cases are handled in tests
        self.composition: List[Union[List[Union[str, AnonymousAxis]], str]] = []
        if "." in expression:
            if "..." not in expression:
                raise ValueError(
                    "Expression may contain dots only inside ellipsis (...)"
                )
            if str.count(expression, "...") != 1 or str.count(expression, ".") != 3:
                raise ValueError(
                    "Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor "
                )
            # Collapse "..." into a single unicode character so the char-by-char
            # tokenizer below can treat the ellipsis like any other identifier char.
            expression = expression.replace("...", _ellipsis)
            self.has_ellipsis = True

        # Non-None while scanning inside a "(...)" group; collects its members.
        bracket_group: Optional[List[Union[str, AnonymousAxis]]] = None

        def add_axis_name(x: str) -> None:
            # Record one completed token (axis name, number, or ellipsis) into
            # identifiers/composition, honoring the current bracket context.
            if x in self.identifiers:
                if not (allow_underscore and x == "_") and not allow_duplicates:
                    raise ValueError(
                        f"Indexing expression contains duplicate dimension '{x}'"
                    )
            if x == _ellipsis:
                self.identifiers.add(_ellipsis)
                if bracket_group is None:
                    self.composition.append(_ellipsis)
                    self.has_ellipsis_parenthesized = False
                else:
                    bracket_group.append(_ellipsis)
                    self.has_ellipsis_parenthesized = True
            else:
                is_number = str.isdecimal(x)
                if is_number and int(x) == 1:
                    # handling the case of anonymous axis of length 1
                    if bracket_group is None:
                        self.composition.append([])
                    else:
                        pass  # no need to think about 1s inside parenthesis
                    return
                is_axis_name, reason = self.check_axis_name_return_reason(
                    x, allow_underscore=allow_underscore
                )
                if not (is_number or is_axis_name):
                    raise ValueError(f"Invalid axis identifier: {x}\n{reason}")
                axis_name: Union[str, AnonymousAxis] = (
                    AnonymousAxis(x) if is_number else x
                )
                self.identifiers.add(axis_name)
                if is_number:
                    self.has_non_unitary_anonymous_axes = True
                if bracket_group is None:
                    self.composition.append([axis_name])
                else:
                    bracket_group.append(axis_name)

        # Simple one-pass tokenizer: accumulate identifier characters, flush on
        # whitespace or parentheses.
        current_identifier = None
        for char in expression:
            if char in "() ":
                if current_identifier is not None:
                    add_axis_name(current_identifier)
                current_identifier = None
                if char == "(":
                    if bracket_group is not None:
                        raise ValueError(
                            "Axis composition is one-level (brackets inside brackets not allowed)"
                        )
                    bracket_group = []
                elif char == ")":
                    if bracket_group is None:
                        raise ValueError("Brackets are not balanced")
                    self.composition.append(bracket_group)
                    bracket_group = None
            elif str.isalnum(char) or char in ["_", _ellipsis]:
                if current_identifier is None:
                    current_identifier = char
                else:
                    current_identifier += char
            else:
                raise ValueError(f"Unknown character '{char}'")

        if bracket_group is not None:
            raise ValueError(f"Imbalanced parentheses in expression: '{expression}'")
        if current_identifier is not None:
            # Flush the trailing token (expression need not end with a delimiter).
            add_axis_name(current_identifier)

    @staticmethod
    def check_axis_name_return_reason(
        name: str, allow_underscore: bool = False
    ) -> Tuple[bool, str]:
        """Check if the given axis name is valid, and a message explaining why if not.

        Valid axes names are python identifiers except keywords, and should not start or end with an underscore.

        Args:
            name (str): the axis name to check
            allow_underscore (bool): whether axis names are allowed to start with an underscore

        Returns:
            Tuple[bool, str]: whether the axis name is valid, a message explaining why if not
        """
        if not str.isidentifier(name):
            return False, "not a valid python identifier"
        elif name[0] == "_" or name[-1] == "_":
            if name == "_" and allow_underscore:
                return True, ""
            # fixed: message previously read "should should not ..."
            return False, "axis name should not start or end with underscore"
        else:
            if keyword.iskeyword(name):
                warnings.warn(
                    f"It is discouraged to use axes names that are keywords: {name}",
                    RuntimeWarning,
                )
            if name in ["axis"]:
                warnings.warn(
                    "It is discouraged to use 'axis' as an axis name and will raise an error in future",
                    FutureWarning,
                )
            return True, ""

    @staticmethod
    def check_axis_name(name: str) -> bool:
        """Check if the name is a valid axis name.

        Args:
            name (str): the axis name to check

        Returns:
            bool: whether the axis name is valid
        """
        is_valid, _ = ParsedExpression.check_axis_name_return_reason(name)
        return is_valid
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def parse_pattern(
    pattern: str, axes_lengths: Mapping[str, int]
) -> Tuple[ParsedExpression, ParsedExpression]:
    """Parse an `einops`-style pattern into a left-hand side and right-hand side `ParsedExpression` object.

    Args:
        pattern (str): the `einops`-style rearrangement pattern
        axes_lengths (Mapping[str, int]): any additional length specifications for dimensions

    Returns:
        Tuple[ParsedExpression, ParsedExpression]: a tuple containing the left-hand side and right-hand side expressions
    """
    # adapted from einops.einops._prepare_transformation_recipe
    # https://github.com/arogozhnikov/einops/blob/230ac1526c1f42c9e1f7373912c7f8047496df11/einops/einops.py
    try:
        # Exactly one "->" yields a 2-tuple; anything else raises ValueError.
        left_str, right_str = pattern.split("->")
    except ValueError:
        raise ValueError("Pattern must contain a single '->' separator") from None

    if _ellipsis in axes_lengths:
        raise ValueError(f"'{_ellipsis}' is not an allowed axis identifier")

    left = ParsedExpression(left_str)
    right = ParsedExpression(right_str)

    # An ellipsis on the output side must have been bound on the input side.
    if right.has_ellipsis and not left.has_ellipsis:
        raise ValueError(
            f"Ellipsis found in right side, but not left side of a pattern {pattern}"
        )
    if left.has_ellipsis and left.has_ellipsis_parenthesized:
        raise ValueError(
            f"Ellipsis is parenthesis in the left side is not allowed: {pattern}"
        )

    return left, right
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def validate_rearrange_expressions(
    left: ParsedExpression, right: ParsedExpression, axes_lengths: Mapping[str, int]
) -> None:
    """Perform expression validations that are specific to the `rearrange` operation.

    Args:
        left (ParsedExpression): left-hand side expression
        right (ParsedExpression): right-hand side expression
        axes_lengths (Mapping[str, int]): any additional length specifications for dimensions
    """
    # Strict type check (not isinstance) so e.g. bools are rejected too.
    for length in axes_lengths.values():
        length_type = type(length)
        if length_type is not int:
            raise TypeError(
                f"rearrange axis lengths must be integers, got: {length_type}"
            )

    if left.has_non_unitary_anonymous_axes or right.has_non_unitary_anonymous_axes:
        raise ValueError("rearrange only supports unnamed axes of size 1")

    # rearrange neither drops nor invents axes, so both sides must name the
    # same identifier set.
    difference = left.identifiers ^ right.identifiers
    if difference:
        raise ValueError(
            f"Identifiers only on one side of rearrange expression (should be on both): {difference}"
        )

    unmatched_axes = axes_lengths.keys() - left.identifiers
    if unmatched_axes:
        raise ValueError(
            f"Identifiers not found in rearrange expression: {unmatched_axes}"
        )
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def comma_separate(collection: Collection[Union[str, Collection[str]]]) -> str:
    """Convert a collection of strings representing first class dims into a comma-separated string.

    Args:
        collection (Collection[Union[str, Collection[str]]]): the collection of strings to convert

    Returns:
        str: the comma-separated string

    Examples:
        >>> comma_separate(('d0',))
        'd0'

        >>> comma_separate(('d0', 'd1', 'd2', 'd3'))
        'd0, d1, d2, d3'

        >>> comma_separate([('d1', 'd4')])
        '(d1, d4)'

        >>> comma_separate([('d0',), (), ('d1',), ('d2',), ('d3', 'd4')])
        '(d0,), (), (d1,), (d2,), (d3, d4)'
    """
    rendered: List[str] = []
    for item in collection:
        if isinstance(item, str):
            rendered.append(item)
        else:
            # Nested collections recurse and are parenthesized; a one-element
            # group keeps a trailing comma so it reads as a Python tuple.
            trailing = "," if len(item) == 1 else ""
            rendered.append(f"({comma_separate(item)}{trailing})")
    return ", ".join(rendered)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/einops/rearrange.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
from typing import Callable, Dict, List, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from functorch._C import dim as _C
|
| 9 |
+
from ._parsing import (
|
| 10 |
+
_ellipsis,
|
| 11 |
+
AnonymousAxis,
|
| 12 |
+
comma_separate,
|
| 13 |
+
parse_pattern,
|
| 14 |
+
validate_rearrange_expressions,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
__all__ = ["rearrange"]
|
| 18 |
+
|
| 19 |
+
dims = _C.dims
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@functools.lru_cache(256)
def _create_rearrange_callable(
    tensor_ndim: int, pattern: str, **axes_lengths: int
) -> Callable[[torch.Tensor], torch.Tensor]:
    r"""Translate an `einops`-style pattern into a callable that performs the rearrange using first-class dimensions.

    Since an equivalent result is computed for tensors with the same number of dimensions, with the same pattern and
    specified axes lengths, this function can be memoized.

    Args:
        tensor_ndim (int): the number of dimensions in the tensor to rearrange
        pattern (str): the `einops`-style rearrangement pattern
        axes_lengths (int): any additional length specifications for dimensions

    Returns:
        Callable[[torch.Tensor], torch.Tensor]: a callable that performs the rearrangement
    """
    left, right = parse_pattern(pattern, axes_lengths)
    validate_rearrange_expressions(left, right, axes_lengths)

    # Empty sub-lists in the composition stand for unitary ("1") axes.
    n_anon_dims = sum(not dim for dim in left.composition)
    if left.has_ellipsis:
        # The ellipsis absorbs every tensor dim not named by the pattern.
        n_ellipsis_dims = tensor_ndim - (len(left.composition) - 1)
        n_named_dims = len(left.identifiers) - 1

        if (pattern_ndim := n_anon_dims + n_named_dims) > tensor_ndim:
            raise ValueError(
                f"Number of dimensions in pattern ({pattern_ndim}) must be less than or equal to the number of "
                f"dimensions in the tensor ({tensor_ndim})"
            )
    else:
        n_ellipsis_dims = 0
        n_named_dims = len(left.identifiers)

        # Without an ellipsis the pattern must describe every tensor dim exactly.
        if (pattern_ndim := len(left.composition)) != tensor_ndim:
            raise ValueError(
                f"Number of dimensions in pattern ({pattern_ndim}) must be equal to the number of dimensions in "
                f"the tensor ({tensor_ndim})"
            )
    n_dims = n_named_dims + n_ellipsis_dims + n_anon_dims

    if n_dims == 0:
        # an identity rearrangement on a 0-dimension tensor
        return lambda tensor: tensor

    # One synthetic first-class-dim name ("d0", "d1", ...) per tensor dim.
    first_class_dims: Tuple[str, ...] = tuple(f"d{i}" for i in range(n_dims))
    identifier_dim_map: Dict[Union[str, AnonymousAxis], Tuple[str, ...]] = {}
    anon_axes: List[AnonymousAxis] = []

    # map the left-hand side identifiers to strings representing first class dims
    dims_i = 0
    for dimension in left.composition:
        if isinstance(dimension, list):
            for identifier in dimension:
                # non-unitary anon axes are not allowed in rearrange & unitary anon axes are represented as empty lists
                assert isinstance(identifier, str)
                identifier_dim_map[identifier] = (first_class_dims[dims_i],)
                dims_i += 1
            if not dimension:
                # unitary anonymous axis
                anon_axis = AnonymousAxis("1")
                identifier_dim_map[anon_axis] = (first_class_dims[dims_i],)
                anon_axes.append(anon_axis)
                # NOTE: mutates left.composition in place so the generated code
                # below sees the anonymous axis like any named one.
                dimension.append(anon_axis)
                dims_i += 1
        elif dimension == _ellipsis:
            identifier = _ellipsis
            # The ellipsis maps to a run of consecutive first-class dims.
            identifier_dim_map[identifier] = tuple(
                first_class_dims[dims_i + j] for j in range(n_ellipsis_dims)
            )
            dims_i += n_ellipsis_dims
        else:
            raise ValueError(f"Unexpected dimension: {dimension}")

    def composition_to_dims(
        composition: Sequence[Union[List[Union[str, AnonymousAxis]], str]]
    ) -> List[Union[str, Tuple[str, ...]]]:
        """Convert a `ParsedExpression.composition` into a `Tensor.__getitem__` index of strings representing first
        class dims."""
        dim_composition: List[Union[str, Tuple[str, ...]]] = []
        for dimension in composition:
            if isinstance(dimension, list):
                dim_composition.append(
                    tuple(
                        dim
                        for identifier in dimension
                        for dim in identifier_dim_map[identifier]
                    )
                )
            elif dimension == _ellipsis:
                dim_composition.extend(identifier_dim_map[_ellipsis])
            else:
                raise ValueError(f"Unexpected dimension: {dimension}")
        return dim_composition

    left_dims = composition_to_dims(left.composition)
    right_dims = composition_to_dims(right.composition)
    anon_dims = tuple(identifier_dim_map[axis][0] for axis in anon_axes)
    specified_lengths = tuple(
        (identifier_dim_map[axis][0], length) for axis, length in axes_lengths.items()
    )

    # Build the source of a small function that binds first-class dims, indexes
    # with the LHS composition, reorders to the RHS, and sums out anonymous
    # unitary axes. exec() is safe here: every interpolated name was produced
    # above from validated identifiers, never from raw user text.
    custom_rearrange_callable_name = "do_rearrange"
    custom_rearrange_callable_code = (
        (
            f"def {custom_rearrange_callable_name}(tensor):\n"
            f"    {comma_separate(first_class_dims)} = dims({n_dims})\n"
        )
        + (
            "".join(
                f"    {dim}.size = {length}\n" for (dim, length) in specified_lengths
            )
            if specified_lengths
            else ""
        )
        + f"    tensor = tensor[{comma_separate(left_dims)}].order({comma_separate(right_dims)})\n"
        + (
            f"    return tensor.sum({comma_separate([anon_dims])}, keepdim=False)\n"
            if anon_dims
            else "    return tensor\n"
        )
    )

    exec(custom_rearrange_callable_code)
    # The exec'd def lands in this frame's locals; fish it out by name.
    return locals()[custom_rearrange_callable_name]
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def rearrange(
    tensor: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor, ...]],
    pattern: str,
    **axes_lengths: int,
) -> torch.Tensor:
    r"""A native implementation of `einops.rearrange`, a reader-friendly smart element reordering for multidimensional
    tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
    stack, concatenate and other operations.

    See: https://einops.rocks/api/rearrange/

    Args:
        tensor (Tensor or sequence of Tensor): the tensor(s) to rearrange
        pattern (str): the rearrangement pattern
        axes_lengths (int): any additional length specifications for dimensions

    Returns:
        Tensor: the rearranged tensor

    Examples:
        >>> # suppose we have a set of 32 images in "h w c" format (height-width-channel)
        >>> images = torch.randn((32, 30, 40, 3))

        >>> # stack along first (batch) axis, output is a single array
        >>> rearrange(images, 'b h w c -> b h w c').shape
        torch.Size([32, 30, 40, 3])

        >>> # concatenate images along height (vertical axis), 960 = 32 * 30
        >>> rearrange(images, 'b h w c -> (b h) w c').shape
        torch.Size([960, 40, 3])

        >>> # concatenated images along horizontal axis, 1280 = 32 * 40
        >>> rearrange(images, 'b h w c -> h (b w) c').shape
        torch.Size([30, 1280, 3])

        >>> # reordered axes to "b c h w" format for deep learning
        >>> rearrange(images, 'b h w c -> b c h w').shape
        torch.Size([32, 3, 30, 40])

        >>> # flattened each image into a vector, 3600 = 30 * 40 * 3
        >>> rearrange(images, 'b h w c -> b (c h w)').shape
        torch.Size([32, 3600])

        >>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
        >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
        torch.Size([128, 15, 20, 3])

        >>> # space-to-depth operation
        >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
        torch.Size([32, 15, 20, 12])
    """
    # A sequence of tensors is first stacked along a new leading dimension.
    if not isinstance(tensor, torch.Tensor):
        tensor = torch.stack(tensor)

    # The callable is memoized on (ndim, pattern, axes_lengths), so repeated
    # calls with the same shape/pattern reuse the compiled rearrangement.
    do_rearrange = _create_rearrange_callable(tensor.ndim, pattern, **axes_lengths)
    return do_rearrange(tensor)
|
evalkit_cambrian/lib/python3.10/site-packages/functorch/experimental/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (478 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc
ADDED
|
Binary file (2.35 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/propcache/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (321 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/propcache/api.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Public API of the property caching library."""
|
| 2 |
+
|
| 3 |
+
from ._helpers import cached_property, under_cached_property
|
| 4 |
+
|
| 5 |
+
__all__ = (
|
| 6 |
+
"cached_property",
|
| 7 |
+
"under_cached_property",
|
| 8 |
+
)
|
evalkit_cambrian/lib/python3.10/site-packages/starlette/__pycache__/status.cpython-310.pyc
ADDED
|
Binary file (4.46 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/starlette/__pycache__/templating.cpython-310.pyc
ADDED
|
Binary file (6.58 kB). View file
|
|
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_no_update.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_batch_norm_no_update_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {

// Thin @generated wrappers: each forwards its arguments unchanged to the
// corresponding dispatcher record in at::_ops.

// aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_no_update(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps) {
    return at::_ops::_batch_norm_no_update::call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// Out-variant with the out tensors leading (PyTorch `_out` convention).
// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_no_update_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps) {
    return at::_ops::_batch_norm_no_update_out::call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2, out3);
}
// Same op with the out tensors trailing (`_outf` spelling); dispatches to the
// same at::_ops record as the `_out` overload above.
// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_no_update_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    return at::_ops::_batch_norm_no_update_out::call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2, out3);
}

}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _cufft_get_plan_cache_size {
|
| 18 |
+
using schema = int64_t (at::DeviceIndex);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cufft_get_plan_cache_size")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cufft_get_plan_cache_size(DeviceIndex device_index) -> int")
|
| 24 |
+
static int64_t call(at::DeviceIndex device_index);
|
| 25 |
+
static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_native.h
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sub_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
|
| 20 |
+
TORCH_API void _foreach_sub_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
|
| 21 |
+
TORCH_API void foreach_tensor_sub_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
|
| 22 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sub_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
|
| 23 |
+
TORCH_API void foreach_tensor_sub_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
|
| 24 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sub_list_kernel_slow(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
|
| 25 |
+
TORCH_API void _foreach_sub_List_out(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
|
| 26 |
+
TORCH_API void foreach_tensor_sub_list_kernel_slow_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
|
| 27 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sub_list_kernel_cuda(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
|
| 28 |
+
TORCH_API void foreach_tensor_sub_list_kernel_cuda_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
|
| 29 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sub_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 30 |
+
TORCH_API void _foreach_sub_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
|
| 31 |
+
TORCH_API void foreach_tensor_sub_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 32 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sub_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 33 |
+
TORCH_API void foreach_tensor_sub_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 34 |
+
} // namespace native
|
| 35 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _functional_sym_constrain_range(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={});
|
| 21 |
+
TORCH_API void _fused_sgd_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={});
|
| 22 |
+
TORCH_API void _fused_sgd_outf(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out);
|
| 23 |
+
TORCH_API ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={});
|
| 24 |
+
TORCH_API void _fused_sgd_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={});
|
| 25 |
+
TORCH_API void _fused_sgd_outf(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out);
|
| 26 |
+
|
| 27 |
+
} // namespace compositeexplicitautograd
|
| 28 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_copy.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_reshape_alias_copy_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
|
| 26 |
+
inline at::Tensor _reshape_alias_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
|
| 27 |
+
return at::_ops::_reshape_alias_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor _reshape_alias_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
|
| 32 |
+
return at::_ops::_reshape_alias_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
|
| 37 |
+
inline at::Tensor _reshape_alias_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
|
| 38 |
+
return at::_ops::_reshape_alias_copy::call(self, size, stride);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor _reshape_alias_copy(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
|
| 43 |
+
return at::_ops::_reshape_alias_copy::call(self, size, stride);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
|
| 49 |
+
return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 53 |
+
at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
|
| 54 |
+
return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
|
| 60 |
+
return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 64 |
+
at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
|
| 65 |
+
return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 70 |
+
inline at::Tensor & _reshape_alias_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
|
| 71 |
+
return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 75 |
+
at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
|
| 76 |
+
return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 81 |
+
inline at::Tensor & _reshape_alias_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
|
| 82 |
+
return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
|
| 87 |
+
return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _softmax {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, int64_t, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_softmax")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _softmax_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_softmax")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_stack_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _stack(at::TensorList tensors, int64_t dim=0);
|
| 21 |
+
TORCH_API at::Tensor & _stack_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0);
|
| 22 |
+
TORCH_API at::Tensor & _stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
|
| 26 |
+
|
| 27 |
+
} // namespace cpu
|
| 28 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _weight_norm_interface {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, int64_t);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_weight_norm_interface")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & v, const at::Tensor & g, int64_t dim);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _weight_norm_interface_out {
|
| 29 |
+
using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_weight_norm_interface")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
|
| 35 |
+
static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1);
|
| 36 |
+
static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor adjoint(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API atleast_3d {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atleast_3d")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atleast_3d(Tensor self) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API atleast_3d_Sequence {
|
| 29 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atleast_3d")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Sequence")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]")
|
| 35 |
+
static ::std::vector<at::Tensor> call(at::TensorList tensors);
|
| 36 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta.h
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_avg_pool2d : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
template <bool KH = false, bool KW = false, bool DH = false, bool DW = false, bool PADH = false, bool PADW = false>
|
| 23 |
+
struct TORCH_API precompute_out {
|
| 24 |
+
|
| 25 |
+
precompute_out<true, KW, DH, DW, PADH, PADW> set_kH(int64_t value) {
|
| 26 |
+
static_assert(KH == false, "kH already set");
|
| 27 |
+
precompute_out<true, KW, DH, DW, PADH, PADW> ret;
|
| 28 |
+
ret.kH = value;
|
| 29 |
+
ret.kW = this->kW;
|
| 30 |
+
ret.dH = this->dH;
|
| 31 |
+
ret.dW = this->dW;
|
| 32 |
+
ret.padH = this->padH;
|
| 33 |
+
ret.padW = this->padW;
|
| 34 |
+
return ret;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
precompute_out<KH, true, DH, DW, PADH, PADW> set_kW(int64_t value) {
|
| 39 |
+
static_assert(KW == false, "kW already set");
|
| 40 |
+
precompute_out<KH, true, DH, DW, PADH, PADW> ret;
|
| 41 |
+
ret.kH = this->kH;
|
| 42 |
+
ret.kW = value;
|
| 43 |
+
ret.dH = this->dH;
|
| 44 |
+
ret.dW = this->dW;
|
| 45 |
+
ret.padH = this->padH;
|
| 46 |
+
ret.padW = this->padW;
|
| 47 |
+
return ret;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
precompute_out<KH, KW, true, DW, PADH, PADW> set_dH(int64_t value) {
|
| 52 |
+
static_assert(DH == false, "dH already set");
|
| 53 |
+
precompute_out<KH, KW, true, DW, PADH, PADW> ret;
|
| 54 |
+
ret.kH = this->kH;
|
| 55 |
+
ret.kW = this->kW;
|
| 56 |
+
ret.dH = value;
|
| 57 |
+
ret.dW = this->dW;
|
| 58 |
+
ret.padH = this->padH;
|
| 59 |
+
ret.padW = this->padW;
|
| 60 |
+
return ret;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
precompute_out<KH, KW, DH, true, PADH, PADW> set_dW(int64_t value) {
|
| 65 |
+
static_assert(DW == false, "dW already set");
|
| 66 |
+
precompute_out<KH, KW, DH, true, PADH, PADW> ret;
|
| 67 |
+
ret.kH = this->kH;
|
| 68 |
+
ret.kW = this->kW;
|
| 69 |
+
ret.dH = this->dH;
|
| 70 |
+
ret.dW = value;
|
| 71 |
+
ret.padH = this->padH;
|
| 72 |
+
ret.padW = this->padW;
|
| 73 |
+
return ret;
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
precompute_out<KH, KW, DH, DW, true, PADW> set_padH(int64_t value) {
|
| 78 |
+
static_assert(PADH == false, "padH already set");
|
| 79 |
+
precompute_out<KH, KW, DH, DW, true, PADW> ret;
|
| 80 |
+
ret.kH = this->kH;
|
| 81 |
+
ret.kW = this->kW;
|
| 82 |
+
ret.dH = this->dH;
|
| 83 |
+
ret.dW = this->dW;
|
| 84 |
+
ret.padH = value;
|
| 85 |
+
ret.padW = this->padW;
|
| 86 |
+
return ret;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
precompute_out<KH, KW, DH, DW, PADH, true> set_padW(int64_t value) {
|
| 91 |
+
static_assert(PADW == false, "padW already set");
|
| 92 |
+
precompute_out<KH, KW, DH, DW, PADH, true> ret;
|
| 93 |
+
ret.kH = this->kH;
|
| 94 |
+
ret.kW = this->kW;
|
| 95 |
+
ret.dH = this->dH;
|
| 96 |
+
ret.dW = this->dW;
|
| 97 |
+
ret.padH = this->padH;
|
| 98 |
+
ret.padW = value;
|
| 99 |
+
return ret;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
int64_t kH;
|
| 103 |
+
int64_t kW;
|
| 104 |
+
int64_t dH;
|
| 105 |
+
int64_t dW;
|
| 106 |
+
int64_t padH;
|
| 107 |
+
int64_t padW;
|
| 108 |
+
};
|
| 109 |
+
using meta_return_ty = precompute_out <true, true, true, true, true, true>;
|
| 110 |
+
meta_return_ty meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override);
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
} // namespace native
|
| 114 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups);
|
| 21 |
+
TORCH_API at::Tensor channel_shuffle_symint(const at::Tensor & self, c10::SymInt groups);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|