Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h +173 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Backend.h +2 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/BlasBackend.h +27 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h +33 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h +49 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions_inl.h +623 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h +24 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h +94 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h +29 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h +553 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h +29 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h +502 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h +25 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Context.h +610 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h +25 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h +41 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h +808 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Dispatch_v2.h +186 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h +527 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Formatting.h +1 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h +15 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h +25 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h +183 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h +143 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h +29 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h +443 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h +214 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/NativeFunctions.h +1344 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h +286 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h +69 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h +187 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h +17 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/PadNd.h +28 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h +93 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h +158 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h +13 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h +15 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h +0 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Scalar.h +3 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ScalarOps.h +53 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h +13 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Storage.h +2 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/StorageUtils.h +49 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h +737 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h +1028 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h +137 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h +75 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h +2 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h +21 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TypeDefault.h +30 -0
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <ATen/Config.h>
#include <c10/core/DeviceType.h>
#include <c10/core/ScalarType.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e4m3fnuz.h>
#include <c10/util/Float8_e5m2.h>
#include <c10/util/Float8_e5m2fnuz.h>
#include <c10/util/Half.h>

// Defines the accumulation type for a scalar type.
// Example:
//   using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
//
// Accumulation types are an important concept in numeric computing
// because you frequently want to perform intermediate computations
// at a higher precision than the input and output precision, to avoid
// compounding internal rounding errors. Accumulation is the most
// well-known intermediate computation (it is of great importance for
// sum reduction and matrix multiply, for example), but in PyTorch
// acc_type ends up getting used for all sorts of other intermediate
// computations, so it perhaps would be more accurately (ahem) called an
// "accurate" type. acc_type is especially important for reduced
// precision operations like float16 and bfloat16, where relatively
// benign looking inputs can easily end up overflowing/underflowing.
//
// acc_type is parametrized by whether or not you are running on CUDA
// or not, because on CUDA double precision operations are expensive
// and so by default, we don't actually want to use double as an
// acc_type on CUDA. A lot of things are typed out below, but
// basically, the table is generated by a few rules:
//
//   If bool:
//       Use 'bool' as acc_type.
//   If floating point:
//       If CUDA, use 'float' as acc_type (unless scalar_t is double),
//       otherwise (CPU) use 'double'
//   If integral:
//       Use 'int64_t' as acc_type
//
// You're not forced to use this template; if you happen to know
// something specific about your use case, you can specify your own
// desired behavior. This template, however, will give you a reasonable
// default that will work for all dtypes supported in PyTorch.

#if defined(__CUDACC__)
#include <cuda.h>
#include <cuda_fp16.h>
#elif defined(__HIPCC__)
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#endif

namespace at {

// Primary template: specialized per (scalar type, device) pair below.
template <typename T, c10::DeviceType D>
struct AccumulateTypeDevice {};

// Boolean front-end: maps is_cuda=true/false onto the CUDA/CPU device tables.
template <typename T, bool>
struct AccumulateType {};

template <typename T>
struct AccumulateType<T, false> {
  using type = typename AccumulateTypeDevice<T, c10::DeviceType::CPU>::type;
};

template <typename T>
struct AccumulateType<T, true> {
  using type = typename AccumulateTypeDevice<T, c10::DeviceType::CUDA>::type;
};

template <typename T, c10::DeviceType device>
using acc_type_device = typename AccumulateTypeDevice<T, device>::type;

template <typename T, bool is_cuda>
using acc_type = typename AccumulateType<T, is_cuda>::type;

// Helper macros that stamp out one AccumulateTypeDevice specialization each.
#define ACC_TYPE(t, acc_t, device_type)         \
  template <>                                   \
  struct AccumulateTypeDevice<t, device_type> { \
    using type = acc_t;                         \
  };
#define MPS_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::MPS)
#define XPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::XPU)
#define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA)
#define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU)

MPS_ACC_TYPE(BFloat16, float);
MPS_ACC_TYPE(Half, float);
MPS_ACC_TYPE(Float8_e5m2, float);
MPS_ACC_TYPE(Float8_e4m3fn, float);
MPS_ACC_TYPE(Float8_e5m2fnuz, float);
MPS_ACC_TYPE(Float8_e4m3fnuz, float);
MPS_ACC_TYPE(float, float);
// NOTE: on MPS even double accumulates in float (and complex<double> in
// complex<float>), unlike CPU/CUDA/XPU below.
MPS_ACC_TYPE(double, float);
MPS_ACC_TYPE(int8_t, int64_t);
MPS_ACC_TYPE(uint8_t, int64_t);
MPS_ACC_TYPE(char, int64_t);
MPS_ACC_TYPE(int16_t, int64_t);
MPS_ACC_TYPE(int32_t, int64_t);
MPS_ACC_TYPE(int64_t, int64_t);
MPS_ACC_TYPE(bool, bool);
MPS_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
MPS_ACC_TYPE(c10::complex<float>, c10::complex<float>);
MPS_ACC_TYPE(c10::complex<double>, c10::complex<float>);

XPU_ACC_TYPE(BFloat16, float);
XPU_ACC_TYPE(Half, float);
XPU_ACC_TYPE(Float8_e5m2, float);
XPU_ACC_TYPE(Float8_e4m3fn, float);
XPU_ACC_TYPE(Float8_e5m2fnuz, float);
XPU_ACC_TYPE(Float8_e4m3fnuz, float);
XPU_ACC_TYPE(float, float);
XPU_ACC_TYPE(double, double);
XPU_ACC_TYPE(int8_t, int64_t);
XPU_ACC_TYPE(uint8_t, int64_t);
XPU_ACC_TYPE(char, int64_t);
XPU_ACC_TYPE(int16_t, int64_t);
XPU_ACC_TYPE(int32_t, int64_t);
XPU_ACC_TYPE(int64_t, int64_t);
XPU_ACC_TYPE(bool, bool);
XPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
XPU_ACC_TYPE(c10::complex<float>, c10::complex<float>);
XPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);

#if defined(__CUDACC__) || defined(__HIPCC__)
// The vendor `half` type only exists when compiling device code.
CUDA_ACC_TYPE(half, float);
#endif
CUDA_ACC_TYPE(BFloat16, float);
CUDA_ACC_TYPE(Half, float);
CUDA_ACC_TYPE(Float8_e5m2, float);
CUDA_ACC_TYPE(Float8_e4m3fn, float);
CUDA_ACC_TYPE(Float8_e5m2fnuz, float);
CUDA_ACC_TYPE(Float8_e4m3fnuz, float);
CUDA_ACC_TYPE(float, float);
CUDA_ACC_TYPE(double, double);
CUDA_ACC_TYPE(int8_t, int64_t);
CUDA_ACC_TYPE(uint8_t, int64_t);
CUDA_ACC_TYPE(char, int64_t);
CUDA_ACC_TYPE(int16_t, int64_t);
CUDA_ACC_TYPE(int32_t, int64_t);
CUDA_ACC_TYPE(int64_t, int64_t);
CUDA_ACC_TYPE(bool, bool);
CUDA_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
CUDA_ACC_TYPE(c10::complex<float>, c10::complex<float>);
CUDA_ACC_TYPE(c10::complex<double>, c10::complex<double>);

CPU_ACC_TYPE(BFloat16, float);
CPU_ACC_TYPE(Half, float);
CPU_ACC_TYPE(Float8_e5m2, float);
CPU_ACC_TYPE(Float8_e4m3fn, float);
CPU_ACC_TYPE(Float8_e5m2fnuz, float);
CPU_ACC_TYPE(Float8_e4m3fnuz, float);
CPU_ACC_TYPE(float, double);
CPU_ACC_TYPE(double, double);
CPU_ACC_TYPE(int8_t, int64_t);
CPU_ACC_TYPE(uint8_t, int64_t);
CPU_ACC_TYPE(char, int64_t);
CPU_ACC_TYPE(int16_t, int64_t);
CPU_ACC_TYPE(int32_t, int64_t);
CPU_ACC_TYPE(int64_t, int64_t);
CPU_ACC_TYPE(bool, bool);
CPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
CPU_ACC_TYPE(c10::complex<float>, c10::complex<double>);
CPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);

// Runtime equivalents of the compile-time tables above.
TORCH_API c10::ScalarType toAccumulateType(
    c10::ScalarType type,
    c10::DeviceType device);
TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);

} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Backend.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// This header only re-exports <c10/core/Backend.h>; it contains no
// declarations of its own.
#include <c10/core/Backend.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/BlasBackend.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <c10/util/Exception.h>

#include <ostream>
#include <string>

namespace at {

// Selects which BLAS implementation backs ATen: cuBLAS or cuBLASLt.
enum class BlasBackend : int8_t { Cublas, Cublaslt };

// Returns the fully qualified, human-readable name of `backend`.
// Raises (via TORCH_CHECK) on a value outside the enum.
inline std::string BlasBackendToString(at::BlasBackend backend) {
  switch (backend) {
    case BlasBackend::Cublas:
      return "at::BlasBackend::Cublas";
    case BlasBackend::Cublaslt:
      return "at::BlasBackend::Cublaslt";
    default:
      TORCH_CHECK(false, "Unknown blas backend");
  }
}

// Stream insertion in terms of BlasBackendToString, so logging a
// BlasBackend prints its qualified name.
inline std::ostream& operator<<(std::ostream& stream, at::BlasBackend backend) {
  return stream << BlasBackendToString(backend);
}

} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <c10/core/Allocator.h>
#include <c10/util/Exception.h>

#include <cstddef>    // ptrdiff_t
#include <functional> // std::function (previously only a transitive include)

// This file creates a fake allocator that just throws exceptions if
// it is actually used.

// The state passed to the allocator is the std::function<void(void*)> that is
// called when the blob is released by ATen.

namespace at {

// NOTE(review): the original declarations omitted a return type entirely
// ("implicit int"), which is ill-formed C++; `void` has been added to all
// three hooks. No caller uses a return value, so this is interface-compatible.

// Resizing a tensor view of an external blob is unsupported: always throws.
static void cpu_fixed_malloc(void*, ptrdiff_t) {
  AT_ERROR("attempting to resize a tensor view of an external blob");
}

// Same contract as cpu_fixed_malloc: reallocation always throws.
static void cpu_fixed_realloc(void*, void*, ptrdiff_t) {
  AT_ERROR("attempting to resize a tensor view of an external blob");
}

// Invokes the release callback stashed in `state` on `allocation`, then
// destroys the heap-allocated callback itself (ownership transferred here).
static void cpu_fixed_free(void* state, void* allocation) {
  auto on_release = static_cast<std::function<void(void*)>*>(state);
  (*on_release)(allocation);
  delete on_release;
}

// NOTE(review): brace-initializing `Allocator` from three function pointers
// assumes an aggregate-style Allocator with exactly these members — confirm
// against the c10::Allocator definition shipped in this tree.
static Allocator CPU_fixed_allocator = {
    cpu_fixed_malloc,
    cpu_fixed_realloc,
    cpu_fixed_free};

} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <ATen/core/Generator.h>
#include <ATen/core/MT19937RNGEngine.h>
#include <c10/core/GeneratorImpl.h>
#include <optional>

namespace at {

// CPU random number generator state, backed by an at::mt19937 (Mersenne
// Twister) engine and implementing the c10::GeneratorImpl interface.
struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
  // Constructors
  CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
  ~CPUGeneratorImpl() override = default;

  // CPUGeneratorImpl methods
  // Deep copy of this generator (covariant convenience over clone_impl).
  std::shared_ptr<CPUGeneratorImpl> clone() const;
  void set_current_seed(uint64_t seed) override;
  void set_offset(uint64_t offset) override;
  uint64_t get_offset() const override;
  uint64_t current_seed() const override;
  uint64_t seed() override;
  // Full RNG state round-trip, serialized through a TensorImpl.
  void set_state(const c10::TensorImpl& new_state) override;
  c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
  static c10::DeviceType device_type();
  // Raw draws from the engine: 32-bit and 64-bit.
  uint32_t random();
  uint64_t random64();
  // Accessors for a cached normal sample — presumably the spare value from
  // paired normal generation; TODO(review): confirm against the .cpp.
  std::optional<float> next_float_normal_sample();
  std::optional<double> next_double_normal_sample();
  void set_next_float_normal_sample(std::optional<float> randn);
  void set_next_double_normal_sample(std::optional<double> randn);
  at::mt19937 engine();
  void set_engine(at::mt19937 engine);

 private:
  CPUGeneratorImpl* clone_impl() const override;
  at::mt19937 engine_;                             // underlying MT19937 state
  std::optional<float> next_float_normal_sample_;  // cached float sample
  std::optional<double> next_double_normal_sample_; // cached double sample
};

namespace detail {

// Process-wide default CPU generator, and a factory for fresh ones.
TORCH_API const Generator& getDefaultCPUGenerator();
TORCH_API Generator
createCPUGenerator(uint64_t seed_val = default_rng_seed_val);

} // namespace detail

} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions_inl.h
ADDED
|
@@ -0,0 +1,623 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 12 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
| 13 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
| 14 |
+
Consider including a specific operator from \
|
| 15 |
+
<ATen/ops/{my_operator}_cuda_dispatch.h>. \
|
| 16 |
+
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <ATen/ops/_adaptive_avg_pool2d_cuda_dispatch.h>
|
| 20 |
+
#include <ATen/ops/_adaptive_avg_pool2d_backward_cuda_dispatch.h>
|
| 21 |
+
#include <ATen/ops/_adaptive_avg_pool3d_cuda_dispatch.h>
|
| 22 |
+
#include <ATen/ops/_adaptive_avg_pool3d_backward_cuda_dispatch.h>
|
| 23 |
+
#include <ATen/ops/_addmm_activation_cuda_dispatch.h>
|
| 24 |
+
#include <ATen/ops/_aminmax_cuda_dispatch.h>
|
| 25 |
+
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cuda_dispatch.h>
|
| 26 |
+
#include <ATen/ops/_amp_update_scale_cuda_dispatch.h>
|
| 27 |
+
#include <ATen/ops/_assert_async_cuda_dispatch.h>
|
| 28 |
+
#include <ATen/ops/_batch_norm_with_update_cuda_dispatch.h>
|
| 29 |
+
#include <ATen/ops/_cdist_backward_cuda_dispatch.h>
|
| 30 |
+
#include <ATen/ops/_cdist_forward_cuda_dispatch.h>
|
| 31 |
+
#include <ATen/ops/_cholesky_solve_helper_cuda_dispatch.h>
|
| 32 |
+
#include <ATen/ops/_chunk_cat_cuda_dispatch.h>
|
| 33 |
+
#include <ATen/ops/_compute_linear_combination_cuda_dispatch.h>
|
| 34 |
+
#include <ATen/ops/_conv_depthwise2d_cuda_dispatch.h>
|
| 35 |
+
#include <ATen/ops/_convert_indices_from_coo_to_csr_cuda_dispatch.h>
|
| 36 |
+
#include <ATen/ops/_convert_indices_from_csr_to_coo_cuda_dispatch.h>
|
| 37 |
+
#include <ATen/ops/_convert_weight_to_int4pack_cuda_dispatch.h>
|
| 38 |
+
#include <ATen/ops/_cslt_compress_cuda_dispatch.h>
|
| 39 |
+
#include <ATen/ops/_cslt_sparse_mm_cuda_dispatch.h>
|
| 40 |
+
#include <ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h>
|
| 41 |
+
#include <ATen/ops/_ctc_loss_cuda_dispatch.h>
|
| 42 |
+
#include <ATen/ops/_ctc_loss_backward_cuda_dispatch.h>
|
| 43 |
+
#include <ATen/ops/_cudnn_ctc_loss_cuda_dispatch.h>
|
| 44 |
+
#include <ATen/ops/_cudnn_init_dropout_state_cuda_dispatch.h>
|
| 45 |
+
#include <ATen/ops/_cudnn_rnn_cuda_dispatch.h>
|
| 46 |
+
#include <ATen/ops/_cudnn_rnn_backward_cuda_dispatch.h>
|
| 47 |
+
#include <ATen/ops/_cudnn_rnn_flatten_weight_cuda_dispatch.h>
|
| 48 |
+
#include <ATen/ops/_cummax_helper_cuda_dispatch.h>
|
| 49 |
+
#include <ATen/ops/_cummin_helper_cuda_dispatch.h>
|
| 50 |
+
#include <ATen/ops/_dirichlet_grad_cuda_dispatch.h>
|
| 51 |
+
#include <ATen/ops/_efficient_attention_backward_cuda_dispatch.h>
|
| 52 |
+
#include <ATen/ops/_efficient_attention_forward_cuda_dispatch.h>
|
| 53 |
+
#include <ATen/ops/_efficientzerotensor_cuda_dispatch.h>
|
| 54 |
+
#include <ATen/ops/_embedding_bag_cuda_dispatch.h>
|
| 55 |
+
#include <ATen/ops/_embedding_bag_backward_cuda_dispatch.h>
|
| 56 |
+
#include <ATen/ops/_embedding_bag_dense_backward_cuda_dispatch.h>
|
| 57 |
+
#include <ATen/ops/_embedding_bag_forward_only_cuda_dispatch.h>
|
| 58 |
+
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cuda_dispatch.h>
|
| 59 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cuda_dispatch.h>
|
| 60 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cuda_dispatch.h>
|
| 61 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cuda_dispatch.h>
|
| 62 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cuda_dispatch.h>
|
| 63 |
+
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cuda_dispatch.h>
|
| 64 |
+
#include <ATen/ops/_fft_c2c_cuda_dispatch.h>
|
| 65 |
+
#include <ATen/ops/_fft_c2r_cuda_dispatch.h>
|
| 66 |
+
#include <ATen/ops/_fft_r2c_cuda_dispatch.h>
|
| 67 |
+
#include <ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h>
|
| 68 |
+
#include <ATen/ops/_flash_attention_backward_cuda_dispatch.h>
|
| 69 |
+
#include <ATen/ops/_flash_attention_forward_cuda_dispatch.h>
|
| 70 |
+
#include <ATen/ops/_foreach_abs_cuda_dispatch.h>
|
| 71 |
+
#include <ATen/ops/_foreach_acos_cuda_dispatch.h>
|
| 72 |
+
#include <ATen/ops/_foreach_add_cuda_dispatch.h>
|
| 73 |
+
#include <ATen/ops/_foreach_addcdiv_cuda_dispatch.h>
|
| 74 |
+
#include <ATen/ops/_foreach_addcmul_cuda_dispatch.h>
|
| 75 |
+
#include <ATen/ops/_foreach_asin_cuda_dispatch.h>
|
| 76 |
+
#include <ATen/ops/_foreach_atan_cuda_dispatch.h>
|
| 77 |
+
#include <ATen/ops/_foreach_ceil_cuda_dispatch.h>
|
| 78 |
+
#include <ATen/ops/_foreach_clamp_max_cuda_dispatch.h>
|
| 79 |
+
#include <ATen/ops/_foreach_clamp_min_cuda_dispatch.h>
|
| 80 |
+
#include <ATen/ops/_foreach_copy_cuda_dispatch.h>
|
| 81 |
+
#include <ATen/ops/_foreach_cos_cuda_dispatch.h>
|
| 82 |
+
#include <ATen/ops/_foreach_cosh_cuda_dispatch.h>
|
| 83 |
+
#include <ATen/ops/_foreach_div_cuda_dispatch.h>
|
| 84 |
+
#include <ATen/ops/_foreach_erf_cuda_dispatch.h>
|
| 85 |
+
#include <ATen/ops/_foreach_erfc_cuda_dispatch.h>
|
| 86 |
+
#include <ATen/ops/_foreach_exp_cuda_dispatch.h>
|
| 87 |
+
#include <ATen/ops/_foreach_expm1_cuda_dispatch.h>
|
| 88 |
+
#include <ATen/ops/_foreach_floor_cuda_dispatch.h>
|
| 89 |
+
#include <ATen/ops/_foreach_frac_cuda_dispatch.h>
|
| 90 |
+
#include <ATen/ops/_foreach_lerp_cuda_dispatch.h>
|
| 91 |
+
#include <ATen/ops/_foreach_lgamma_cuda_dispatch.h>
|
| 92 |
+
#include <ATen/ops/_foreach_log_cuda_dispatch.h>
|
| 93 |
+
#include <ATen/ops/_foreach_log10_cuda_dispatch.h>
|
| 94 |
+
#include <ATen/ops/_foreach_log1p_cuda_dispatch.h>
|
| 95 |
+
#include <ATen/ops/_foreach_log2_cuda_dispatch.h>
|
| 96 |
+
#include <ATen/ops/_foreach_max_cuda_dispatch.h>
|
| 97 |
+
#include <ATen/ops/_foreach_maximum_cuda_dispatch.h>
|
| 98 |
+
#include <ATen/ops/_foreach_minimum_cuda_dispatch.h>
|
| 99 |
+
#include <ATen/ops/_foreach_mul_cuda_dispatch.h>
|
| 100 |
+
#include <ATen/ops/_foreach_neg_cuda_dispatch.h>
|
| 101 |
+
#include <ATen/ops/_foreach_norm_cuda_dispatch.h>
|
| 102 |
+
#include <ATen/ops/_foreach_pow_cuda_dispatch.h>
|
| 103 |
+
#include <ATen/ops/_foreach_reciprocal_cuda_dispatch.h>
|
| 104 |
+
#include <ATen/ops/_foreach_round_cuda_dispatch.h>
|
| 105 |
+
#include <ATen/ops/_foreach_sigmoid_cuda_dispatch.h>
|
| 106 |
+
#include <ATen/ops/_foreach_sign_cuda_dispatch.h>
|
| 107 |
+
#include <ATen/ops/_foreach_sin_cuda_dispatch.h>
|
| 108 |
+
#include <ATen/ops/_foreach_sinh_cuda_dispatch.h>
|
| 109 |
+
#include <ATen/ops/_foreach_sqrt_cuda_dispatch.h>
|
| 110 |
+
#include <ATen/ops/_foreach_sub_cuda_dispatch.h>
|
| 111 |
+
#include <ATen/ops/_foreach_tan_cuda_dispatch.h>
|
| 112 |
+
#include <ATen/ops/_foreach_tanh_cuda_dispatch.h>
|
| 113 |
+
#include <ATen/ops/_foreach_trunc_cuda_dispatch.h>
|
| 114 |
+
#include <ATen/ops/_foreach_zero_cuda_dispatch.h>
|
| 115 |
+
#include <ATen/ops/_fused_adam_cuda_dispatch.h>
|
| 116 |
+
#include <ATen/ops/_fused_adamw_cuda_dispatch.h>
|
| 117 |
+
#include <ATen/ops/_fused_dropout_cuda_dispatch.h>
|
| 118 |
+
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cuda_dispatch.h>
|
| 119 |
+
#include <ATen/ops/_fused_sdp_choice_cuda_dispatch.h>
|
| 120 |
+
#include <ATen/ops/_fused_sgd_cuda_dispatch.h>
|
| 121 |
+
#include <ATen/ops/_index_put_impl_cuda_dispatch.h>
|
| 122 |
+
#include <ATen/ops/_int_mm_cuda_dispatch.h>
|
| 123 |
+
#include <ATen/ops/_jagged_to_padded_dense_forward_cuda_dispatch.h>
|
| 124 |
+
#include <ATen/ops/_linalg_det_cuda_dispatch.h>
|
| 125 |
+
#include <ATen/ops/_linalg_eigh_cuda_dispatch.h>
|
| 126 |
+
#include <ATen/ops/_linalg_eigvals_cuda_dispatch.h>
|
| 127 |
+
#include <ATen/ops/_linalg_slogdet_cuda_dispatch.h>
|
| 128 |
+
#include <ATen/ops/_linalg_solve_ex_cuda_dispatch.h>
|
| 129 |
+
#include <ATen/ops/_linalg_svd_cuda_dispatch.h>
|
| 130 |
+
#include <ATen/ops/_local_scalar_dense_cuda_dispatch.h>
|
| 131 |
+
#include <ATen/ops/_log_softmax_cuda_dispatch.h>
|
| 132 |
+
#include <ATen/ops/_log_softmax_backward_data_cuda_dispatch.h>
|
| 133 |
+
#include <ATen/ops/_logcumsumexp_cuda_dispatch.h>
|
| 134 |
+
#include <ATen/ops/_make_per_channel_quantized_tensor_cuda_dispatch.h>
|
| 135 |
+
#include <ATen/ops/_make_per_tensor_quantized_tensor_cuda_dispatch.h>
|
| 136 |
+
#include <ATen/ops/_masked_scale_cuda_dispatch.h>
|
| 137 |
+
#include <ATen/ops/_masked_softmax_cuda_dispatch.h>
|
| 138 |
+
#include <ATen/ops/_masked_softmax_backward_cuda_dispatch.h>
|
| 139 |
+
#include <ATen/ops/_mixed_dtypes_linear_cuda_dispatch.h>
|
| 140 |
+
#include <ATen/ops/_native_batch_norm_legit_cuda_dispatch.h>
|
| 141 |
+
#include <ATen/ops/_native_multi_head_attention_cuda_dispatch.h>
|
| 142 |
+
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_cuda_dispatch.h>
|
| 143 |
+
#include <ATen/ops/_nested_from_padded_cuda_dispatch.h>
|
| 144 |
+
#include <ATen/ops/_nested_tensor_from_mask_cuda_dispatch.h>
|
| 145 |
+
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cuda_dispatch.h>
|
| 146 |
+
#include <ATen/ops/_nested_view_from_buffer_cuda_dispatch.h>
|
| 147 |
+
#include <ATen/ops/_padded_dense_to_jagged_forward_cuda_dispatch.h>
|
| 148 |
+
#include <ATen/ops/_pdist_backward_cuda_dispatch.h>
|
| 149 |
+
#include <ATen/ops/_pdist_forward_cuda_dispatch.h>
|
| 150 |
+
#include <ATen/ops/_prelu_kernel_cuda_dispatch.h>
|
| 151 |
+
#include <ATen/ops/_prelu_kernel_backward_cuda_dispatch.h>
|
| 152 |
+
#include <ATen/ops/_reshape_alias_cuda_dispatch.h>
|
| 153 |
+
#include <ATen/ops/_sample_dirichlet_cuda_dispatch.h>
|
| 154 |
+
#include <ATen/ops/_scaled_dot_product_cudnn_attention_cuda_dispatch.h>
|
| 155 |
+
#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward_cuda_dispatch.h>
|
| 156 |
+
#include <ATen/ops/_scaled_dot_product_efficient_attention_cuda_dispatch.h>
|
| 157 |
+
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h>
|
| 158 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_cuda_dispatch.h>
|
| 159 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_backward_cuda_dispatch.h>
|
| 160 |
+
#include <ATen/ops/_scaled_mm_cuda_dispatch.h>
|
| 161 |
+
#include <ATen/ops/_segment_reduce_backward_cuda_dispatch.h>
|
| 162 |
+
#include <ATen/ops/_slow_conv2d_backward_cuda_dispatch.h>
|
| 163 |
+
#include <ATen/ops/_slow_conv2d_forward_cuda_dispatch.h>
|
| 164 |
+
#include <ATen/ops/_softmax_cuda_dispatch.h>
|
| 165 |
+
#include <ATen/ops/_softmax_backward_data_cuda_dispatch.h>
|
| 166 |
+
#include <ATen/ops/_sparse_semi_structured_addmm_cuda_dispatch.h>
|
| 167 |
+
#include <ATen/ops/_sparse_semi_structured_apply_cuda_dispatch.h>
|
| 168 |
+
#include <ATen/ops/_sparse_semi_structured_apply_dense_cuda_dispatch.h>
|
| 169 |
+
#include <ATen/ops/_sparse_semi_structured_linear_cuda_dispatch.h>
|
| 170 |
+
#include <ATen/ops/_sparse_semi_structured_mm_cuda_dispatch.h>
|
| 171 |
+
#include <ATen/ops/_sparse_semi_structured_tile_cuda_dispatch.h>
|
| 172 |
+
#include <ATen/ops/_standard_gamma_cuda_dispatch.h>
|
| 173 |
+
#include <ATen/ops/_standard_gamma_grad_cuda_dispatch.h>
|
| 174 |
+
#include <ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h>
|
| 175 |
+
#include <ATen/ops/_thnn_fused_gru_cell_backward_cuda_dispatch.h>
|
| 176 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_cuda_dispatch.h>
|
| 177 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_cuda_dispatch.h>
|
| 178 |
+
#include <ATen/ops/_to_sparse_cuda_dispatch.h>
|
| 179 |
+
#include <ATen/ops/_to_sparse_bsc_cuda_dispatch.h>
|
| 180 |
+
#include <ATen/ops/_to_sparse_bsr_cuda_dispatch.h>
|
| 181 |
+
#include <ATen/ops/_to_sparse_csc_cuda_dispatch.h>
|
| 182 |
+
#include <ATen/ops/_to_sparse_csr_cuda_dispatch.h>
|
| 183 |
+
#include <ATen/ops/_to_sparse_semi_structured_cuda_dispatch.h>
|
| 184 |
+
#include <ATen/ops/_transform_bias_rescale_qkv_cuda_dispatch.h>
|
| 185 |
+
#include <ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h>
|
| 186 |
+
#include <ATen/ops/_triton_multi_head_attention_cuda_dispatch.h>
|
| 187 |
+
#include <ATen/ops/_triton_scaled_dot_attention_cuda_dispatch.h>
|
| 188 |
+
#include <ATen/ops/_unique_cuda_dispatch.h>
|
| 189 |
+
#include <ATen/ops/_unique2_cuda_dispatch.h>
|
| 190 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_cuda_dispatch.h>
|
| 191 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h>
|
| 192 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_cuda_dispatch.h>
|
| 193 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_backward_cuda_dispatch.h>
|
| 194 |
+
#include <ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h>
|
| 195 |
+
#include <ATen/ops/_upsample_nearest_exact1d_backward_cuda_dispatch.h>
|
| 196 |
+
#include <ATen/ops/_upsample_nearest_exact2d_cuda_dispatch.h>
|
| 197 |
+
#include <ATen/ops/_upsample_nearest_exact2d_backward_cuda_dispatch.h>
|
| 198 |
+
#include <ATen/ops/_upsample_nearest_exact3d_cuda_dispatch.h>
|
| 199 |
+
#include <ATen/ops/_upsample_nearest_exact3d_backward_cuda_dispatch.h>
|
| 200 |
+
#include <ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h>
|
| 201 |
+
#include <ATen/ops/_validate_compressed_sparse_indices_cuda_dispatch.h>
|
| 202 |
+
#include <ATen/ops/_weight_int4pack_mm_cuda_dispatch.h>
|
| 203 |
+
#include <ATen/ops/_weight_norm_interface_cuda_dispatch.h>
|
| 204 |
+
#include <ATen/ops/_weight_norm_interface_backward_cuda_dispatch.h>
|
| 205 |
+
#include <ATen/ops/abs_cuda_dispatch.h>
|
| 206 |
+
#include <ATen/ops/acos_cuda_dispatch.h>
|
| 207 |
+
#include <ATen/ops/acosh_cuda_dispatch.h>
|
| 208 |
+
#include <ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h>
|
| 209 |
+
#include <ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h>
|
| 210 |
+
#include <ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h>
|
| 211 |
+
#include <ATen/ops/adaptive_max_pool2d_cuda_dispatch.h>
|
| 212 |
+
#include <ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h>
|
| 213 |
+
#include <ATen/ops/adaptive_max_pool3d_cuda_dispatch.h>
|
| 214 |
+
#include <ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h>
|
| 215 |
+
#include <ATen/ops/add_cuda_dispatch.h>
|
| 216 |
+
#include <ATen/ops/addbmm_cuda_dispatch.h>
|
| 217 |
+
#include <ATen/ops/addcdiv_cuda_dispatch.h>
|
| 218 |
+
#include <ATen/ops/addcmul_cuda_dispatch.h>
|
| 219 |
+
#include <ATen/ops/addmm_cuda_dispatch.h>
|
| 220 |
+
#include <ATen/ops/addmv_cuda_dispatch.h>
|
| 221 |
+
#include <ATen/ops/addr_cuda_dispatch.h>
|
| 222 |
+
#include <ATen/ops/all_cuda_dispatch.h>
|
| 223 |
+
#include <ATen/ops/amax_cuda_dispatch.h>
|
| 224 |
+
#include <ATen/ops/amin_cuda_dispatch.h>
|
| 225 |
+
#include <ATen/ops/aminmax_cuda_dispatch.h>
|
| 226 |
+
#include <ATen/ops/angle_cuda_dispatch.h>
|
| 227 |
+
#include <ATen/ops/any_cuda_dispatch.h>
|
| 228 |
+
#include <ATen/ops/arange_cuda_dispatch.h>
|
| 229 |
+
#include <ATen/ops/argmax_cuda_dispatch.h>
|
| 230 |
+
#include <ATen/ops/argmin_cuda_dispatch.h>
|
| 231 |
+
#include <ATen/ops/as_strided_cuda_dispatch.h>
|
| 232 |
+
#include <ATen/ops/asin_cuda_dispatch.h>
|
| 233 |
+
#include <ATen/ops/asinh_cuda_dispatch.h>
|
| 234 |
+
#include <ATen/ops/atan_cuda_dispatch.h>
|
| 235 |
+
#include <ATen/ops/atan2_cuda_dispatch.h>
|
| 236 |
+
#include <ATen/ops/atanh_cuda_dispatch.h>
|
| 237 |
+
#include <ATen/ops/avg_pool2d_cuda_dispatch.h>
|
| 238 |
+
#include <ATen/ops/avg_pool2d_backward_cuda_dispatch.h>
|
| 239 |
+
#include <ATen/ops/avg_pool3d_cuda_dispatch.h>
|
| 240 |
+
#include <ATen/ops/avg_pool3d_backward_cuda_dispatch.h>
|
| 241 |
+
#include <ATen/ops/baddbmm_cuda_dispatch.h>
|
| 242 |
+
#include <ATen/ops/batch_norm_backward_cuda_dispatch.h>
|
| 243 |
+
#include <ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h>
|
| 244 |
+
#include <ATen/ops/batch_norm_backward_reduce_cuda_dispatch.h>
|
| 245 |
+
#include <ATen/ops/batch_norm_elemt_cuda_dispatch.h>
|
| 246 |
+
#include <ATen/ops/batch_norm_gather_stats_cuda_dispatch.h>
|
| 247 |
+
#include <ATen/ops/batch_norm_gather_stats_with_counts_cuda_dispatch.h>
|
| 248 |
+
#include <ATen/ops/batch_norm_stats_cuda_dispatch.h>
|
| 249 |
+
#include <ATen/ops/batch_norm_update_stats_cuda_dispatch.h>
|
| 250 |
+
#include <ATen/ops/bernoulli_cuda_dispatch.h>
|
| 251 |
+
#include <ATen/ops/binary_cross_entropy_cuda_dispatch.h>
|
| 252 |
+
#include <ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h>
|
| 253 |
+
#include <ATen/ops/bincount_cuda_dispatch.h>
|
| 254 |
+
#include <ATen/ops/binomial_cuda_dispatch.h>
|
| 255 |
+
#include <ATen/ops/bitwise_and_cuda_dispatch.h>
|
| 256 |
+
#include <ATen/ops/bitwise_left_shift_cuda_dispatch.h>
|
| 257 |
+
#include <ATen/ops/bitwise_not_cuda_dispatch.h>
|
| 258 |
+
#include <ATen/ops/bitwise_or_cuda_dispatch.h>
|
| 259 |
+
#include <ATen/ops/bitwise_right_shift_cuda_dispatch.h>
|
| 260 |
+
#include <ATen/ops/bitwise_xor_cuda_dispatch.h>
|
| 261 |
+
#include <ATen/ops/bmm_cuda_dispatch.h>
|
| 262 |
+
#include <ATen/ops/bucketize_cuda_dispatch.h>
|
| 263 |
+
#include <ATen/ops/cat_cuda_dispatch.h>
|
| 264 |
+
#include <ATen/ops/cauchy_cuda_dispatch.h>
|
| 265 |
+
#include <ATen/ops/ceil_cuda_dispatch.h>
|
| 266 |
+
#include <ATen/ops/channel_shuffle_cuda_dispatch.h>
|
| 267 |
+
#include <ATen/ops/cholesky_cuda_dispatch.h>
|
| 268 |
+
#include <ATen/ops/cholesky_inverse_cuda_dispatch.h>
|
| 269 |
+
#include <ATen/ops/clamp_cuda_dispatch.h>
|
| 270 |
+
#include <ATen/ops/clamp_max_cuda_dispatch.h>
|
| 271 |
+
#include <ATen/ops/clamp_min_cuda_dispatch.h>
|
| 272 |
+
#include <ATen/ops/col2im_cuda_dispatch.h>
|
| 273 |
+
#include <ATen/ops/complex_cuda_dispatch.h>
|
| 274 |
+
#include <ATen/ops/conj_physical_cuda_dispatch.h>
|
| 275 |
+
#include <ATen/ops/conv_depthwise3d_cuda_dispatch.h>
|
| 276 |
+
#include <ATen/ops/convolution_backward_cuda_dispatch.h>
|
| 277 |
+
#include <ATen/ops/copysign_cuda_dispatch.h>
|
| 278 |
+
#include <ATen/ops/cos_cuda_dispatch.h>
|
| 279 |
+
#include <ATen/ops/cosh_cuda_dispatch.h>
|
| 280 |
+
#include <ATen/ops/count_nonzero_cuda_dispatch.h>
|
| 281 |
+
#include <ATen/ops/cudnn_affine_grid_generator_cuda_dispatch.h>
|
| 282 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward_cuda_dispatch.h>
|
| 283 |
+
#include <ATen/ops/cudnn_batch_norm_cuda_dispatch.h>
|
| 284 |
+
#include <ATen/ops/cudnn_batch_norm_backward_cuda_dispatch.h>
|
| 285 |
+
#include <ATen/ops/cudnn_convolution_cuda_dispatch.h>
|
| 286 |
+
#include <ATen/ops/cudnn_convolution_add_relu_cuda_dispatch.h>
|
| 287 |
+
#include <ATen/ops/cudnn_convolution_relu_cuda_dispatch.h>
|
| 288 |
+
#include <ATen/ops/cudnn_convolution_transpose_cuda_dispatch.h>
|
| 289 |
+
#include <ATen/ops/cudnn_grid_sampler_cuda_dispatch.h>
|
| 290 |
+
#include <ATen/ops/cudnn_grid_sampler_backward_cuda_dispatch.h>
|
| 291 |
+
#include <ATen/ops/cumprod_cuda_dispatch.h>
|
| 292 |
+
#include <ATen/ops/cumsum_cuda_dispatch.h>
|
| 293 |
+
#include <ATen/ops/dequantize_cuda_dispatch.h>
|
| 294 |
+
#include <ATen/ops/digamma_cuda_dispatch.h>
|
| 295 |
+
#include <ATen/ops/div_cuda_dispatch.h>
|
| 296 |
+
#include <ATen/ops/dot_cuda_dispatch.h>
|
| 297 |
+
#include <ATen/ops/elu_cuda_dispatch.h>
|
| 298 |
+
#include <ATen/ops/elu_backward_cuda_dispatch.h>
|
| 299 |
+
#include <ATen/ops/embedding_dense_backward_cuda_dispatch.h>
|
| 300 |
+
#include <ATen/ops/embedding_renorm_cuda_dispatch.h>
|
| 301 |
+
#include <ATen/ops/empty_cuda_dispatch.h>
|
| 302 |
+
#include <ATen/ops/empty_strided_cuda_dispatch.h>
|
| 303 |
+
#include <ATen/ops/eq_cuda_dispatch.h>
|
| 304 |
+
#include <ATen/ops/equal_cuda_dispatch.h>
|
| 305 |
+
#include <ATen/ops/erf_cuda_dispatch.h>
|
| 306 |
+
#include <ATen/ops/erfc_cuda_dispatch.h>
|
| 307 |
+
#include <ATen/ops/erfinv_cuda_dispatch.h>
|
| 308 |
+
#include <ATen/ops/exp_cuda_dispatch.h>
|
| 309 |
+
#include <ATen/ops/exp2_cuda_dispatch.h>
|
| 310 |
+
#include <ATen/ops/expm1_cuda_dispatch.h>
|
| 311 |
+
#include <ATen/ops/exponential_cuda_dispatch.h>
|
| 312 |
+
#include <ATen/ops/eye_cuda_dispatch.h>
|
| 313 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h>
|
| 314 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h>
|
| 315 |
+
#include <ATen/ops/fill_cuda_dispatch.h>
|
| 316 |
+
#include <ATen/ops/flip_cuda_dispatch.h>
|
| 317 |
+
#include <ATen/ops/floor_cuda_dispatch.h>
|
| 318 |
+
#include <ATen/ops/floor_divide_cuda_dispatch.h>
|
| 319 |
+
#include <ATen/ops/fmax_cuda_dispatch.h>
|
| 320 |
+
#include <ATen/ops/fmin_cuda_dispatch.h>
|
| 321 |
+
#include <ATen/ops/fmod_cuda_dispatch.h>
|
| 322 |
+
#include <ATen/ops/frac_cuda_dispatch.h>
|
| 323 |
+
#include <ATen/ops/fractional_max_pool2d_cuda_dispatch.h>
|
| 324 |
+
#include <ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h>
|
| 325 |
+
#include <ATen/ops/fractional_max_pool3d_cuda_dispatch.h>
|
| 326 |
+
#include <ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h>
|
| 327 |
+
#include <ATen/ops/frexp_cuda_dispatch.h>
|
| 328 |
+
#include <ATen/ops/gather_cuda_dispatch.h>
|
| 329 |
+
#include <ATen/ops/gcd_cuda_dispatch.h>
|
| 330 |
+
#include <ATen/ops/ge_cuda_dispatch.h>
|
| 331 |
+
#include <ATen/ops/gelu_cuda_dispatch.h>
|
| 332 |
+
#include <ATen/ops/gelu_backward_cuda_dispatch.h>
|
| 333 |
+
#include <ATen/ops/geometric_cuda_dispatch.h>
|
| 334 |
+
#include <ATen/ops/geqrf_cuda_dispatch.h>
|
| 335 |
+
#include <ATen/ops/glu_cuda_dispatch.h>
|
| 336 |
+
#include <ATen/ops/glu_backward_cuda_dispatch.h>
|
| 337 |
+
#include <ATen/ops/glu_backward_jvp_cuda_dispatch.h>
|
| 338 |
+
#include <ATen/ops/glu_jvp_cuda_dispatch.h>
|
| 339 |
+
#include <ATen/ops/grid_sampler_2d_cuda_dispatch.h>
|
| 340 |
+
#include <ATen/ops/grid_sampler_2d_backward_cuda_dispatch.h>
|
| 341 |
+
#include <ATen/ops/grid_sampler_3d_cuda_dispatch.h>
|
| 342 |
+
#include <ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h>
|
| 343 |
+
#include <ATen/ops/gt_cuda_dispatch.h>
|
| 344 |
+
#include <ATen/ops/hardshrink_cuda_dispatch.h>
|
| 345 |
+
#include <ATen/ops/hardshrink_backward_cuda_dispatch.h>
|
| 346 |
+
#include <ATen/ops/hardsigmoid_cuda_dispatch.h>
|
| 347 |
+
#include <ATen/ops/hardsigmoid_backward_cuda_dispatch.h>
|
| 348 |
+
#include <ATen/ops/hardswish_cuda_dispatch.h>
|
| 349 |
+
#include <ATen/ops/hardswish_backward_cuda_dispatch.h>
|
| 350 |
+
#include <ATen/ops/hardtanh_cuda_dispatch.h>
|
| 351 |
+
#include <ATen/ops/hardtanh_backward_cuda_dispatch.h>
|
| 352 |
+
#include <ATen/ops/heaviside_cuda_dispatch.h>
|
| 353 |
+
#include <ATen/ops/histc_cuda_dispatch.h>
|
| 354 |
+
#include <ATen/ops/huber_loss_cuda_dispatch.h>
|
| 355 |
+
#include <ATen/ops/huber_loss_backward_cuda_dispatch.h>
|
| 356 |
+
#include <ATen/ops/hypot_cuda_dispatch.h>
|
| 357 |
+
#include <ATen/ops/i0_cuda_dispatch.h>
|
| 358 |
+
#include <ATen/ops/igamma_cuda_dispatch.h>
|
| 359 |
+
#include <ATen/ops/igammac_cuda_dispatch.h>
|
| 360 |
+
#include <ATen/ops/im2col_cuda_dispatch.h>
|
| 361 |
+
#include <ATen/ops/index_cuda_dispatch.h>
|
| 362 |
+
#include <ATen/ops/index_add_cuda_dispatch.h>
|
| 363 |
+
#include <ATen/ops/index_copy_cuda_dispatch.h>
|
| 364 |
+
#include <ATen/ops/index_fill_cuda_dispatch.h>
|
| 365 |
+
#include <ATen/ops/index_reduce_cuda_dispatch.h>
|
| 366 |
+
#include <ATen/ops/index_select_cuda_dispatch.h>
|
| 367 |
+
#include <ATen/ops/is_set_to_cuda_dispatch.h>
|
| 368 |
+
#include <ATen/ops/isin_cuda_dispatch.h>
|
| 369 |
+
#include <ATen/ops/isnan_cuda_dispatch.h>
|
| 370 |
+
#include <ATen/ops/isneginf_cuda_dispatch.h>
|
| 371 |
+
#include <ATen/ops/isposinf_cuda_dispatch.h>
|
| 372 |
+
#include <ATen/ops/kthvalue_cuda_dispatch.h>
|
| 373 |
+
#include <ATen/ops/lcm_cuda_dispatch.h>
|
| 374 |
+
#include <ATen/ops/le_cuda_dispatch.h>
|
| 375 |
+
#include <ATen/ops/leaky_relu_cuda_dispatch.h>
|
| 376 |
+
#include <ATen/ops/leaky_relu_backward_cuda_dispatch.h>
|
| 377 |
+
#include <ATen/ops/lerp_cuda_dispatch.h>
|
| 378 |
+
#include <ATen/ops/lgamma_cuda_dispatch.h>
|
| 379 |
+
#include <ATen/ops/linalg_cholesky_ex_cuda_dispatch.h>
|
| 380 |
+
#include <ATen/ops/linalg_cross_cuda_dispatch.h>
|
| 381 |
+
#include <ATen/ops/linalg_eig_cuda_dispatch.h>
|
| 382 |
+
#include <ATen/ops/linalg_eigvals_cuda_dispatch.h>
|
| 383 |
+
#include <ATen/ops/linalg_householder_product_cuda_dispatch.h>
|
| 384 |
+
#include <ATen/ops/linalg_inv_ex_cuda_dispatch.h>
|
| 385 |
+
#include <ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h>
|
| 386 |
+
#include <ATen/ops/linalg_ldl_solve_cuda_dispatch.h>
|
| 387 |
+
#include <ATen/ops/linalg_lstsq_cuda_dispatch.h>
|
| 388 |
+
#include <ATen/ops/linalg_lu_cuda_dispatch.h>
|
| 389 |
+
#include <ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h>
|
| 390 |
+
#include <ATen/ops/linalg_lu_solve_cuda_dispatch.h>
|
| 391 |
+
#include <ATen/ops/linalg_matrix_exp_cuda_dispatch.h>
|
| 392 |
+
#include <ATen/ops/linalg_qr_cuda_dispatch.h>
|
| 393 |
+
#include <ATen/ops/linalg_solve_triangular_cuda_dispatch.h>
|
| 394 |
+
#include <ATen/ops/linalg_vector_norm_cuda_dispatch.h>
|
| 395 |
+
#include <ATen/ops/linspace_cuda_dispatch.h>
|
| 396 |
+
#include <ATen/ops/log_cuda_dispatch.h>
|
| 397 |
+
#include <ATen/ops/log10_cuda_dispatch.h>
|
| 398 |
+
#include <ATen/ops/log1p_cuda_dispatch.h>
|
| 399 |
+
#include <ATen/ops/log2_cuda_dispatch.h>
|
| 400 |
+
#include <ATen/ops/log_normal_cuda_dispatch.h>
|
| 401 |
+
#include <ATen/ops/log_sigmoid_backward_cuda_dispatch.h>
|
| 402 |
+
#include <ATen/ops/log_sigmoid_forward_cuda_dispatch.h>
|
| 403 |
+
#include <ATen/ops/logaddexp_cuda_dispatch.h>
|
| 404 |
+
#include <ATen/ops/logaddexp2_cuda_dispatch.h>
|
| 405 |
+
#include <ATen/ops/logical_and_cuda_dispatch.h>
|
| 406 |
+
#include <ATen/ops/logical_not_cuda_dispatch.h>
|
| 407 |
+
#include <ATen/ops/logical_or_cuda_dispatch.h>
|
| 408 |
+
#include <ATen/ops/logical_xor_cuda_dispatch.h>
|
| 409 |
+
#include <ATen/ops/logit_cuda_dispatch.h>
|
| 410 |
+
#include <ATen/ops/logit_backward_cuda_dispatch.h>
|
| 411 |
+
#include <ATen/ops/logspace_cuda_dispatch.h>
|
| 412 |
+
#include <ATen/ops/lshift_cuda_dispatch.h>
|
| 413 |
+
#include <ATen/ops/lt_cuda_dispatch.h>
|
| 414 |
+
#include <ATen/ops/lu_unpack_cuda_dispatch.h>
|
| 415 |
+
#include <ATen/ops/masked_fill_cuda_dispatch.h>
|
| 416 |
+
#include <ATen/ops/masked_scatter_cuda_dispatch.h>
|
| 417 |
+
#include <ATen/ops/masked_select_cuda_dispatch.h>
|
| 418 |
+
#include <ATen/ops/max_cuda_dispatch.h>
|
| 419 |
+
#include <ATen/ops/max_pool2d_with_indices_cuda_dispatch.h>
|
| 420 |
+
#include <ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h>
|
| 421 |
+
#include <ATen/ops/max_pool3d_with_indices_cuda_dispatch.h>
|
| 422 |
+
#include <ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h>
|
| 423 |
+
#include <ATen/ops/max_unpool2d_cuda_dispatch.h>
|
| 424 |
+
#include <ATen/ops/max_unpool3d_cuda_dispatch.h>
|
| 425 |
+
#include <ATen/ops/maximum_cuda_dispatch.h>
|
| 426 |
+
#include <ATen/ops/mean_cuda_dispatch.h>
|
| 427 |
+
#include <ATen/ops/median_cuda_dispatch.h>
|
| 428 |
+
#include <ATen/ops/min_cuda_dispatch.h>
|
| 429 |
+
#include <ATen/ops/minimum_cuda_dispatch.h>
|
| 430 |
+
#include <ATen/ops/miopen_batch_norm_cuda_dispatch.h>
|
| 431 |
+
#include <ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h>
|
| 432 |
+
#include <ATen/ops/miopen_convolution_cuda_dispatch.h>
|
| 433 |
+
#include <ATen/ops/miopen_convolution_add_relu_cuda_dispatch.h>
|
| 434 |
+
#include <ATen/ops/miopen_convolution_relu_cuda_dispatch.h>
|
| 435 |
+
#include <ATen/ops/miopen_convolution_transpose_cuda_dispatch.h>
|
| 436 |
+
#include <ATen/ops/miopen_depthwise_convolution_cuda_dispatch.h>
|
| 437 |
+
#include <ATen/ops/miopen_rnn_cuda_dispatch.h>
|
| 438 |
+
#include <ATen/ops/miopen_rnn_backward_cuda_dispatch.h>
|
| 439 |
+
#include <ATen/ops/mish_cuda_dispatch.h>
|
| 440 |
+
#include <ATen/ops/mish_backward_cuda_dispatch.h>
|
| 441 |
+
#include <ATen/ops/mm_cuda_dispatch.h>
|
| 442 |
+
#include <ATen/ops/mode_cuda_dispatch.h>
|
| 443 |
+
#include <ATen/ops/mse_loss_cuda_dispatch.h>
|
| 444 |
+
#include <ATen/ops/mse_loss_backward_cuda_dispatch.h>
|
| 445 |
+
#include <ATen/ops/mul_cuda_dispatch.h>
|
| 446 |
+
#include <ATen/ops/multi_margin_loss_cuda_dispatch.h>
|
| 447 |
+
#include <ATen/ops/multi_margin_loss_backward_cuda_dispatch.h>
|
| 448 |
+
#include <ATen/ops/multilabel_margin_loss_backward_cuda_dispatch.h>
|
| 449 |
+
#include <ATen/ops/multilabel_margin_loss_forward_cuda_dispatch.h>
|
| 450 |
+
#include <ATen/ops/multinomial_cuda_dispatch.h>
|
| 451 |
+
#include <ATen/ops/mvlgamma_cuda_dispatch.h>
|
| 452 |
+
#include <ATen/ops/nan_to_num_cuda_dispatch.h>
|
| 453 |
+
#include <ATen/ops/nanmedian_cuda_dispatch.h>
|
| 454 |
+
#include <ATen/ops/nansum_cuda_dispatch.h>
|
| 455 |
+
#include <ATen/ops/native_batch_norm_cuda_dispatch.h>
|
| 456 |
+
#include <ATen/ops/native_batch_norm_backward_cuda_dispatch.h>
|
| 457 |
+
#include <ATen/ops/native_dropout_cuda_dispatch.h>
|
| 458 |
+
#include <ATen/ops/native_dropout_backward_cuda_dispatch.h>
|
| 459 |
+
#include <ATen/ops/native_group_norm_cuda_dispatch.h>
|
| 460 |
+
#include <ATen/ops/native_group_norm_backward_cuda_dispatch.h>
|
| 461 |
+
#include <ATen/ops/native_layer_norm_cuda_dispatch.h>
|
| 462 |
+
#include <ATen/ops/native_layer_norm_backward_cuda_dispatch.h>
|
| 463 |
+
#include <ATen/ops/ne_cuda_dispatch.h>
|
| 464 |
+
#include <ATen/ops/neg_cuda_dispatch.h>
|
| 465 |
+
#include <ATen/ops/nextafter_cuda_dispatch.h>
|
| 466 |
+
#include <ATen/ops/nll_loss2d_backward_cuda_dispatch.h>
|
| 467 |
+
#include <ATen/ops/nll_loss2d_forward_cuda_dispatch.h>
|
| 468 |
+
#include <ATen/ops/nll_loss_backward_cuda_dispatch.h>
|
| 469 |
+
#include <ATen/ops/nll_loss_forward_cuda_dispatch.h>
|
| 470 |
+
#include <ATen/ops/nonzero_cuda_dispatch.h>
|
| 471 |
+
#include <ATen/ops/norm_cuda_dispatch.h>
|
| 472 |
+
#include <ATen/ops/normal_cuda_dispatch.h>
|
| 473 |
+
#include <ATen/ops/ormqr_cuda_dispatch.h>
|
| 474 |
+
#include <ATen/ops/poisson_cuda_dispatch.h>
|
| 475 |
+
#include <ATen/ops/polar_cuda_dispatch.h>
|
| 476 |
+
#include <ATen/ops/polygamma_cuda_dispatch.h>
|
| 477 |
+
#include <ATen/ops/pow_cuda_dispatch.h>
|
| 478 |
+
#include <ATen/ops/prod_cuda_dispatch.h>
|
| 479 |
+
#include <ATen/ops/put_cuda_dispatch.h>
|
| 480 |
+
#include <ATen/ops/quantize_per_channel_cuda_dispatch.h>
|
| 481 |
+
#include <ATen/ops/quantize_per_tensor_cuda_dispatch.h>
|
| 482 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_cuda_dispatch.h>
|
| 483 |
+
#include <ATen/ops/random_cuda_dispatch.h>
|
| 484 |
+
#include <ATen/ops/randperm_cuda_dispatch.h>
|
| 485 |
+
#include <ATen/ops/range_cuda_dispatch.h>
|
| 486 |
+
#include <ATen/ops/reciprocal_cuda_dispatch.h>
|
| 487 |
+
#include <ATen/ops/record_stream_cuda_dispatch.h>
|
| 488 |
+
#include <ATen/ops/reflection_pad1d_cuda_dispatch.h>
|
| 489 |
+
#include <ATen/ops/reflection_pad1d_backward_cuda_dispatch.h>
|
| 490 |
+
#include <ATen/ops/reflection_pad2d_cuda_dispatch.h>
|
| 491 |
+
#include <ATen/ops/reflection_pad2d_backward_cuda_dispatch.h>
|
| 492 |
+
#include <ATen/ops/reflection_pad3d_cuda_dispatch.h>
|
| 493 |
+
#include <ATen/ops/reflection_pad3d_backward_cuda_dispatch.h>
|
| 494 |
+
#include <ATen/ops/relu_cuda_dispatch.h>
|
| 495 |
+
#include <ATen/ops/remainder_cuda_dispatch.h>
|
| 496 |
+
#include <ATen/ops/renorm_cuda_dispatch.h>
|
| 497 |
+
#include <ATen/ops/repeat_interleave_cuda_dispatch.h>
|
| 498 |
+
#include <ATen/ops/replication_pad1d_cuda_dispatch.h>
|
| 499 |
+
#include <ATen/ops/replication_pad1d_backward_cuda_dispatch.h>
|
| 500 |
+
#include <ATen/ops/replication_pad2d_cuda_dispatch.h>
|
| 501 |
+
#include <ATen/ops/replication_pad2d_backward_cuda_dispatch.h>
|
| 502 |
+
#include <ATen/ops/replication_pad3d_cuda_dispatch.h>
|
| 503 |
+
#include <ATen/ops/replication_pad3d_backward_cuda_dispatch.h>
|
| 504 |
+
#include <ATen/ops/resize_cuda_dispatch.h>
|
| 505 |
+
#include <ATen/ops/roll_cuda_dispatch.h>
|
| 506 |
+
#include <ATen/ops/round_cuda_dispatch.h>
|
| 507 |
+
#include <ATen/ops/rrelu_with_noise_cuda_dispatch.h>
|
| 508 |
+
#include <ATen/ops/rshift_cuda_dispatch.h>
|
| 509 |
+
#include <ATen/ops/rsqrt_cuda_dispatch.h>
|
| 510 |
+
#include <ATen/ops/rsub_cuda_dispatch.h>
|
| 511 |
+
#include <ATen/ops/scatter_cuda_dispatch.h>
|
| 512 |
+
#include <ATen/ops/scatter_add_cuda_dispatch.h>
|
| 513 |
+
#include <ATen/ops/scatter_reduce_cuda_dispatch.h>
|
| 514 |
+
#include <ATen/ops/searchsorted_cuda_dispatch.h>
|
| 515 |
+
#include <ATen/ops/segment_reduce_cuda_dispatch.h>
|
| 516 |
+
#include <ATen/ops/set_cuda_dispatch.h>
|
| 517 |
+
#include <ATen/ops/sgn_cuda_dispatch.h>
|
| 518 |
+
#include <ATen/ops/sigmoid_cuda_dispatch.h>
|
| 519 |
+
#include <ATen/ops/sigmoid_backward_cuda_dispatch.h>
|
| 520 |
+
#include <ATen/ops/sign_cuda_dispatch.h>
|
| 521 |
+
#include <ATen/ops/signbit_cuda_dispatch.h>
|
| 522 |
+
#include <ATen/ops/silu_cuda_dispatch.h>
|
| 523 |
+
#include <ATen/ops/silu_backward_cuda_dispatch.h>
|
| 524 |
+
#include <ATen/ops/sin_cuda_dispatch.h>
|
| 525 |
+
#include <ATen/ops/sinc_cuda_dispatch.h>
|
| 526 |
+
#include <ATen/ops/sinh_cuda_dispatch.h>
|
| 527 |
+
#include <ATen/ops/slow_conv_dilated2d_cuda_dispatch.h>
|
| 528 |
+
#include <ATen/ops/slow_conv_dilated3d_cuda_dispatch.h>
|
| 529 |
+
#include <ATen/ops/slow_conv_transpose2d_cuda_dispatch.h>
|
| 530 |
+
#include <ATen/ops/slow_conv_transpose3d_cuda_dispatch.h>
|
| 531 |
+
#include <ATen/ops/smooth_l1_loss_cuda_dispatch.h>
|
| 532 |
+
#include <ATen/ops/smooth_l1_loss_backward_cuda_dispatch.h>
|
| 533 |
+
#include <ATen/ops/softplus_cuda_dispatch.h>
|
| 534 |
+
#include <ATen/ops/softplus_backward_cuda_dispatch.h>
|
| 535 |
+
#include <ATen/ops/softshrink_cuda_dispatch.h>
|
| 536 |
+
#include <ATen/ops/softshrink_backward_cuda_dispatch.h>
|
| 537 |
+
#include <ATen/ops/sort_cuda_dispatch.h>
|
| 538 |
+
#include <ATen/ops/special_airy_ai_cuda_dispatch.h>
|
| 539 |
+
#include <ATen/ops/special_bessel_j0_cuda_dispatch.h>
|
| 540 |
+
#include <ATen/ops/special_bessel_j1_cuda_dispatch.h>
|
| 541 |
+
#include <ATen/ops/special_bessel_y0_cuda_dispatch.h>
|
| 542 |
+
#include <ATen/ops/special_bessel_y1_cuda_dispatch.h>
|
| 543 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h>
|
| 544 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_cuda_dispatch.h>
|
| 545 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_cuda_dispatch.h>
|
| 546 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h>
|
| 547 |
+
#include <ATen/ops/special_entr_cuda_dispatch.h>
|
| 548 |
+
#include <ATen/ops/special_erfcx_cuda_dispatch.h>
|
| 549 |
+
#include <ATen/ops/special_hermite_polynomial_h_cuda_dispatch.h>
|
| 550 |
+
#include <ATen/ops/special_hermite_polynomial_he_cuda_dispatch.h>
|
| 551 |
+
#include <ATen/ops/special_i0e_cuda_dispatch.h>
|
| 552 |
+
#include <ATen/ops/special_i1_cuda_dispatch.h>
|
| 553 |
+
#include <ATen/ops/special_i1e_cuda_dispatch.h>
|
| 554 |
+
#include <ATen/ops/special_laguerre_polynomial_l_cuda_dispatch.h>
|
| 555 |
+
#include <ATen/ops/special_legendre_polynomial_p_cuda_dispatch.h>
|
| 556 |
+
#include <ATen/ops/special_log_ndtr_cuda_dispatch.h>
|
| 557 |
+
#include <ATen/ops/special_modified_bessel_i0_cuda_dispatch.h>
|
| 558 |
+
#include <ATen/ops/special_modified_bessel_i1_cuda_dispatch.h>
|
| 559 |
+
#include <ATen/ops/special_modified_bessel_k0_cuda_dispatch.h>
|
| 560 |
+
#include <ATen/ops/special_modified_bessel_k1_cuda_dispatch.h>
|
| 561 |
+
#include <ATen/ops/special_ndtri_cuda_dispatch.h>
|
| 562 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0_cuda_dispatch.h>
|
| 563 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1_cuda_dispatch.h>
|
| 564 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cuda_dispatch.h>
|
| 565 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cuda_dispatch.h>
|
| 566 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cuda_dispatch.h>
|
| 567 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h>
|
| 568 |
+
#include <ATen/ops/special_spherical_bessel_j0_cuda_dispatch.h>
|
| 569 |
+
#include <ATen/ops/special_xlog1py_cuda_dispatch.h>
|
| 570 |
+
#include <ATen/ops/special_zeta_cuda_dispatch.h>
|
| 571 |
+
#include <ATen/ops/split_with_sizes_copy_cuda_dispatch.h>
|
| 572 |
+
#include <ATen/ops/sqrt_cuda_dispatch.h>
|
| 573 |
+
#include <ATen/ops/sspaddmm_cuda_dispatch.h>
|
| 574 |
+
#include <ATen/ops/std_cuda_dispatch.h>
|
| 575 |
+
#include <ATen/ops/std_mean_cuda_dispatch.h>
|
| 576 |
+
#include <ATen/ops/sub_cuda_dispatch.h>
|
| 577 |
+
#include <ATen/ops/sum_cuda_dispatch.h>
|
| 578 |
+
#include <ATen/ops/take_cuda_dispatch.h>
|
| 579 |
+
#include <ATen/ops/tan_cuda_dispatch.h>
|
| 580 |
+
#include <ATen/ops/tanh_cuda_dispatch.h>
|
| 581 |
+
#include <ATen/ops/tanh_backward_cuda_dispatch.h>
|
| 582 |
+
#include <ATen/ops/threshold_cuda_dispatch.h>
|
| 583 |
+
#include <ATen/ops/threshold_backward_cuda_dispatch.h>
|
| 584 |
+
#include <ATen/ops/topk_cuda_dispatch.h>
|
| 585 |
+
#include <ATen/ops/trace_cuda_dispatch.h>
|
| 586 |
+
#include <ATen/ops/triangular_solve_cuda_dispatch.h>
|
| 587 |
+
#include <ATen/ops/tril_cuda_dispatch.h>
|
| 588 |
+
#include <ATen/ops/tril_indices_cuda_dispatch.h>
|
| 589 |
+
#include <ATen/ops/triu_cuda_dispatch.h>
|
| 590 |
+
#include <ATen/ops/triu_indices_cuda_dispatch.h>
|
| 591 |
+
#include <ATen/ops/trunc_cuda_dispatch.h>
|
| 592 |
+
#include <ATen/ops/unfold_cuda_dispatch.h>
|
| 593 |
+
#include <ATen/ops/unfold_backward_cuda_dispatch.h>
|
| 594 |
+
#include <ATen/ops/uniform_cuda_dispatch.h>
|
| 595 |
+
#include <ATen/ops/unique_consecutive_cuda_dispatch.h>
|
| 596 |
+
#include <ATen/ops/unique_dim_cuda_dispatch.h>
|
| 597 |
+
#include <ATen/ops/unique_dim_consecutive_cuda_dispatch.h>
|
| 598 |
+
#include <ATen/ops/upsample_bicubic2d_cuda_dispatch.h>
|
| 599 |
+
#include <ATen/ops/upsample_bicubic2d_backward_cuda_dispatch.h>
|
| 600 |
+
#include <ATen/ops/upsample_bilinear2d_cuda_dispatch.h>
|
| 601 |
+
#include <ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h>
|
| 602 |
+
#include <ATen/ops/upsample_linear1d_cuda_dispatch.h>
|
| 603 |
+
#include <ATen/ops/upsample_linear1d_backward_cuda_dispatch.h>
|
| 604 |
+
#include <ATen/ops/upsample_nearest1d_cuda_dispatch.h>
|
| 605 |
+
#include <ATen/ops/upsample_nearest1d_backward_cuda_dispatch.h>
|
| 606 |
+
#include <ATen/ops/upsample_nearest2d_cuda_dispatch.h>
|
| 607 |
+
#include <ATen/ops/upsample_nearest2d_backward_cuda_dispatch.h>
|
| 608 |
+
#include <ATen/ops/upsample_nearest3d_cuda_dispatch.h>
|
| 609 |
+
#include <ATen/ops/upsample_nearest3d_backward_cuda_dispatch.h>
|
| 610 |
+
#include <ATen/ops/upsample_trilinear3d_cuda_dispatch.h>
|
| 611 |
+
#include <ATen/ops/upsample_trilinear3d_backward_cuda_dispatch.h>
|
| 612 |
+
#include <ATen/ops/var_cuda_dispatch.h>
|
| 613 |
+
#include <ATen/ops/var_mean_cuda_dispatch.h>
|
| 614 |
+
#include <ATen/ops/vdot_cuda_dispatch.h>
|
| 615 |
+
#include <ATen/ops/view_cuda_dispatch.h>
|
| 616 |
+
#include <ATen/ops/view_as_complex_cuda_dispatch.h>
|
| 617 |
+
#include <ATen/ops/view_as_real_cuda_dispatch.h>
|
| 618 |
+
#include <ATen/ops/where_cuda_dispatch.h>
|
| 619 |
+
#include <ATen/ops/xlogy_cuda_dispatch.h>
|
| 620 |
+
#include <ATen/ops/zero_cuda_dispatch.h>
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
|
| 5 |
+
namespace at::caching {
|
| 6 |
+
|
| 7 |
+
// Some systems (just cudagraphs currently) will persist a static tensor output
|
| 8 |
+
// whose TensorImpl does not change across iterations. For these tensors caching
|
| 9 |
+
// dtype conversions is invalid. Additionally, there will be an extra reference
|
| 10 |
+
// count to these cached tensors that would prevent buffer inplacing and other
|
| 11 |
+
// checks on tensor uniqueness. If we are not using these systems the enabled
|
| 12 |
+
// flag will be false and we will avoid the hash lookup.
|
| 13 |
+
|
| 14 |
+
TORCH_API bool is_cached_tensor(const at::Tensor& t);
|
| 15 |
+
TORCH_API void add_cached_tensor(const at::Tensor& t);
|
| 16 |
+
TORCH_API void remove_cached_tensor(const at::Tensor& t);
|
| 17 |
+
TORCH_API void set_cached_tensors_enabled(bool enable);
|
| 18 |
+
|
| 19 |
+
// For gradient buffer stealing we will adjust the use count of tensors
|
| 20 |
+
// which are persisted by cudagraphs, just as we need to adjust reference
|
| 21 |
+
// count of tensors with hooks.
|
| 22 |
+
TORCH_API size_t adjusted_use_count(const at::Tensor& t);
|
| 23 |
+
|
| 24 |
+
} // namespace at::caching
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/util/Exception.h>
|
| 2 |
+
#include <utility>
|
| 3 |
+
|
| 4 |
+
namespace at {

/*
[collapse dims] Updates sizes, and strides to reflect a "collapse" of
the info, possibly excluding the optional excludeDim. A "collapsed" version
of the info is the fewest dims that order the tensor's elements in the same
way as the original info. If excludeDim is specified, the collapse is the
fewest dims that order the tensor's elements as the original and preserve the
excluded dimension, unless the tensor collapses to a point.

This function returns a pair of values.

1) The (new) index of the preserved dimension if excludeDim is
specified. 0 if the tensor is collapsed to a point. -1
otherwise.

2) The new number of dimensions.

Note: sizes and strides are modified IN PLACE; only the first
`result.second` entries are meaningful afterwards.
*/
template <typename T>
inline std::pair<int64_t, int64_t> collapse_dims(
    T* sizes,
    T* strides,
    int64_t dims,
    const int excludeDim = -1) {
  TORCH_CHECK(
      excludeDim >= -1 && excludeDim < dims,
      "expected excluded dim between -1 and dims - 1");

  // Collapse only up to the excluded dim first; after it is preserved,
  // stopDim is reset to dims and the remaining dims are collapsed.
  int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
  int64_t newIndex = -1;  // index of the dim currently being collapsed into
  int64_t oldIndex = 0;   // read cursor over the original dims
  int64_t remappedExcludedDim = -1;

  while (oldIndex < dims) {
    // Finds a dimension to collapse into (first non-size-1 dim before stopDim)
    for (; oldIndex < stopDim; ++oldIndex) {
      if (sizes[oldIndex] == 1) {
        continue;
      }

      ++newIndex;
      sizes[newIndex] = sizes[oldIndex];
      strides[newIndex] = strides[oldIndex];
      ++oldIndex;
      break;
    }

    // Collapses dims: a dim merges into the current one when the elements are
    // contiguous across the pair, i.e. the accumulated stride of the outer dim
    // equals size * stride of the inner dim. Size-1 dims are always dropped.
    for (; oldIndex < stopDim; ++oldIndex) {
      if (sizes[oldIndex] == 1) {
        continue;
      }

      if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
        sizes[newIndex] *= sizes[oldIndex];
        strides[newIndex] = strides[oldIndex];
      } else {
        // Not mergeable: start a new output dim.
        ++newIndex;
        sizes[newIndex] = sizes[oldIndex];
        strides[newIndex] = strides[oldIndex];
      }
    }

    // Handles excludeDim being set (oldIndex == excludeDim)
    if (oldIndex != dims) {
      // Preserves excluded dimension: copy it through unmerged and record
      // where it landed in the collapsed layout.
      ++newIndex;
      sizes[newIndex] = sizes[oldIndex];
      strides[newIndex] = strides[oldIndex];
      remappedExcludedDim = newIndex;

      // Restarts iteration after excludeDim
      ++oldIndex;
      stopDim = dims;
    }
  }

  // Handles special case of all dims size 1: the tensor is a point and is
  // reported as a single dim of size 1, stride 1 (even if excludeDim was set).
  if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
    dims = 1;
    sizes[0] = 1;
    strides[0] = 1;

    return std::pair<int64_t, int64_t>(0, 1);
  }

  dims = newIndex + 1;
  return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
}

} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/TensorBody.h>
|
| 2 |
+
|
| 3 |
+
// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
|
| 4 |
+
// Code introduced to avoid cyclic dependency in static dispatch is no longer
|
| 5 |
+
// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
|
| 6 |
+
// to Operators.cpp for supporting multiple backends with multiple kernels.
|
| 7 |
+
//
|
| 8 |
+
// Note [Avoiding Include Cycles In Static Dispatch]
|
| 9 |
+
// In order to avoid #include cycles in the static dispatch build, we've carefully split out
|
| 10 |
+
// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
|
| 11 |
+
//
|
| 12 |
+
// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
|
| 13 |
+
// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
|
| 14 |
+
// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
|
| 15 |
+
// directly inlined into TensorBody.h.
|
| 16 |
+
// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
|
| 17 |
+
// which include functions that have defaultable std::optional<Tensor> arguments.
|
| 18 |
+
// That requires knowing the full Tensor class definition.
|
| 19 |
+
//
|
| 20 |
+
// We break the cycle by doing the following:
|
| 21 |
+
// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
|
| 22 |
+
// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
|
| 23 |
+
// - CPUFunctions_inl.h includes everything else
|
| 24 |
+
// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
|
| 25 |
+
// and then it includes CPUFunctions_inl.h.
|
| 26 |
+
// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
|
| 27 |
+
// - This also means that static dispatch build, CPUFunctions.h only needs to
|
| 28 |
+
// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
|
| 29 |
+
#include <ATen/CompositeExplicitAutogradFunctions_inl.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h
ADDED
|
@@ -0,0 +1,553 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 12 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
| 13 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
| 14 |
+
Consider including a specific operator from \
|
| 15 |
+
<ATen/ops/{my_operator}_compositeexplicitautograd_dispatch.h>. \
|
| 16 |
+
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h>
|
| 20 |
+
#include <ATen/ops/_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 21 |
+
#include <ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h>
|
| 22 |
+
#include <ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h>
|
| 23 |
+
#include <ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h>
|
| 24 |
+
#include <ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h>
|
| 25 |
+
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h>
|
| 26 |
+
#include <ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h>
|
| 27 |
+
#include <ATen/ops/_assert_scalar_compositeexplicitautograd_dispatch.h>
|
| 28 |
+
#include <ATen/ops/_batch_norm_no_update_compositeexplicitautograd_dispatch.h>
|
| 29 |
+
#include <ATen/ops/_batch_norm_with_update_compositeexplicitautograd_dispatch.h>
|
| 30 |
+
#include <ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h>
|
| 31 |
+
#include <ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h>
|
| 32 |
+
#include <ATen/ops/_cholesky_solve_helper_compositeexplicitautograd_dispatch.h>
|
| 33 |
+
#include <ATen/ops/_chunk_cat_compositeexplicitautograd_dispatch.h>
|
| 34 |
+
#include <ATen/ops/_coalesce_compositeexplicitautograd_dispatch.h>
|
| 35 |
+
#include <ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h>
|
| 36 |
+
#include <ATen/ops/_conj_compositeexplicitautograd_dispatch.h>
|
| 37 |
+
#include <ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h>
|
| 38 |
+
#include <ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h>
|
| 39 |
+
#include <ATen/ops/_convolution_compositeexplicitautograd_dispatch.h>
|
| 40 |
+
#include <ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h>
|
| 41 |
+
#include <ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h>
|
| 42 |
+
#include <ATen/ops/_ctc_loss_compositeexplicitautograd_dispatch.h>
|
| 43 |
+
#include <ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 44 |
+
#include <ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h>
|
| 45 |
+
#include <ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h>
|
| 46 |
+
#include <ATen/ops/_cudnn_rnn_compositeexplicitautograd_dispatch.h>
|
| 47 |
+
#include <ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h>
|
| 48 |
+
#include <ATen/ops/_cudnn_rnn_flatten_weight_compositeexplicitautograd_dispatch.h>
|
| 49 |
+
#include <ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h>
|
| 50 |
+
#include <ATen/ops/_efficientzerotensor_compositeexplicitautograd_dispatch.h>
|
| 51 |
+
#include <ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h>
|
| 52 |
+
#include <ATen/ops/_embedding_bag_dense_backward_compositeexplicitautograd_dispatch.h>
|
| 53 |
+
#include <ATen/ops/_embedding_bag_forward_only_compositeexplicitautograd_dispatch.h>
|
| 54 |
+
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_compositeexplicitautograd_dispatch.h>
|
| 55 |
+
#include <ATen/ops/_empty_affine_quantized_compositeexplicitautograd_dispatch.h>
|
| 56 |
+
#include <ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h>
|
| 57 |
+
#include <ATen/ops/_euclidean_dist_compositeexplicitautograd_dispatch.h>
|
| 58 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h>
|
| 59 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h>
|
| 60 |
+
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_compositeexplicitautograd_dispatch.h>
|
| 61 |
+
#include <ATen/ops/_foobar_compositeexplicitautograd_dispatch.h>
|
| 62 |
+
#include <ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h>
|
| 63 |
+
#include <ATen/ops/_foreach_acos_compositeexplicitautograd_dispatch.h>
|
| 64 |
+
#include <ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h>
|
| 65 |
+
#include <ATen/ops/_foreach_addcdiv_compositeexplicitautograd_dispatch.h>
|
| 66 |
+
#include <ATen/ops/_foreach_addcmul_compositeexplicitautograd_dispatch.h>
|
| 67 |
+
#include <ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h>
|
| 68 |
+
#include <ATen/ops/_foreach_atan_compositeexplicitautograd_dispatch.h>
|
| 69 |
+
#include <ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h>
|
| 70 |
+
#include <ATen/ops/_foreach_clamp_max_compositeexplicitautograd_dispatch.h>
|
| 71 |
+
#include <ATen/ops/_foreach_clamp_min_compositeexplicitautograd_dispatch.h>
|
| 72 |
+
#include <ATen/ops/_foreach_copy_compositeexplicitautograd_dispatch.h>
|
| 73 |
+
#include <ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h>
|
| 74 |
+
#include <ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h>
|
| 75 |
+
#include <ATen/ops/_foreach_div_compositeexplicitautograd_dispatch.h>
|
| 76 |
+
#include <ATen/ops/_foreach_erf_compositeexplicitautograd_dispatch.h>
|
| 77 |
+
#include <ATen/ops/_foreach_erfc_compositeexplicitautograd_dispatch.h>
|
| 78 |
+
#include <ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h>
|
| 79 |
+
#include <ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h>
|
| 80 |
+
#include <ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h>
|
| 81 |
+
#include <ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h>
|
| 82 |
+
#include <ATen/ops/_foreach_lerp_compositeexplicitautograd_dispatch.h>
|
| 83 |
+
#include <ATen/ops/_foreach_lgamma_compositeexplicitautograd_dispatch.h>
|
| 84 |
+
#include <ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h>
|
| 85 |
+
#include <ATen/ops/_foreach_log10_compositeexplicitautograd_dispatch.h>
|
| 86 |
+
#include <ATen/ops/_foreach_log1p_compositeexplicitautograd_dispatch.h>
|
| 87 |
+
#include <ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h>
|
| 88 |
+
#include <ATen/ops/_foreach_max_compositeexplicitautograd_dispatch.h>
|
| 89 |
+
#include <ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h>
|
| 90 |
+
#include <ATen/ops/_foreach_minimum_compositeexplicitautograd_dispatch.h>
|
| 91 |
+
#include <ATen/ops/_foreach_mul_compositeexplicitautograd_dispatch.h>
|
| 92 |
+
#include <ATen/ops/_foreach_neg_compositeexplicitautograd_dispatch.h>
|
| 93 |
+
#include <ATen/ops/_foreach_norm_compositeexplicitautograd_dispatch.h>
|
| 94 |
+
#include <ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h>
|
| 95 |
+
#include <ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h>
|
| 96 |
+
#include <ATen/ops/_foreach_round_compositeexplicitautograd_dispatch.h>
|
| 97 |
+
#include <ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h>
|
| 98 |
+
#include <ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h>
|
| 99 |
+
#include <ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h>
|
| 100 |
+
#include <ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h>
|
| 101 |
+
#include <ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h>
|
| 102 |
+
#include <ATen/ops/_foreach_sub_compositeexplicitautograd_dispatch.h>
|
| 103 |
+
#include <ATen/ops/_foreach_tan_compositeexplicitautograd_dispatch.h>
|
| 104 |
+
#include <ATen/ops/_foreach_tanh_compositeexplicitautograd_dispatch.h>
|
| 105 |
+
#include <ATen/ops/_foreach_trunc_compositeexplicitautograd_dispatch.h>
|
| 106 |
+
#include <ATen/ops/_foreach_zero_compositeexplicitautograd_dispatch.h>
|
| 107 |
+
#include <ATen/ops/_functional_assert_scalar_compositeexplicitautograd_dispatch.h>
|
| 108 |
+
#include <ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h>
|
| 109 |
+
#include <ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
|
| 110 |
+
#include <ATen/ops/_fused_adagrad_compositeexplicitautograd_dispatch.h>
|
| 111 |
+
#include <ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h>
|
| 112 |
+
#include <ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h>
|
| 113 |
+
#include <ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h>
|
| 114 |
+
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_compositeexplicitautograd_dispatch.h>
|
| 115 |
+
#include <ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h>
|
| 116 |
+
#include <ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h>
|
| 117 |
+
#include <ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h>
|
| 118 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h>
|
| 119 |
+
#include <ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h>
|
| 120 |
+
#include <ATen/ops/_histogramdd_bin_edges_compositeexplicitautograd_dispatch.h>
|
| 121 |
+
#include <ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h>
|
| 122 |
+
#include <ATen/ops/_histogramdd_from_bin_tensors_compositeexplicitautograd_dispatch.h>
|
| 123 |
+
#include <ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h>
|
| 124 |
+
#include <ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 125 |
+
#include <ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h>
|
| 126 |
+
#include <ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h>
|
| 127 |
+
#include <ATen/ops/_lazy_clone_compositeexplicitautograd_dispatch.h>
|
| 128 |
+
#include <ATen/ops/_linalg_check_errors_compositeexplicitautograd_dispatch.h>
|
| 129 |
+
#include <ATen/ops/_lstm_mps_compositeexplicitautograd_dispatch.h>
|
| 130 |
+
#include <ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h>
|
| 131 |
+
#include <ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h>
|
| 132 |
+
#include <ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h>
|
| 133 |
+
#include <ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h>
|
| 134 |
+
#include <ATen/ops/_masked_scale_compositeexplicitautograd_dispatch.h>
|
| 135 |
+
#include <ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h>
|
| 136 |
+
#include <ATen/ops/_masked_softmax_backward_compositeexplicitautograd_dispatch.h>
|
| 137 |
+
#include <ATen/ops/_mkldnn_reshape_compositeexplicitautograd_dispatch.h>
|
| 138 |
+
#include <ATen/ops/_mkldnn_transpose_compositeexplicitautograd_dispatch.h>
|
| 139 |
+
#include <ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h>
|
| 140 |
+
#include <ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
| 141 |
+
#include <ATen/ops/_native_batch_norm_legit_compositeexplicitautograd_dispatch.h>
|
| 142 |
+
#include <ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h>
|
| 143 |
+
#include <ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h>
|
| 144 |
+
#include <ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h>
|
| 145 |
+
#include <ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h>
|
| 146 |
+
#include <ATen/ops/_nested_from_padded_compositeexplicitautograd_dispatch.h>
|
| 147 |
+
#include <ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h>
|
| 148 |
+
#include <ATen/ops/_nested_get_values_copy_compositeexplicitautograd_dispatch.h>
|
| 149 |
+
#include <ATen/ops/_nested_tensor_from_mask_compositeexplicitautograd_dispatch.h>
|
| 150 |
+
#include <ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h>
|
| 151 |
+
#include <ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h>
|
| 152 |
+
#include <ATen/ops/_nested_tensor_storage_offsets_compositeexplicitautograd_dispatch.h>
|
| 153 |
+
#include <ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h>
|
| 154 |
+
#include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h>
|
| 155 |
+
#include <ATen/ops/_nested_view_from_jagged_copy_compositeexplicitautograd_dispatch.h>
|
| 156 |
+
#include <ATen/ops/_new_zeros_with_same_feature_meta_compositeexplicitautograd_dispatch.h>
|
| 157 |
+
#include <ATen/ops/_nnpack_spatial_convolution_compositeexplicitautograd_dispatch.h>
|
| 158 |
+
#include <ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h>
|
| 159 |
+
#include <ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h>
|
| 160 |
+
#include <ATen/ops/_pdist_forward_compositeexplicitautograd_dispatch.h>
|
| 161 |
+
#include <ATen/ops/_pin_memory_compositeexplicitautograd_dispatch.h>
|
| 162 |
+
#include <ATen/ops/_print_compositeexplicitautograd_dispatch.h>
|
| 163 |
+
#include <ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h>
|
| 164 |
+
#include <ATen/ops/_reshape_copy_compositeexplicitautograd_dispatch.h>
|
| 165 |
+
#include <ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h>
|
| 166 |
+
#include <ATen/ops/_safe_softmax_compositeexplicitautograd_dispatch.h>
|
| 167 |
+
#include <ATen/ops/_sample_dirichlet_compositeexplicitautograd_dispatch.h>
|
| 168 |
+
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_compositeexplicitautograd_dispatch.h>
|
| 169 |
+
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward_compositeexplicitautograd_dispatch.h>
|
| 170 |
+
#include <ATen/ops/_segment_reduce_backward_compositeexplicitautograd_dispatch.h>
|
| 171 |
+
#include <ATen/ops/_slow_conv2d_backward_compositeexplicitautograd_dispatch.h>
|
| 172 |
+
#include <ATen/ops/_sparse_addmm_compositeexplicitautograd_dispatch.h>
|
| 173 |
+
#include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautograd_dispatch.h>
|
| 174 |
+
#include <ATen/ops/_sparse_compressed_tensor_with_dims_compositeexplicitautograd_dispatch.h>
|
| 175 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_compositeexplicitautograd_dispatch.h>
|
| 176 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h>
|
| 177 |
+
#include <ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h>
|
| 178 |
+
#include <ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h>
|
| 179 |
+
#include <ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h>
|
| 180 |
+
#include <ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h>
|
| 181 |
+
#include <ATen/ops/_sparse_mask_projection_compositeexplicitautograd_dispatch.h>
|
| 182 |
+
#include <ATen/ops/_sparse_softmax_compositeexplicitautograd_dispatch.h>
|
| 183 |
+
#include <ATen/ops/_sparse_softmax_backward_data_compositeexplicitautograd_dispatch.h>
|
| 184 |
+
#include <ATen/ops/_sparse_sparse_matmul_compositeexplicitautograd_dispatch.h>
|
| 185 |
+
#include <ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h>
|
| 186 |
+
#include <ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h>
|
| 187 |
+
#include <ATen/ops/_spdiags_compositeexplicitautograd_dispatch.h>
|
| 188 |
+
#include <ATen/ops/_stack_compositeexplicitautograd_dispatch.h>
|
| 189 |
+
#include <ATen/ops/_standard_gamma_compositeexplicitautograd_dispatch.h>
|
| 190 |
+
#include <ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h>
|
| 191 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_compositeexplicitautograd_dispatch.h>
|
| 192 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_compositeexplicitautograd_dispatch.h>
|
| 193 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h>
|
| 194 |
+
#include <ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h>
|
| 195 |
+
#include <ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h>
|
| 196 |
+
#include <ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h>
|
| 197 |
+
#include <ATen/ops/_test_optional_intlist_compositeexplicitautograd_dispatch.h>
|
| 198 |
+
#include <ATen/ops/_test_parallel_materialize_compositeexplicitautograd_dispatch.h>
|
| 199 |
+
#include <ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h>
|
| 200 |
+
#include <ATen/ops/_thnn_fused_gru_cell_compositeexplicitautograd_dispatch.h>
|
| 201 |
+
#include <ATen/ops/_thnn_fused_gru_cell_backward_compositeexplicitautograd_dispatch.h>
|
| 202 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_compositeexplicitautograd_dispatch.h>
|
| 203 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_compositeexplicitautograd_dispatch.h>
|
| 204 |
+
#include <ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h>
|
| 205 |
+
#include <ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h>
|
| 206 |
+
#include <ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h>
|
| 207 |
+
#include <ATen/ops/_to_sparse_bsc_compositeexplicitautograd_dispatch.h>
|
| 208 |
+
#include <ATen/ops/_to_sparse_bsr_compositeexplicitautograd_dispatch.h>
|
| 209 |
+
#include <ATen/ops/_to_sparse_csc_compositeexplicitautograd_dispatch.h>
|
| 210 |
+
#include <ATen/ops/_to_sparse_csr_compositeexplicitautograd_dispatch.h>
|
| 211 |
+
#include <ATen/ops/_transform_bias_rescale_qkv_compositeexplicitautograd_dispatch.h>
|
| 212 |
+
#include <ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h>
|
| 213 |
+
#include <ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h>
|
| 214 |
+
#include <ATen/ops/_triton_multi_head_attention_compositeexplicitautograd_dispatch.h>
|
| 215 |
+
#include <ATen/ops/_triton_scaled_dot_attention_compositeexplicitautograd_dispatch.h>
|
| 216 |
+
#include <ATen/ops/_unique_compositeexplicitautograd_dispatch.h>
|
| 217 |
+
#include <ATen/ops/_unique2_compositeexplicitautograd_dispatch.h>
|
| 218 |
+
#include <ATen/ops/_unsafe_index_compositeexplicitautograd_dispatch.h>
|
| 219 |
+
#include <ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h>
|
| 220 |
+
#include <ATen/ops/_unsafe_masked_index_compositeexplicitautograd_dispatch.h>
|
| 221 |
+
#include <ATen/ops/_unsafe_masked_index_put_accumulate_compositeexplicitautograd_dispatch.h>
|
| 222 |
+
#include <ATen/ops/_unsafe_view_compositeexplicitautograd_dispatch.h>
|
| 223 |
+
#include <ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h>
|
| 224 |
+
#include <ATen/ops/_weight_norm_interface_compositeexplicitautograd_dispatch.h>
|
| 225 |
+
#include <ATen/ops/_weight_norm_interface_backward_compositeexplicitautograd_dispatch.h>
|
| 226 |
+
#include <ATen/ops/abs_compositeexplicitautograd_dispatch.h>
|
| 227 |
+
#include <ATen/ops/add_compositeexplicitautograd_dispatch.h>
|
| 228 |
+
#include <ATen/ops/addr_compositeexplicitautograd_dispatch.h>
|
| 229 |
+
#include <ATen/ops/affine_grid_generator_compositeexplicitautograd_dispatch.h>
|
| 230 |
+
#include <ATen/ops/alias_compositeexplicitautograd_dispatch.h>
|
| 231 |
+
#include <ATen/ops/alias_copy_compositeexplicitautograd_dispatch.h>
|
| 232 |
+
#include <ATen/ops/all_compositeexplicitautograd_dispatch.h>
|
| 233 |
+
#include <ATen/ops/allclose_compositeexplicitautograd_dispatch.h>
|
| 234 |
+
#include <ATen/ops/any_compositeexplicitautograd_dispatch.h>
|
| 235 |
+
#include <ATen/ops/arange_compositeexplicitautograd_dispatch.h>
|
| 236 |
+
#include <ATen/ops/as_strided_copy_compositeexplicitautograd_dispatch.h>
|
| 237 |
+
#include <ATen/ops/as_strided_scatter_compositeexplicitautograd_dispatch.h>
|
| 238 |
+
#include <ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h>
|
| 239 |
+
#include <ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h>
|
| 240 |
+
#include <ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h>
|
| 241 |
+
#include <ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h>
|
| 242 |
+
#include <ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h>
|
| 243 |
+
#include <ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h>
|
| 244 |
+
#include <ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h>
|
| 245 |
+
#include <ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h>
|
| 246 |
+
#include <ATen/ops/binary_cross_entropy_with_logits_compositeexplicitautograd_dispatch.h>
|
| 247 |
+
#include <ATen/ops/bincount_compositeexplicitautograd_dispatch.h>
|
| 248 |
+
#include <ATen/ops/binomial_compositeexplicitautograd_dispatch.h>
|
| 249 |
+
#include <ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h>
|
| 250 |
+
#include <ATen/ops/bitwise_left_shift_compositeexplicitautograd_dispatch.h>
|
| 251 |
+
#include <ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h>
|
| 252 |
+
#include <ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h>
|
| 253 |
+
#include <ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h>
|
| 254 |
+
#include <ATen/ops/blackman_window_compositeexplicitautograd_dispatch.h>
|
| 255 |
+
#include <ATen/ops/block_diag_compositeexplicitautograd_dispatch.h>
|
| 256 |
+
#include <ATen/ops/bucketize_compositeexplicitautograd_dispatch.h>
|
| 257 |
+
#include <ATen/ops/cauchy_compositeexplicitautograd_dispatch.h>
|
| 258 |
+
#include <ATen/ops/ccol_indices_compositeexplicitautograd_dispatch.h>
|
| 259 |
+
#include <ATen/ops/ccol_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 260 |
+
#include <ATen/ops/celu_compositeexplicitautograd_dispatch.h>
|
| 261 |
+
#include <ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h>
|
| 262 |
+
#include <ATen/ops/cholesky_solve_compositeexplicitautograd_dispatch.h>
|
| 263 |
+
#include <ATen/ops/clone_compositeexplicitautograd_dispatch.h>
|
| 264 |
+
#include <ATen/ops/col_indices_compositeexplicitautograd_dispatch.h>
|
| 265 |
+
#include <ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 266 |
+
#include <ATen/ops/complex_compositeexplicitautograd_dispatch.h>
|
| 267 |
+
#include <ATen/ops/conj_physical_compositeexplicitautograd_dispatch.h>
|
| 268 |
+
#include <ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h>
|
| 269 |
+
#include <ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h>
|
| 270 |
+
#include <ATen/ops/conv_tbc_compositeexplicitautograd_dispatch.h>
|
| 271 |
+
#include <ATen/ops/convolution_compositeexplicitautograd_dispatch.h>
|
| 272 |
+
#include <ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h>
|
| 273 |
+
#include <ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h>
|
| 274 |
+
#include <ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h>
|
| 275 |
+
#include <ATen/ops/copy_compositeexplicitautograd_dispatch.h>
|
| 276 |
+
#include <ATen/ops/copy_sparse_to_sparse_compositeexplicitautograd_dispatch.h>
|
| 277 |
+
#include <ATen/ops/copysign_compositeexplicitautograd_dispatch.h>
|
| 278 |
+
#include <ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h>
|
| 279 |
+
#include <ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h>
|
| 280 |
+
#include <ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 281 |
+
#include <ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h>
|
| 282 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h>
|
| 283 |
+
#include <ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h>
|
| 284 |
+
#include <ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 285 |
+
#include <ATen/ops/cudnn_convolution_add_relu_compositeexplicitautograd_dispatch.h>
|
| 286 |
+
#include <ATen/ops/cudnn_convolution_relu_compositeexplicitautograd_dispatch.h>
|
| 287 |
+
#include <ATen/ops/cudnn_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
| 288 |
+
#include <ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h>
|
| 289 |
+
#include <ATen/ops/cudnn_grid_sampler_backward_compositeexplicitautograd_dispatch.h>
|
| 290 |
+
#include <ATen/ops/cummax_compositeexplicitautograd_dispatch.h>
|
| 291 |
+
#include <ATen/ops/cummin_compositeexplicitautograd_dispatch.h>
|
| 292 |
+
#include <ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h>
|
| 293 |
+
#include <ATen/ops/dense_dim_compositeexplicitautograd_dispatch.h>
|
| 294 |
+
#include <ATen/ops/dequantize_compositeexplicitautograd_dispatch.h>
|
| 295 |
+
#include <ATen/ops/detach_compositeexplicitautograd_dispatch.h>
|
| 296 |
+
#include <ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h>
|
| 297 |
+
#include <ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h>
|
| 298 |
+
#include <ATen/ops/diagonal_compositeexplicitautograd_dispatch.h>
|
| 299 |
+
#include <ATen/ops/diagonal_backward_compositeexplicitautograd_dispatch.h>
|
| 300 |
+
#include <ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h>
|
| 301 |
+
#include <ATen/ops/diagonal_scatter_compositeexplicitautograd_dispatch.h>
|
| 302 |
+
#include <ATen/ops/dist_compositeexplicitautograd_dispatch.h>
|
| 303 |
+
#include <ATen/ops/div_compositeexplicitautograd_dispatch.h>
|
| 304 |
+
#include <ATen/ops/dot_compositeexplicitautograd_dispatch.h>
|
| 305 |
+
#include <ATen/ops/embedding_compositeexplicitautograd_dispatch.h>
|
| 306 |
+
#include <ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h>
|
| 307 |
+
#include <ATen/ops/embedding_renorm_compositeexplicitautograd_dispatch.h>
|
| 308 |
+
#include <ATen/ops/empty_compositeexplicitautograd_dispatch.h>
|
| 309 |
+
#include <ATen/ops/empty_like_compositeexplicitautograd_dispatch.h>
|
| 310 |
+
#include <ATen/ops/empty_permuted_compositeexplicitautograd_dispatch.h>
|
| 311 |
+
#include <ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h>
|
| 312 |
+
#include <ATen/ops/empty_strided_compositeexplicitautograd_dispatch.h>
|
| 313 |
+
#include <ATen/ops/expand_compositeexplicitautograd_dispatch.h>
|
| 314 |
+
#include <ATen/ops/expand_copy_compositeexplicitautograd_dispatch.h>
|
| 315 |
+
#include <ATen/ops/exponential_compositeexplicitautograd_dispatch.h>
|
| 316 |
+
#include <ATen/ops/eye_compositeexplicitautograd_dispatch.h>
|
| 317 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h>
|
| 318 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h>
|
| 319 |
+
#include <ATen/ops/fft_fftfreq_compositeexplicitautograd_dispatch.h>
|
| 320 |
+
#include <ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h>
|
| 321 |
+
#include <ATen/ops/fill_compositeexplicitautograd_dispatch.h>
|
| 322 |
+
#include <ATen/ops/flip_compositeexplicitautograd_dispatch.h>
|
| 323 |
+
#include <ATen/ops/floor_divide_compositeexplicitautograd_dispatch.h>
|
| 324 |
+
#include <ATen/ops/fmod_compositeexplicitautograd_dispatch.h>
|
| 325 |
+
#include <ATen/ops/frexp_compositeexplicitautograd_dispatch.h>
|
| 326 |
+
#include <ATen/ops/from_file_compositeexplicitautograd_dispatch.h>
|
| 327 |
+
#include <ATen/ops/full_compositeexplicitautograd_dispatch.h>
|
| 328 |
+
#include <ATen/ops/full_like_compositeexplicitautograd_dispatch.h>
|
| 329 |
+
#include <ATen/ops/geometric_compositeexplicitautograd_dispatch.h>
|
| 330 |
+
#include <ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h>
|
| 331 |
+
#include <ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h>
|
| 332 |
+
#include <ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h>
|
| 333 |
+
#include <ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h>
|
| 334 |
+
#include <ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h>
|
| 335 |
+
#include <ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h>
|
| 336 |
+
#include <ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h>
|
| 337 |
+
#include <ATen/ops/hann_window_compositeexplicitautograd_dispatch.h>
|
| 338 |
+
#include <ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h>
|
| 339 |
+
#include <ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 340 |
+
#include <ATen/ops/index_fill_compositeexplicitautograd_dispatch.h>
|
| 341 |
+
#include <ATen/ops/index_put_compositeexplicitautograd_dispatch.h>
|
| 342 |
+
#include <ATen/ops/indices_compositeexplicitautograd_dispatch.h>
|
| 343 |
+
#include <ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h>
|
| 344 |
+
#include <ATen/ops/int_repr_compositeexplicitautograd_dispatch.h>
|
| 345 |
+
#include <ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h>
|
| 346 |
+
#include <ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h>
|
| 347 |
+
#include <ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h>
|
| 348 |
+
#include <ATen/ops/isinf_compositeexplicitautograd_dispatch.h>
|
| 349 |
+
#include <ATen/ops/isnan_compositeexplicitautograd_dispatch.h>
|
| 350 |
+
#include <ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h>
|
| 351 |
+
#include <ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h>
|
| 352 |
+
#include <ATen/ops/lift_compositeexplicitautograd_dispatch.h>
|
| 353 |
+
#include <ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h>
|
| 354 |
+
#include <ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h>
|
| 355 |
+
#include <ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h>
|
| 356 |
+
#include <ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h>
|
| 357 |
+
#include <ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h>
|
| 358 |
+
#include <ATen/ops/linear_compositeexplicitautograd_dispatch.h>
|
| 359 |
+
#include <ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h>
|
| 360 |
+
#include <ATen/ops/linspace_compositeexplicitautograd_dispatch.h>
|
| 361 |
+
#include <ATen/ops/log_normal_compositeexplicitautograd_dispatch.h>
|
| 362 |
+
#include <ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h>
|
| 363 |
+
#include <ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h>
|
| 364 |
+
#include <ATen/ops/logical_and_compositeexplicitautograd_dispatch.h>
|
| 365 |
+
#include <ATen/ops/logical_not_compositeexplicitautograd_dispatch.h>
|
| 366 |
+
#include <ATen/ops/logical_or_compositeexplicitautograd_dispatch.h>
|
| 367 |
+
#include <ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h>
|
| 368 |
+
#include <ATen/ops/logspace_compositeexplicitautograd_dispatch.h>
|
| 369 |
+
#include <ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h>
|
| 370 |
+
#include <ATen/ops/lshift_compositeexplicitautograd_dispatch.h>
|
| 371 |
+
#include <ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h>
|
| 372 |
+
#include <ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h>
|
| 373 |
+
#include <ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h>
|
| 374 |
+
#include <ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h>
|
| 375 |
+
#include <ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h>
|
| 376 |
+
#include <ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 377 |
+
#include <ATen/ops/mean_compositeexplicitautograd_dispatch.h>
|
| 378 |
+
#include <ATen/ops/median_compositeexplicitautograd_dispatch.h>
|
| 379 |
+
#include <ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h>
|
| 380 |
+
#include <ATen/ops/miopen_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 381 |
+
#include <ATen/ops/miopen_convolution_compositeexplicitautograd_dispatch.h>
|
| 382 |
+
#include <ATen/ops/miopen_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
| 383 |
+
#include <ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h>
|
| 384 |
+
#include <ATen/ops/miopen_rnn_compositeexplicitautograd_dispatch.h>
|
| 385 |
+
#include <ATen/ops/miopen_rnn_backward_compositeexplicitautograd_dispatch.h>
|
| 386 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 387 |
+
#include <ATen/ops/mkldnn_convolution_compositeexplicitautograd_dispatch.h>
|
| 388 |
+
#include <ATen/ops/mkldnn_linear_compositeexplicitautograd_dispatch.h>
|
| 389 |
+
#include <ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h>
|
| 390 |
+
#include <ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h>
|
| 391 |
+
#include <ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h>
|
| 392 |
+
#include <ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h>
|
| 393 |
+
#include <ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 394 |
+
#include <ATen/ops/mkldnn_max_pool3d_compositeexplicitautograd_dispatch.h>
|
| 395 |
+
#include <ATen/ops/mkldnn_max_pool3d_backward_compositeexplicitautograd_dispatch.h>
|
| 396 |
+
#include <ATen/ops/mkldnn_reorder_conv2d_weight_compositeexplicitautograd_dispatch.h>
|
| 397 |
+
#include <ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h>
|
| 398 |
+
#include <ATen/ops/mkldnn_rnn_layer_compositeexplicitautograd_dispatch.h>
|
| 399 |
+
#include <ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h>
|
| 400 |
+
#include <ATen/ops/mode_compositeexplicitautograd_dispatch.h>
|
| 401 |
+
#include <ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h>
|
| 402 |
+
#include <ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h>
|
| 403 |
+
#include <ATen/ops/mul_compositeexplicitautograd_dispatch.h>
|
| 404 |
+
#include <ATen/ops/mv_compositeexplicitautograd_dispatch.h>
|
| 405 |
+
#include <ATen/ops/mvlgamma_compositeexplicitautograd_dispatch.h>
|
| 406 |
+
#include <ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h>
|
| 407 |
+
#include <ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h>
|
| 408 |
+
#include <ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 409 |
+
#include <ATen/ops/native_dropout_compositeexplicitautograd_dispatch.h>
|
| 410 |
+
#include <ATen/ops/native_dropout_backward_compositeexplicitautograd_dispatch.h>
|
| 411 |
+
#include <ATen/ops/native_group_norm_compositeexplicitautograd_dispatch.h>
|
| 412 |
+
#include <ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 413 |
+
#include <ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h>
|
| 414 |
+
#include <ATen/ops/native_layer_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 415 |
+
#include <ATen/ops/native_norm_compositeexplicitautograd_dispatch.h>
|
| 416 |
+
#include <ATen/ops/new_empty_compositeexplicitautograd_dispatch.h>
|
| 417 |
+
#include <ATen/ops/new_empty_strided_compositeexplicitautograd_dispatch.h>
|
| 418 |
+
#include <ATen/ops/new_full_compositeexplicitautograd_dispatch.h>
|
| 419 |
+
#include <ATen/ops/new_ones_compositeexplicitautograd_dispatch.h>
|
| 420 |
+
#include <ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h>
|
| 421 |
+
#include <ATen/ops/norm_compositeexplicitautograd_dispatch.h>
|
| 422 |
+
#include <ATen/ops/normal_compositeexplicitautograd_dispatch.h>
|
| 423 |
+
#include <ATen/ops/ones_compositeexplicitautograd_dispatch.h>
|
| 424 |
+
#include <ATen/ops/ones_like_compositeexplicitautograd_dispatch.h>
|
| 425 |
+
#include <ATen/ops/permute_compositeexplicitautograd_dispatch.h>
|
| 426 |
+
#include <ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h>
|
| 427 |
+
#include <ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h>
|
| 428 |
+
#include <ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h>
|
| 429 |
+
#include <ATen/ops/poisson_compositeexplicitautograd_dispatch.h>
|
| 430 |
+
#include <ATen/ops/polar_compositeexplicitautograd_dispatch.h>
|
| 431 |
+
#include <ATen/ops/polygamma_compositeexplicitautograd_dispatch.h>
|
| 432 |
+
#include <ATen/ops/prod_compositeexplicitautograd_dispatch.h>
|
| 433 |
+
#include <ATen/ops/put_compositeexplicitautograd_dispatch.h>
|
| 434 |
+
#include <ATen/ops/q_per_channel_scales_compositeexplicitautograd_dispatch.h>
|
| 435 |
+
#include <ATen/ops/q_per_channel_zero_points_compositeexplicitautograd_dispatch.h>
|
| 436 |
+
#include <ATen/ops/quantize_per_channel_compositeexplicitautograd_dispatch.h>
|
| 437 |
+
#include <ATen/ops/quantize_per_tensor_compositeexplicitautograd_dispatch.h>
|
| 438 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h>
|
| 439 |
+
#include <ATen/ops/quantized_batch_norm_compositeexplicitautograd_dispatch.h>
|
| 440 |
+
#include <ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h>
|
| 441 |
+
#include <ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h>
|
| 442 |
+
#include <ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h>
|
| 443 |
+
#include <ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h>
|
| 444 |
+
#include <ATen/ops/rand_compositeexplicitautograd_dispatch.h>
|
| 445 |
+
#include <ATen/ops/rand_like_compositeexplicitautograd_dispatch.h>
|
| 446 |
+
#include <ATen/ops/randint_compositeexplicitautograd_dispatch.h>
|
| 447 |
+
#include <ATen/ops/randint_like_compositeexplicitautograd_dispatch.h>
|
| 448 |
+
#include <ATen/ops/randn_compositeexplicitautograd_dispatch.h>
|
| 449 |
+
#include <ATen/ops/randn_like_compositeexplicitautograd_dispatch.h>
|
| 450 |
+
#include <ATen/ops/random_compositeexplicitautograd_dispatch.h>
|
| 451 |
+
#include <ATen/ops/randperm_compositeexplicitautograd_dispatch.h>
|
| 452 |
+
#include <ATen/ops/range_compositeexplicitautograd_dispatch.h>
|
| 453 |
+
#include <ATen/ops/relu_compositeexplicitautograd_dispatch.h>
|
| 454 |
+
#include <ATen/ops/remainder_compositeexplicitautograd_dispatch.h>
|
| 455 |
+
#include <ATen/ops/repeat_compositeexplicitautograd_dispatch.h>
|
| 456 |
+
#include <ATen/ops/repeat_interleave_compositeexplicitautograd_dispatch.h>
|
| 457 |
+
#include <ATen/ops/resize_compositeexplicitautograd_dispatch.h>
|
| 458 |
+
#include <ATen/ops/resize_as_compositeexplicitautograd_dispatch.h>
|
| 459 |
+
#include <ATen/ops/resize_as_sparse_compositeexplicitautograd_dispatch.h>
|
| 460 |
+
#include <ATen/ops/roll_compositeexplicitautograd_dispatch.h>
|
| 461 |
+
#include <ATen/ops/rot90_compositeexplicitautograd_dispatch.h>
|
| 462 |
+
#include <ATen/ops/row_indices_compositeexplicitautograd_dispatch.h>
|
| 463 |
+
#include <ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 464 |
+
#include <ATen/ops/rrelu_with_noise_backward_compositeexplicitautograd_dispatch.h>
|
| 465 |
+
#include <ATen/ops/rshift_compositeexplicitautograd_dispatch.h>
|
| 466 |
+
#include <ATen/ops/rsub_compositeexplicitautograd_dispatch.h>
|
| 467 |
+
#include <ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h>
|
| 468 |
+
#include <ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h>
|
| 469 |
+
#include <ATen/ops/select_compositeexplicitautograd_dispatch.h>
|
| 470 |
+
#include <ATen/ops/select_backward_compositeexplicitautograd_dispatch.h>
|
| 471 |
+
#include <ATen/ops/select_copy_compositeexplicitautograd_dispatch.h>
|
| 472 |
+
#include <ATen/ops/select_scatter_compositeexplicitautograd_dispatch.h>
|
| 473 |
+
#include <ATen/ops/set_compositeexplicitautograd_dispatch.h>
|
| 474 |
+
#include <ATen/ops/slice_compositeexplicitautograd_dispatch.h>
|
| 475 |
+
#include <ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h>
|
| 476 |
+
#include <ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h>
|
| 477 |
+
#include <ATen/ops/slice_inverse_compositeexplicitautograd_dispatch.h>
|
| 478 |
+
#include <ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h>
|
| 479 |
+
#include <ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h>
|
| 480 |
+
#include <ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h>
|
| 481 |
+
#include <ATen/ops/smooth_l1_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 482 |
+
#include <ATen/ops/soft_margin_loss_compositeexplicitautograd_dispatch.h>
|
| 483 |
+
#include <ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 484 |
+
#include <ATen/ops/softmax_compositeexplicitautograd_dispatch.h>
|
| 485 |
+
#include <ATen/ops/sort_compositeexplicitautograd_dispatch.h>
|
| 486 |
+
#include <ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h>
|
| 487 |
+
#include <ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h>
|
| 488 |
+
#include <ATen/ops/sparse_dim_compositeexplicitautograd_dispatch.h>
|
| 489 |
+
#include <ATen/ops/sparse_mask_compositeexplicitautograd_dispatch.h>
|
| 490 |
+
#include <ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h>
|
| 491 |
+
#include <ATen/ops/sparse_resize_and_clear_compositeexplicitautograd_dispatch.h>
|
| 492 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
|
| 493 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
|
| 494 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
|
| 495 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
|
| 496 |
+
#include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h>
|
| 497 |
+
#include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautograd_dispatch.h>
|
| 498 |
+
#include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautograd_dispatch.h>
|
| 499 |
+
#include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h>
|
| 500 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
|
| 501 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
|
| 502 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
|
| 503 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
|
| 504 |
+
#include <ATen/ops/special_xlog1py_compositeexplicitautograd_dispatch.h>
|
| 505 |
+
#include <ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h>
|
| 506 |
+
#include <ATen/ops/split_compositeexplicitautograd_dispatch.h>
|
| 507 |
+
#include <ATen/ops/split_copy_compositeexplicitautograd_dispatch.h>
|
| 508 |
+
#include <ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h>
|
| 509 |
+
#include <ATen/ops/split_with_sizes_copy_compositeexplicitautograd_dispatch.h>
|
| 510 |
+
#include <ATen/ops/squeeze_compositeexplicitautograd_dispatch.h>
|
| 511 |
+
#include <ATen/ops/squeeze_copy_compositeexplicitautograd_dispatch.h>
|
| 512 |
+
#include <ATen/ops/stack_compositeexplicitautograd_dispatch.h>
|
| 513 |
+
#include <ATen/ops/std_mean_compositeexplicitautograd_dispatch.h>
|
| 514 |
+
#include <ATen/ops/sub_compositeexplicitautograd_dispatch.h>
|
| 515 |
+
#include <ATen/ops/sum_compositeexplicitautograd_dispatch.h>
|
| 516 |
+
#include <ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h>
|
| 517 |
+
#include <ATen/ops/sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
|
| 518 |
+
#include <ATen/ops/t_compositeexplicitautograd_dispatch.h>
|
| 519 |
+
#include <ATen/ops/t_copy_compositeexplicitautograd_dispatch.h>
|
| 520 |
+
#include <ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h>
|
| 521 |
+
#include <ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h>
|
| 522 |
+
#include <ATen/ops/trace_compositeexplicitautograd_dispatch.h>
|
| 523 |
+
#include <ATen/ops/transpose_compositeexplicitautograd_dispatch.h>
|
| 524 |
+
#include <ATen/ops/transpose_copy_compositeexplicitautograd_dispatch.h>
|
| 525 |
+
#include <ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h>
|
| 526 |
+
#include <ATen/ops/triu_indices_compositeexplicitautograd_dispatch.h>
|
| 527 |
+
#include <ATen/ops/unbind_compositeexplicitautograd_dispatch.h>
|
| 528 |
+
#include <ATen/ops/unbind_copy_compositeexplicitautograd_dispatch.h>
|
| 529 |
+
#include <ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h>
|
| 530 |
+
#include <ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h>
|
| 531 |
+
#include <ATen/ops/uniform_compositeexplicitautograd_dispatch.h>
|
| 532 |
+
#include <ATen/ops/unique_consecutive_compositeexplicitautograd_dispatch.h>
|
| 533 |
+
#include <ATen/ops/unique_dim_compositeexplicitautograd_dispatch.h>
|
| 534 |
+
#include <ATen/ops/unique_dim_consecutive_compositeexplicitautograd_dispatch.h>
|
| 535 |
+
#include <ATen/ops/unsafe_split_compositeexplicitautograd_dispatch.h>
|
| 536 |
+
#include <ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h>
|
| 537 |
+
#include <ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h>
|
| 538 |
+
#include <ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h>
|
| 539 |
+
#include <ATen/ops/values_compositeexplicitautograd_dispatch.h>
|
| 540 |
+
#include <ATen/ops/values_copy_compositeexplicitautograd_dispatch.h>
|
| 541 |
+
#include <ATen/ops/var_mean_compositeexplicitautograd_dispatch.h>
|
| 542 |
+
#include <ATen/ops/vdot_compositeexplicitautograd_dispatch.h>
|
| 543 |
+
#include <ATen/ops/view_compositeexplicitautograd_dispatch.h>
|
| 544 |
+
#include <ATen/ops/view_as_complex_copy_compositeexplicitautograd_dispatch.h>
|
| 545 |
+
#include <ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h>
|
| 546 |
+
#include <ATen/ops/view_copy_compositeexplicitautograd_dispatch.h>
|
| 547 |
+
#include <ATen/ops/xlogy_compositeexplicitautograd_dispatch.h>
|
| 548 |
+
#include <ATen/ops/zero_compositeexplicitautograd_dispatch.h>
|
| 549 |
+
#include <ATen/ops/zeros_compositeexplicitautograd_dispatch.h>
|
| 550 |
+
#include <ATen/ops/zeros_like_compositeexplicitautograd_dispatch.h>
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/TensorBody.h>
|
| 2 |
+
|
| 3 |
+
// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
|
| 4 |
+
// Code introduced to avoid cyclic dependency in static dispatch is no longer
|
| 5 |
+
// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
|
| 6 |
+
// to Operators.cpp for supporting multiple backends with multiple kernels.
|
| 7 |
+
//
|
| 8 |
+
// Note [Avoiding Include Cycles In Static Dispatch]
|
| 9 |
+
// In order to avoid #include cycles in the static dispatch build, we've carefully split out
|
| 10 |
+
// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
|
| 11 |
+
//
|
| 12 |
+
// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
|
| 13 |
+
// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
|
| 14 |
+
// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
|
| 15 |
+
// directly inlined into TensorBody.h.
|
| 16 |
+
// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
|
| 17 |
+
// which include functions that have defaultable std::optional<Tensor> arguments.
|
| 18 |
+
// That requires knowing the full Tensor class definition.
|
| 19 |
+
//
|
| 20 |
+
// We break the cycle by doing the following:
|
| 21 |
+
// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
|
| 22 |
+
// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
|
| 23 |
+
// - CPUFunctions_inl.h includes everything else
|
| 24 |
+
// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
|
| 25 |
+
// and then it includes CPUFunctions_inl.h.
|
| 26 |
+
// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
|
| 27 |
+
// - This also means that static dispatch build, CPUFunctions.h only needs to
|
| 28 |
+
// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
|
| 29 |
+
#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h
ADDED
|
@@ -0,0 +1,502 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 12 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
| 13 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
| 14 |
+
Consider including a specific operator from \
|
| 15 |
+
<ATen/ops/{my_operator}_compositeimplicitautograd_dispatch.h>. \
|
| 16 |
+
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h>
|
| 20 |
+
#include <ATen/ops/_assert_tensor_metadata_compositeimplicitautograd_dispatch.h>
|
| 21 |
+
#include <ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h>
|
| 22 |
+
#include <ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h>
|
| 23 |
+
#include <ATen/ops/_backward_compositeimplicitautograd_dispatch.h>
|
| 24 |
+
#include <ATen/ops/_batch_norm_impl_index_compositeimplicitautograd_dispatch.h>
|
| 25 |
+
#include <ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h>
|
| 26 |
+
#include <ATen/ops/_cast_Byte_compositeimplicitautograd_dispatch.h>
|
| 27 |
+
#include <ATen/ops/_cast_Char_compositeimplicitautograd_dispatch.h>
|
| 28 |
+
#include <ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h>
|
| 29 |
+
#include <ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h>
|
| 30 |
+
#include <ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h>
|
| 31 |
+
#include <ATen/ops/_cast_Int_compositeimplicitautograd_dispatch.h>
|
| 32 |
+
#include <ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h>
|
| 33 |
+
#include <ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h>
|
| 34 |
+
#include <ATen/ops/_choose_qparams_per_tensor_compositeimplicitautograd_dispatch.h>
|
| 35 |
+
#include <ATen/ops/_convolution_compositeimplicitautograd_dispatch.h>
|
| 36 |
+
#include <ATen/ops/_convolution_double_backward_compositeimplicitautograd_dispatch.h>
|
| 37 |
+
#include <ATen/ops/_convolution_mode_compositeimplicitautograd_dispatch.h>
|
| 38 |
+
#include <ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h>
|
| 39 |
+
#include <ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
|
| 40 |
+
#include <ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h>
|
| 41 |
+
#include <ATen/ops/_cufft_set_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
|
| 42 |
+
#include <ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h>
|
| 43 |
+
#include <ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h>
|
| 44 |
+
#include <ATen/ops/_embedding_bag_sparse_backward_compositeimplicitautograd_dispatch.h>
|
| 45 |
+
#include <ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h>
|
| 46 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h>
|
| 47 |
+
#include <ATen/ops/_has_compatible_shallow_copy_type_compositeimplicitautograd_dispatch.h>
|
| 48 |
+
#include <ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h>
|
| 49 |
+
#include <ATen/ops/_lu_with_info_compositeimplicitautograd_dispatch.h>
|
| 50 |
+
#include <ATen/ops/_nnpack_available_compositeimplicitautograd_dispatch.h>
|
| 51 |
+
#include <ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h>
|
| 52 |
+
#include <ATen/ops/_pad_circular_compositeimplicitautograd_dispatch.h>
|
| 53 |
+
#include <ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h>
|
| 54 |
+
#include <ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h>
|
| 55 |
+
#include <ATen/ops/_propagate_xla_data_compositeimplicitautograd_dispatch.h>
|
| 56 |
+
#include <ATen/ops/_remove_batch_dim_compositeimplicitautograd_dispatch.h>
|
| 57 |
+
#include <ATen/ops/_reshape_from_tensor_compositeimplicitautograd_dispatch.h>
|
| 58 |
+
#include <ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h>
|
| 59 |
+
#include <ATen/ops/_saturate_weight_to_fp16_compositeimplicitautograd_dispatch.h>
|
| 60 |
+
#include <ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h>
|
| 61 |
+
#include <ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h>
|
| 62 |
+
#include <ATen/ops/_sobol_engine_draw_compositeimplicitautograd_dispatch.h>
|
| 63 |
+
#include <ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h>
|
| 64 |
+
#include <ATen/ops/_sobol_engine_initialize_state_compositeimplicitautograd_dispatch.h>
|
| 65 |
+
#include <ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h>
|
| 66 |
+
#include <ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
|
| 67 |
+
#include <ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
|
| 68 |
+
#include <ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h>
|
| 69 |
+
#include <ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h>
|
| 70 |
+
#include <ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
|
| 71 |
+
#include <ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
|
| 72 |
+
#include <ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h>
|
| 73 |
+
#include <ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h>
|
| 74 |
+
#include <ATen/ops/_sparse_softmax_compositeimplicitautograd_dispatch.h>
|
| 75 |
+
#include <ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h>
|
| 76 |
+
#include <ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h>
|
| 77 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h>
|
| 78 |
+
#include <ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h>
|
| 79 |
+
#include <ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h>
|
| 80 |
+
#include <ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h>
|
| 81 |
+
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h>
|
| 82 |
+
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
|
| 83 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
|
| 84 |
+
#include <ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h>
|
| 85 |
+
#include <ATen/ops/_unpack_dual_compositeimplicitautograd_dispatch.h>
|
| 86 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h>
|
| 87 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h>
|
| 88 |
+
#include <ATen/ops/_upsample_nearest_exact1d_compositeimplicitautograd_dispatch.h>
|
| 89 |
+
#include <ATen/ops/_upsample_nearest_exact2d_compositeimplicitautograd_dispatch.h>
|
| 90 |
+
#include <ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h>
|
| 91 |
+
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h>
|
| 92 |
+
#include <ATen/ops/_validate_sparse_bsc_tensor_args_compositeimplicitautograd_dispatch.h>
|
| 93 |
+
#include <ATen/ops/_validate_sparse_bsr_tensor_args_compositeimplicitautograd_dispatch.h>
|
| 94 |
+
#include <ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h>
|
| 95 |
+
#include <ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h>
|
| 96 |
+
#include <ATen/ops/_validate_sparse_csc_tensor_args_compositeimplicitautograd_dispatch.h>
|
| 97 |
+
#include <ATen/ops/_validate_sparse_csr_tensor_args_compositeimplicitautograd_dispatch.h>
|
| 98 |
+
#include <ATen/ops/_version_compositeimplicitautograd_dispatch.h>
|
| 99 |
+
#include <ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h>
|
| 100 |
+
#include <ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h>
|
| 101 |
+
#include <ATen/ops/_wrapped_linear_prepack_compositeimplicitautograd_dispatch.h>
|
| 102 |
+
#include <ATen/ops/_wrapped_quantized_linear_prepacked_compositeimplicitautograd_dispatch.h>
|
| 103 |
+
#include <ATen/ops/absolute_compositeimplicitautograd_dispatch.h>
|
| 104 |
+
#include <ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h>
|
| 105 |
+
#include <ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h>
|
| 106 |
+
#include <ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h>
|
| 107 |
+
#include <ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h>
|
| 108 |
+
#include <ATen/ops/adjoint_compositeimplicitautograd_dispatch.h>
|
| 109 |
+
#include <ATen/ops/affine_grid_generator_backward_compositeimplicitautograd_dispatch.h>
|
| 110 |
+
#include <ATen/ops/align_as_compositeimplicitautograd_dispatch.h>
|
| 111 |
+
#include <ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h>
|
| 112 |
+
#include <ATen/ops/align_to_compositeimplicitautograd_dispatch.h>
|
| 113 |
+
#include <ATen/ops/all_compositeimplicitautograd_dispatch.h>
|
| 114 |
+
#include <ATen/ops/alpha_dropout_compositeimplicitautograd_dispatch.h>
|
| 115 |
+
#include <ATen/ops/and_compositeimplicitautograd_dispatch.h>
|
| 116 |
+
#include <ATen/ops/any_compositeimplicitautograd_dispatch.h>
|
| 117 |
+
#include <ATen/ops/arccos_compositeimplicitautograd_dispatch.h>
|
| 118 |
+
#include <ATen/ops/arccosh_compositeimplicitautograd_dispatch.h>
|
| 119 |
+
#include <ATen/ops/arcsin_compositeimplicitautograd_dispatch.h>
|
| 120 |
+
#include <ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h>
|
| 121 |
+
#include <ATen/ops/arctan_compositeimplicitautograd_dispatch.h>
|
| 122 |
+
#include <ATen/ops/arctan2_compositeimplicitautograd_dispatch.h>
|
| 123 |
+
#include <ATen/ops/arctanh_compositeimplicitautograd_dispatch.h>
|
| 124 |
+
#include <ATen/ops/argsort_compositeimplicitautograd_dispatch.h>
|
| 125 |
+
#include <ATen/ops/argwhere_compositeimplicitautograd_dispatch.h>
|
| 126 |
+
#include <ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h>
|
| 127 |
+
#include <ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h>
|
| 128 |
+
#include <ATen/ops/atleast_3d_compositeimplicitautograd_dispatch.h>
|
| 129 |
+
#include <ATen/ops/avg_pool1d_compositeimplicitautograd_dispatch.h>
|
| 130 |
+
#include <ATen/ops/batch_norm_compositeimplicitautograd_dispatch.h>
|
| 131 |
+
#include <ATen/ops/bilinear_compositeimplicitautograd_dispatch.h>
|
| 132 |
+
#include <ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h>
|
| 133 |
+
#include <ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h>
|
| 134 |
+
#include <ATen/ops/can_cast_compositeimplicitautograd_dispatch.h>
|
| 135 |
+
#include <ATen/ops/cartesian_prod_compositeimplicitautograd_dispatch.h>
|
| 136 |
+
#include <ATen/ops/cat_compositeimplicitautograd_dispatch.h>
|
| 137 |
+
#include <ATen/ops/cdist_compositeimplicitautograd_dispatch.h>
|
| 138 |
+
#include <ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h>
|
| 139 |
+
#include <ATen/ops/chalf_compositeimplicitautograd_dispatch.h>
|
| 140 |
+
#include <ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h>
|
| 141 |
+
#include <ATen/ops/chunk_compositeimplicitautograd_dispatch.h>
|
| 142 |
+
#include <ATen/ops/clip_compositeimplicitautograd_dispatch.h>
|
| 143 |
+
#include <ATen/ops/coalesce_compositeimplicitautograd_dispatch.h>
|
| 144 |
+
#include <ATen/ops/column_stack_compositeimplicitautograd_dispatch.h>
|
| 145 |
+
#include <ATen/ops/combinations_compositeimplicitautograd_dispatch.h>
|
| 146 |
+
#include <ATen/ops/concat_compositeimplicitautograd_dispatch.h>
|
| 147 |
+
#include <ATen/ops/concatenate_compositeimplicitautograd_dispatch.h>
|
| 148 |
+
#include <ATen/ops/conj_compositeimplicitautograd_dispatch.h>
|
| 149 |
+
#include <ATen/ops/conj_physical_compositeimplicitautograd_dispatch.h>
|
| 150 |
+
#include <ATen/ops/contiguous_compositeimplicitautograd_dispatch.h>
|
| 151 |
+
#include <ATen/ops/conv1d_compositeimplicitautograd_dispatch.h>
|
| 152 |
+
#include <ATen/ops/conv2d_compositeimplicitautograd_dispatch.h>
|
| 153 |
+
#include <ATen/ops/conv3d_compositeimplicitautograd_dispatch.h>
|
| 154 |
+
#include <ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h>
|
| 155 |
+
#include <ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h>
|
| 156 |
+
#include <ATen/ops/conv_transpose2d_compositeimplicitautograd_dispatch.h>
|
| 157 |
+
#include <ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h>
|
| 158 |
+
#include <ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h>
|
| 159 |
+
#include <ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h>
|
| 160 |
+
#include <ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h>
|
| 161 |
+
#include <ATen/ops/cov_compositeimplicitautograd_dispatch.h>
|
| 162 |
+
#include <ATen/ops/cross_compositeimplicitautograd_dispatch.h>
|
| 163 |
+
#include <ATen/ops/cross_entropy_loss_compositeimplicitautograd_dispatch.h>
|
| 164 |
+
#include <ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h>
|
| 165 |
+
#include <ATen/ops/cudnn_is_acceptable_compositeimplicitautograd_dispatch.h>
|
| 166 |
+
#include <ATen/ops/cummax_compositeimplicitautograd_dispatch.h>
|
| 167 |
+
#include <ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h>
|
| 168 |
+
#include <ATen/ops/cummin_compositeimplicitautograd_dispatch.h>
|
| 169 |
+
#include <ATen/ops/cumprod_compositeimplicitautograd_dispatch.h>
|
| 170 |
+
#include <ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h>
|
| 171 |
+
#include <ATen/ops/cumsum_compositeimplicitautograd_dispatch.h>
|
| 172 |
+
#include <ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h>
|
| 173 |
+
#include <ATen/ops/data_compositeimplicitautograd_dispatch.h>
|
| 174 |
+
#include <ATen/ops/det_compositeimplicitautograd_dispatch.h>
|
| 175 |
+
#include <ATen/ops/diag_compositeimplicitautograd_dispatch.h>
|
| 176 |
+
#include <ATen/ops/diagflat_compositeimplicitautograd_dispatch.h>
|
| 177 |
+
#include <ATen/ops/diagonal_compositeimplicitautograd_dispatch.h>
|
| 178 |
+
#include <ATen/ops/diff_compositeimplicitautograd_dispatch.h>
|
| 179 |
+
#include <ATen/ops/divide_compositeimplicitautograd_dispatch.h>
|
| 180 |
+
#include <ATen/ops/dropout_compositeimplicitautograd_dispatch.h>
|
| 181 |
+
#include <ATen/ops/dsplit_compositeimplicitautograd_dispatch.h>
|
| 182 |
+
#include <ATen/ops/dstack_compositeimplicitautograd_dispatch.h>
|
| 183 |
+
#include <ATen/ops/einsum_compositeimplicitautograd_dispatch.h>
|
| 184 |
+
#include <ATen/ops/embedding_backward_compositeimplicitautograd_dispatch.h>
|
| 185 |
+
#include <ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h>
|
| 186 |
+
#include <ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h>
|
| 187 |
+
#include <ATen/ops/empty_compositeimplicitautograd_dispatch.h>
|
| 188 |
+
#include <ATen/ops/expand_as_compositeimplicitautograd_dispatch.h>
|
| 189 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h>
|
| 190 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
|
| 191 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h>
|
| 192 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
|
| 193 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_compositeimplicitautograd_dispatch.h>
|
| 194 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
|
| 195 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h>
|
| 196 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
|
| 197 |
+
#include <ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h>
|
| 198 |
+
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_compositeimplicitautograd_dispatch.h>
|
| 199 |
+
#include <ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h>
|
| 200 |
+
#include <ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h>
|
| 201 |
+
#include <ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h>
|
| 202 |
+
#include <ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h>
|
| 203 |
+
#include <ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h>
|
| 204 |
+
#include <ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h>
|
| 205 |
+
#include <ATen/ops/fft_fftshift_compositeimplicitautograd_dispatch.h>
|
| 206 |
+
#include <ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h>
|
| 207 |
+
#include <ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h>
|
| 208 |
+
#include <ATen/ops/fft_hfftn_compositeimplicitautograd_dispatch.h>
|
| 209 |
+
#include <ATen/ops/fft_ifft_compositeimplicitautograd_dispatch.h>
|
| 210 |
+
#include <ATen/ops/fft_ifft2_compositeimplicitautograd_dispatch.h>
|
| 211 |
+
#include <ATen/ops/fft_ifftn_compositeimplicitautograd_dispatch.h>
|
| 212 |
+
#include <ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h>
|
| 213 |
+
#include <ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h>
|
| 214 |
+
#include <ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h>
|
| 215 |
+
#include <ATen/ops/fft_ihfftn_compositeimplicitautograd_dispatch.h>
|
| 216 |
+
#include <ATen/ops/fft_irfft_compositeimplicitautograd_dispatch.h>
|
| 217 |
+
#include <ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h>
|
| 218 |
+
#include <ATen/ops/fft_irfftn_compositeimplicitautograd_dispatch.h>
|
| 219 |
+
#include <ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h>
|
| 220 |
+
#include <ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h>
|
| 221 |
+
#include <ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h>
|
| 222 |
+
#include <ATen/ops/fill_diagonal_compositeimplicitautograd_dispatch.h>
|
| 223 |
+
#include <ATen/ops/fix_compositeimplicitautograd_dispatch.h>
|
| 224 |
+
#include <ATen/ops/flatten_compositeimplicitautograd_dispatch.h>
|
| 225 |
+
#include <ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h>
|
| 226 |
+
#include <ATen/ops/fliplr_compositeimplicitautograd_dispatch.h>
|
| 227 |
+
#include <ATen/ops/flipud_compositeimplicitautograd_dispatch.h>
|
| 228 |
+
#include <ATen/ops/float_power_compositeimplicitautograd_dispatch.h>
|
| 229 |
+
#include <ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h>
|
| 230 |
+
#include <ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h>
|
| 231 |
+
#include <ATen/ops/gather_compositeimplicitautograd_dispatch.h>
|
| 232 |
+
#include <ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h>
|
| 233 |
+
#include <ATen/ops/ger_compositeimplicitautograd_dispatch.h>
|
| 234 |
+
#include <ATen/ops/gradient_compositeimplicitautograd_dispatch.h>
|
| 235 |
+
#include <ATen/ops/greater_compositeimplicitautograd_dispatch.h>
|
| 236 |
+
#include <ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h>
|
| 237 |
+
#include <ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h>
|
| 238 |
+
#include <ATen/ops/group_norm_compositeimplicitautograd_dispatch.h>
|
| 239 |
+
#include <ATen/ops/gru_compositeimplicitautograd_dispatch.h>
|
| 240 |
+
#include <ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h>
|
| 241 |
+
#include <ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h>
|
| 242 |
+
#include <ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h>
|
| 243 |
+
#include <ATen/ops/hsplit_compositeimplicitautograd_dispatch.h>
|
| 244 |
+
#include <ATen/ops/hstack_compositeimplicitautograd_dispatch.h>
|
| 245 |
+
#include <ATen/ops/imag_compositeimplicitautograd_dispatch.h>
|
| 246 |
+
#include <ATen/ops/index_add_compositeimplicitautograd_dispatch.h>
|
| 247 |
+
#include <ATen/ops/index_copy_compositeimplicitautograd_dispatch.h>
|
| 248 |
+
#include <ATen/ops/index_fill_compositeimplicitautograd_dispatch.h>
|
| 249 |
+
#include <ATen/ops/index_select_compositeimplicitautograd_dispatch.h>
|
| 250 |
+
#include <ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h>
|
| 251 |
+
#include <ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h>
|
| 252 |
+
#include <ATen/ops/inner_compositeimplicitautograd_dispatch.h>
|
| 253 |
+
#include <ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h>
|
| 254 |
+
#include <ATen/ops/inverse_compositeimplicitautograd_dispatch.h>
|
| 255 |
+
#include <ATen/ops/is_complex_compositeimplicitautograd_dispatch.h>
|
| 256 |
+
#include <ATen/ops/is_conj_compositeimplicitautograd_dispatch.h>
|
| 257 |
+
#include <ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h>
|
| 258 |
+
#include <ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h>
|
| 259 |
+
#include <ATen/ops/is_inference_compositeimplicitautograd_dispatch.h>
|
| 260 |
+
#include <ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h>
|
| 261 |
+
#include <ATen/ops/is_neg_compositeimplicitautograd_dispatch.h>
|
| 262 |
+
#include <ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h>
|
| 263 |
+
#include <ATen/ops/is_signed_compositeimplicitautograd_dispatch.h>
|
| 264 |
+
#include <ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h>
|
| 265 |
+
#include <ATen/ops/isclose_compositeimplicitautograd_dispatch.h>
|
| 266 |
+
#include <ATen/ops/isfinite_compositeimplicitautograd_dispatch.h>
|
| 267 |
+
#include <ATen/ops/isreal_compositeimplicitautograd_dispatch.h>
|
| 268 |
+
#include <ATen/ops/istft_compositeimplicitautograd_dispatch.h>
|
| 269 |
+
#include <ATen/ops/item_compositeimplicitautograd_dispatch.h>
|
| 270 |
+
#include <ATen/ops/kl_div_compositeimplicitautograd_dispatch.h>
|
| 271 |
+
#include <ATen/ops/kron_compositeimplicitautograd_dispatch.h>
|
| 272 |
+
#include <ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h>
|
| 273 |
+
#include <ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h>
|
| 274 |
+
#include <ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h>
|
| 275 |
+
#include <ATen/ops/ldexp_compositeimplicitautograd_dispatch.h>
|
| 276 |
+
#include <ATen/ops/less_compositeimplicitautograd_dispatch.h>
|
| 277 |
+
#include <ATen/ops/less_equal_compositeimplicitautograd_dispatch.h>
|
| 278 |
+
#include <ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h>
|
| 279 |
+
#include <ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h>
|
| 280 |
+
#include <ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h>
|
| 281 |
+
#include <ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h>
|
| 282 |
+
#include <ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h>
|
| 283 |
+
#include <ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h>
|
| 284 |
+
#include <ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h>
|
| 285 |
+
#include <ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h>
|
| 286 |
+
#include <ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h>
|
| 287 |
+
#include <ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h>
|
| 288 |
+
#include <ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h>
|
| 289 |
+
#include <ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h>
|
| 290 |
+
#include <ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h>
|
| 291 |
+
#include <ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h>
|
| 292 |
+
#include <ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h>
|
| 293 |
+
#include <ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h>
|
| 294 |
+
#include <ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h>
|
| 295 |
+
#include <ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h>
|
| 296 |
+
#include <ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h>
|
| 297 |
+
#include <ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h>
|
| 298 |
+
#include <ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h>
|
| 299 |
+
#include <ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h>
|
| 300 |
+
#include <ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h>
|
| 301 |
+
#include <ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h>
|
| 302 |
+
#include <ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h>
|
| 303 |
+
#include <ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h>
|
| 304 |
+
#include <ATen/ops/linear_compositeimplicitautograd_dispatch.h>
|
| 305 |
+
#include <ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h>
|
| 306 |
+
#include <ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h>
|
| 307 |
+
#include <ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h>
|
| 308 |
+
#include <ATen/ops/logdet_compositeimplicitautograd_dispatch.h>
|
| 309 |
+
#include <ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h>
|
| 310 |
+
#include <ATen/ops/lstm_compositeimplicitautograd_dispatch.h>
|
| 311 |
+
#include <ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h>
|
| 312 |
+
#include <ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h>
|
| 313 |
+
#include <ATen/ops/mH_compositeimplicitautograd_dispatch.h>
|
| 314 |
+
#include <ATen/ops/mT_compositeimplicitautograd_dispatch.h>
|
| 315 |
+
#include <ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h>
|
| 316 |
+
#include <ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h>
|
| 317 |
+
#include <ATen/ops/matmul_compositeimplicitautograd_dispatch.h>
|
| 318 |
+
#include <ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h>
|
| 319 |
+
#include <ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h>
|
| 320 |
+
#include <ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h>
|
| 321 |
+
#include <ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h>
|
| 322 |
+
#include <ATen/ops/max_compositeimplicitautograd_dispatch.h>
|
| 323 |
+
#include <ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h>
|
| 324 |
+
#include <ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h>
|
| 325 |
+
#include <ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h>
|
| 326 |
+
#include <ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h>
|
| 327 |
+
#include <ATen/ops/mean_compositeimplicitautograd_dispatch.h>
|
| 328 |
+
#include <ATen/ops/median_compositeimplicitautograd_dispatch.h>
|
| 329 |
+
#include <ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h>
|
| 330 |
+
#include <ATen/ops/min_compositeimplicitautograd_dispatch.h>
|
| 331 |
+
#include <ATen/ops/mish_backward_compositeimplicitautograd_dispatch.h>
|
| 332 |
+
#include <ATen/ops/mode_compositeimplicitautograd_dispatch.h>
|
| 333 |
+
#include <ATen/ops/moveaxis_compositeimplicitautograd_dispatch.h>
|
| 334 |
+
#include <ATen/ops/movedim_compositeimplicitautograd_dispatch.h>
|
| 335 |
+
#include <ATen/ops/msort_compositeimplicitautograd_dispatch.h>
|
| 336 |
+
#include <ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h>
|
| 337 |
+
#include <ATen/ops/multiply_compositeimplicitautograd_dispatch.h>
|
| 338 |
+
#include <ATen/ops/nanmean_compositeimplicitautograd_dispatch.h>
|
| 339 |
+
#include <ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h>
|
| 340 |
+
#include <ATen/ops/nanquantile_compositeimplicitautograd_dispatch.h>
|
| 341 |
+
#include <ATen/ops/narrow_compositeimplicitautograd_dispatch.h>
|
| 342 |
+
#include <ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h>
|
| 343 |
+
#include <ATen/ops/negative_compositeimplicitautograd_dispatch.h>
|
| 344 |
+
#include <ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h>
|
| 345 |
+
#include <ATen/ops/nll_loss_compositeimplicitautograd_dispatch.h>
|
| 346 |
+
#include <ATen/ops/nll_loss2d_compositeimplicitautograd_dispatch.h>
|
| 347 |
+
#include <ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h>
|
| 348 |
+
#include <ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h>
|
| 349 |
+
#include <ATen/ops/norm_compositeimplicitautograd_dispatch.h>
|
| 350 |
+
#include <ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h>
|
| 351 |
+
#include <ATen/ops/not_equal_compositeimplicitautograd_dispatch.h>
|
| 352 |
+
#include <ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h>
|
| 353 |
+
#include <ATen/ops/numpy_T_compositeimplicitautograd_dispatch.h>
|
| 354 |
+
#include <ATen/ops/one_hot_compositeimplicitautograd_dispatch.h>
|
| 355 |
+
#include <ATen/ops/or_compositeimplicitautograd_dispatch.h>
|
| 356 |
+
#include <ATen/ops/orgqr_compositeimplicitautograd_dispatch.h>
|
| 357 |
+
#include <ATen/ops/outer_compositeimplicitautograd_dispatch.h>
|
| 358 |
+
#include <ATen/ops/output_nr_compositeimplicitautograd_dispatch.h>
|
| 359 |
+
#include <ATen/ops/pad_compositeimplicitautograd_dispatch.h>
|
| 360 |
+
#include <ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h>
|
| 361 |
+
#include <ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h>
|
| 362 |
+
#include <ATen/ops/pdist_compositeimplicitautograd_dispatch.h>
|
| 363 |
+
#include <ATen/ops/pin_memory_compositeimplicitautograd_dispatch.h>
|
| 364 |
+
#include <ATen/ops/pinverse_compositeimplicitautograd_dispatch.h>
|
| 365 |
+
#include <ATen/ops/poisson_nll_loss_compositeimplicitautograd_dispatch.h>
|
| 366 |
+
#include <ATen/ops/positive_compositeimplicitautograd_dispatch.h>
|
| 367 |
+
#include <ATen/ops/prelu_compositeimplicitautograd_dispatch.h>
|
| 368 |
+
#include <ATen/ops/prod_compositeimplicitautograd_dispatch.h>
|
| 369 |
+
#include <ATen/ops/promote_types_compositeimplicitautograd_dispatch.h>
|
| 370 |
+
#include <ATen/ops/qr_compositeimplicitautograd_dispatch.h>
|
| 371 |
+
#include <ATen/ops/quantile_compositeimplicitautograd_dispatch.h>
|
| 372 |
+
#include <ATen/ops/quantized_gru_cell_compositeimplicitautograd_dispatch.h>
|
| 373 |
+
#include <ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h>
|
| 374 |
+
#include <ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h>
|
| 375 |
+
#include <ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
|
| 376 |
+
#include <ATen/ops/rand_compositeimplicitautograd_dispatch.h>
|
| 377 |
+
#include <ATen/ops/randn_compositeimplicitautograd_dispatch.h>
|
| 378 |
+
#include <ATen/ops/ravel_compositeimplicitautograd_dispatch.h>
|
| 379 |
+
#include <ATen/ops/real_compositeimplicitautograd_dispatch.h>
|
| 380 |
+
#include <ATen/ops/refine_names_compositeimplicitautograd_dispatch.h>
|
| 381 |
+
#include <ATen/ops/relu6_compositeimplicitautograd_dispatch.h>
|
| 382 |
+
#include <ATen/ops/rename_compositeimplicitautograd_dispatch.h>
|
| 383 |
+
#include <ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h>
|
| 384 |
+
#include <ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h>
|
| 385 |
+
#include <ATen/ops/reshape_compositeimplicitautograd_dispatch.h>
|
| 386 |
+
#include <ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h>
|
| 387 |
+
#include <ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h>
|
| 388 |
+
#include <ATen/ops/resolve_neg_compositeimplicitautograd_dispatch.h>
|
| 389 |
+
#include <ATen/ops/result_type_compositeimplicitautograd_dispatch.h>
|
| 390 |
+
#include <ATen/ops/retain_grad_compositeimplicitautograd_dispatch.h>
|
| 391 |
+
#include <ATen/ops/retains_grad_compositeimplicitautograd_dispatch.h>
|
| 392 |
+
#include <ATen/ops/rms_norm_compositeimplicitautograd_dispatch.h>
|
| 393 |
+
#include <ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h>
|
| 394 |
+
#include <ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h>
|
| 395 |
+
#include <ATen/ops/rnn_tanh_compositeimplicitautograd_dispatch.h>
|
| 396 |
+
#include <ATen/ops/rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
|
| 397 |
+
#include <ATen/ops/row_stack_compositeimplicitautograd_dispatch.h>
|
| 398 |
+
#include <ATen/ops/rrelu_compositeimplicitautograd_dispatch.h>
|
| 399 |
+
#include <ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h>
|
| 400 |
+
#include <ATen/ops/scatter_compositeimplicitautograd_dispatch.h>
|
| 401 |
+
#include <ATen/ops/scatter_add_compositeimplicitautograd_dispatch.h>
|
| 402 |
+
#include <ATen/ops/select_compositeimplicitautograd_dispatch.h>
|
| 403 |
+
#include <ATen/ops/selu_compositeimplicitautograd_dispatch.h>
|
| 404 |
+
#include <ATen/ops/set_compositeimplicitautograd_dispatch.h>
|
| 405 |
+
#include <ATen/ops/set_data_compositeimplicitautograd_dispatch.h>
|
| 406 |
+
#include <ATen/ops/silu_backward_compositeimplicitautograd_dispatch.h>
|
| 407 |
+
#include <ATen/ops/size_compositeimplicitautograd_dispatch.h>
|
| 408 |
+
#include <ATen/ops/slogdet_compositeimplicitautograd_dispatch.h>
|
| 409 |
+
#include <ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h>
|
| 410 |
+
#include <ATen/ops/smm_compositeimplicitautograd_dispatch.h>
|
| 411 |
+
#include <ATen/ops/softmax_compositeimplicitautograd_dispatch.h>
|
| 412 |
+
#include <ATen/ops/sort_compositeimplicitautograd_dispatch.h>
|
| 413 |
+
#include <ATen/ops/sparse_bsc_tensor_compositeimplicitautograd_dispatch.h>
|
| 414 |
+
#include <ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h>
|
| 415 |
+
#include <ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h>
|
| 416 |
+
#include <ATen/ops/sparse_csc_tensor_compositeimplicitautograd_dispatch.h>
|
| 417 |
+
#include <ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h>
|
| 418 |
+
#include <ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h>
|
| 419 |
+
#include <ATen/ops/special_erf_compositeimplicitautograd_dispatch.h>
|
| 420 |
+
#include <ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h>
|
| 421 |
+
#include <ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h>
|
| 422 |
+
#include <ATen/ops/special_exp2_compositeimplicitautograd_dispatch.h>
|
| 423 |
+
#include <ATen/ops/special_expit_compositeimplicitautograd_dispatch.h>
|
| 424 |
+
#include <ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h>
|
| 425 |
+
#include <ATen/ops/special_gammainc_compositeimplicitautograd_dispatch.h>
|
| 426 |
+
#include <ATen/ops/special_gammaincc_compositeimplicitautograd_dispatch.h>
|
| 427 |
+
#include <ATen/ops/special_gammaln_compositeimplicitautograd_dispatch.h>
|
| 428 |
+
#include <ATen/ops/special_i0_compositeimplicitautograd_dispatch.h>
|
| 429 |
+
#include <ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h>
|
| 430 |
+
#include <ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h>
|
| 431 |
+
#include <ATen/ops/special_logit_compositeimplicitautograd_dispatch.h>
|
| 432 |
+
#include <ATen/ops/special_logsumexp_compositeimplicitautograd_dispatch.h>
|
| 433 |
+
#include <ATen/ops/special_multigammaln_compositeimplicitautograd_dispatch.h>
|
| 434 |
+
#include <ATen/ops/special_ndtr_compositeimplicitautograd_dispatch.h>
|
| 435 |
+
#include <ATen/ops/special_polygamma_compositeimplicitautograd_dispatch.h>
|
| 436 |
+
#include <ATen/ops/special_psi_compositeimplicitautograd_dispatch.h>
|
| 437 |
+
#include <ATen/ops/special_round_compositeimplicitautograd_dispatch.h>
|
| 438 |
+
#include <ATen/ops/special_sinc_compositeimplicitautograd_dispatch.h>
|
| 439 |
+
#include <ATen/ops/special_softmax_compositeimplicitautograd_dispatch.h>
|
| 440 |
+
#include <ATen/ops/special_xlogy_compositeimplicitautograd_dispatch.h>
|
| 441 |
+
#include <ATen/ops/split_compositeimplicitautograd_dispatch.h>
|
| 442 |
+
#include <ATen/ops/square_compositeimplicitautograd_dispatch.h>
|
| 443 |
+
#include <ATen/ops/squeeze_compositeimplicitautograd_dispatch.h>
|
| 444 |
+
#include <ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h>
|
| 445 |
+
#include <ATen/ops/std_compositeimplicitautograd_dispatch.h>
|
| 446 |
+
#include <ATen/ops/std_mean_compositeimplicitautograd_dispatch.h>
|
| 447 |
+
#include <ATen/ops/stft_compositeimplicitautograd_dispatch.h>
|
| 448 |
+
#include <ATen/ops/stride_compositeimplicitautograd_dispatch.h>
|
| 449 |
+
#include <ATen/ops/subtract_compositeimplicitautograd_dispatch.h>
|
| 450 |
+
#include <ATen/ops/sum_compositeimplicitautograd_dispatch.h>
|
| 451 |
+
#include <ATen/ops/sum_to_size_compositeimplicitautograd_dispatch.h>
|
| 452 |
+
#include <ATen/ops/svd_compositeimplicitautograd_dispatch.h>
|
| 453 |
+
#include <ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h>
|
| 454 |
+
#include <ATen/ops/swapdims_compositeimplicitautograd_dispatch.h>
|
| 455 |
+
#include <ATen/ops/sym_numel_compositeimplicitautograd_dispatch.h>
|
| 456 |
+
#include <ATen/ops/sym_size_compositeimplicitautograd_dispatch.h>
|
| 457 |
+
#include <ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h>
|
| 458 |
+
#include <ATen/ops/sym_stride_compositeimplicitautograd_dispatch.h>
|
| 459 |
+
#include <ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h>
|
| 460 |
+
#include <ATen/ops/tensor_split_compositeimplicitautograd_dispatch.h>
|
| 461 |
+
#include <ATen/ops/tensordot_compositeimplicitautograd_dispatch.h>
|
| 462 |
+
#include <ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h>
|
| 463 |
+
#include <ATen/ops/tile_compositeimplicitautograd_dispatch.h>
|
| 464 |
+
#include <ATen/ops/to_compositeimplicitautograd_dispatch.h>
|
| 465 |
+
#include <ATen/ops/to_dense_compositeimplicitautograd_dispatch.h>
|
| 466 |
+
#include <ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h>
|
| 467 |
+
#include <ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h>
|
| 468 |
+
#include <ATen/ops/to_sparse_compositeimplicitautograd_dispatch.h>
|
| 469 |
+
#include <ATen/ops/to_sparse_bsc_compositeimplicitautograd_dispatch.h>
|
| 470 |
+
#include <ATen/ops/to_sparse_bsr_compositeimplicitautograd_dispatch.h>
|
| 471 |
+
#include <ATen/ops/to_sparse_csc_compositeimplicitautograd_dispatch.h>
|
| 472 |
+
#include <ATen/ops/to_sparse_csr_compositeimplicitautograd_dispatch.h>
|
| 473 |
+
#include <ATen/ops/trace_backward_compositeimplicitautograd_dispatch.h>
|
| 474 |
+
#include <ATen/ops/transpose_compositeimplicitautograd_dispatch.h>
|
| 475 |
+
#include <ATen/ops/trapezoid_compositeimplicitautograd_dispatch.h>
|
| 476 |
+
#include <ATen/ops/trapz_compositeimplicitautograd_dispatch.h>
|
| 477 |
+
#include <ATen/ops/triplet_margin_loss_compositeimplicitautograd_dispatch.h>
|
| 478 |
+
#include <ATen/ops/true_divide_compositeimplicitautograd_dispatch.h>
|
| 479 |
+
#include <ATen/ops/type_as_compositeimplicitautograd_dispatch.h>
|
| 480 |
+
#include <ATen/ops/unbind_compositeimplicitautograd_dispatch.h>
|
| 481 |
+
#include <ATen/ops/unflatten_compositeimplicitautograd_dispatch.h>
|
| 482 |
+
#include <ATen/ops/unflatten_dense_tensors_compositeimplicitautograd_dispatch.h>
|
| 483 |
+
#include <ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h>
|
| 484 |
+
#include <ATen/ops/upsample_bicubic2d_compositeimplicitautograd_dispatch.h>
|
| 485 |
+
#include <ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h>
|
| 486 |
+
#include <ATen/ops/upsample_linear1d_compositeimplicitautograd_dispatch.h>
|
| 487 |
+
#include <ATen/ops/upsample_nearest1d_compositeimplicitautograd_dispatch.h>
|
| 488 |
+
#include <ATen/ops/upsample_nearest2d_compositeimplicitautograd_dispatch.h>
|
| 489 |
+
#include <ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h>
|
| 490 |
+
#include <ATen/ops/upsample_trilinear3d_compositeimplicitautograd_dispatch.h>
|
| 491 |
+
#include <ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h>
|
| 492 |
+
#include <ATen/ops/vander_compositeimplicitautograd_dispatch.h>
|
| 493 |
+
#include <ATen/ops/var_compositeimplicitautograd_dispatch.h>
|
| 494 |
+
#include <ATen/ops/var_mean_compositeimplicitautograd_dispatch.h>
|
| 495 |
+
#include <ATen/ops/view_as_compositeimplicitautograd_dispatch.h>
|
| 496 |
+
#include <ATen/ops/vsplit_compositeimplicitautograd_dispatch.h>
|
| 497 |
+
#include <ATen/ops/vstack_compositeimplicitautograd_dispatch.h>
|
| 498 |
+
#include <ATen/ops/where_compositeimplicitautograd_dispatch.h>
|
| 499 |
+
#include <ATen/ops/xor_compositeimplicitautograd_dispatch.h>
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 12 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
| 13 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
| 14 |
+
Consider including a specific operator from \
|
| 15 |
+
<ATen/ops/{my_operator}_compositeimplicitautogradnestedtensor_dispatch.h>. \
|
| 16 |
+
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <ATen/ops/randn_like_compositeimplicitautogradnestedtensor_dispatch.h>
|
| 20 |
+
#include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>
|
| 21 |
+
#include <ATen/ops/reshape_as_compositeimplicitautogradnestedtensor_dispatch.h>
|
| 22 |
+
#include <ATen/ops/zeros_like_compositeimplicitautogradnestedtensor_dispatch.h>
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Context.h
ADDED
|
@@ -0,0 +1,610 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/BlasBackend.h>
|
| 4 |
+
#include <ATen/CPUGeneratorImpl.h>
|
| 5 |
+
#include <ATen/DeviceAccelerator.h>
|
| 6 |
+
#include <ATen/LinalgBackend.h>
|
| 7 |
+
#include <ATen/core/ATenGeneral.h>
|
| 8 |
+
#include <ATen/core/DeprecatedTypeProperties.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/LegacyTypeDispatch.h>
|
| 11 |
+
#include <ATen/detail/AcceleratorHooksInterface.h>
|
| 12 |
+
#include <ATen/detail/CUDAHooksInterface.h>
|
| 13 |
+
#include <ATen/detail/HIPHooksInterface.h>
|
| 14 |
+
#include <ATen/detail/IPUHooksInterface.h>
|
| 15 |
+
#include <ATen/detail/MAIAHooksInterface.h>
|
| 16 |
+
#include <ATen/detail/MPSHooksInterface.h>
|
| 17 |
+
#include <ATen/detail/MTIAHooksInterface.h>
|
| 18 |
+
#include <ATen/detail/PrivateUse1HooksInterface.h>
|
| 19 |
+
#include <ATen/detail/XPUHooksInterface.h>
|
| 20 |
+
#include <c10/core/QEngine.h>
|
| 21 |
+
#include <c10/core/impl/DeviceGuardImplInterface.h>
|
| 22 |
+
#include <c10/util/CallOnce.h>
|
| 23 |
+
#include <c10/util/Exception.h>
|
| 24 |
+
#include <c10/util/env.h>
|
| 25 |
+
#include <c10/util/irange.h>
|
| 26 |
+
|
| 27 |
+
#include <cstdint>
|
| 28 |
+
#include <mutex>
|
| 29 |
+
|
| 30 |
+
namespace at {
|
| 31 |
+
|
| 32 |
+
class Tensor;
|
| 33 |
+
|
| 34 |
+
enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };
|
| 35 |
+
|
| 36 |
+
class TORCH_API Context {
|
| 37 |
+
public:
|
| 38 |
+
Context();
|
| 39 |
+
|
| 40 |
+
const Generator& defaultGenerator(Device device) {
|
| 41 |
+
c10::DeviceType device_type = device.type();
|
| 42 |
+
initCUDAIfNeeded(device_type);
|
| 43 |
+
initHIPIfNeeded(device_type);
|
| 44 |
+
if (device_type == at::kCPU) {
|
| 45 |
+
return at::detail::getDefaultCPUGenerator();
|
| 46 |
+
} else if (device_type == at::kCUDA) {
|
| 47 |
+
return at::detail::getCUDAHooks().getDefaultCUDAGenerator(device.index());
|
| 48 |
+
} else if (device_type == at::kMPS) {
|
| 49 |
+
return at::detail::getMPSHooks().getDefaultMPSGenerator();
|
| 50 |
+
} else if (device_type == at::kXPU) {
|
| 51 |
+
return at::detail::getXPUHooks().getDefaultXPUGenerator(device.index());
|
| 52 |
+
} else if (device_type == at::kIPU) {
|
| 53 |
+
return at::detail::getIPUHooks().getDefaultIPUGenerator(device.index());
|
| 54 |
+
} else if (device_type == at::kPrivateUse1) {
|
| 55 |
+
return at::detail::getPrivateUse1Hooks().getDefaultGenerator(
|
| 56 |
+
device.index());
|
| 57 |
+
} else {
|
| 58 |
+
AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
|
| 59 |
+
}
|
| 60 |
+
}
|
| 61 |
+
const AcceleratorHooksInterface& getAcceleratorHooksInterface(
|
| 62 |
+
std::optional<c10::DeviceType> opt_device_type = std::nullopt) {
|
| 63 |
+
c10::DeviceType device_type = opt_device_type.has_value()
|
| 64 |
+
? opt_device_type.value()
|
| 65 |
+
: at::getAccelerator(true).value();
|
| 66 |
+
if (device_type == at::kCUDA) {
|
| 67 |
+
return at::detail::getCUDAHooks();
|
| 68 |
+
} else if (device_type == at::kXPU) {
|
| 69 |
+
return at::detail::getXPUHooks();
|
| 70 |
+
} else if (device_type == at::kMPS) {
|
| 71 |
+
return at::detail::getMPSHooks();
|
| 72 |
+
} else if (device_type == at::kPrivateUse1) {
|
| 73 |
+
return at::detail::getPrivateUse1Hooks();
|
| 74 |
+
} else if (device_type == at::kMTIA) {
|
| 75 |
+
return at::detail::getMTIAHooks();
|
| 76 |
+
} else if (device_type == at::kHIP) {
|
| 77 |
+
return at::detail::getHIPHooks();
|
| 78 |
+
} else {
|
| 79 |
+
AT_ERROR(
|
| 80 |
+
c10::DeviceTypeName(device_type), " device type not an accelerator.");
|
| 81 |
+
}
|
| 82 |
+
}
|
| 83 |
+
Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
|
| 84 |
+
initCUDAIfNeeded(device_type);
|
| 85 |
+
initHIPIfNeeded(device_type);
|
| 86 |
+
initXPUIfNeeded(device_type);
|
| 87 |
+
if (device_type == at::kCPU) {
|
| 88 |
+
return c10::DeviceType::CPU;
|
| 89 |
+
} else if (device_type == at::kCUDA) {
|
| 90 |
+
return at::detail::getCUDAHooks().getDeviceFromPtr(data);
|
| 91 |
+
} else if (device_type == at::kXPU) {
|
| 92 |
+
return at::detail::getXPUHooks().getDeviceFromPtr(data);
|
| 93 |
+
} else if (device_type == at::kPrivateUse1) {
|
| 94 |
+
return at::detail::getPrivateUse1Hooks().getDeviceFromPtr(data);
|
| 95 |
+
} else {
|
| 96 |
+
AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
|
| 97 |
+
}
|
| 98 |
+
}
|
| 99 |
+
bool isPinnedPtr(
|
| 100 |
+
const void* data,
|
| 101 |
+
std::optional<c10::DeviceType> device_type = std::nullopt) {
|
| 102 |
+
auto opt_device_type =
|
| 103 |
+
device_type.has_value() ? device_type : at::getAccelerator();
|
| 104 |
+
if (!opt_device_type.has_value() || // there is no accelerator
|
| 105 |
+
!at::isAccelerator(
|
| 106 |
+
opt_device_type.value())) { // passed device not an accelerator
|
| 107 |
+
return false;
|
| 108 |
+
}
|
| 109 |
+
return getAcceleratorHooksInterface(opt_device_type.value())
|
| 110 |
+
.isPinnedPtr(data);
|
| 111 |
+
}
|
| 112 |
+
Allocator* getPinnedMemoryAllocator(
|
| 113 |
+
std::optional<c10::DeviceType> device_type = std::nullopt) {
|
| 114 |
+
return getAcceleratorHooksInterface(device_type).getPinnedMemoryAllocator();
|
| 115 |
+
}
|
| 116 |
+
static bool hasOpenMP();
|
| 117 |
+
static bool hasMKL();
|
| 118 |
+
static bool hasLAPACK();
|
| 119 |
+
static bool hasMKLDNN();
|
| 120 |
+
static bool hasMAGMA() {
|
| 121 |
+
return detail::getCUDAHooks().hasMAGMA();
|
| 122 |
+
}
|
| 123 |
+
static bool hasCUDA() {
|
| 124 |
+
return detail::getCUDAHooks().hasCUDA();
|
| 125 |
+
}
|
| 126 |
+
static bool hasMTIA() {
|
| 127 |
+
return detail::getMTIAHooks().hasMTIA();
|
| 128 |
+
}
|
| 129 |
+
static bool hasCUDART() {
|
| 130 |
+
return detail::getCUDAHooks().hasCUDART();
|
| 131 |
+
}
|
| 132 |
+
static long versionCUDART() {
|
| 133 |
+
return detail::getCUDAHooks().versionCUDART();
|
| 134 |
+
}
|
| 135 |
+
static bool hasCuDNN() {
|
| 136 |
+
return detail::getCUDAHooks().hasCuDNN();
|
| 137 |
+
}
|
| 138 |
+
static long versionCuDNN() {
|
| 139 |
+
return detail::getCUDAHooks().versionCuDNN();
|
| 140 |
+
}
|
| 141 |
+
static bool hasCuSOLVER() {
|
| 142 |
+
return detail::getCUDAHooks().hasCuSOLVER();
|
| 143 |
+
}
|
| 144 |
+
static bool hasCuBLASLt() {
|
| 145 |
+
return detail::getCUDAHooks().hasCuBLASLt();
|
| 146 |
+
}
|
| 147 |
+
static bool hasHIP() {
|
| 148 |
+
return detail::getHIPHooks().hasHIP();
|
| 149 |
+
}
|
| 150 |
+
static bool hasMPS() {
|
| 151 |
+
return detail::getMPSHooks().hasMPS();
|
| 152 |
+
}
|
| 153 |
+
static bool hasIPU() {
|
| 154 |
+
return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
|
| 155 |
+
}
|
| 156 |
+
static bool hasXLA() {
|
| 157 |
+
return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
|
| 158 |
+
}
|
| 159 |
+
static bool hasXPU() {
|
| 160 |
+
return detail::getXPUHooks().hasXPU();
|
| 161 |
+
}
|
| 162 |
+
static bool hasLazy() {
|
| 163 |
+
return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
|
| 164 |
+
}
|
| 165 |
+
static bool hasMAIA() {
|
| 166 |
+
return c10::impl::hasDeviceGuardImpl(c10::DeviceType::MAIA);
|
| 167 |
+
}
|
| 168 |
+
// defined in header so that getNonVariableType has ability to inline
|
| 169 |
+
// call_once check. getNonVariableType is called fairly frequently
|
| 170 |
+
void lazyInitCUDA() {
|
| 171 |
+
c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); });
|
| 172 |
+
}
|
| 173 |
+
void lazyInitHIP() {
|
| 174 |
+
c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); });
|
| 175 |
+
}
|
| 176 |
+
void lazyInitXPU() {
|
| 177 |
+
c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); });
|
| 178 |
+
}
|
| 179 |
+
void lazyInitMTIA() {
|
| 180 |
+
c10::call_once(th_mtia_init, [&] { detail::getMTIAHooks().initMTIA(); });
|
| 181 |
+
}
|
| 182 |
+
void lazyInitPrivateUse1() {
|
| 183 |
+
c10::call_once(thp_init, [&] {
|
| 184 |
+
if (isPrivateUse1HooksRegistered()) {
|
| 185 |
+
at::detail::getPrivateUse1Hooks().initPrivateUse1();
|
| 186 |
+
}
|
| 187 |
+
});
|
| 188 |
+
}
|
| 189 |
+
static const at::cuda::NVRTC& getNVRTC() {
|
| 190 |
+
return detail::getCUDAHooks().nvrtc();
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
static bool setFlushDenormal(bool on);
|
| 194 |
+
|
| 195 |
+
// NB: This method is *purely* whether or not a user requested
|
| 196 |
+
// that CuDNN was enabled, it doesn't actually say anything about
|
| 197 |
+
// whether or not CuDNN is actually usable. Use cudnn_is_acceptable
|
| 198 |
+
// to test this instead
|
| 199 |
+
bool userEnabledCuDNN() const;
|
| 200 |
+
void setUserEnabledCuDNN(bool e);
|
| 201 |
+
bool userEnabledMkldnn() const;
|
| 202 |
+
void setUserEnabledMkldnn(bool e);
|
| 203 |
+
bool benchmarkCuDNN() const;
|
| 204 |
+
void setBenchmarkCuDNN(bool);
|
| 205 |
+
int benchmarkLimitCuDNN() const;
|
| 206 |
+
void setBenchmarkLimitCuDNN(int);
|
| 207 |
+
bool deterministicCuDNN() const;
|
| 208 |
+
void setDeterministicCuDNN(bool);
|
| 209 |
+
bool deterministicMkldnn() const;
|
| 210 |
+
void setDeterministicMkldnn(bool);
|
| 211 |
+
bool userEnabledNNPACK() const;
|
| 212 |
+
void setUserEnabledNNPACK(bool e);
|
| 213 |
+
|
| 214 |
+
// Note [Disabling Fused SDP Kernels]
|
| 215 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 216 |
+
// Flash and Memory Efficient SDP kernels are enabled by default.
|
| 217 |
+
// However, they can be disabled by setting
|
| 218 |
+
// at::globalContext().setUserEnabledFlashSDP(false) flag.
|
| 219 |
+
// This is useful for debugging purposes. For example, if you want to
|
| 220 |
+
// compare the performance of the flash SDP kernels with the unfused
|
| 221 |
+
// kernel, you can disable the flash SDP kernels. By disabling
|
| 222 |
+
// the math SDP kernel, you can force your code to use flash kernels.
|
| 223 |
+
// The math SDP kernel can be disabled by setting
|
| 224 |
+
// at::globalContext().setUserEnabledMathSDP(false) flag.
|
| 225 |
+
void setSDPUseFlash(bool);
|
| 226 |
+
bool userEnabledFlashSDP() const;
|
| 227 |
+
|
| 228 |
+
void setSDPUseMemEfficient(bool);
|
| 229 |
+
bool userEnabledMemEfficientSDP() const;
|
| 230 |
+
|
| 231 |
+
void setSDPUseMath(bool);
|
| 232 |
+
bool userEnabledMathSDP() const;
|
| 233 |
+
|
| 234 |
+
void setSDPUseCuDNN(bool);
|
| 235 |
+
bool userEnabledCuDNNSDP() const;
|
| 236 |
+
|
| 237 |
+
void setAllowFP16BF16ReductionMathSDP(bool);
|
| 238 |
+
bool allowFP16BF16ReductionMathSDP() const;
|
| 239 |
+
|
| 240 |
+
void setSDPUseOverrideable(bool);
|
| 241 |
+
bool userEnabledOverrideableSDP() const;
|
| 242 |
+
|
| 243 |
+
at::LinalgBackend linalgPreferredBackend() const;
|
| 244 |
+
void setLinalgPreferredBackend(at::LinalgBackend);
|
| 245 |
+
|
| 246 |
+
at::BlasBackend blasPreferredBackend();
|
| 247 |
+
void setBlasPreferredBackend(at::BlasBackend);
|
| 248 |
+
|
| 249 |
+
// Note [Enabling Deterministic Operations]
|
| 250 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 251 |
+
// Operations in PyTorch that normally act nondeterministically, but have an
|
| 252 |
+
// alternate deterministic implementation, should satisfy the following
|
| 253 |
+
// requirements:
|
| 254 |
+
//
|
| 255 |
+
// * Include this comment: "See Note [Enabling Deterministic Operations]"
|
| 256 |
+
//
|
| 257 |
+
// * Check the value of `at::globalContext().deterministicAlgorithms()` to
|
| 258 |
+
// toggle
|
| 259 |
+
// between nondeterministic and deterministic implementations.
|
| 260 |
+
//
|
| 261 |
+
// * Have an entry in the list of PyTorch operations that toggle between
|
| 262 |
+
// nondeterministic
|
| 263 |
+
// and deterministic implementations, in the docstring of
|
| 264 |
+
// `use_deterministic_algorithms()` in torch/__init__.py
|
| 265 |
+
//
|
| 266 |
+
// `example_func()` below shows an example of toggling between
|
| 267 |
+
// nondeterministic and deterministic implementations:
|
| 268 |
+
//
|
| 269 |
+
// void example_func() {
|
| 270 |
+
// // See Note [Enabling Deterministic Operations]
|
| 271 |
+
// if (at::globalContext().deterministicAlgorithms()) {
|
| 272 |
+
// example_func_deterministic();
|
| 273 |
+
// } else {
|
| 274 |
+
// example_func_nondeterministic();
|
| 275 |
+
// }
|
| 276 |
+
// }
|
| 277 |
+
|
| 278 |
+
bool deterministicAlgorithms() const;
|
| 279 |
+
bool deterministicAlgorithmsWarnOnly() const;
|
| 280 |
+
void setDeterministicAlgorithms(bool, bool);
|
| 281 |
+
bool deterministicFillUninitializedMemory() const;
|
| 282 |
+
void setDeterministicFillUninitializedMemory(bool);
|
| 283 |
+
|
| 284 |
+
// Note [Writing Nondeterministic Operations]
|
| 285 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 286 |
+
// Operations in PyTorch that act nondeterministically and do not have an
|
| 287 |
+
// alternate deterministic implementation should satisfy the following
|
| 288 |
+
// requirements:
|
| 289 |
+
//
|
| 290 |
+
// * Include this comment: "See Note [Writing Nondeterministic Operations]"
|
| 291 |
+
//
|
| 292 |
+
// * Include a comment explaining why the operation is nondeterministic.
|
| 293 |
+
//
|
| 294 |
+
// * Throw an error when `Context::deterministicAlgorithms()` is true. Most
|
| 295 |
+
// of the time, this should be accomplished by calling
|
| 296 |
+
// `at::globalContext().alertNotDeterminstic()`. However, if the
|
| 297 |
+
// nondeterministic behavior is caused by the CuBLAS workspace
|
| 298 |
+
// configuration in CUDA >= 10.2,
|
| 299 |
+
// `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
|
| 300 |
+
// called instead (in this case, a comment explaining why the operation is
|
| 301 |
+
// nondeterministic is not necessary). See below for details on these
|
| 302 |
+
// methods.
|
| 303 |
+
//
|
| 304 |
+
// * Have an entry in the list of nondeterministic PyTorch operations in the
|
| 305 |
+
// docstring of `use_deterministic_algorithms()` in torch/__init__.py
|
| 306 |
+
//
|
| 307 |
+
// * Have a test function in `test/test_torch.py` whose name begins with
|
| 308 |
+
// `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace
|
| 309 |
+
// configuration is the reason for nondeterminism, the operation should be
|
| 310 |
+
// included in the `test_cublas_config_nondeterministic_alert` test. Any new
|
| 311 |
+
// tests should ideally follow a pattern similar to the existing ones.
|
| 312 |
+
//
|
| 313 |
+
// `example_func()` below shows an example of the comments and error-throwing
|
| 314 |
+
// code for a nondeterministic operation:
|
| 315 |
+
//
|
| 316 |
+
// void example_func() {
|
| 317 |
+
// // See Note [Writing Nondeterministic Operations]
|
| 318 |
+
// // Nondeterministic because <reason>
|
| 319 |
+
// at::globalContext().alertNondeterministic("example_func");
|
| 320 |
+
// ...
|
| 321 |
+
// }
|
| 322 |
+
|
| 323 |
+
// Throws an error if `Context::deterministicAlgorithms()` is true
|
| 324 |
+
static void alertNotDeterministic(c10::string_view const& caller);
|
| 325 |
+
|
| 326 |
+
// Throws an error if `Context::deterministicAlgorithms()` is true, CUDA
|
| 327 |
+
// >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or
|
| 328 |
+
// ":4096:8". For more details:
|
| 329 |
+
// https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility
|
| 330 |
+
void alertCuBLASConfigNotDeterministic() const;
|
| 331 |
+
|
| 332 |
+
void setFloat32MatmulPrecision(const std::string& s);
|
| 333 |
+
bool allowTF32CuDNN() const;
|
| 334 |
+
void setAllowTF32CuDNN(bool);
|
| 335 |
+
bool allowTF32CuBLAS() const;
|
| 336 |
+
void setAllowTF32CuBLAS(bool);
|
| 337 |
+
Float32MatmulPrecision float32MatmulPrecision() const;
|
| 338 |
+
void setFloat32MatmulPrecision(Float32MatmulPrecision p);
|
| 339 |
+
bool allowFP16ReductionCuBLAS() const;
|
| 340 |
+
void setAllowFP16ReductionCuBLAS(bool);
|
| 341 |
+
bool allowBF16ReductionCuBLAS() const;
|
| 342 |
+
void setAllowBF16ReductionCuBLAS(bool);
|
| 343 |
+
at::QEngine qEngine() const;
|
| 344 |
+
void setQEngine(at::QEngine e);
|
| 345 |
+
static const std::vector<at::QEngine>& supportedQEngines();
|
| 346 |
+
static bool isXNNPACKAvailable();
|
| 347 |
+
void setCheckSparseTensorInvariants(bool e);
|
| 348 |
+
bool checkSparseTensorInvariants() const;
|
| 349 |
+
// This method is used to release the original weight after pre-packing.
|
| 350 |
+
// It should be called once before loading/running the model.
|
| 351 |
+
// NB: By default it is set to true for mobile builds.
|
| 352 |
+
void setReleaseWeightsWhenPrepacking(bool e);
|
| 353 |
+
bool releaseWeightsWhenPrepacking() const;
|
| 354 |
+
|
| 355 |
+
void setDisplayVmapFallbackWarnings(bool enabled);
|
| 356 |
+
bool areVmapFallbackWarningsEnabled() const;
|
| 357 |
+
|
| 358 |
+
void setDefaultMobileCPUAllocator();
|
| 359 |
+
void unsetDefaultMobileCPUAllocator();
|
| 360 |
+
bool allowFP16ReductionCPU() const;
|
| 361 |
+
void setAllowFP16ReductionCPU(bool);
|
| 362 |
+
|
| 363 |
+
private:
|
| 364 |
+
void initCUDAIfNeeded(c10::DeviceType p) {
|
| 365 |
+
if (p == c10::DeviceType::CUDA) {
|
| 366 |
+
lazyInitCUDA();
|
| 367 |
+
}
|
| 368 |
+
}
|
| 369 |
+
void initHIPIfNeeded(c10::DeviceType p) {
|
| 370 |
+
if (p == c10::DeviceType::HIP) {
|
| 371 |
+
lazyInitHIP();
|
| 372 |
+
}
|
| 373 |
+
}
|
| 374 |
+
void initXPUIfNeeded(c10::DeviceType p) {
|
| 375 |
+
if (p == c10::DeviceType::XPU) {
|
| 376 |
+
lazyInitXPU();
|
| 377 |
+
}
|
| 378 |
+
}
|
| 379 |
+
static bool checkCuBLASConfigDeterministic();
|
| 380 |
+
c10::once_flag thc_init;
|
| 381 |
+
c10::once_flag thh_init;
|
| 382 |
+
c10::once_flag thx_init;
|
| 383 |
+
c10::once_flag th_mtia_init;
|
| 384 |
+
c10::once_flag thp_init;
|
| 385 |
+
bool enabled_cudnn = true;
|
| 386 |
+
bool deterministic_cudnn = false;
|
| 387 |
+
bool deterministic_mkldnn = false;
|
| 388 |
+
bool _deterministic_algorithms = false;
|
| 389 |
+
bool _deterministic_algorithms_warn_only = false;
|
| 390 |
+
bool _deterministic_fill_uninitialized_memory = true;
|
| 391 |
+
bool enabled_flashSDP = true;
|
| 392 |
+
bool enabled_mem_efficientSDP = true;
|
| 393 |
+
bool enabled_mathSDP = true;
|
| 394 |
+
bool enabled_cudnnSDP = true;
|
| 395 |
+
bool enabled_overrideable = true;
|
| 396 |
+
bool allow_fp16_bf16_reduction_mathSDP = false;
|
| 397 |
+
#ifdef USE_ROCM
|
| 398 |
+
bool benchmark_cudnn = true;
|
| 399 |
+
#else
|
| 400 |
+
bool benchmark_cudnn = false;
|
| 401 |
+
#endif
|
| 402 |
+
Float32MatmulPrecision float32_matmul_precision =
|
| 403 |
+
c10::utils::check_env("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE") == true
|
| 404 |
+
? at::Float32MatmulPrecision::HIGH
|
| 405 |
+
: at::Float32MatmulPrecision::HIGHEST;
|
| 406 |
+
int benchmark_limit_cudnn = 10;
|
| 407 |
+
bool allow_tf32_cudnn = true;
|
| 408 |
+
bool allow_fp16_reduction_cublas = true;
|
| 409 |
+
bool allow_bf16_reduction_cublas = true;
|
| 410 |
+
bool enabled_mkldnn = true;
|
| 411 |
+
bool enabled_nnpack = true;
|
| 412 |
+
at::LinalgBackend linalg_preferred_backend =
|
| 413 |
+
c10::utils::check_env("TORCH_LINALG_PREFER_CUSOLVER") == true
|
| 414 |
+
? at::LinalgBackend::Cusolver
|
| 415 |
+
: at::LinalgBackend::Default;
|
| 416 |
+
at::BlasBackend blas_preferred_backend =
|
| 417 |
+
#ifdef USE_ROCM
|
| 418 |
+
(c10::utils::check_env("TORCH_BLAS_PREFER_HIPBLASLT") != false)
|
| 419 |
+
#else
|
| 420 |
+
(c10::utils::check_env("TORCH_BLAS_PREFER_CUBLASLT") == true)
|
| 421 |
+
#endif
|
| 422 |
+
? at::BlasBackend::Cublaslt
|
| 423 |
+
: at::BlasBackend::Cublas;
|
| 424 |
+
#ifdef C10_MOBILE
|
| 425 |
+
bool release_original_weights = true;
|
| 426 |
+
#else
|
| 427 |
+
bool release_original_weights = false;
|
| 428 |
+
#endif
|
| 429 |
+
bool display_vmap_fallback_warnings_ = false;
|
| 430 |
+
std::optional<at::QEngine> quantized_engine = std::nullopt;
|
| 431 |
+
bool enable_sparse_tensor_invariant_checks = false;
|
| 432 |
+
bool allow_fp16_reduction_cpu = false;
|
| 433 |
+
|
| 434 |
+
Allocator* prev_allocator_ptr_{nullptr};
|
| 435 |
+
};
|
| 436 |
+
|
| 437 |
+
TORCH_API Context& globalContext();
|
| 438 |
+
|
| 439 |
+
// Forces construction of the global Context singleton, triggering whatever
// one-time initialization its constructor performs. Safe to call repeatedly.
inline void init() {
  globalContext();
}
|
| 442 |
+
|
| 443 |
+
TORCH_API Allocator* getCPUAllocator();
|
| 444 |
+
|
| 445 |
+
// Looks up the (deprecated) type-properties object for a Backend/ScalarType
// pair via the global registry. Kept for legacy callers of the old Type API.
inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
    Backend p,
    ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      p, s);
}
|
| 451 |
+
|
| 452 |
+
// Shorthand for looking up the deprecated type properties of Backend::CPU.
inline DeprecatedTypeProperties& CPU(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::CPU, s);
}
|
| 456 |
+
|
| 457 |
+
// Shorthand for looking up the deprecated type properties of Backend::CUDA.
inline DeprecatedTypeProperties& CUDA(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::CUDA, s);
}
|
| 461 |
+
|
| 462 |
+
// Shorthand for looking up the deprecated type properties of Backend::HIP.
inline DeprecatedTypeProperties& HIP(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::HIP, s);
}
|
| 466 |
+
|
| 467 |
+
// Shorthand for looking up the deprecated type properties of Backend::MPS.
inline DeprecatedTypeProperties& MPS(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::MPS, s);
}
|
| 471 |
+
|
| 472 |
+
// Thin forwarder to Context::hasCUDA() on the global context.
inline bool hasCUDA() {
  return globalContext().hasCUDA();
}
|
| 475 |
+
|
| 476 |
+
// Thin forwarder to Context::hasMTIA() on the global context.
inline bool hasMTIA() {
  return globalContext().hasMTIA();
}
|
| 479 |
+
|
| 480 |
+
// Thin forwarder to Context::hasHIP() on the global context.
inline bool hasHIP() {
  return globalContext().hasHIP();
}
|
| 483 |
+
|
| 484 |
+
// Thin forwarder to Context::hasIPU() on the global context.
inline bool hasIPU() {
  return globalContext().hasIPU();
}
|
| 487 |
+
|
| 488 |
+
// Thin forwarder to Context::hasXLA() on the global context.
inline bool hasXLA() {
  return globalContext().hasXLA();
}
|
| 491 |
+
|
| 492 |
+
// Thin forwarder to Context::hasMPS() on the global context.
inline bool hasMPS() {
  return globalContext().hasMPS();
}
|
| 495 |
+
|
| 496 |
+
// Thin forwarder to Context::hasMAIA() on the global context.
inline bool hasMAIA() {
  return globalContext().hasMAIA();
}
|
| 499 |
+
|
| 500 |
+
// Thin forwarder to Context::hasXPU() on the global context.
inline bool hasXPU() {
  return globalContext().hasXPU();
}
|
| 503 |
+
|
| 504 |
+
// Despite its name, this function returns the number of *CUDA* GPUs.
|
| 505 |
+
inline size_t getNumGPUs() {
|
| 506 |
+
// WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
|
| 507 |
+
// FUNCTION. If you are interested in interrogating the number of
|
| 508 |
+
// devices for a specific device type, add that function to the
|
| 509 |
+
// relevant library (e.g., similar to at::cuda::device_count())
|
| 510 |
+
if (hasCUDA() && hasHIP()) {
|
| 511 |
+
throw std::runtime_error(
|
| 512 |
+
"Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
|
| 513 |
+
"to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
|
| 514 |
+
"means HIP. Rebuild PyTorch with one or the other disabled.");
|
| 515 |
+
} else if (hasCUDA()) {
|
| 516 |
+
return detail::getCUDAHooks().getNumGPUs();
|
| 517 |
+
} else if (hasHIP()) {
|
| 518 |
+
return detail::getHIPHooks().getNumGPUs();
|
| 519 |
+
} else {
|
| 520 |
+
return 0;
|
| 521 |
+
}
|
| 522 |
+
}
|
| 523 |
+
|
| 524 |
+
// Thin forwarder to Context::hasOpenMP() on the global context.
inline bool hasOpenMP() {
  return globalContext().hasOpenMP();
}
|
| 527 |
+
|
| 528 |
+
// Thin forwarder to Context::hasMKL() on the global context.
inline bool hasMKL() {
  return globalContext().hasMKL();
}
|
| 531 |
+
|
| 532 |
+
// Thin forwarder to Context::hasLAPACK() on the global context.
inline bool hasLAPACK() {
  return globalContext().hasLAPACK();
}
|
| 535 |
+
|
| 536 |
+
// Thin forwarder to Context::hasMAGMA() on the global context.
inline bool hasMAGMA() {
  return globalContext().hasMAGMA();
}
|
| 539 |
+
|
| 540 |
+
// Thin forwarder to Context::hasMKLDNN() on the global context.
inline bool hasMKLDNN() {
  return globalContext().hasMKLDNN();
}
|
| 543 |
+
|
| 544 |
+
// Seeds the default CPU generator with `seed`, then propagates the same seed
// to every available CUDA device generator, every available XPU device
// generator, and (if present) the MPS generator. Each generator is seeded
// under its own mutex; locks are scoped so no two are held at once.
inline void manual_seed(uint64_t seed) {
  auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen.mutex());
    gen.set_current_seed(seed);
  }
  // NB: Sometimes we build with CUDA, but we don't have any GPUs
  // available. In that case, we must not seed CUDA; it will fail!
  const auto cuda_num_gpus = detail::getCUDAHooks().getNumGPUs();
  if (hasCUDA() && cuda_num_gpus > 0) {
    for (const auto i : c10::irange(cuda_num_gpus)) {
      auto cuda_gen = globalContext().defaultGenerator(
          Device(at::kCUDA, static_cast<c10::DeviceIndex>(i)));
      {
        // See Note [Acquire lock when using random generators]
        std::lock_guard<std::mutex> lock(cuda_gen.mutex());
        cuda_gen.set_current_seed(seed);
      }
    }
  }

  // Same per-device seeding for XPU devices, when any are present.
  const auto xpu_num_gpus = detail::getXPUHooks().getNumGPUs();
  if (hasXPU() && xpu_num_gpus) {
    for (const auto i : c10::irange(xpu_num_gpus)) {
      auto xpu_gen = globalContext().defaultGenerator(
          Device(at::kXPU, static_cast<c10::DeviceIndex>(i)));
      {
        // See Note [Acquire lock when using random generators]
        std::lock_guard<std::mutex> lock(xpu_gen.mutex());
        xpu_gen.set_current_seed(seed);
      }
    }
  }

  if (hasMPS()) {
    auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(mps_gen.mutex());
    mps_gen.set_current_seed(seed);
  }
}
|
| 586 |
+
|
| 587 |
+
// When the global flag `allow_tf32` is set to true, cuBLAS handles are
|
| 588 |
+
// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH.
|
| 589 |
+
// For some operators, such as addmv, TF32 offers no performance improvement
|
| 590 |
+
// but causes precision loss. To help this case, this class implements
|
| 591 |
+
// a RAII guard that can be used to quickly disable TF32 within its scope.
|
| 592 |
+
//
|
| 593 |
+
// Usage:
|
| 594 |
+
// NoTF32Guard disable_tf32;
|
| 595 |
+
// RAII guard: while an instance is alive, TF32 should be treated as disabled
// (see the usage comment above). Constructor/destructor are defined out of line.
struct TORCH_API NoTF32Guard {
  NoTF32Guard();
  ~NoTF32Guard();
  // Whether a guard is currently active and TF32 should be suppressed
  // (scope — per-thread or global — is not visible here; confirm in the .cpp).
  static bool should_disable_tf32();

 private:
  bool changed = false;  // whether this particular guard flipped the state
};
|
| 603 |
+
|
| 604 |
+
// RAII guard marking that a ROCm backward pass is in progress for the
// duration of its scope; out-of-line ctor/dtor manage the flag.
struct TORCH_API ROCmBackwardPassGuard {
  ROCmBackwardPassGuard();
  ~ROCmBackwardPassGuard();
  // True while a guard is alive (scope semantics defined in the .cpp).
  static bool is_backward_pass();
};
|
| 609 |
+
|
| 610 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <ATen/Tensor.h>
|
| 5 |
+
#include <ATen/dlpack.h>
|
| 6 |
+
|
| 7 |
+
// this convertor will:
|
| 8 |
+
// 1) take a Tensor object and wrap it in the DLPack tensor
|
| 9 |
+
// 2) take a dlpack tensor and convert it to the ATen Tensor
|
| 10 |
+
|
| 11 |
+
namespace at {
|
| 12 |
+
|
| 13 |
+
TORCH_API ScalarType toScalarType(const DLDataType& dtype);
|
| 14 |
+
TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
|
| 15 |
+
TORCH_API Tensor fromDLPack(DLManagedTensor* src);
|
| 16 |
+
// Deprecated const overload: casts away const and forwards to the
// non-const fromDLPack(DLManagedTensor*) declared above.
C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
inline Tensor fromDLPack(const DLManagedTensor* src) {
  return fromDLPack(const_cast<DLManagedTensor*>(src));
}
|
| 20 |
+
TORCH_API Tensor
|
| 21 |
+
fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter);
|
| 22 |
+
TORCH_API DLDataType getDLDataType(const Tensor& t);
|
| 23 |
+
TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id);
|
| 24 |
+
|
| 25 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/IListRef.h>
|
| 4 |
+
#include <ATen/core/Tensor.h>
|
| 5 |
+
#include <c10/core/DeviceGuard.h>
|
| 6 |
+
#include <c10/core/ScalarType.h> // TensorList whyyyyy
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
// Are you here because you're wondering why DeviceGuard(tensor) no
|
| 11 |
+
// longer works? For code organization reasons, we have temporarily(?)
|
| 12 |
+
// removed this constructor from DeviceGuard. The new way to
|
| 13 |
+
// spell it is:
|
| 14 |
+
//
|
| 15 |
+
// OptionalDeviceGuard guard(device_of(tensor));
|
| 16 |
+
|
| 17 |
+
/// Return the Device of a Tensor, if the Tensor is defined.
|
| 18 |
+
inline std::optional<Device> device_of(const Tensor& t) {
|
| 19 |
+
if (t.defined()) {
|
| 20 |
+
return std::make_optional(t.device());
|
| 21 |
+
} else {
|
| 22 |
+
return std::nullopt;
|
| 23 |
+
}
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
inline std::optional<Device> device_of(const std::optional<Tensor>& t) {
|
| 27 |
+
return t.has_value() ? device_of(t.value()) : std::nullopt;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
/// Return the Device of a TensorList, if the list is non-empty and
|
| 31 |
+
/// the first Tensor is defined. (This function implicitly assumes
|
| 32 |
+
/// that all tensors in the list have the same device.)
|
| 33 |
+
inline std::optional<Device> device_of(ITensorListRef t) {
  return t.empty() ? std::nullopt : device_of(t.front());
}
|
| 40 |
+
|
| 41 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h
ADDED
|
@@ -0,0 +1,808 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/DeprecatedTypeProperties.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
#include <c10/util/Exception.h>
|
| 6 |
+
#include <c10/util/Half.h>
|
| 7 |
+
#include <c10/util/Metaprogramming.h>
|
| 8 |
+
#include <c10/util/complex.h>
|
| 9 |
+
#include <c10/util/string_view.h>
|
| 10 |
+
|
| 11 |
+
#ifdef __CUDACC__
|
| 12 |
+
#include <cuda.h> // For CUDA_VERSION
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
#ifdef TEMPLATE_SELECTIVE_BUILD
|
| 16 |
+
#include <ATen/selected_mobile_ops.h>
|
| 17 |
+
#else
|
| 18 |
+
namespace at {
|
| 19 |
+
/**
|
| 20 |
+
* The method should_include_kernel_dtype() returns true/false
|
| 21 |
+
* based on whether the switching code for a specific dtype should be
|
| 22 |
+
* included based on build time constants generated from tracing model
|
| 23 |
+
* execution. This method will be implemented via code-generation and
|
| 24 |
+
* included in this file when code-gen is ready.
|
| 25 |
+
*/
|
| 26 |
+
inline constexpr bool should_include_kernel_dtype(
    const char* /*kernel_tag_str*/,
    at::ScalarType /*scalar_type*/
) {
  // Default (non-selective) build: every dtype's switch arm is compiled in.
  return true;
}
|
| 32 |
+
} // namespace at
|
| 33 |
+
#endif
|
| 34 |
+
|
| 35 |
+
/**
|
| 36 |
+
* In the Facebook internal build (using BUCK), this macro is enabled by
|
| 37 |
+
* passing in -c pt.enable_record_kernel_dtype=1 when building the tracer
|
| 38 |
+
* binary.
|
| 39 |
+
*/
|
| 40 |
+
#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE
|
| 41 |
+
namespace at {
|
| 42 |
+
namespace detail {
|
| 43 |
+
TORCH_API void record_kernel_function_dtype(std::string name);
|
| 44 |
+
}
|
| 45 |
+
} // namespace at
|
| 46 |
+
|
| 47 |
+
#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) \
|
| 48 |
+
at::detail::record_kernel_function_dtype( \
|
| 49 |
+
std::string(NAME) + "$" + toString(enum_type));
|
| 50 |
+
#else
|
| 51 |
+
#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type)
|
| 52 |
+
#endif
|
| 53 |
+
|
| 54 |
+
#define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) \
|
| 55 |
+
do { \
|
| 56 |
+
if constexpr (!at::should_include_kernel_dtype( \
|
| 57 |
+
at_dispatch_name, enum_type)) { \
|
| 58 |
+
AT_ERROR( \
|
| 59 |
+
"dtype '", \
|
| 60 |
+
toString(enum_type), \
|
| 61 |
+
"' not selected for kernel tag ", \
|
| 62 |
+
at_dispatch_name); \
|
| 63 |
+
} \
|
| 64 |
+
} while (0)
|
| 65 |
+
|
| 66 |
+
#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
|
| 67 |
+
case enum_type: { \
|
| 68 |
+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
|
| 69 |
+
using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
|
| 70 |
+
return __VA_ARGS__(); \
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
#define AT_DISPATCH_CASE(enum_type, ...) \
|
| 74 |
+
AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
|
| 75 |
+
|
| 76 |
+
#define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \
|
| 77 |
+
case enum_type: { \
|
| 78 |
+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
|
| 79 |
+
using scalar_t = scalar_type; \
|
| 80 |
+
using underlying_t C10_UNUSED = typename scalar_t::underlying; \
|
| 81 |
+
const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
|
| 82 |
+
const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
|
| 83 |
+
return __VA_ARGS__(); \
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
#define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 87 |
+
enum_type, scalar_type, bitwidth, qmin, qmax, ...) \
|
| 88 |
+
case enum_type: { \
|
| 89 |
+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
|
| 90 |
+
using scalar_t = scalar_type; \
|
| 91 |
+
using underlying_t C10_UNUSED = typename scalar_t::underlying; \
|
| 92 |
+
const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
|
| 93 |
+
const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
|
| 94 |
+
C10_UNUSED int bit_width = bitwidth; \
|
| 95 |
+
C10_UNUSED int64_t quant_min = qmin; \
|
| 96 |
+
C10_UNUSED int64_t quant_max = qmax; \
|
| 97 |
+
return __VA_ARGS__(); \
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
namespace detail {
|
| 101 |
+
|
| 102 |
+
// Identity overload: lets the AT_DISPATCH_* macros accept a ScalarType
// argument directly.
inline at::ScalarType scalar_type(at::ScalarType s) {
  return s;
}
|
| 105 |
+
|
| 106 |
+
// Legacy overload for callers that still pass DeprecatedTypeProperties to a
// dispatch macro; extracts the underlying ScalarType and emits the
// deprecation warning below at the use site.
C10_DEPRECATED_MESSAGE(
    "passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, "
    "pass an at::ScalarType instead")
inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) {
  return t.scalarType();
}
|
| 112 |
+
|
| 113 |
+
// Empty stub whose only purpose is to surface the deprecation message when
// referenced (presumably from the retired macro's expansion — confirm there).
C10_DEPRECATED_MESSAGE(
    "AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, "
    "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")
inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
|
| 117 |
+
|
| 118 |
+
// Empty stub whose only purpose is to surface the deprecation message when
// referenced (presumably from the retired macro's expansion — confirm there).
C10_DEPRECATED_MESSAGE(
    "AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, "
    "use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...) "
    "instead")
inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
|
| 123 |
+
|
| 124 |
+
} // namespace detail
|
| 125 |
+
|
| 126 |
+
// The AT_DISPATCH_* family of macros provides the ability to
|
| 127 |
+
// conveniently generate specializations of a kernel over all of the
|
| 128 |
+
// dtypes we care about in PyTorch. We call it "dispatch" because
|
| 129 |
+
// we are "dispatching" to the correct, dtype-specific kernel.
|
| 130 |
+
//
|
| 131 |
+
// A standard usage looks like:
|
| 132 |
+
//
|
| 133 |
+
// AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] {
|
| 134 |
+
// // Your code here, with 'scalar_t' now defined to
|
| 135 |
+
// // be the dtype in question
|
| 136 |
+
// });
|
| 137 |
+
//
|
| 138 |
+
// There are many variations of this macro, so it's important to
|
| 139 |
+
// understand exactly /which/ dtypes you want to get instantiated, as
|
| 140 |
+
// well as what the "default" set is.
|
| 141 |
+
//
|
| 142 |
+
// The default set of dtypes that are instantiated (e.g., by
|
| 143 |
+
// AT_DISPATCH_ALL_TYPES) are floating point types (float, double),
|
| 144 |
+
// and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t),
|
| 145 |
+
// but NOT booleans (bool), half-precision floats (Half) or
|
| 146 |
+
// complex number (c10::complex<float>, c10::complex<double>).
|
| 147 |
+
// This "cut" is somewhat historical (the default types are the
|
| 148 |
+
// ones that TH historically supported), but it also reflects the
|
| 149 |
+
// fact that the non-default types are "poorly" behaved (booleans
|
| 150 |
+
// are NOT integers mod 2, half precision operations ~essentially
|
| 151 |
+
// don't exist on CPU, complex numbers are an experimental application).
|
| 152 |
+
//
|
| 153 |
+
// Here are the questions you should generally ask to decide which
|
| 154 |
+
// dispatch you want:
|
| 155 |
+
//
|
| 156 |
+
// 1. Is this an integral or floating point specific operation?
|
| 157 |
+
// (If so, you'll want one of the FLOATING or INTEGRAL macros.)
|
| 158 |
+
//
|
| 159 |
+
// 2. Should half be supported? (If you're on CPU, the answer is almost
|
| 160 |
+
// definitely no. If you do want support, use one of the AND_HALF
|
| 161 |
+
// macros)
|
| 162 |
+
//
|
| 163 |
+
// Much rarer situations:
|
| 164 |
+
//
|
| 165 |
+
// 3. Should bool be supported? (You often have to write your kernel
|
| 166 |
+
// differently if arithmetic operations are involved.) If so,
|
| 167 |
+
// Use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool
|
| 168 |
+
//
|
| 169 |
+
// 4. Should complex be supported? The answer is almost always no,
|
| 170 |
+
// unless you are working on "generic" code that should work on
|
| 171 |
+
// all dtypes.
|
| 172 |
+
//
|
| 173 |
+
// Parameters:
|
| 174 |
+
// -----------
|
| 175 |
+
//
|
| 176 |
+
// 1. The NAME argument is a "tag" that is used to trace and then
|
| 177 |
+
// conditionally compile fragments of the case statements such
|
| 178 |
+
// that the kernel functions are specialized only for the dtypes
|
| 179 |
+
// that are needed. The NAME parameter *must* be a build time
|
| 180 |
+
// const char* (can't be std::string, etc...)
|
| 181 |
+
//
|
| 182 |
+
// Please ensure that the NAME is unique for every implementation
|
| 183 |
+
// or you run the risk of over-including code for the kernel
|
| 184 |
+
// functions. There is no risk of missing out on any code, so
|
| 185 |
+
// it's mostly a risk of a Type-2 error, and not a Type-1 error.
|
| 186 |
+
//
|
| 187 |
+
// Switch-like syntax:
|
| 188 |
+
// -------------------
|
| 189 |
+
// There is also a switch-case like syntax which is useful if a kernel
|
| 190 |
+
// needs to be specialized for particular scalar types
|
| 191 |
+
//
|
| 192 |
+
// AT_DISPATCH_SWITCH(self.scalar_type(), "op_name",
|
| 193 |
+
// AT_DISPATCH_CASE_INTEGRAL_TYPES([&] {
|
| 194 |
+
// op_integral<scalar_t>(iter);
|
| 195 |
+
// })
|
| 196 |
+
// AT_DISPATCH_CASE_FLOATING_TYPES([&] {
|
| 197 |
+
// op_floating<scalar_t>(iter);
|
| 198 |
+
// })
|
| 199 |
+
// AT_DISPATCH_CASE(kBool, [&] {
|
| 200 |
+
// op_bool(iter);
|
| 201 |
+
// })
|
| 202 |
+
// );
|
| 203 |
+
//
|
| 204 |
+
// For each AT_DISPATCH_FOO macro, there is a corresponding
|
| 205 |
+
// AT_DISPATCH_CASE_FOO macro which can be used inside of an
|
| 206 |
+
// AT_DISPATCH_SWITCH block.
|
| 207 |
+
|
| 208 |
+
// NB: the the_type variable is not used, but we have kept it for
|
| 209 |
+
// backwards compatibility. It's probably not used by anyone though;
|
| 210 |
+
// but we're just being safe (and it doesn't hurt.) Note we must
|
| 211 |
+
// use it to shut up warnings about unused store.
|
| 212 |
+
|
| 213 |
+
// Core dispatch generator: wraps a switch on the runtime ScalarType in an
// immediately-invoked lambda. TYPE is evaluated exactly once, the dtype is
// recorded for selective-build tracing, and the AT_DISPATCH_CASE_* arms in
// __VA_ARGS__ supply the cases; an unhandled dtype raises an error naming
// NAME and the dtype.
#define AT_DISPATCH_SWITCH(TYPE, NAME, ...)                                 \
  [&] {                                                                     \
    const auto& the_type = TYPE;                                            \
    constexpr const char* at_dispatch_name = NAME;                          \
    /* don't use TYPE again in case it is an expensive or side-effect op */ \
    at::ScalarType _st = ::detail::scalar_type(the_type);                   \
    RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st);                    \
    switch (_st) {                                                          \
      __VA_ARGS__                                                           \
      default:                                                              \
        AT_ERROR(                                                           \
            '"',                                                            \
            at_dispatch_name,                                               \
            "\" not implemented for '",                                     \
            toString(_st),                                                  \
            "'");                                                           \
    }                                                                       \
  }()
|
| 231 |
+
|
| 232 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES(...) \
|
| 233 |
+
AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
|
| 234 |
+
AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)
|
| 235 |
+
|
| 236 |
+
#define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
|
| 237 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
|
| 238 |
+
|
| 239 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) \
|
| 240 |
+
AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
|
| 241 |
+
AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
|
| 242 |
+
AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)
|
| 243 |
+
|
| 244 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \
|
| 245 |
+
AT_DISPATCH_SWITCH( \
|
| 246 |
+
TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__))
|
| 247 |
+
|
| 248 |
+
#define AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(...) \
|
| 249 |
+
AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
|
| 250 |
+
AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
|
| 251 |
+
|
| 252 |
+
#define AT_DISPATCH_REDUCED_FLOATING_TYPES(TYPE, NAME, ...) \
|
| 253 |
+
AT_DISPATCH_SWITCH( \
|
| 254 |
+
TYPE, NAME, AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__))
|
| 255 |
+
|
| 256 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) \
|
| 257 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 258 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 259 |
+
|
| 260 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 261 |
+
AT_DISPATCH_SWITCH( \
|
| 262 |
+
TYPE, \
|
| 263 |
+
NAME, \
|
| 264 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 265 |
+
|
| 266 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
|
| 267 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 268 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 269 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 270 |
+
|
| 271 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND2( \
|
| 272 |
+
SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 273 |
+
AT_DISPATCH_SWITCH( \
|
| 274 |
+
TYPE, \
|
| 275 |
+
NAME, \
|
| 276 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND2( \
|
| 277 |
+
SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 278 |
+
|
| 279 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
|
| 280 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 281 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 282 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 283 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 284 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 285 |
+
|
| 286 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND3( \
|
| 287 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 288 |
+
AT_DISPATCH_SWITCH( \
|
| 289 |
+
TYPE, \
|
| 290 |
+
NAME, \
|
| 291 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
|
| 292 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 293 |
+
|
| 294 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
|
| 295 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
|
| 296 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 297 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 298 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 299 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 300 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
|
| 301 |
+
|
| 302 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND4( \
|
| 303 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
|
| 304 |
+
AT_DISPATCH_SWITCH( \
|
| 305 |
+
TYPE, \
|
| 306 |
+
NAME, \
|
| 307 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
|
| 308 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
|
| 309 |
+
|
| 310 |
+
#define AT_DISPATCH_CASE_COMPLEX_TYPES(...) \
|
| 311 |
+
AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) \
|
| 312 |
+
AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__)
|
| 313 |
+
|
| 314 |
+
#define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) \
|
| 315 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__))
|
| 316 |
+
|
| 317 |
+
#define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) \
|
| 318 |
+
AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) \
|
| 319 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 320 |
+
|
| 321 |
+
#define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 322 |
+
AT_DISPATCH_SWITCH( \
|
| 323 |
+
TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 324 |
+
|
| 325 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) \
|
| 326 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 327 |
+
AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)
|
| 328 |
+
|
| 329 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) \
|
| 330 |
+
AT_DISPATCH_SWITCH( \
|
| 331 |
+
TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__))
|
| 332 |
+
|
| 333 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) \
|
| 334 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 335 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 336 |
+
|
| 337 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( \
|
| 338 |
+
SCALARTYPE, TYPE, NAME, ...) \
|
| 339 |
+
AT_DISPATCH_SWITCH( \
|
| 340 |
+
TYPE, \
|
| 341 |
+
NAME, \
|
| 342 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( \
|
| 343 |
+
SCALARTYPE, __VA_ARGS__))
|
| 344 |
+
|
| 345 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
|
| 346 |
+
SCALARTYPE1, SCALARTYPE2, ...) \
|
| 347 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 348 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 349 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 350 |
+
|
| 351 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( \
|
| 352 |
+
SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 353 |
+
AT_DISPATCH_SWITCH( \
|
| 354 |
+
TYPE, \
|
| 355 |
+
NAME, \
|
| 356 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
|
| 357 |
+
SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 358 |
+
|
| 359 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
|
| 360 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 361 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 362 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 363 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 364 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 365 |
+
|
| 366 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( \
|
| 367 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 368 |
+
AT_DISPATCH_SWITCH( \
|
| 369 |
+
TYPE, \
|
| 370 |
+
NAME, \
|
| 371 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
|
| 372 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 373 |
+
|
| 374 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
|
| 375 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
|
| 376 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 377 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 378 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 379 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 380 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
|
| 381 |
+
|
| 382 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND4( \
|
| 383 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
|
| 384 |
+
AT_DISPATCH_SWITCH( \
|
| 385 |
+
TYPE, \
|
| 386 |
+
NAME, \
|
| 387 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
|
| 388 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
|
| 389 |
+
|
| 390 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
|
| 391 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
|
| 392 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 393 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 394 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 395 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 396 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 397 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)
|
| 398 |
+
|
| 399 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND5( \
|
| 400 |
+
SCALARTYPE1, \
|
| 401 |
+
SCALARTYPE2, \
|
| 402 |
+
SCALARTYPE3, \
|
| 403 |
+
SCALARTYPE4, \
|
| 404 |
+
SCALARTYPE5, \
|
| 405 |
+
TYPE, \
|
| 406 |
+
NAME, \
|
| 407 |
+
...) \
|
| 408 |
+
AT_DISPATCH_SWITCH( \
|
| 409 |
+
TYPE, \
|
| 410 |
+
NAME, \
|
| 411 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
|
| 412 |
+
SCALARTYPE1, \
|
| 413 |
+
SCALARTYPE2, \
|
| 414 |
+
SCALARTYPE3, \
|
| 415 |
+
SCALARTYPE4, \
|
| 416 |
+
SCALARTYPE5, \
|
| 417 |
+
__VA_ARGS__))
|
| 418 |
+
|
| 419 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
|
| 420 |
+
SCALARTYPE1, \
|
| 421 |
+
SCALARTYPE2, \
|
| 422 |
+
SCALARTYPE3, \
|
| 423 |
+
SCALARTYPE4, \
|
| 424 |
+
SCALARTYPE5, \
|
| 425 |
+
SCALARTYPE6, \
|
| 426 |
+
...) \
|
| 427 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 428 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 429 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 430 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 431 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 432 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 433 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)
|
| 434 |
+
|
| 435 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND6( \
|
| 436 |
+
SCALARTYPE1, \
|
| 437 |
+
SCALARTYPE2, \
|
| 438 |
+
SCALARTYPE3, \
|
| 439 |
+
SCALARTYPE4, \
|
| 440 |
+
SCALARTYPE5, \
|
| 441 |
+
SCALARTYPE6, \
|
| 442 |
+
TYPE, \
|
| 443 |
+
NAME, \
|
| 444 |
+
...) \
|
| 445 |
+
AT_DISPATCH_SWITCH( \
|
| 446 |
+
TYPE, \
|
| 447 |
+
NAME, \
|
| 448 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
|
| 449 |
+
SCALARTYPE1, \
|
| 450 |
+
SCALARTYPE2, \
|
| 451 |
+
SCALARTYPE3, \
|
| 452 |
+
SCALARTYPE4, \
|
| 453 |
+
SCALARTYPE5, \
|
| 454 |
+
SCALARTYPE6, \
|
| 455 |
+
__VA_ARGS__))
|
| 456 |
+
|
| 457 |
+
#define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) \
|
| 458 |
+
AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
|
| 459 |
+
AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
|
| 460 |
+
AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
|
| 461 |
+
AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
|
| 462 |
+
AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__)
|
| 463 |
+
|
| 464 |
+
#define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
|
| 465 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))
|
| 466 |
+
|
| 467 |
+
#define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) \
|
| 468 |
+
AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
|
| 469 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 470 |
+
|
| 471 |
+
#define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 472 |
+
AT_DISPATCH_SWITCH( \
|
| 473 |
+
TYPE, \
|
| 474 |
+
NAME, \
|
| 475 |
+
AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 476 |
+
|
| 477 |
+
#define AT_DISPATCH_CASE_ALL_TYPES(...) \
|
| 478 |
+
AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
|
| 479 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)
|
| 480 |
+
|
| 481 |
+
#define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \
|
| 482 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__))
|
| 483 |
+
|
| 484 |
+
#define AT_DISPATCH_CASE_QINT_TYPES(...) \
|
| 485 |
+
AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
|
| 486 |
+
AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) \
|
| 487 |
+
AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__)
|
| 488 |
+
|
| 489 |
+
#define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) \
|
| 490 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__))
|
| 491 |
+
|
| 492 |
+
#define AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, ...) \
|
| 493 |
+
AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__) \
|
| 494 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 495 |
+
|
| 496 |
+
#define AT_DISPATCH_QINT_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 497 |
+
AT_DISPATCH_SWITCH( \
|
| 498 |
+
TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 499 |
+
|
| 500 |
+
#define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) \
|
| 501 |
+
AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
|
| 502 |
+
AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__)
|
| 503 |
+
|
| 504 |
+
#define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) \
|
| 505 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__))
|
| 506 |
+
|
| 507 |
+
#define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) \
|
| 508 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 509 |
+
at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) \
|
| 510 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 511 |
+
at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) \
|
| 512 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 513 |
+
at::kQInt32, \
|
| 514 |
+
at::qint32, \
|
| 515 |
+
CHAR_BIT * sizeof(int), \
|
| 516 |
+
INT_MIN, \
|
| 517 |
+
INT_MAX, \
|
| 518 |
+
__VA_ARGS__) \
|
| 519 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 520 |
+
at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) \
|
| 521 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 522 |
+
at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__)
|
| 523 |
+
|
| 524 |
+
#define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) \
|
| 525 |
+
AT_DISPATCH_SWITCH( \
|
| 526 |
+
TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__))
|
| 527 |
+
|
| 528 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) \
|
| 529 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 530 |
+
AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)
|
| 531 |
+
|
| 532 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) \
|
| 533 |
+
AT_DISPATCH_SWITCH( \
|
| 534 |
+
TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__))
|
| 535 |
+
|
| 536 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) \
|
| 537 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 538 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 539 |
+
|
| 540 |
+
#define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 541 |
+
AT_DISPATCH_SWITCH( \
|
| 542 |
+
TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 543 |
+
|
| 544 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) \
|
| 545 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 546 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 547 |
+
|
| 548 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 549 |
+
AT_DISPATCH_SWITCH( \
|
| 550 |
+
TYPE, \
|
| 551 |
+
NAME, \
|
| 552 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__))
|
| 553 |
+
|
| 554 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
|
| 555 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 556 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 557 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 558 |
+
|
| 559 |
+
#define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 560 |
+
AT_DISPATCH_SWITCH( \
|
| 561 |
+
TYPE, \
|
| 562 |
+
NAME, \
|
| 563 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 564 |
+
|
| 565 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
|
| 566 |
+
SCALARTYPE1, SCALARTYPE2, ...) \
|
| 567 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 568 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 569 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 570 |
+
|
| 571 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( \
|
| 572 |
+
SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 573 |
+
AT_DISPATCH_SWITCH( \
|
| 574 |
+
TYPE, \
|
| 575 |
+
NAME, \
|
| 576 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
|
| 577 |
+
SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 578 |
+
|
| 579 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND3( \
|
| 580 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 581 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 582 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 583 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 584 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 585 |
+
|
| 586 |
+
#define AT_DISPATCH_ALL_TYPES_AND3( \
|
| 587 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 588 |
+
AT_DISPATCH_SWITCH( \
|
| 589 |
+
TYPE, \
|
| 590 |
+
NAME, \
|
| 591 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND3( \
|
| 592 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 593 |
+
|
| 594 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
|
| 595 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 596 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 597 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 598 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 599 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 600 |
+
|
| 601 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( \
|
| 602 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 603 |
+
AT_DISPATCH_SWITCH( \
|
| 604 |
+
TYPE, \
|
| 605 |
+
NAME, \
|
| 606 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
|
| 607 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 608 |
+
|
| 609 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
|
| 610 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
|
| 611 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 612 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 613 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 614 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 615 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
|
| 616 |
+
|
| 617 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( \
|
| 618 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
|
| 619 |
+
AT_DISPATCH_SWITCH( \
|
| 620 |
+
TYPE, \
|
| 621 |
+
NAME, \
|
| 622 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
|
| 623 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
|
| 624 |
+
|
| 625 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
|
| 626 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
|
| 627 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 628 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 629 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 630 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 631 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 632 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)
|
| 633 |
+
|
| 634 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND5( \
|
| 635 |
+
SCALARTYPE1, \
|
| 636 |
+
SCALARTYPE2, \
|
| 637 |
+
SCALARTYPE3, \
|
| 638 |
+
SCALARTYPE4, \
|
| 639 |
+
SCALARTYPE5, \
|
| 640 |
+
TYPE, \
|
| 641 |
+
NAME, \
|
| 642 |
+
...) \
|
| 643 |
+
AT_DISPATCH_SWITCH( \
|
| 644 |
+
TYPE, \
|
| 645 |
+
NAME, \
|
| 646 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
|
| 647 |
+
SCALARTYPE1, \
|
| 648 |
+
SCALARTYPE2, \
|
| 649 |
+
SCALARTYPE3, \
|
| 650 |
+
SCALARTYPE4, \
|
| 651 |
+
SCALARTYPE5, \
|
| 652 |
+
__VA_ARGS__))
|
| 653 |
+
|
| 654 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
|
| 655 |
+
SCALARTYPE1, \
|
| 656 |
+
SCALARTYPE2, \
|
| 657 |
+
SCALARTYPE3, \
|
| 658 |
+
SCALARTYPE4, \
|
| 659 |
+
SCALARTYPE5, \
|
| 660 |
+
SCALARTYPE6, \
|
| 661 |
+
...) \
|
| 662 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 663 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 664 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 665 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 666 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 667 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 668 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)
|
| 669 |
+
|
| 670 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND6( \
|
| 671 |
+
SCALARTYPE1, \
|
| 672 |
+
SCALARTYPE2, \
|
| 673 |
+
SCALARTYPE3, \
|
| 674 |
+
SCALARTYPE4, \
|
| 675 |
+
SCALARTYPE5, \
|
| 676 |
+
SCALARTYPE6, \
|
| 677 |
+
TYPE, \
|
| 678 |
+
NAME, \
|
| 679 |
+
...) \
|
| 680 |
+
AT_DISPATCH_SWITCH( \
|
| 681 |
+
TYPE, \
|
| 682 |
+
NAME, \
|
| 683 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
|
| 684 |
+
SCALARTYPE1, \
|
| 685 |
+
SCALARTYPE2, \
|
| 686 |
+
SCALARTYPE3, \
|
| 687 |
+
SCALARTYPE4, \
|
| 688 |
+
SCALARTYPE5, \
|
| 689 |
+
SCALARTYPE6, \
|
| 690 |
+
__VA_ARGS__))
|
| 691 |
+
|
| 692 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
|
| 693 |
+
SCALARTYPE1, \
|
| 694 |
+
SCALARTYPE2, \
|
| 695 |
+
SCALARTYPE3, \
|
| 696 |
+
SCALARTYPE4, \
|
| 697 |
+
SCALARTYPE5, \
|
| 698 |
+
SCALARTYPE6, \
|
| 699 |
+
SCALARTYPE7, \
|
| 700 |
+
...) \
|
| 701 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 702 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 703 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 704 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 705 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 706 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 707 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
|
| 708 |
+
AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__)
|
| 709 |
+
|
| 710 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND7( \
|
| 711 |
+
SCALARTYPE1, \
|
| 712 |
+
SCALARTYPE2, \
|
| 713 |
+
SCALARTYPE3, \
|
| 714 |
+
SCALARTYPE4, \
|
| 715 |
+
SCALARTYPE5, \
|
| 716 |
+
SCALARTYPE6, \
|
| 717 |
+
SCALARTYPE7, \
|
| 718 |
+
TYPE, \
|
| 719 |
+
NAME, \
|
| 720 |
+
...) \
|
| 721 |
+
AT_DISPATCH_SWITCH( \
|
| 722 |
+
TYPE, \
|
| 723 |
+
NAME, \
|
| 724 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
|
| 725 |
+
SCALARTYPE1, \
|
| 726 |
+
SCALARTYPE2, \
|
| 727 |
+
SCALARTYPE3, \
|
| 728 |
+
SCALARTYPE4, \
|
| 729 |
+
SCALARTYPE5, \
|
| 730 |
+
SCALARTYPE6, \
|
| 731 |
+
SCALARTYPE7, \
|
| 732 |
+
__VA_ARGS__))
|
| 733 |
+
|
| 734 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
|
| 735 |
+
SCALARTYPE1, \
|
| 736 |
+
SCALARTYPE2, \
|
| 737 |
+
SCALARTYPE3, \
|
| 738 |
+
SCALARTYPE4, \
|
| 739 |
+
SCALARTYPE5, \
|
| 740 |
+
SCALARTYPE6, \
|
| 741 |
+
SCALARTYPE7, \
|
| 742 |
+
SCALARTYPE8, \
|
| 743 |
+
...) \
|
| 744 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 745 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 746 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 747 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 748 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 749 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 750 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
|
| 751 |
+
AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__) \
|
| 752 |
+
AT_DISPATCH_CASE(SCALARTYPE8, __VA_ARGS__)
|
| 753 |
+
|
| 754 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND8( \
|
| 755 |
+
SCALARTYPE1, \
|
| 756 |
+
SCALARTYPE2, \
|
| 757 |
+
SCALARTYPE3, \
|
| 758 |
+
SCALARTYPE4, \
|
| 759 |
+
SCALARTYPE5, \
|
| 760 |
+
SCALARTYPE6, \
|
| 761 |
+
SCALARTYPE7, \
|
| 762 |
+
SCALARTYPE8, \
|
| 763 |
+
TYPE, \
|
| 764 |
+
NAME, \
|
| 765 |
+
...) \
|
| 766 |
+
AT_DISPATCH_SWITCH( \
|
| 767 |
+
TYPE, \
|
| 768 |
+
NAME, \
|
| 769 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
|
| 770 |
+
SCALARTYPE1, \
|
| 771 |
+
SCALARTYPE2, \
|
| 772 |
+
SCALARTYPE3, \
|
| 773 |
+
SCALARTYPE4, \
|
| 774 |
+
SCALARTYPE5, \
|
| 775 |
+
SCALARTYPE6, \
|
| 776 |
+
SCALARTYPE7, \
|
| 777 |
+
SCALARTYPE8, \
|
| 778 |
+
__VA_ARGS__))
|
| 779 |
+
|
| 780 |
+
#define AT_DISPATCH_CASE_BIT_TYPES(...) \
|
| 781 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits1x8, __VA_ARGS__) \
|
| 782 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits2x4, __VA_ARGS__) \
|
| 783 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits4x2, __VA_ARGS__) \
|
| 784 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits8, __VA_ARGS__) \
|
| 785 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits16, __VA_ARGS__)
|
| 786 |
+
|
| 787 |
+
#define AT_DISPATCH_BIT_TYPES(TYPE, NAME, ...) \
|
| 788 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_BIT_TYPES(__VA_ARGS__))
|
| 789 |
+
|
| 790 |
+
#define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) \
|
| 791 |
+
AT_DISPATCH_SWITCH( \
|
| 792 |
+
TYPE, \
|
| 793 |
+
NAME, \
|
| 794 |
+
AT_PRIVATE_CASE_TYPE_USING_HINT( \
|
| 795 |
+
at::ScalarType::Int, index_t, __VA_ARGS__) \
|
| 796 |
+
AT_PRIVATE_CASE_TYPE_USING_HINT( \
|
| 797 |
+
at::ScalarType::Long, index_t, __VA_ARGS__))
|
| 798 |
+
|
| 799 |
+
// ----------------------------------------------------------------------------
|
| 800 |
+
// DEPRECATED MACROS, DON'T USE THESE
|
| 801 |
+
// ----------------------------------------------------------------------------
|
| 802 |
+
|
| 803 |
+
#define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \
|
| 804 |
+
detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); \
|
| 805 |
+
AT_DISPATCH_SWITCH( \
|
| 806 |
+
TYPE, \
|
| 807 |
+
NAME, \
|
| 808 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__))
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Dispatch_v2.h
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/Dispatch.h>
|
| 2 |
+
|
| 3 |
+
// This is a new implementation of the AT_DISPATCH macro family from
|
| 4 |
+
// ATen/Dispatch.h
|
| 5 |
+
//
|
| 6 |
+
// The intended usage is:
|
| 7 |
+
//
|
| 8 |
+
// ScalarType scalar_type;
|
| 9 |
+
//
|
| 10 |
+
// AT_DISPATCH_V2(
|
| 11 |
+
// scalar_type,
|
| 12 |
+
// "debug string",
|
| 13 |
+
// AT_WRAP([&] {
|
| 14 |
+
// ... code to specialize with scalar_t ...
|
| 15 |
+
// }),
|
| 16 |
+
// kHalf,
|
| 17 |
+
// AT_EXPAND(AT_ALL_TYPES),
|
| 18 |
+
// ... as many types arguments as needed ...
|
| 19 |
+
// )
|
| 20 |
+
//
|
| 21 |
+
// For example, given an old style:
|
| 22 |
+
//
|
| 23 |
+
// AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
|
| 24 |
+
// kComplexHalf,
|
| 25 |
+
// kHalf,
|
| 26 |
+
// self.scalar_type(),
|
| 27 |
+
// "_local_scalar_dense_cpu",
|
| 28 |
+
// [&] {
|
| 29 |
+
// scalar_t value = *self.data_ptr<scalar_t>();
|
| 30 |
+
// r = Scalar(value);
|
| 31 |
+
// }
|
| 32 |
+
// )
|
| 33 |
+
//
|
| 34 |
+
// You now write:
|
| 35 |
+
//
|
| 36 |
+
// AT_DISPATCH_V2(
|
| 37 |
+
// self.scalar_type(),
|
| 38 |
+
// "_local_scalar_dense_cpu",
|
| 39 |
+
// AT_WRAP([&] {
|
| 40 |
+
// scalar_t value = *self.data_ptr<scalar_t>();
|
| 41 |
+
// r = Scalar(value);
|
| 42 |
+
// }),
|
| 43 |
+
// AT_EXPAND(AT_ALL_TYPES),
|
| 44 |
+
// AT_EXPAND(AT_COMPLEX_TYPES),
|
| 45 |
+
// kComplexHalf,
|
| 46 |
+
// kHalf,
|
| 47 |
+
// )
|
| 48 |
+
//
|
| 49 |
+
// Notably, it sports the following improvements:
|
| 50 |
+
//
|
| 51 |
+
// - It is not necessary to specify the arity (e.g.,
|
| 52 |
+
// AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND{2,3,4,...})
|
| 53 |
+
// when using the macro
|
| 54 |
+
//
|
| 55 |
+
// - It is not necessary to specify each dtype individually; if
|
| 56 |
+
// there is a set of related dtypes and you want to dispatch
|
| 57 |
+
// over all of them, you can simply say, e.g., AT_EXPAND(AT_INTEGRAL_TYPES)
|
| 58 |
+
// in your argument list.
|
| 59 |
+
//
|
| 60 |
+
// However, you must remember to wrap the payload body in AT_WRAP, or commas
|
| 61 |
+
// inside your lambda will be improperly handled. Furthermore, if you more
|
| 62 |
+
// entries to ScalarType than can be supported by this macro, it will fail
|
| 63 |
+
// with an obscure error (due to attempting to concatenate AT_AP with
|
| 64 |
+
// something that is not a number).
|
| 65 |
+
//
|
| 66 |
+
// The implementation strategy is to use the count arguments trick
|
| 67 |
+
// (e.g., as described in https://stackoverflow.com/a/2124385/23845)
|
| 68 |
+
// to discover how many dtypes have been passed, and then dispatch to a
|
| 69 |
+
// hand-written macro for each arity that applies as many DISPATCH_CASE as
|
| 70 |
+
// necessary. The hand-written macros can be regenerated for other arities
|
| 71 |
+
// with the script below.
|
| 72 |
+
//
|
| 73 |
+
// There is some delicacy in the implementation in controlling when
|
| 74 |
+
// macro expansion occurs, mediated with AT_EXPAND and AT_GUARD. I mostly
|
| 75 |
+
// relied on GPT4 to help me get it right.
|
| 76 |
+
|
| 77 |
+
// Public API macros
|
| 78 |
+
|
| 79 |
+
// See documentation above
|
| 80 |
+
#define AT_DISPATCH_V2(TYPE, NAME, BODY, ...) \
|
| 81 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_AP_VAR(AT_WRAP(BODY), TYPE, __VA_ARGS__))
|
| 82 |
+
|
| 83 |
+
// This macro lets you pass an arbitrary expression that may contain internal
|
| 84 |
+
// commas to another macro without having the commas causing the expression
|
| 85 |
+
// to be interpreted as being multiple arguments
|
| 86 |
+
#define AT_WRAP(...) __VA_ARGS__
|
| 87 |
+
|
| 88 |
+
#define AT_FLOAT8_TYPES \
|
| 89 |
+
c10::kFloat8_e5m2, c10::kFloat8_e5m2fnuz, c10::kFloat8_e4m3fn, \
|
| 90 |
+
c10::kFloat8_e4m3fnuz
|
| 91 |
+
|
| 92 |
+
#define AT_INTEGRAL_TYPES \
|
| 93 |
+
c10::kByte, c10::kChar, c10::kInt, c10::kLong, c10::kShort
|
| 94 |
+
#define AT_FLOATING_TYPES c10::kDouble, c10::kFloat
|
| 95 |
+
#define AT_BAREBONES_UNSIGNED_TYPES c10::kUInt16, c10::kUInt32, c10::kUInt64
|
| 96 |
+
#define AT_INTEGRAL_TYPES_V2 \
|
| 97 |
+
AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)
|
| 98 |
+
#define AT_COMPLEX_TYPES c10::kComplexDouble, c10::kComplexFloat
|
| 99 |
+
#define AT_QINT_TYPES c10::kQInt8, c10::kQUInt8, c10::kQInt32
|
| 100 |
+
// NB: not *actually* all types
|
| 101 |
+
#define AT_ALL_TYPES AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES)
|
| 102 |
+
#define AT_ALL_TYPES_AND_COMPLEX \
|
| 103 |
+
AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_COMPLEX_TYPES)
|
| 104 |
+
|
| 105 |
+
// Helper macros
|
| 106 |
+
|
| 107 |
+
#define AT_AP_VAR(N, T, ...) \
|
| 108 |
+
AT_EXPAND(AT_CONCAT(AT_AP, AT_NUM_ARGS(__VA_ARGS__))(AT_WRAP(N), __VA_ARGS__))
|
| 109 |
+
#define AT_CONCAT(a, b) AT_CONCAT_AUX(a, b)
|
| 110 |
+
#define AT_CONCAT_AUX(a, b) a##b
|
| 111 |
+
#define AT_EXPAND(X) X
|
| 112 |
+
|
| 113 |
+
// Ensure we never have too many scalar types for the expansion here to
|
| 114 |
+
// support. To bump this, you must regenerate the macros below.
|
| 115 |
+
static_assert(static_cast<int>(c10::ScalarType::NumOptions) < 45);
|
| 116 |
+
|
| 117 |
+
// Python code to regenerate generate code below:
|
| 118 |
+
#if 0
|
| 119 |
+
|
| 120 |
+
num_args = 45
|
| 121 |
+
|
| 122 |
+
nums = ', '.join(str(i) for i in reversed(range(num_args+1)))
|
| 123 |
+
args = ', '.join(f'_{i}' for i in range(1, num_args+1))
|
| 124 |
+
|
| 125 |
+
print(f'#define AT_NUM_ARGS(...) AT_EXPAND(AT_NUM_ARGS_AUX(__VA_ARGS__, {nums}))')
|
| 126 |
+
print(f'#define AT_NUM_ARGS_AUX({args}, N, ...) N')
|
| 127 |
+
|
| 128 |
+
for i in range(1, num_args+1):
|
| 129 |
+
args = ', '.join(f'_{i}' for i in range(1, i+1))
|
| 130 |
+
cases = ' '.join([f'AT_DISPATCH_CASE(_{j}, N)' for j in range(1, i+1)])
|
| 131 |
+
print(f'#define AT_AP{i}(N, {args}) {cases}')
|
| 132 |
+
|
| 133 |
+
#endif
|
| 134 |
+
|
| 135 |
+
// Begin generated code
|
| 136 |
+
// clang-format off
|
| 137 |
+
|
| 138 |
+
#define AT_NUM_ARGS(...) AT_EXPAND(AT_NUM_ARGS_AUX(__VA_ARGS__, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0))
|
| 139 |
+
#define AT_NUM_ARGS_AUX(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, N, ...) N
|
| 140 |
+
#define AT_AP1(N, _1) AT_DISPATCH_CASE(_1, N)
|
| 141 |
+
#define AT_AP2(N, _1, _2) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N)
|
| 142 |
+
#define AT_AP3(N, _1, _2, _3) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N)
|
| 143 |
+
#define AT_AP4(N, _1, _2, _3, _4) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N)
|
| 144 |
+
#define AT_AP5(N, _1, _2, _3, _4, _5) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N)
|
| 145 |
+
#define AT_AP6(N, _1, _2, _3, _4, _5, _6) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N)
|
| 146 |
+
#define AT_AP7(N, _1, _2, _3, _4, _5, _6, _7) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N)
|
| 147 |
+
#define AT_AP8(N, _1, _2, _3, _4, _5, _6, _7, _8) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N)
|
| 148 |
+
#define AT_AP9(N, _1, _2, _3, _4, _5, _6, _7, _8, _9) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N)
|
| 149 |
+
#define AT_AP10(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N)
|
| 150 |
+
#define AT_AP11(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N)
|
| 151 |
+
#define AT_AP12(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N)
|
| 152 |
+
#define AT_AP13(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N)
|
| 153 |
+
#define AT_AP14(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N)
|
| 154 |
+
#define AT_AP15(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N)
|
| 155 |
+
#define AT_AP16(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N)
|
| 156 |
+
#define AT_AP17(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N)
|
| 157 |
+
#define AT_AP18(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N)
|
| 158 |
+
#define AT_AP19(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N)
|
| 159 |
+
#define AT_AP20(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N)
|
| 160 |
+
#define AT_AP21(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N)
|
| 161 |
+
#define AT_AP22(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N)
|
| 162 |
+
#define AT_AP23(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N)
|
| 163 |
+
#define AT_AP24(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N)
|
| 164 |
+
#define AT_AP25(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N)
|
| 165 |
+
#define AT_AP26(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N)
|
| 166 |
+
#define AT_AP27(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N)
|
| 167 |
+
#define AT_AP28(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N)
|
| 168 |
+
#define AT_AP29(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N)
|
| 169 |
+
#define AT_AP30(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N)
|
| 170 |
+
#define AT_AP31(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N)
|
| 171 |
+
#define AT_AP32(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N)
|
| 172 |
+
#define AT_AP33(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N)
|
| 173 |
+
#define AT_AP34(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N)
|
| 174 |
+
#define AT_AP35(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N)
|
| 175 |
+
#define AT_AP36(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N)
|
| 176 |
+
#define AT_AP37(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N)
|
| 177 |
+
#define AT_AP38(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N)
|
| 178 |
+
#define AT_AP39(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N)
|
| 179 |
+
#define AT_AP40(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N)
|
| 180 |
+
#define AT_AP41(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N)
|
| 181 |
+
#define AT_AP42(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N)
|
| 182 |
+
#define AT_AP43(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N) AT_DISPATCH_CASE(_43, N)
|
| 183 |
+
#define AT_AP44(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N) AT_DISPATCH_CASE(_43, N) AT_DISPATCH_CASE(_44, N)
|
| 184 |
+
#define AT_AP45(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N) AT_DISPATCH_CASE(_43, N) AT_DISPATCH_CASE(_44, N) AT_DISPATCH_CASE(_45, N)
|
| 185 |
+
// End generated code
|
| 186 |
+
// clang-format on
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 4 |
+
#include <ATen/Functions.h>
|
| 5 |
+
#else
|
| 6 |
+
#include <ATen/ops/view.h>
|
| 7 |
+
#include <ATen/ops/view_copy.h>
|
| 8 |
+
#endif
|
| 9 |
+
|
| 10 |
+
#include <ATen/Tensor.h>
|
| 11 |
+
#include <ATen/core/DimVector.h>
|
| 12 |
+
#include <c10/util/Exception.h>
|
| 13 |
+
#include <c10/util/MaybeOwned.h>
|
| 14 |
+
#include <c10/util/irange.h>
|
| 15 |
+
|
| 16 |
+
#include <functional>
|
| 17 |
+
#include <tuple>
|
| 18 |
+
#include <utility>
|
| 19 |
+
|
| 20 |
+
namespace at {
|
| 21 |
+
|
| 22 |
+
TORCH_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
|
| 23 |
+
TORCH_API std::vector<SymInt> infer_size_symint(
|
| 24 |
+
SymIntArrayRef a,
|
| 25 |
+
SymIntArrayRef b);
|
| 26 |
+
TORCH_API DimVector infer_size_dimvector(IntArrayRef a, IntArrayRef b);
|
| 27 |
+
TORCH_API SymDimVector
|
| 28 |
+
infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b);
|
| 29 |
+
|
| 30 |
+
// Named type instead of a pair/tuple so that we can be sure to
|
| 31 |
+
// construct the vectors in place and get NRVO.
|
| 32 |
+
template <typename Container>
|
| 33 |
+
struct InferExpandGeometryResult {
|
| 34 |
+
Container sizes;
|
| 35 |
+
Container strides;
|
| 36 |
+
explicit InferExpandGeometryResult(size_t ndim)
|
| 37 |
+
: sizes(ndim), strides(ndim) {}
|
| 38 |
+
explicit InferExpandGeometryResult(IntArrayRef sizes_, size_t ndim)
|
| 39 |
+
: sizes(sizes_.begin(), sizes_.end()), strides(ndim) {}
|
| 40 |
+
};
|
| 41 |
+
|
| 42 |
+
TORCH_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
|
| 43 |
+
inferExpandGeometry(
|
| 44 |
+
IntArrayRef tensor_sizes,
|
| 45 |
+
IntArrayRef tensor_strides,
|
| 46 |
+
IntArrayRef sizes);
|
| 47 |
+
|
| 48 |
+
TORCH_API InferExpandGeometryResult<DimVector> inferExpandGeometry_dimvector(
|
| 49 |
+
IntArrayRef tensor_sizes,
|
| 50 |
+
IntArrayRef tensor_strides,
|
| 51 |
+
IntArrayRef sizes);
|
| 52 |
+
|
| 53 |
+
TORCH_API std::vector<int64_t> infer_dense_strides(
|
| 54 |
+
IntArrayRef tensor_sizes,
|
| 55 |
+
IntArrayRef tensor_strides);
|
| 56 |
+
|
| 57 |
+
// True if input shapes are expandable
|
| 58 |
+
// NOTE: infer_size did a similar check, please keep them sync if change is
|
| 59 |
+
// needed
|
| 60 |
+
inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) {
|
| 61 |
+
size_t ndim1 = shape1.size();
|
| 62 |
+
size_t ndim2 = shape2.size();
|
| 63 |
+
size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2;
|
| 64 |
+
|
| 65 |
+
for (int64_t i = static_cast<int64_t>(ndim) - 1; i >= 0; --i) {
|
| 66 |
+
if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 ||
|
| 67 |
+
shape2[ndim2] == 1) {
|
| 68 |
+
continue;
|
| 69 |
+
}
|
| 70 |
+
return false;
|
| 71 |
+
}
|
| 72 |
+
return true;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
// avoid copy-construction of Tensor by using a reference_wrapper.
|
| 76 |
+
inline void check_defined(
|
| 77 |
+
std::initializer_list<std::reference_wrapper<const Tensor>> tensors,
|
| 78 |
+
const char* api_name) {
|
| 79 |
+
for (auto& t : tensors) {
|
| 80 |
+
if (!t.get().defined()) {
|
| 81 |
+
AT_ERROR(api_name, "(...) called with an undefined Tensor");
|
| 82 |
+
}
|
| 83 |
+
}
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
// NOTE [ ExpandUtils Borrowing ]
|
| 87 |
+
//
|
| 88 |
+
// Functions in ExpandUtils return `c10::MaybeOwned<Tensor>` because
|
| 89 |
+
// expansion may not actually be needed, in which case we can improve
|
| 90 |
+
// efficiency by returning
|
| 91 |
+
// `c10::MaybeOwned<Tensor>::borrowed(to_expand)`. However, this means
|
| 92 |
+
// that you need to be careful: the returned `c10::MaybeOwned<Tensor>`
|
| 93 |
+
// must not outlive the original `Tensor` object that `to_expand`
|
| 94 |
+
// referred to! The deleted rvalue reference overloads of these
|
| 95 |
+
// functions help with this by preventing trivial use of a temporary
|
| 96 |
+
// resulting from a function call, but it is still possible to make a
|
| 97 |
+
// mistake.
|
| 98 |
+
|
| 99 |
+
inline c10::MaybeOwned<Tensor> expand_inplace(
|
| 100 |
+
const Tensor& tensor,
|
| 101 |
+
const Tensor& to_expand) {
|
| 102 |
+
if (tensor.sym_sizes().equals(to_expand.sym_sizes())) {
|
| 103 |
+
return c10::MaybeOwned<Tensor>::borrowed(to_expand);
|
| 104 |
+
}
|
| 105 |
+
return c10::MaybeOwned<Tensor>::owned(
|
| 106 |
+
to_expand.expand_symint(tensor.sym_sizes()));
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
inline c10::MaybeOwned<Tensor> expand_inplace(
|
| 110 |
+
const Tensor& tensor,
|
| 111 |
+
Tensor&& to_expand) = delete;
|
| 112 |
+
|
| 113 |
+
inline c10::MaybeOwned<Tensor> expand_inplace(
|
| 114 |
+
const Tensor& tensor,
|
| 115 |
+
const Tensor& to_expand,
|
| 116 |
+
const char* api_name) {
|
| 117 |
+
check_defined({tensor, to_expand}, api_name);
|
| 118 |
+
return expand_inplace(tensor, to_expand);
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
inline c10::MaybeOwned<Tensor> expand_inplace(
|
| 122 |
+
const Tensor& tensor,
|
| 123 |
+
Tensor&& to_expand,
|
| 124 |
+
const char* api_name) = delete;
|
| 125 |
+
|
| 126 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 127 |
+
expand_inplace(
|
| 128 |
+
const Tensor& tensor,
|
| 129 |
+
const Tensor& to_expand1,
|
| 130 |
+
const Tensor& to_expand2) {
|
| 131 |
+
if (tensor.sizes().equals(to_expand1.sizes()) &&
|
| 132 |
+
tensor.sizes().equals((to_expand2.sizes()))) {
|
| 133 |
+
return std::make_tuple(
|
| 134 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand1),
|
| 135 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand2));
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
return std::make_tuple(
|
| 139 |
+
c10::MaybeOwned<Tensor>::owned(to_expand1.expand(tensor.sizes())),
|
| 140 |
+
c10::MaybeOwned<Tensor>::owned(to_expand2.expand(tensor.sizes())));
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 144 |
+
expand_inplace(
|
| 145 |
+
const Tensor& tensor,
|
| 146 |
+
Tensor&& to_expand1,
|
| 147 |
+
const Tensor& to_expand2) = delete;
|
| 148 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 149 |
+
expand_inplace(
|
| 150 |
+
const Tensor& tensor,
|
| 151 |
+
const Tensor& to_expand1,
|
| 152 |
+
Tensor&& to_expand2) = delete;
|
| 153 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 154 |
+
expand_inplace(const Tensor& tensor, Tensor&& to_expand1, Tensor&& to_expand2) =
|
| 155 |
+
delete;
|
| 156 |
+
|
| 157 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 158 |
+
expand_inplace(
|
| 159 |
+
const Tensor& tensor,
|
| 160 |
+
const Tensor& to_expand1,
|
| 161 |
+
const Tensor& to_expand2,
|
| 162 |
+
const char* api_name) {
|
| 163 |
+
check_defined({tensor, to_expand1, to_expand2}, api_name);
|
| 164 |
+
return expand_inplace(tensor, to_expand1, to_expand2);
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 168 |
+
expand_inplace(
|
| 169 |
+
const Tensor& tensor,
|
| 170 |
+
Tensor&& to_expand1,
|
| 171 |
+
const Tensor& to_expand2,
|
| 172 |
+
const char* api_name) = delete;
|
| 173 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 174 |
+
expand_inplace(
|
| 175 |
+
const Tensor& tensor,
|
| 176 |
+
const Tensor& to_expand1,
|
| 177 |
+
Tensor&& to_expand2,
|
| 178 |
+
const char* api_name) = delete;
|
| 179 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 180 |
+
expand_inplace(
|
| 181 |
+
const Tensor& tensor,
|
| 182 |
+
Tensor&& to_expand1,
|
| 183 |
+
Tensor&& to_expand2,
|
| 184 |
+
const char* api_name) = delete;
|
| 185 |
+
|
| 186 |
+
// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation.
|
| 187 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 188 |
+
expand_outplace(const Tensor& to_expand1, const Tensor& to_expand2) {
|
| 189 |
+
auto s1 = to_expand1.sym_sizes();
|
| 190 |
+
auto s2 = to_expand2.sym_sizes();
|
| 191 |
+
if (s1.equals(s2)) {
|
| 192 |
+
return std::make_tuple(
|
| 193 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand1),
|
| 194 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand2));
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
auto expanded_size = infer_size_symdimvector(s1, s2);
|
| 198 |
+
return std::make_tuple(
|
| 199 |
+
c10::MaybeOwned<Tensor>::owned(to_expand1.expand_symint(expanded_size)),
|
| 200 |
+
c10::MaybeOwned<Tensor>::owned(to_expand2.expand_symint(expanded_size)));
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 204 |
+
expand_outplace(Tensor&& to_expand1, const Tensor& to_expand2) = delete;
|
| 205 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 206 |
+
expand_outplace(const Tensor& to_expand1, Tensor&& to_expand2) = delete;
|
| 207 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 208 |
+
expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2) = delete;
|
| 209 |
+
|
| 210 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 211 |
+
expand_outplace(
|
| 212 |
+
const Tensor& to_expand1,
|
| 213 |
+
const Tensor& to_expand2,
|
| 214 |
+
const char* api_name) {
|
| 215 |
+
check_defined({to_expand1, to_expand2}, api_name);
|
| 216 |
+
return expand_outplace(to_expand1, to_expand2);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 220 |
+
expand_outplace(
|
| 221 |
+
Tensor&& to_expand1,
|
| 222 |
+
const Tensor& to_expand2,
|
| 223 |
+
const char* api_name) = delete;
|
| 224 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 225 |
+
expand_outplace(
|
| 226 |
+
const Tensor& to_expand1,
|
| 227 |
+
Tensor&& to_expand2,
|
| 228 |
+
const char* api_name) = delete;
|
| 229 |
+
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
|
| 230 |
+
expand_outplace(
|
| 231 |
+
Tensor&& to_expand1,
|
| 232 |
+
Tensor&& to_expand2,
|
| 233 |
+
const char* api_name) = delete;
|
| 234 |
+
|
| 235 |
+
inline std::tuple<
|
| 236 |
+
c10::MaybeOwned<Tensor>,
|
| 237 |
+
c10::MaybeOwned<Tensor>,
|
| 238 |
+
c10::MaybeOwned<Tensor>>
|
| 239 |
+
expand_outplace(
|
| 240 |
+
const Tensor& to_expand1,
|
| 241 |
+
const Tensor& to_expand2,
|
| 242 |
+
const Tensor& to_expand3) {
|
| 243 |
+
if (to_expand1.sizes().equals(to_expand2.sizes()) &&
|
| 244 |
+
to_expand1.sizes().equals(to_expand3.sizes())) {
|
| 245 |
+
return std::make_tuple(
|
| 246 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand1),
|
| 247 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand2),
|
| 248 |
+
c10::MaybeOwned<Tensor>::borrowed(to_expand3));
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
auto expanded_size12 =
|
| 252 |
+
infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes());
|
| 253 |
+
auto expanded_size =
|
| 254 |
+
infer_size_dimvector(expanded_size12, to_expand3.sizes());
|
| 255 |
+
return std::make_tuple(
|
| 256 |
+
c10::MaybeOwned<Tensor>::owned(to_expand1.expand(expanded_size)),
|
| 257 |
+
c10::MaybeOwned<Tensor>::owned(to_expand2.expand(expanded_size)),
|
| 258 |
+
c10::MaybeOwned<Tensor>::owned(to_expand3.expand(expanded_size)));
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
inline std::tuple<
|
| 262 |
+
c10::MaybeOwned<Tensor>,
|
| 263 |
+
c10::MaybeOwned<Tensor>,
|
| 264 |
+
c10::MaybeOwned<Tensor>>
|
| 265 |
+
expand_outplace(
|
| 266 |
+
Tensor&& to_expand1,
|
| 267 |
+
const Tensor& to_expand2,
|
| 268 |
+
const Tensor& to_expand3) = delete;
|
| 269 |
+
inline std::tuple<
|
| 270 |
+
c10::MaybeOwned<Tensor>,
|
| 271 |
+
c10::MaybeOwned<Tensor>,
|
| 272 |
+
c10::MaybeOwned<Tensor>>
|
| 273 |
+
expand_outplace(
|
| 274 |
+
const Tensor& to_expand1,
|
| 275 |
+
Tensor&& to_expand2,
|
| 276 |
+
const Tensor& to_expand3) = delete;
|
| 277 |
+
inline std::tuple<
|
| 278 |
+
c10::MaybeOwned<Tensor>,
|
| 279 |
+
c10::MaybeOwned<Tensor>,
|
| 280 |
+
c10::MaybeOwned<Tensor>>
|
| 281 |
+
expand_outplace(
|
| 282 |
+
Tensor&& to_expand1,
|
| 283 |
+
Tensor&& to_expand2,
|
| 284 |
+
const Tensor& to_expand3) = delete;
|
| 285 |
+
inline std::tuple<
|
| 286 |
+
c10::MaybeOwned<Tensor>,
|
| 287 |
+
c10::MaybeOwned<Tensor>,
|
| 288 |
+
c10::MaybeOwned<Tensor>>
|
| 289 |
+
expand_outplace(
|
| 290 |
+
const Tensor& to_expand1,
|
| 291 |
+
const Tensor& to_expand2,
|
| 292 |
+
Tensor&& to_expand3) = delete;
|
| 293 |
+
inline std::tuple<
|
| 294 |
+
c10::MaybeOwned<Tensor>,
|
| 295 |
+
c10::MaybeOwned<Tensor>,
|
| 296 |
+
c10::MaybeOwned<Tensor>>
|
| 297 |
+
expand_outplace(
|
| 298 |
+
Tensor&& to_expand1,
|
| 299 |
+
const Tensor& to_expand2,
|
| 300 |
+
Tensor&& to_expand3) = delete;
|
| 301 |
+
inline std::tuple<
|
| 302 |
+
c10::MaybeOwned<Tensor>,
|
| 303 |
+
c10::MaybeOwned<Tensor>,
|
| 304 |
+
c10::MaybeOwned<Tensor>>
|
| 305 |
+
expand_outplace(
|
| 306 |
+
const Tensor& to_expand1,
|
| 307 |
+
Tensor&& to_expand2,
|
| 308 |
+
Tensor&& to_expand3) = delete;
|
| 309 |
+
inline std::tuple<
|
| 310 |
+
c10::MaybeOwned<Tensor>,
|
| 311 |
+
c10::MaybeOwned<Tensor>,
|
| 312 |
+
c10::MaybeOwned<Tensor>>
|
| 313 |
+
expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2, Tensor&& to_expand3) =
|
| 314 |
+
delete;
|
| 315 |
+
|
| 316 |
+
inline std::tuple<
|
| 317 |
+
c10::MaybeOwned<Tensor>,
|
| 318 |
+
c10::MaybeOwned<Tensor>,
|
| 319 |
+
c10::MaybeOwned<Tensor>>
|
| 320 |
+
expand_outplace(
|
| 321 |
+
const Tensor& to_expand1,
|
| 322 |
+
const Tensor& to_expand2,
|
| 323 |
+
const Tensor& to_expand3,
|
| 324 |
+
const char* api_name) {
|
| 325 |
+
check_defined({to_expand1, to_expand2, to_expand3}, api_name);
|
| 326 |
+
return expand_outplace(to_expand1, to_expand2, to_expand3);
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
inline std::tuple<
|
| 330 |
+
c10::MaybeOwned<Tensor>,
|
| 331 |
+
c10::MaybeOwned<Tensor>,
|
| 332 |
+
c10::MaybeOwned<Tensor>>
|
| 333 |
+
expand_outplace(
|
| 334 |
+
Tensor&& to_expand1,
|
| 335 |
+
const Tensor& to_expand2,
|
| 336 |
+
const Tensor& to_expand3,
|
| 337 |
+
const char* api_name) = delete;
|
| 338 |
+
inline std::tuple<
|
| 339 |
+
c10::MaybeOwned<Tensor>,
|
| 340 |
+
c10::MaybeOwned<Tensor>,
|
| 341 |
+
c10::MaybeOwned<Tensor>>
|
| 342 |
+
expand_outplace(
|
| 343 |
+
const Tensor& to_expand1,
|
| 344 |
+
Tensor&& to_expand2,
|
| 345 |
+
const Tensor& to_expand3,
|
| 346 |
+
const char* api_name) = delete;
|
| 347 |
+
inline std::tuple<
|
| 348 |
+
c10::MaybeOwned<Tensor>,
|
| 349 |
+
c10::MaybeOwned<Tensor>,
|
| 350 |
+
c10::MaybeOwned<Tensor>>
|
| 351 |
+
expand_outplace(
|
| 352 |
+
Tensor&& to_expand1,
|
| 353 |
+
Tensor&& to_expand2,
|
| 354 |
+
const Tensor& to_expand3,
|
| 355 |
+
const char* api_name) = delete;
|
| 356 |
+
inline std::tuple<
|
| 357 |
+
c10::MaybeOwned<Tensor>,
|
| 358 |
+
c10::MaybeOwned<Tensor>,
|
| 359 |
+
c10::MaybeOwned<Tensor>>
|
| 360 |
+
expand_outplace(
|
| 361 |
+
const Tensor& to_expand1,
|
| 362 |
+
const Tensor& to_expand2,
|
| 363 |
+
Tensor&& to_expand3,
|
| 364 |
+
const char* api_name) = delete;
|
| 365 |
+
inline std::tuple<
|
| 366 |
+
c10::MaybeOwned<Tensor>,
|
| 367 |
+
c10::MaybeOwned<Tensor>,
|
| 368 |
+
c10::MaybeOwned<Tensor>>
|
| 369 |
+
expand_outplace(
|
| 370 |
+
Tensor&& to_expand1,
|
| 371 |
+
const Tensor& to_expand2,
|
| 372 |
+
Tensor&& to_expand3,
|
| 373 |
+
const char* api_name) = delete;
|
| 374 |
+
inline std::tuple<
|
| 375 |
+
c10::MaybeOwned<Tensor>,
|
| 376 |
+
c10::MaybeOwned<Tensor>,
|
| 377 |
+
c10::MaybeOwned<Tensor>>
|
| 378 |
+
expand_outplace(
|
| 379 |
+
const Tensor& to_expand1,
|
| 380 |
+
Tensor&& to_expand2,
|
| 381 |
+
Tensor&& to_expand3,
|
| 382 |
+
const char* api_name) = delete;
|
| 383 |
+
inline std::tuple<
|
| 384 |
+
c10::MaybeOwned<Tensor>,
|
| 385 |
+
c10::MaybeOwned<Tensor>,
|
| 386 |
+
c10::MaybeOwned<Tensor>>
|
| 387 |
+
expand_outplace(
|
| 388 |
+
Tensor&& to_expand1,
|
| 389 |
+
Tensor&& to_expand2,
|
| 390 |
+
Tensor&& to_expand3,
|
| 391 |
+
const char* api_name) = delete;
|
| 392 |
+
|
| 393 |
+
inline c10::MaybeOwned<Tensor> expand_size(
|
| 394 |
+
const Tensor& to_expand,
|
| 395 |
+
IntArrayRef sizes) {
|
| 396 |
+
if (to_expand.sizes().equals(sizes)) {
|
| 397 |
+
return c10::MaybeOwned<Tensor>::borrowed(to_expand);
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
return c10::MaybeOwned<Tensor>::owned(to_expand.expand(sizes));
|
| 401 |
+
}
|
| 402 |
+
|
| 403 |
+
inline c10::MaybeOwned<Tensor> expand_size(
|
| 404 |
+
Tensor&& to_expand,
|
| 405 |
+
IntArrayRef sizes) = delete;
|
| 406 |
+
|
| 407 |
+
inline c10::MaybeOwned<Tensor> expand_size(
|
| 408 |
+
const Tensor& to_expand,
|
| 409 |
+
IntArrayRef sizes,
|
| 410 |
+
const char* api_name) {
|
| 411 |
+
check_defined({to_expand}, api_name);
|
| 412 |
+
return expand_size(to_expand, sizes);
|
| 413 |
+
}
|
| 414 |
+
|
| 415 |
+
inline c10::MaybeOwned<Tensor> expand_size(
|
| 416 |
+
Tensor&& to_expand,
|
| 417 |
+
IntArrayRef sizes,
|
| 418 |
+
const char* api_name) = delete;
|
| 419 |
+
|
| 420 |
+
inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
|
| 421 |
+
// expands a list of Tensors; ignores undefined (null) tensors
|
| 422 |
+
bool first = true;
|
| 423 |
+
DimVector sizes;
|
| 424 |
+
for (const auto i : c10::irange(to_expand.size())) {
|
| 425 |
+
if (!to_expand[i].defined()) {
|
| 426 |
+
continue;
|
| 427 |
+
} else if (first) {
|
| 428 |
+
sizes = to_expand[i].sizes();
|
| 429 |
+
first = false;
|
| 430 |
+
} else {
|
| 431 |
+
sizes = infer_size_dimvector(sizes, to_expand[i].sizes());
|
| 432 |
+
}
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
std::vector<Tensor> result(to_expand.size());
|
| 436 |
+
for (const auto i : c10::irange(to_expand.size())) {
|
| 437 |
+
if (!to_expand[i].defined()) {
|
| 438 |
+
continue;
|
| 439 |
+
} else if (to_expand[i].sizes().equals(sizes)) {
|
| 440 |
+
result[i] = to_expand[i];
|
| 441 |
+
} else {
|
| 442 |
+
result[i] = to_expand[i].expand(sizes);
|
| 443 |
+
}
|
| 444 |
+
}
|
| 445 |
+
return result;
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
template <typename T>
|
| 449 |
+
inline Tensor _sum_to(
|
| 450 |
+
Tensor tensor,
|
| 451 |
+
const c10::ArrayRef<T> shape,
|
| 452 |
+
bool always_return_non_view = false) {
|
| 453 |
+
if (shape.size() == 0) {
|
| 454 |
+
return tensor.sum();
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
auto sizes = at::symint::sizes<T>(tensor);
|
| 458 |
+
c10::SmallVector<int64_t, 8> reduce_dims;
|
| 459 |
+
const int64_t leading_dims = sizes.size() - shape.size();
|
| 460 |
+
for (const auto i : c10::irange(leading_dims)) {
|
| 461 |
+
reduce_dims.push_back(i);
|
| 462 |
+
}
|
| 463 |
+
for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
|
| 464 |
+
if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(shape[i - leading_dims], 1)) &&
|
| 465 |
+
TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(sizes[i], 1))) {
|
| 466 |
+
reduce_dims.push_back(i);
|
| 467 |
+
}
|
| 468 |
+
}
|
| 469 |
+
|
| 470 |
+
if (!reduce_dims.empty()) {
|
| 471 |
+
tensor = tensor.sum(reduce_dims, /*keepdim=*/true);
|
| 472 |
+
}
|
| 473 |
+
|
| 474 |
+
if (always_return_non_view) {
|
| 475 |
+
// This is only actually used by the functionalization pass.
|
| 476 |
+
// We want to be able to guarantee that this function doesn't return a view
|
| 477 |
+
// of the input.
|
| 478 |
+
return leading_dims > 0 ? at::symint::view_copy<T>(tensor, shape)
|
| 479 |
+
: tensor.clone();
|
| 480 |
+
} else {
|
| 481 |
+
return leading_dims > 0 ? at::symint::view<T>(tensor, shape) : tensor;
|
| 482 |
+
}
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
inline Tensor sum_to(
|
| 486 |
+
Tensor tensor,
|
| 487 |
+
const c10::SymIntArrayRef shape,
|
| 488 |
+
bool always_return_non_view = false) {
|
| 489 |
+
return _sum_to(std::move(tensor), shape, always_return_non_view);
|
| 490 |
+
}
|
| 491 |
+
|
| 492 |
+
// Sums `tensor` repeatedly to produce a tensor of shape `shape`.
|
| 493 |
+
// Precondition: is_expandable_to(shape, tensor.sizes()) must be true
|
| 494 |
+
inline Tensor sum_to(
|
| 495 |
+
Tensor tensor,
|
| 496 |
+
const IntArrayRef shape,
|
| 497 |
+
bool always_return_non_view = false) {
|
| 498 |
+
return _sum_to(std::move(tensor), shape, always_return_non_view);
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
inline bool is_expandable_to(
|
| 502 |
+
SymIntArrayRef shape,
|
| 503 |
+
c10::SymIntArrayRef desired) {
|
| 504 |
+
size_t ndim = shape.size();
|
| 505 |
+
size_t target_dim = desired.size();
|
| 506 |
+
if (ndim > target_dim) {
|
| 507 |
+
return false;
|
| 508 |
+
}
|
| 509 |
+
for (const auto i : c10::irange(ndim)) {
|
| 510 |
+
const auto& size = shape[ndim - i - 1];
|
| 511 |
+
const auto& target = desired[target_dim - i - 1];
|
| 512 |
+
if (size != target && size != 1) {
|
| 513 |
+
return false;
|
| 514 |
+
}
|
| 515 |
+
}
|
| 516 |
+
return true;
|
| 517 |
+
}
|
| 518 |
+
|
| 519 |
+
inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
|
| 520 |
+
auto sym_shape = c10::SymIntArrayRef(
|
| 521 |
+
reinterpret_cast<const c10::SymInt*>(shape.data()), shape.size());
|
| 522 |
+
auto sym_desired = c10::SymIntArrayRef(
|
| 523 |
+
reinterpret_cast<const c10::SymInt*>(desired.data()), desired.size());
|
| 524 |
+
return is_expandable_to(sym_shape, sym_desired);
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Formatting.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/Formatting.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/TensorOptions.h>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
|
| 7 |
+
// Represents the initial TensorOptions, before the "defaults" are ever changed.
|
| 8 |
+
// This is designed to be used in library code, where the explicit devices,
|
| 9 |
+
// dtypes, etc. are known. NOTE: this is not a stable API.
|
| 10 |
+
inline TensorOptions initialTensorOptions() {
|
| 11 |
+
return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad(
|
| 12 |
+
false);
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/ATen.h>
|
| 3 |
+
#include <ATen/core/op_registration/op_registration.h>
|
| 4 |
+
#include <torch/library.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
// If an operator doesn't have a batching rule implemented then we fallback
|
| 9 |
+
// to this implementation. The fallback only works on out-of-place operators
|
| 10 |
+
// that return only tensors with new memory. (e.g., no in-place operators, no
|
| 11 |
+
// view operations).
|
| 12 |
+
//
|
| 13 |
+
// The fallback effectively takes all of the BatchedTensors in `stack`, slices
|
| 14 |
+
// them, and runs `op` on all of the corresponding slices to produce slices
|
| 15 |
+
// of the outputs. The output slices then get `torch.stack`ed to create the
|
| 16 |
+
// final returns.
|
| 17 |
+
//
|
| 18 |
+
// The performance of the fallback is not very good because it introduces an
|
| 19 |
+
// extra copy from stacking the sliced outputs. Because of this, we prefer to
|
| 20 |
+
// write batching rules for operators whenever possible.
|
| 21 |
+
void batchedTensorForLoopFallback(
|
| 22 |
+
const c10::OperatorHandle& op,
|
| 23 |
+
torch::jit::Stack* stack);
|
| 24 |
+
|
| 25 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/LegacyBatchedTensorImpl.h>
|
| 4 |
+
#include <ATen/core/IListRef.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
// This file contains abstractions used for transforming *logical* vmap
|
| 9 |
+
// arguments into *physical* arguments. (Keep reading for definitions of these
|
| 10 |
+
// terms).
|
| 11 |
+
|
| 12 |
+
// NOTE: [Logical vs physical args]
|
| 13 |
+
// Consider the following vmap.
|
| 14 |
+
// vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
|
| 15 |
+
// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
|
| 16 |
+
// with batch dims 0 and 2:
|
| 17 |
+
// BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
|
| 18 |
+
//
|
| 19 |
+
// We say the *logical* view of the tensor has size [3] -- tensors inside
|
| 20 |
+
// `func` appear to have size [3].
|
| 21 |
+
// However, the *physical* underlying tensor (the one passed to vmap) has size
|
| 22 |
+
// [2, 3, 4].
|
| 23 |
+
//
|
| 24 |
+
// This notion of logical vs physical also extends to non-tensor arguments.
|
| 25 |
+
// Consider the previous tensor; let's assume the user called
|
| 26 |
+
// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
|
| 27 |
+
// dimension they are reducing over is dim 0 but the physical dim is dim 1
|
| 28 |
+
// (the first non-batch dimension)
|
| 29 |
+
|
| 30 |
+
// Forward declared; see NOTE: [What is a VmapPhysicalView?]
|
| 31 |
+
struct VmapPhysicalView;
|
| 32 |
+
|
| 33 |
+
// Most PyTorch operators take 4 or fewer inputs.
|
| 34 |
+
constexpr int64_t kVmapTransformStaticInputSize = 4;
|
| 35 |
+
using VmapPhysicalViewVec =
|
| 36 |
+
SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
|
| 37 |
+
|
| 38 |
+
// Pytorch generally advertises good performance for <= 5 dims.
|
| 39 |
+
// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
|
| 40 |
+
// dimensions to get 8. Adjust this number as necessary
|
| 41 |
+
constexpr int64_t kVmapStaticDimVecSize = 8;
|
| 42 |
+
using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
|
| 43 |
+
using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;
|
| 44 |
+
|
| 45 |
+
// NOTE: [What is an VmapTransform?]
|
| 46 |
+
// An *VmapTransform* converts logical views of tensors to physical views.
|
| 47 |
+
//
|
| 48 |
+
// Batching rules use VmapTransforms to convert logical arguments to
|
| 49 |
+
// physical arguments, then call one or more at:: operator that handles the
|
| 50 |
+
// physical arguments, and then converts the physical result back to a logical
|
| 51 |
+
// argument.
|
| 52 |
+
|
| 53 |
+
// VmapTransform for operators that take tensors with multiple batch dims.
|
| 54 |
+
// Given one or more logical views on Tensors, `logicalToPhysical`
|
| 55 |
+
// permutes all of the batch dims to the front of the tensor, aligns
|
| 56 |
+
// and expands the batch dims to match each other (according to their `level`),
|
| 57 |
+
// and returns a VmapPhysicalView on the tensor(s).
|
| 58 |
+
struct TORCH_API MultiBatchVmapTransform {
|
| 59 |
+
static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
|
| 60 |
+
static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
// VmapTransform for operators that broadcast all inputs.
|
| 64 |
+
// Given some logical views on Tensors, `logicalToPhysical`:
|
| 65 |
+
// - permutes all of the batch dims to the front of the tensors
|
| 66 |
+
// - aligns all the batch dims to the collective levels of all of the tensors.
|
| 67 |
+
// If a tensor does not have a batch dim for a vmap level, then it receives
|
| 68 |
+
// a size-one dimension for said level.
|
| 69 |
+
// - aligns the non-batch dims to have the same dimensionality, adding extra
|
| 70 |
+
// size-1 dimensions in between the batch dimensions and the non-batch
|
| 71 |
+
// dimensions so that the batch dimensions are lined up from the right.
|
| 72 |
+
//
|
| 73 |
+
// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
|
| 74 |
+
// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap
|
| 75 |
+
// tensors of size (B, 1, 2) and (B, 3, 2).
|
| 76 |
+
//
|
| 77 |
+
// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
|
| 78 |
+
// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
|
| 79 |
+
// actually *need* to return a tensor of size (1, 2) for the second tensor
|
| 80 |
+
// because the broadcasting operation takes care of that for us, but we do
|
| 81 |
+
// it anyways to keep things simple.
|
| 82 |
+
struct TORCH_API BroadcastingVmapTransform {
|
| 83 |
+
static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
|
| 84 |
+
};
|
| 85 |
+
|
| 86 |
+
// Forward declared, if you're reading this file head to toe, don't worry about
|
| 87 |
+
// it yet.
|
| 88 |
+
struct VmapPhysicalToLogicalMap;
|
| 89 |
+
|
| 90 |
+
// NOTE: [What is a VmapPhysicalView?]
|
| 91 |
+
// VmapPhysicalView represents a physical view on a Tensor.
|
| 92 |
+
//
|
| 93 |
+
// One can use it to further convert logical dimension indices, logical shapes,
|
| 94 |
+
// and more to their physical variants, or convert a new (physical) tensor into
|
| 95 |
+
// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
|
| 96 |
+
//
|
| 97 |
+
// VmapPhysicalView stores a physical tensor with all of its batch dimensions at
|
| 98 |
+
// the front and some levels that correspond to said batch dimensions.
|
| 99 |
+
//
|
| 100 |
+
// The levels bitset specifies which vmap levels correspond to the batch
|
| 101 |
+
// dimensions at the front of the tensor. In particular, the number of set bits
|
| 102 |
+
// corresponds to the number of batch dimensions on `tensor` and the rightmost
|
| 103 |
+
// bit of `levels` specifies the maximum number of nested vmaps we are in at
|
| 104 |
+
// this point in time.
|
| 105 |
+
// For example, given:
|
| 106 |
+
// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
|
| 107 |
+
//
|
| 108 |
+
// Rightmost bit of `levels` is 3 indicating the number of nested vmaps less
|
| 109 |
+
// than or equal to 3.
|
| 110 |
+
// bitset: 010100
|
| 111 |
+
// ^
|
| 112 |
+
// |
|
| 113 |
+
// levels: 012345
|
| 114 |
+
struct TORCH_API VmapPhysicalView {
|
| 115 |
+
VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
|
| 116 |
+
: levels_(levels), tensor_(std::move(tensor)) {
|
| 117 |
+
TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor_));
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
Tensor& tensor() {
|
| 121 |
+
return tensor_;
|
| 122 |
+
}
|
| 123 |
+
const Tensor& tensor() const {
|
| 124 |
+
return tensor_;
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
// Maps logical dim indices to physical dim indices. Also does dim wrapping.
|
| 128 |
+
//
|
| 129 |
+
// For example, given:
|
| 130 |
+
// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
|
| 131 |
+
//
|
| 132 |
+
// Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
|
| 133 |
+
// This is because the size of levels tell us that the first two dimensions
|
| 134 |
+
// of `tensor_` are batch dimensions, so a logical dim of `n` is actually
|
| 135 |
+
// a physical dim of `n + 2`.
|
| 136 |
+
VmapDimVector getPhysicalDims(OptionalIntArrayRef logical_dims) const;
|
| 137 |
+
int64_t getPhysicalDim(int64_t logical_dim) const;
|
| 138 |
+
|
| 139 |
+
// Returns a VmapPhysicalToLogicalMap object. This can be used for
|
| 140 |
+
// mapping a physical tensor to a new logical tensor (BatchedTensor)
|
| 141 |
+
VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
|
| 142 |
+
|
| 143 |
+
// Maps a logical shape to a physical shape by pre-pending the batch
|
| 144 |
+
// sizes to the logical shape.
|
| 145 |
+
VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
|
| 146 |
+
|
| 147 |
+
int64_t numBatchDims() const;
|
| 148 |
+
|
| 149 |
+
private:
|
| 150 |
+
int64_t numLogicalDims() const;
|
| 151 |
+
|
| 152 |
+
std::bitset<kVmapNumLevels> levels_;
|
| 153 |
+
Tensor tensor_;
|
| 154 |
+
};
|
| 155 |
+
|
| 156 |
+
// Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
|
| 157 |
+
// to a logical one (BatchedTensor). It holds some levels that are used to do
|
| 158 |
+
// the mapping and assumes that the batch dimensions in the physical tensor all
|
| 159 |
+
// occur at the front of the tensor.
|
| 160 |
+
struct TORCH_API VmapPhysicalToLogicalMap {
|
| 161 |
+
VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels)
|
| 162 |
+
: levels_(levels) {}
|
| 163 |
+
|
| 164 |
+
// Maps a physical tensor to a new logical tensor (BatchedTensor).
|
| 165 |
+
// Assumes that all of the "batch dimensions" are at the front
|
| 166 |
+
// of the physical tensor. For example, given:
|
| 167 |
+
// - x = rank-4 Tensor with size 2, 3, 5, 7
|
| 168 |
+
// - levels = (2, 4)
|
| 169 |
+
// Returns:
|
| 170 |
+
// - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
|
| 171 |
+
Tensor apply(const Tensor& physical_tensor) const;
|
| 172 |
+
|
| 173 |
+
// Given a vector of physical tensors,
|
| 174 |
+
// 1. maps each tensor to a new logical tensor. Assumes that all of the
|
| 175 |
+
// "batch dimensions" are at the front of the physical tensors.
|
| 176 |
+
// 2. stores the new logical tensors back into the passed-in vector. This is
|
| 177 |
+
// to avoid additional dynamic allocations.
|
| 178 |
+
void applyInplace(std::vector<Tensor>& physical_tensors) const;
|
| 179 |
+
|
| 180 |
+
std::bitset<kVmapNumLevels> levels_;
|
| 181 |
+
};
|
| 182 |
+
|
| 183 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Allocator.h>
|
| 4 |
+
#include <c10/util/string_view.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
enum MappedAllocatorModes {
|
| 9 |
+
ALLOCATOR_MAPPED_SHARED = 1,
|
| 10 |
+
ALLOCATOR_MAPPED_SHAREDMEM = 2,
|
| 11 |
+
ALLOCATOR_MAPPED_EXCLUSIVE = 4,
|
| 12 |
+
ALLOCATOR_MAPPED_NOCREATE = 8,
|
| 13 |
+
ALLOCATOR_MAPPED_KEEPFD = 16,
|
| 14 |
+
ALLOCATOR_MAPPED_FROMFD = 32,
|
| 15 |
+
ALLOCATOR_MAPPED_UNLINK = 64
|
| 16 |
+
};
|
| 17 |
+
|
| 18 |
+
// Sentinel value/type to help distinguish the file descriptor constructor from
|
| 19 |
+
// the non-file descriptor constructor
|
| 20 |
+
enum WithFd { WITH_FD };
|
| 21 |
+
|
| 22 |
+
TORCH_API std::string NewProcessWideShmHandle();
|
| 23 |
+
|
| 24 |
+
class TORCH_API MapAllocator {
|
| 25 |
+
public:
|
| 26 |
+
MapAllocator(c10::string_view filename, int flags, size_t size);
|
| 27 |
+
MapAllocator(
|
| 28 |
+
WithFd,
|
| 29 |
+
c10::string_view filename,
|
| 30 |
+
int fd,
|
| 31 |
+
int flags,
|
| 32 |
+
size_t size);
|
| 33 |
+
MapAllocator(const MapAllocator&) = delete;
|
| 34 |
+
MapAllocator& operator=(const MapAllocator&) = delete;
|
| 35 |
+
MapAllocator(MapAllocator&&) = delete;
|
| 36 |
+
MapAllocator& operator=(MapAllocator&&) = delete;
|
| 37 |
+
|
| 38 |
+
const char* filename() const {
|
| 39 |
+
return filename_.c_str();
|
| 40 |
+
}
|
| 41 |
+
int fd() const {
|
| 42 |
+
#ifdef _WIN32
|
| 43 |
+
TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows");
|
| 44 |
+
#else
|
| 45 |
+
return fd_;
|
| 46 |
+
#endif
|
| 47 |
+
}
|
| 48 |
+
ptrdiff_t size() const {
|
| 49 |
+
return size_;
|
| 50 |
+
}
|
| 51 |
+
// Return a pointer to the actual data for this allocator
|
| 52 |
+
// (in the case of the refcounted allocator, this is offset
|
| 53 |
+
// from the base pointer.)
|
| 54 |
+
virtual void* data() const {
|
| 55 |
+
return base_ptr_;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
int flags() const {
|
| 59 |
+
return flags_;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
static MapAllocator* fromDataPtr(const at::DataPtr&);
|
| 63 |
+
static at::DataPtr makeDataPtr(
|
| 64 |
+
c10::string_view filename,
|
| 65 |
+
int flags,
|
| 66 |
+
size_t size,
|
| 67 |
+
size_t* actual_size_out);
|
| 68 |
+
static at::DataPtr makeDataPtr(
|
| 69 |
+
WithFd,
|
| 70 |
+
const char* filename,
|
| 71 |
+
int fd,
|
| 72 |
+
int flags,
|
| 73 |
+
size_t size,
|
| 74 |
+
size_t* actual_size_out);
|
| 75 |
+
|
| 76 |
+
// Closes the data. Helps us avoid destructor shenanigans
|
| 77 |
+
virtual void close();
|
| 78 |
+
|
| 79 |
+
// This is very dangerous. You have to redefine this destructor for each
|
| 80 |
+
// subclass
|
| 81 |
+
virtual ~MapAllocator();
|
| 82 |
+
|
| 83 |
+
protected:
|
| 84 |
+
bool closed_ = false;
|
| 85 |
+
std::string filename_;
|
| 86 |
+
int flags_ = 0;
|
| 87 |
+
ptrdiff_t size_; /* mapped size */
|
| 88 |
+
#ifdef _WIN32
|
| 89 |
+
void* handle_;
|
| 90 |
+
void* event_;
|
| 91 |
+
std::string eventname_;
|
| 92 |
+
#else
|
| 93 |
+
int fd_ = -1;
|
| 94 |
+
#endif
|
| 95 |
+
void* base_ptr_ = nullptr;
|
| 96 |
+
};
|
| 97 |
+
|
| 98 |
+
// Base-from-member idiom
|
| 99 |
+
struct TORCH_API RefcountedMapAllocatorArgCheck {
|
| 100 |
+
RefcountedMapAllocatorArgCheck(int flags);
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
|
| 104 |
+
public MapAllocator {
|
| 105 |
+
public:
|
| 106 |
+
RefcountedMapAllocator(const char* filename, int flags, size_t size);
|
| 107 |
+
RefcountedMapAllocator(
|
| 108 |
+
WithFd,
|
| 109 |
+
const char* filename,
|
| 110 |
+
int fd,
|
| 111 |
+
int flags,
|
| 112 |
+
size_t size);
|
| 113 |
+
|
| 114 |
+
static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
|
| 115 |
+
static at::DataPtr makeDataPtr(
|
| 116 |
+
const char* filename,
|
| 117 |
+
int flags,
|
| 118 |
+
size_t size,
|
| 119 |
+
size_t* actual_size_out);
|
| 120 |
+
static at::DataPtr makeDataPtr(
|
| 121 |
+
WithFd,
|
| 122 |
+
const char* filename,
|
| 123 |
+
int fd,
|
| 124 |
+
int flags,
|
| 125 |
+
size_t size,
|
| 126 |
+
size_t* actual_size_out);
|
| 127 |
+
|
| 128 |
+
void* data() const override;
|
| 129 |
+
|
| 130 |
+
void incref();
|
| 131 |
+
int decref();
|
| 132 |
+
void close() override;
|
| 133 |
+
|
| 134 |
+
~RefcountedMapAllocator() override {
|
| 135 |
+
RefcountedMapAllocator::close();
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
protected:
|
| 139 |
+
void checkFlags();
|
| 140 |
+
void initializeAlloc();
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/TensorBody.h>
|
| 2 |
+
|
| 3 |
+
// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
|
| 4 |
+
// Code introduced to avoid cyclic dependency in static dispatch is no longer
|
| 5 |
+
// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
|
| 6 |
+
// to Operators.cpp for supporting multiple backends with multiple kernels.
|
| 7 |
+
//
|
| 8 |
+
// Note [Avoiding Include Cycles In Static Dispatch]
|
| 9 |
+
// In order to avoid #include cycles in the static dispatch build, we've carefully split out
|
| 10 |
+
// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
|
| 11 |
+
//
|
| 12 |
+
// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
|
| 13 |
+
// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
|
| 14 |
+
// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
|
| 15 |
+
// directly inlined into TensorBody.h.
|
| 16 |
+
// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
|
| 17 |
+
// which include functions that have defaultable std::optional<Tensor> arguments.
|
| 18 |
+
// That requires knowing the full Tensor class definition.
|
| 19 |
+
//
|
| 20 |
+
// We break the cycle by doing the following:
|
| 21 |
+
// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
|
| 22 |
+
// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
|
| 23 |
+
// - CPUFunctions_inl.h includes everything else
|
| 24 |
+
// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
|
| 25 |
+
// and then it includes CPUFunctions_inl.h.
|
| 26 |
+
// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
|
| 27 |
+
// - This also means that static dispatch build, CPUFunctions.h only needs to
|
| 28 |
+
// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
|
| 29 |
+
#include <ATen/MetaFunctions_inl.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h
ADDED
|
@@ -0,0 +1,443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from MethodOperators.h
|
| 4 |
+
|
| 5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
| 6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
| 7 |
+
meaning the file will need to be re-compiled every time an operator \
|
| 8 |
+
is changed or added. Consider if your change would be better placed in \
|
| 9 |
+
another file, or if a more specific header might achieve the same goal. \
|
| 10 |
+
See NOTE: [Tensor vs. TensorBase]
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 14 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 15 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 16 |
+
#include <ATen/core/ATen_fwd.h>
|
| 17 |
+
|
| 18 |
+
#include <ATen/ops/_addmm_activation_ops.h>
|
| 19 |
+
#include <ATen/ops/_autocast_to_full_precision_ops.h>
|
| 20 |
+
#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
|
| 21 |
+
#include <ATen/ops/_backward_ops.h>
|
| 22 |
+
#include <ATen/ops/_coalesced_ops.h>
|
| 23 |
+
#include <ATen/ops/_conj_ops.h>
|
| 24 |
+
#include <ATen/ops/_conj_physical_ops.h>
|
| 25 |
+
#include <ATen/ops/_dimI_ops.h>
|
| 26 |
+
#include <ATen/ops/_dimV_ops.h>
|
| 27 |
+
#include <ATen/ops/_fw_primal_ops.h>
|
| 28 |
+
#include <ATen/ops/_indices_ops.h>
|
| 29 |
+
#include <ATen/ops/_is_all_true_ops.h>
|
| 30 |
+
#include <ATen/ops/_is_any_true_ops.h>
|
| 31 |
+
#include <ATen/ops/_is_zerotensor_ops.h>
|
| 32 |
+
#include <ATen/ops/_lazy_clone_ops.h>
|
| 33 |
+
#include <ATen/ops/_neg_view_ops.h>
|
| 34 |
+
#include <ATen/ops/_nested_tensor_size_ops.h>
|
| 35 |
+
#include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
|
| 36 |
+
#include <ATen/ops/_nested_tensor_strides_ops.h>
|
| 37 |
+
#include <ATen/ops/_nnz_ops.h>
|
| 38 |
+
#include <ATen/ops/_reshape_alias_ops.h>
|
| 39 |
+
#include <ATen/ops/_sparse_mask_projection_ops.h>
|
| 40 |
+
#include <ATen/ops/_to_dense_ops.h>
|
| 41 |
+
#include <ATen/ops/_to_sparse_bsc_ops.h>
|
| 42 |
+
#include <ATen/ops/_to_sparse_bsr_ops.h>
|
| 43 |
+
#include <ATen/ops/_to_sparse_csc_ops.h>
|
| 44 |
+
#include <ATen/ops/_to_sparse_csr_ops.h>
|
| 45 |
+
#include <ATen/ops/_to_sparse_ops.h>
|
| 46 |
+
#include <ATen/ops/_values_ops.h>
|
| 47 |
+
#include <ATen/ops/_version_ops.h>
|
| 48 |
+
#include <ATen/ops/abs_ops.h>
|
| 49 |
+
#include <ATen/ops/absolute_ops.h>
|
| 50 |
+
#include <ATen/ops/acos_ops.h>
|
| 51 |
+
#include <ATen/ops/acosh_ops.h>
|
| 52 |
+
#include <ATen/ops/add_ops.h>
|
| 53 |
+
#include <ATen/ops/addbmm_ops.h>
|
| 54 |
+
#include <ATen/ops/addcdiv_ops.h>
|
| 55 |
+
#include <ATen/ops/addcmul_ops.h>
|
| 56 |
+
#include <ATen/ops/addmm_ops.h>
|
| 57 |
+
#include <ATen/ops/addmv_ops.h>
|
| 58 |
+
#include <ATen/ops/addr_ops.h>
|
| 59 |
+
#include <ATen/ops/adjoint_ops.h>
|
| 60 |
+
#include <ATen/ops/alias_ops.h>
|
| 61 |
+
#include <ATen/ops/align_as_ops.h>
|
| 62 |
+
#include <ATen/ops/align_to_ops.h>
|
| 63 |
+
#include <ATen/ops/all_ops.h>
|
| 64 |
+
#include <ATen/ops/allclose_ops.h>
|
| 65 |
+
#include <ATen/ops/amax_ops.h>
|
| 66 |
+
#include <ATen/ops/amin_ops.h>
|
| 67 |
+
#include <ATen/ops/aminmax_ops.h>
|
| 68 |
+
#include <ATen/ops/and_ops.h>
|
| 69 |
+
#include <ATen/ops/angle_ops.h>
|
| 70 |
+
#include <ATen/ops/any_ops.h>
|
| 71 |
+
#include <ATen/ops/arccos_ops.h>
|
| 72 |
+
#include <ATen/ops/arccosh_ops.h>
|
| 73 |
+
#include <ATen/ops/arcsin_ops.h>
|
| 74 |
+
#include <ATen/ops/arcsinh_ops.h>
|
| 75 |
+
#include <ATen/ops/arctan2_ops.h>
|
| 76 |
+
#include <ATen/ops/arctan_ops.h>
|
| 77 |
+
#include <ATen/ops/arctanh_ops.h>
|
| 78 |
+
#include <ATen/ops/argmax_ops.h>
|
| 79 |
+
#include <ATen/ops/argmin_ops.h>
|
| 80 |
+
#include <ATen/ops/argsort_ops.h>
|
| 81 |
+
#include <ATen/ops/argwhere_ops.h>
|
| 82 |
+
#include <ATen/ops/as_strided_ops.h>
|
| 83 |
+
#include <ATen/ops/as_strided_scatter_ops.h>
|
| 84 |
+
#include <ATen/ops/asin_ops.h>
|
| 85 |
+
#include <ATen/ops/asinh_ops.h>
|
| 86 |
+
#include <ATen/ops/atan2_ops.h>
|
| 87 |
+
#include <ATen/ops/atan_ops.h>
|
| 88 |
+
#include <ATen/ops/atanh_ops.h>
|
| 89 |
+
#include <ATen/ops/baddbmm_ops.h>
|
| 90 |
+
#include <ATen/ops/bernoulli_ops.h>
|
| 91 |
+
#include <ATen/ops/bincount_ops.h>
|
| 92 |
+
#include <ATen/ops/bitwise_and_ops.h>
|
| 93 |
+
#include <ATen/ops/bitwise_left_shift_ops.h>
|
| 94 |
+
#include <ATen/ops/bitwise_not_ops.h>
|
| 95 |
+
#include <ATen/ops/bitwise_or_ops.h>
|
| 96 |
+
#include <ATen/ops/bitwise_right_shift_ops.h>
|
| 97 |
+
#include <ATen/ops/bitwise_xor_ops.h>
|
| 98 |
+
#include <ATen/ops/bmm_ops.h>
|
| 99 |
+
#include <ATen/ops/broadcast_to_ops.h>
|
| 100 |
+
#include <ATen/ops/cauchy_ops.h>
|
| 101 |
+
#include <ATen/ops/ccol_indices_ops.h>
|
| 102 |
+
#include <ATen/ops/ceil_ops.h>
|
| 103 |
+
#include <ATen/ops/chalf_ops.h>
|
| 104 |
+
#include <ATen/ops/cholesky_inverse_ops.h>
|
| 105 |
+
#include <ATen/ops/cholesky_ops.h>
|
| 106 |
+
#include <ATen/ops/cholesky_solve_ops.h>
|
| 107 |
+
#include <ATen/ops/chunk_ops.h>
|
| 108 |
+
#include <ATen/ops/clamp_max_ops.h>
|
| 109 |
+
#include <ATen/ops/clamp_min_ops.h>
|
| 110 |
+
#include <ATen/ops/clamp_ops.h>
|
| 111 |
+
#include <ATen/ops/clip_ops.h>
|
| 112 |
+
#include <ATen/ops/clone_ops.h>
|
| 113 |
+
#include <ATen/ops/coalesce_ops.h>
|
| 114 |
+
#include <ATen/ops/col_indices_ops.h>
|
| 115 |
+
#include <ATen/ops/conj_ops.h>
|
| 116 |
+
#include <ATen/ops/conj_physical_ops.h>
|
| 117 |
+
#include <ATen/ops/contiguous_ops.h>
|
| 118 |
+
#include <ATen/ops/copy_ops.h>
|
| 119 |
+
#include <ATen/ops/copysign_ops.h>
|
| 120 |
+
#include <ATen/ops/corrcoef_ops.h>
|
| 121 |
+
#include <ATen/ops/cos_ops.h>
|
| 122 |
+
#include <ATen/ops/cosh_ops.h>
|
| 123 |
+
#include <ATen/ops/count_nonzero_ops.h>
|
| 124 |
+
#include <ATen/ops/cov_ops.h>
|
| 125 |
+
#include <ATen/ops/cross_ops.h>
|
| 126 |
+
#include <ATen/ops/crow_indices_ops.h>
|
| 127 |
+
#include <ATen/ops/cummax_ops.h>
|
| 128 |
+
#include <ATen/ops/cummin_ops.h>
|
| 129 |
+
#include <ATen/ops/cumprod_ops.h>
|
| 130 |
+
#include <ATen/ops/cumsum_ops.h>
|
| 131 |
+
#include <ATen/ops/data_ops.h>
|
| 132 |
+
#include <ATen/ops/deg2rad_ops.h>
|
| 133 |
+
#include <ATen/ops/dense_dim_ops.h>
|
| 134 |
+
#include <ATen/ops/dequantize_ops.h>
|
| 135 |
+
#include <ATen/ops/det_ops.h>
|
| 136 |
+
#include <ATen/ops/detach_ops.h>
|
| 137 |
+
#include <ATen/ops/diag_embed_ops.h>
|
| 138 |
+
#include <ATen/ops/diag_ops.h>
|
| 139 |
+
#include <ATen/ops/diagflat_ops.h>
|
| 140 |
+
#include <ATen/ops/diagonal_ops.h>
|
| 141 |
+
#include <ATen/ops/diagonal_scatter_ops.h>
|
| 142 |
+
#include <ATen/ops/diff_ops.h>
|
| 143 |
+
#include <ATen/ops/digamma_ops.h>
|
| 144 |
+
#include <ATen/ops/dist_ops.h>
|
| 145 |
+
#include <ATen/ops/div_ops.h>
|
| 146 |
+
#include <ATen/ops/divide_ops.h>
|
| 147 |
+
#include <ATen/ops/dot_ops.h>
|
| 148 |
+
#include <ATen/ops/dsplit_ops.h>
|
| 149 |
+
#include <ATen/ops/eq_ops.h>
|
| 150 |
+
#include <ATen/ops/equal_ops.h>
|
| 151 |
+
#include <ATen/ops/erf_ops.h>
|
| 152 |
+
#include <ATen/ops/erfc_ops.h>
|
| 153 |
+
#include <ATen/ops/erfinv_ops.h>
|
| 154 |
+
#include <ATen/ops/exp2_ops.h>
|
| 155 |
+
#include <ATen/ops/exp_ops.h>
|
| 156 |
+
#include <ATen/ops/expand_as_ops.h>
|
| 157 |
+
#include <ATen/ops/expand_ops.h>
|
| 158 |
+
#include <ATen/ops/expm1_ops.h>
|
| 159 |
+
#include <ATen/ops/exponential_ops.h>
|
| 160 |
+
#include <ATen/ops/fill_diagonal_ops.h>
|
| 161 |
+
#include <ATen/ops/fill_ops.h>
|
| 162 |
+
#include <ATen/ops/fix_ops.h>
|
| 163 |
+
#include <ATen/ops/flatten_ops.h>
|
| 164 |
+
#include <ATen/ops/flip_ops.h>
|
| 165 |
+
#include <ATen/ops/fliplr_ops.h>
|
| 166 |
+
#include <ATen/ops/flipud_ops.h>
|
| 167 |
+
#include <ATen/ops/float_power_ops.h>
|
| 168 |
+
#include <ATen/ops/floor_divide_ops.h>
|
| 169 |
+
#include <ATen/ops/floor_ops.h>
|
| 170 |
+
#include <ATen/ops/fmax_ops.h>
|
| 171 |
+
#include <ATen/ops/fmin_ops.h>
|
| 172 |
+
#include <ATen/ops/fmod_ops.h>
|
| 173 |
+
#include <ATen/ops/frac_ops.h>
|
| 174 |
+
#include <ATen/ops/frexp_ops.h>
|
| 175 |
+
#include <ATen/ops/gather_ops.h>
|
| 176 |
+
#include <ATen/ops/gcd_ops.h>
|
| 177 |
+
#include <ATen/ops/ge_ops.h>
|
| 178 |
+
#include <ATen/ops/geometric_ops.h>
|
| 179 |
+
#include <ATen/ops/geqrf_ops.h>
|
| 180 |
+
#include <ATen/ops/ger_ops.h>
|
| 181 |
+
#include <ATen/ops/greater_equal_ops.h>
|
| 182 |
+
#include <ATen/ops/greater_ops.h>
|
| 183 |
+
#include <ATen/ops/gt_ops.h>
|
| 184 |
+
#include <ATen/ops/hardshrink_backward_ops.h>
|
| 185 |
+
#include <ATen/ops/hardshrink_ops.h>
|
| 186 |
+
#include <ATen/ops/heaviside_ops.h>
|
| 187 |
+
#include <ATen/ops/histc_ops.h>
|
| 188 |
+
#include <ATen/ops/histogram_ops.h>
|
| 189 |
+
#include <ATen/ops/hsplit_ops.h>
|
| 190 |
+
#include <ATen/ops/hypot_ops.h>
|
| 191 |
+
#include <ATen/ops/i0_ops.h>
|
| 192 |
+
#include <ATen/ops/igamma_ops.h>
|
| 193 |
+
#include <ATen/ops/igammac_ops.h>
|
| 194 |
+
#include <ATen/ops/index_add_ops.h>
|
| 195 |
+
#include <ATen/ops/index_copy_ops.h>
|
| 196 |
+
#include <ATen/ops/index_fill_ops.h>
|
| 197 |
+
#include <ATen/ops/index_ops.h>
|
| 198 |
+
#include <ATen/ops/index_put_ops.h>
|
| 199 |
+
#include <ATen/ops/index_reduce_ops.h>
|
| 200 |
+
#include <ATen/ops/index_select_ops.h>
|
| 201 |
+
#include <ATen/ops/indices_ops.h>
|
| 202 |
+
#include <ATen/ops/inner_ops.h>
|
| 203 |
+
#include <ATen/ops/int_repr_ops.h>
|
| 204 |
+
#include <ATen/ops/inverse_ops.h>
|
| 205 |
+
#include <ATen/ops/is_coalesced_ops.h>
|
| 206 |
+
#include <ATen/ops/is_complex_ops.h>
|
| 207 |
+
#include <ATen/ops/is_conj_ops.h>
|
| 208 |
+
#include <ATen/ops/is_distributed_ops.h>
|
| 209 |
+
#include <ATen/ops/is_floating_point_ops.h>
|
| 210 |
+
#include <ATen/ops/is_inference_ops.h>
|
| 211 |
+
#include <ATen/ops/is_leaf_ops.h>
|
| 212 |
+
#include <ATen/ops/is_neg_ops.h>
|
| 213 |
+
#include <ATen/ops/is_nonzero_ops.h>
|
| 214 |
+
#include <ATen/ops/is_pinned_ops.h>
|
| 215 |
+
#include <ATen/ops/is_same_size_ops.h>
|
| 216 |
+
#include <ATen/ops/is_set_to_ops.h>
|
| 217 |
+
#include <ATen/ops/is_signed_ops.h>
|
| 218 |
+
#include <ATen/ops/isclose_ops.h>
|
| 219 |
+
#include <ATen/ops/isfinite_ops.h>
|
| 220 |
+
#include <ATen/ops/isinf_ops.h>
|
| 221 |
+
#include <ATen/ops/isnan_ops.h>
|
| 222 |
+
#include <ATen/ops/isneginf_ops.h>
|
| 223 |
+
#include <ATen/ops/isposinf_ops.h>
|
| 224 |
+
#include <ATen/ops/isreal_ops.h>
|
| 225 |
+
#include <ATen/ops/istft_ops.h>
|
| 226 |
+
#include <ATen/ops/item_ops.h>
|
| 227 |
+
#include <ATen/ops/kron_ops.h>
|
| 228 |
+
#include <ATen/ops/kthvalue_ops.h>
|
| 229 |
+
#include <ATen/ops/lcm_ops.h>
|
| 230 |
+
#include <ATen/ops/ldexp_ops.h>
|
| 231 |
+
#include <ATen/ops/le_ops.h>
|
| 232 |
+
#include <ATen/ops/lerp_ops.h>
|
| 233 |
+
#include <ATen/ops/less_equal_ops.h>
|
| 234 |
+
#include <ATen/ops/less_ops.h>
|
| 235 |
+
#include <ATen/ops/lgamma_ops.h>
|
| 236 |
+
#include <ATen/ops/log10_ops.h>
|
| 237 |
+
#include <ATen/ops/log1p_ops.h>
|
| 238 |
+
#include <ATen/ops/log2_ops.h>
|
| 239 |
+
#include <ATen/ops/log_normal_ops.h>
|
| 240 |
+
#include <ATen/ops/log_ops.h>
|
| 241 |
+
#include <ATen/ops/log_softmax_ops.h>
|
| 242 |
+
#include <ATen/ops/logaddexp2_ops.h>
|
| 243 |
+
#include <ATen/ops/logaddexp_ops.h>
|
| 244 |
+
#include <ATen/ops/logcumsumexp_ops.h>
|
| 245 |
+
#include <ATen/ops/logdet_ops.h>
|
| 246 |
+
#include <ATen/ops/logical_and_ops.h>
|
| 247 |
+
#include <ATen/ops/logical_not_ops.h>
|
| 248 |
+
#include <ATen/ops/logical_or_ops.h>
|
| 249 |
+
#include <ATen/ops/logical_xor_ops.h>
|
| 250 |
+
#include <ATen/ops/logit_ops.h>
|
| 251 |
+
#include <ATen/ops/logsumexp_ops.h>
|
| 252 |
+
#include <ATen/ops/lshift_ops.h>
|
| 253 |
+
#include <ATen/ops/lt_ops.h>
|
| 254 |
+
#include <ATen/ops/lu_solve_ops.h>
|
| 255 |
+
#include <ATen/ops/mH_ops.h>
|
| 256 |
+
#include <ATen/ops/mT_ops.h>
|
| 257 |
+
#include <ATen/ops/masked_fill_ops.h>
|
| 258 |
+
#include <ATen/ops/masked_scatter_ops.h>
|
| 259 |
+
#include <ATen/ops/masked_select_ops.h>
|
| 260 |
+
#include <ATen/ops/matmul_ops.h>
|
| 261 |
+
#include <ATen/ops/matrix_H_ops.h>
|
| 262 |
+
#include <ATen/ops/matrix_exp_ops.h>
|
| 263 |
+
#include <ATen/ops/matrix_power_ops.h>
|
| 264 |
+
#include <ATen/ops/max_ops.h>
|
| 265 |
+
#include <ATen/ops/maximum_ops.h>
|
| 266 |
+
#include <ATen/ops/mean_ops.h>
|
| 267 |
+
#include <ATen/ops/median_ops.h>
|
| 268 |
+
#include <ATen/ops/min_ops.h>
|
| 269 |
+
#include <ATen/ops/minimum_ops.h>
|
| 270 |
+
#include <ATen/ops/mm_ops.h>
|
| 271 |
+
#include <ATen/ops/mode_ops.h>
|
| 272 |
+
#include <ATen/ops/moveaxis_ops.h>
|
| 273 |
+
#include <ATen/ops/movedim_ops.h>
|
| 274 |
+
#include <ATen/ops/msort_ops.h>
|
| 275 |
+
#include <ATen/ops/mul_ops.h>
|
| 276 |
+
#include <ATen/ops/multinomial_ops.h>
|
| 277 |
+
#include <ATen/ops/multiply_ops.h>
|
| 278 |
+
#include <ATen/ops/mv_ops.h>
|
| 279 |
+
#include <ATen/ops/mvlgamma_ops.h>
|
| 280 |
+
#include <ATen/ops/nan_to_num_ops.h>
|
| 281 |
+
#include <ATen/ops/nanmean_ops.h>
|
| 282 |
+
#include <ATen/ops/nanmedian_ops.h>
|
| 283 |
+
#include <ATen/ops/nanquantile_ops.h>
|
| 284 |
+
#include <ATen/ops/nansum_ops.h>
|
| 285 |
+
#include <ATen/ops/narrow_copy_ops.h>
|
| 286 |
+
#include <ATen/ops/narrow_ops.h>
|
| 287 |
+
#include <ATen/ops/ne_ops.h>
|
| 288 |
+
#include <ATen/ops/neg_ops.h>
|
| 289 |
+
#include <ATen/ops/negative_ops.h>
|
| 290 |
+
#include <ATen/ops/new_empty_ops.h>
|
| 291 |
+
#include <ATen/ops/new_empty_strided_ops.h>
|
| 292 |
+
#include <ATen/ops/new_full_ops.h>
|
| 293 |
+
#include <ATen/ops/new_ones_ops.h>
|
| 294 |
+
#include <ATen/ops/new_zeros_ops.h>
|
| 295 |
+
#include <ATen/ops/nextafter_ops.h>
|
| 296 |
+
#include <ATen/ops/nonzero_numpy_ops.h>
|
| 297 |
+
#include <ATen/ops/nonzero_ops.h>
|
| 298 |
+
#include <ATen/ops/nonzero_static_ops.h>
|
| 299 |
+
#include <ATen/ops/norm_ops.h>
|
| 300 |
+
#include <ATen/ops/normal_ops.h>
|
| 301 |
+
#include <ATen/ops/not_equal_ops.h>
|
| 302 |
+
#include <ATen/ops/numpy_T_ops.h>
|
| 303 |
+
#include <ATen/ops/or_ops.h>
|
| 304 |
+
#include <ATen/ops/orgqr_ops.h>
|
| 305 |
+
#include <ATen/ops/ormqr_ops.h>
|
| 306 |
+
#include <ATen/ops/outer_ops.h>
|
| 307 |
+
#include <ATen/ops/output_nr_ops.h>
|
| 308 |
+
#include <ATen/ops/permute_ops.h>
|
| 309 |
+
#include <ATen/ops/pin_memory_ops.h>
|
| 310 |
+
#include <ATen/ops/pinverse_ops.h>
|
| 311 |
+
#include <ATen/ops/polygamma_ops.h>
|
| 312 |
+
#include <ATen/ops/positive_ops.h>
|
| 313 |
+
#include <ATen/ops/pow_ops.h>
|
| 314 |
+
#include <ATen/ops/prelu_ops.h>
|
| 315 |
+
#include <ATen/ops/prod_ops.h>
|
| 316 |
+
#include <ATen/ops/put_ops.h>
|
| 317 |
+
#include <ATen/ops/q_per_channel_axis_ops.h>
|
| 318 |
+
#include <ATen/ops/q_per_channel_scales_ops.h>
|
| 319 |
+
#include <ATen/ops/q_per_channel_zero_points_ops.h>
|
| 320 |
+
#include <ATen/ops/q_scale_ops.h>
|
| 321 |
+
#include <ATen/ops/q_zero_point_ops.h>
|
| 322 |
+
#include <ATen/ops/qr_ops.h>
|
| 323 |
+
#include <ATen/ops/qscheme_ops.h>
|
| 324 |
+
#include <ATen/ops/quantile_ops.h>
|
| 325 |
+
#include <ATen/ops/rad2deg_ops.h>
|
| 326 |
+
#include <ATen/ops/random_ops.h>
|
| 327 |
+
#include <ATen/ops/ravel_ops.h>
|
| 328 |
+
#include <ATen/ops/reciprocal_ops.h>
|
| 329 |
+
#include <ATen/ops/record_stream_ops.h>
|
| 330 |
+
#include <ATen/ops/refine_names_ops.h>
|
| 331 |
+
#include <ATen/ops/relu_ops.h>
|
| 332 |
+
#include <ATen/ops/remainder_ops.h>
|
| 333 |
+
#include <ATen/ops/rename_ops.h>
|
| 334 |
+
#include <ATen/ops/renorm_ops.h>
|
| 335 |
+
#include <ATen/ops/repeat_interleave_ops.h>
|
| 336 |
+
#include <ATen/ops/repeat_ops.h>
|
| 337 |
+
#include <ATen/ops/requires_grad_ops.h>
|
| 338 |
+
#include <ATen/ops/reshape_as_ops.h>
|
| 339 |
+
#include <ATen/ops/reshape_ops.h>
|
| 340 |
+
#include <ATen/ops/resize_as_ops.h>
|
| 341 |
+
#include <ATen/ops/resize_as_sparse_ops.h>
|
| 342 |
+
#include <ATen/ops/resize_ops.h>
|
| 343 |
+
#include <ATen/ops/resolve_conj_ops.h>
|
| 344 |
+
#include <ATen/ops/resolve_neg_ops.h>
|
| 345 |
+
#include <ATen/ops/retain_grad_ops.h>
|
| 346 |
+
#include <ATen/ops/retains_grad_ops.h>
|
| 347 |
+
#include <ATen/ops/roll_ops.h>
|
| 348 |
+
#include <ATen/ops/rot90_ops.h>
|
| 349 |
+
#include <ATen/ops/round_ops.h>
|
| 350 |
+
#include <ATen/ops/row_indices_ops.h>
|
| 351 |
+
#include <ATen/ops/rshift_ops.h>
|
| 352 |
+
#include <ATen/ops/rsqrt_ops.h>
|
| 353 |
+
#include <ATen/ops/scatter_add_ops.h>
|
| 354 |
+
#include <ATen/ops/scatter_ops.h>
|
| 355 |
+
#include <ATen/ops/scatter_reduce_ops.h>
|
| 356 |
+
#include <ATen/ops/select_ops.h>
|
| 357 |
+
#include <ATen/ops/select_scatter_ops.h>
|
| 358 |
+
#include <ATen/ops/set_data_ops.h>
|
| 359 |
+
#include <ATen/ops/set_ops.h>
|
| 360 |
+
#include <ATen/ops/sgn_ops.h>
|
| 361 |
+
#include <ATen/ops/sigmoid_ops.h>
|
| 362 |
+
#include <ATen/ops/sign_ops.h>
|
| 363 |
+
#include <ATen/ops/signbit_ops.h>
|
| 364 |
+
#include <ATen/ops/sin_ops.h>
|
| 365 |
+
#include <ATen/ops/sinc_ops.h>
|
| 366 |
+
#include <ATen/ops/sinh_ops.h>
|
| 367 |
+
#include <ATen/ops/size_ops.h>
|
| 368 |
+
#include <ATen/ops/slice_inverse_ops.h>
|
| 369 |
+
#include <ATen/ops/slice_ops.h>
|
| 370 |
+
#include <ATen/ops/slice_scatter_ops.h>
|
| 371 |
+
#include <ATen/ops/slogdet_ops.h>
|
| 372 |
+
#include <ATen/ops/smm_ops.h>
|
| 373 |
+
#include <ATen/ops/softmax_ops.h>
|
| 374 |
+
#include <ATen/ops/sort_ops.h>
|
| 375 |
+
#include <ATen/ops/sparse_dim_ops.h>
|
| 376 |
+
#include <ATen/ops/sparse_mask_ops.h>
|
| 377 |
+
#include <ATen/ops/sparse_resize_and_clear_ops.h>
|
| 378 |
+
#include <ATen/ops/sparse_resize_ops.h>
|
| 379 |
+
#include <ATen/ops/split_ops.h>
|
| 380 |
+
#include <ATen/ops/split_with_sizes_ops.h>
|
| 381 |
+
#include <ATen/ops/sqrt_ops.h>
|
| 382 |
+
#include <ATen/ops/square_ops.h>
|
| 383 |
+
#include <ATen/ops/squeeze_ops.h>
|
| 384 |
+
#include <ATen/ops/sspaddmm_ops.h>
|
| 385 |
+
#include <ATen/ops/std_ops.h>
|
| 386 |
+
#include <ATen/ops/stft_ops.h>
|
| 387 |
+
#include <ATen/ops/stride_ops.h>
|
| 388 |
+
#include <ATen/ops/sub_ops.h>
|
| 389 |
+
#include <ATen/ops/subtract_ops.h>
|
| 390 |
+
#include <ATen/ops/sum_ops.h>
|
| 391 |
+
#include <ATen/ops/sum_to_size_ops.h>
|
| 392 |
+
#include <ATen/ops/svd_ops.h>
|
| 393 |
+
#include <ATen/ops/swapaxes_ops.h>
|
| 394 |
+
#include <ATen/ops/swapdims_ops.h>
|
| 395 |
+
#include <ATen/ops/t_ops.h>
|
| 396 |
+
#include <ATen/ops/take_along_dim_ops.h>
|
| 397 |
+
#include <ATen/ops/take_ops.h>
|
| 398 |
+
#include <ATen/ops/tan_ops.h>
|
| 399 |
+
#include <ATen/ops/tanh_ops.h>
|
| 400 |
+
#include <ATen/ops/tensor_split_ops.h>
|
| 401 |
+
#include <ATen/ops/tile_ops.h>
|
| 402 |
+
#include <ATen/ops/to_dense_ops.h>
|
| 403 |
+
#include <ATen/ops/to_mkldnn_ops.h>
|
| 404 |
+
#include <ATen/ops/to_ops.h>
|
| 405 |
+
#include <ATen/ops/to_padded_tensor_ops.h>
|
| 406 |
+
#include <ATen/ops/to_sparse_bsc_ops.h>
|
| 407 |
+
#include <ATen/ops/to_sparse_bsr_ops.h>
|
| 408 |
+
#include <ATen/ops/to_sparse_csc_ops.h>
|
| 409 |
+
#include <ATen/ops/to_sparse_csr_ops.h>
|
| 410 |
+
#include <ATen/ops/to_sparse_ops.h>
|
| 411 |
+
#include <ATen/ops/topk_ops.h>
|
| 412 |
+
#include <ATen/ops/trace_ops.h>
|
| 413 |
+
#include <ATen/ops/transpose_ops.h>
|
| 414 |
+
#include <ATen/ops/triangular_solve_ops.h>
|
| 415 |
+
#include <ATen/ops/tril_ops.h>
|
| 416 |
+
#include <ATen/ops/triu_ops.h>
|
| 417 |
+
#include <ATen/ops/true_divide_ops.h>
|
| 418 |
+
#include <ATen/ops/trunc_ops.h>
|
| 419 |
+
#include <ATen/ops/type_as_ops.h>
|
| 420 |
+
#include <ATen/ops/unbind_ops.h>
|
| 421 |
+
#include <ATen/ops/unflatten_ops.h>
|
| 422 |
+
#include <ATen/ops/unfold_ops.h>
|
| 423 |
+
#include <ATen/ops/uniform_ops.h>
|
| 424 |
+
#include <ATen/ops/unsafe_chunk_ops.h>
|
| 425 |
+
#include <ATen/ops/unsafe_split_ops.h>
|
| 426 |
+
#include <ATen/ops/unsafe_split_with_sizes_ops.h>
|
| 427 |
+
#include <ATen/ops/unsqueeze_ops.h>
|
| 428 |
+
#include <ATen/ops/values_ops.h>
|
| 429 |
+
#include <ATen/ops/var_ops.h>
|
| 430 |
+
#include <ATen/ops/vdot_ops.h>
|
| 431 |
+
#include <ATen/ops/view_as_ops.h>
|
| 432 |
+
#include <ATen/ops/view_ops.h>
|
| 433 |
+
#include <ATen/ops/vsplit_ops.h>
|
| 434 |
+
#include <ATen/ops/where_ops.h>
|
| 435 |
+
#include <ATen/ops/xlogy_ops.h>
|
| 436 |
+
#include <ATen/ops/xor_ops.h>
|
| 437 |
+
#include <ATen/ops/zero_ops.h>
|
| 438 |
+
|
| 439 |
+
namespace at {
|
| 440 |
+
namespace _ops {
|
| 441 |
+
|
| 442 |
+
} // namespace _ops
|
| 443 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/NamedTensor.h>
|
| 3 |
+
#include <ATen/TensorNames.h>
|
| 4 |
+
#include <ATen/WrapDimUtilsMulti.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/DimVector.h>
|
| 7 |
+
#include <ATen/core/Tensor.h>
|
| 8 |
+
|
| 9 |
+
namespace at {
|
| 10 |
+
|
| 11 |
+
using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
|
| 12 |
+
|
| 13 |
+
inline bool has_names(const ITensorListRef& tensors) {
|
| 14 |
+
return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) {
|
| 15 |
+
return t.has_names();
|
| 16 |
+
});
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
// Converts dim to an positional index. Errors if `dim` cannot be used to
|
| 20 |
+
// refer to any dimension of tensor.
|
| 21 |
+
TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
|
| 22 |
+
TORCH_API std::vector<int64_t> dimnames_to_positions(
|
| 23 |
+
const Tensor& tensor,
|
| 24 |
+
DimnameList dims);
|
| 25 |
+
|
| 26 |
+
// Unifies two DimnameList to produce a third. This is useful for implementing
|
| 27 |
+
// the named inference rule for binary broadcasting operations like add.
|
| 28 |
+
//
|
| 29 |
+
// There are three main constraints:
|
| 30 |
+
// 1) Check matching: Names must match positionally from the right.
|
| 31 |
+
// 2) Check misaligned: If a name `n` is in `names`, then it must appear at
|
| 32 |
+
// the same index from the right in other.
|
| 33 |
+
// 3) The output names are obtained by unifying the names individually from the
|
| 34 |
+
// right.
|
| 35 |
+
TORCH_API std::vector<Dimname> unify_from_right(
|
| 36 |
+
DimnameList names,
|
| 37 |
+
DimnameList other,
|
| 38 |
+
const char* action = "broadcast");
|
| 39 |
+
|
| 40 |
+
[[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
|
| 41 |
+
TORCH_CHECK(
|
| 42 |
+
false,
|
| 43 |
+
op_name,
|
| 44 |
+
": You passed a dimname (string) to this op in place of a dimension "
|
| 45 |
+
"index but it does not yet support this behavior. Please pass a dimension "
|
| 46 |
+
"index to work around this.");
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
// [NOTE] Writing name inference rules
|
| 50 |
+
//
|
| 51 |
+
// Operators that support named tensors are either composed of operations that
|
| 52 |
+
// support named tensors or implement some name inference rule. An op that
|
| 53 |
+
// implements its own name inference rule generally looks like the following:
|
| 54 |
+
//
|
| 55 |
+
// Tensor op(...) {
|
| 56 |
+
// perform_shape_checks(...);
|
| 57 |
+
// # (1)
|
| 58 |
+
// auto maybe_outnames = compute_outnames(...);
|
| 59 |
+
// auto result = [&]() {
|
| 60 |
+
// NoNamesGuard guard;
|
| 61 |
+
// return op_impl(...);
|
| 62 |
+
// }();
|
| 63 |
+
// # (2)
|
| 64 |
+
// propagate_names_if_nonempty(result, maybe_outnames);
|
| 65 |
+
//
|
| 66 |
+
// Each op has (1) a compute outnames step and (2) a propagate names step.
|
| 67 |
+
//
|
| 68 |
+
// compute_outnames is responsible for checking that input names match and
|
| 69 |
+
// determining what the output names should be. It returns either:
|
| 70 |
+
// - {} (if the inputs tensors are all unnamed)
|
| 71 |
+
// - non-empty outnames.
|
| 72 |
+
//
|
| 73 |
+
// propagate_names_if_nonempty propagates the outnames if they exist to the
|
| 74 |
+
// result tensors.
|
| 75 |
+
//
|
| 76 |
+
// The {} case is an optimization; if the user does not use named tensors they
|
| 77 |
+
// pay no perf cost for it.
|
| 78 |
+
|
| 79 |
+
namespace namedinference {
|
| 80 |
+
|
| 81 |
+
const Tensor& propagate_names_if_present_and_nonempty(
|
| 82 |
+
const Tensor& result,
|
| 83 |
+
std::optional<DimnameList> maybe_names,
|
| 84 |
+
bool validate_names = false);
|
| 85 |
+
// Propagates `names` to `result` if `names` is not empty.
|
| 86 |
+
// `names` can be empty; see [NOTE] Writing name inference rules
|
| 87 |
+
// If `names` is not empty, `names.size()` should equal `result.dim()`.
|
| 88 |
+
// When in doubt, use this overload instead of the others.
|
| 89 |
+
TORCH_API const Tensor& propagate_names_if_nonempty(
|
| 90 |
+
const Tensor& result,
|
| 91 |
+
DimnameList maybe_names,
|
| 92 |
+
bool validate_names = false);
|
| 93 |
+
|
| 94 |
+
// Propagates `names` to `result`. Only use this if we are certain that there
|
| 95 |
+
// are names to propagate (that names is not empty).
|
| 96 |
+
TORCH_API const Tensor& propagate_names(
|
| 97 |
+
const Tensor& result,
|
| 98 |
+
DimnameList names,
|
| 99 |
+
bool validate_names = false);
|
| 100 |
+
|
| 101 |
+
// Propagates all names from src to result.
|
| 102 |
+
TORCH_API void propagate_names(const Tensor& result, const Tensor& src);
|
| 103 |
+
|
| 104 |
+
// Propagates all names except for those at the excluded_idxs.
|
| 105 |
+
TORCH_API void propagate_names_except(
|
| 106 |
+
const Tensor& result,
|
| 107 |
+
const Tensor& src,
|
| 108 |
+
IntArrayRef excluded_idxs);
|
| 109 |
+
|
| 110 |
+
// Used for reduction ops that have a `keepdim` arg.
|
| 111 |
+
TORCH_API void propagate_names_for_reduction(
|
| 112 |
+
const Tensor& result,
|
| 113 |
+
const Tensor& src,
|
| 114 |
+
IntArrayRef excluded_idxs,
|
| 115 |
+
bool keepdim);
|
| 116 |
+
|
| 117 |
+
TORCH_API void propagate_names_for_expand(
|
| 118 |
+
const Tensor& result,
|
| 119 |
+
const Tensor& self);
|
| 120 |
+
|
| 121 |
+
TORCH_API std::vector<Dimname> compute_cat_outnames(
|
| 122 |
+
const MaterializedITensorListRef& tensors);
|
| 123 |
+
|
| 124 |
+
TORCH_API std::vector<Dimname> compute_broadcast_outnames(
|
| 125 |
+
const Tensor& self,
|
| 126 |
+
const Tensor& other);
|
| 127 |
+
|
| 128 |
+
TORCH_API std::vector<Dimname> broadcast_to_outnames(
|
| 129 |
+
const Tensor& tensor,
|
| 130 |
+
const Tensor& reference_tensor,
|
| 131 |
+
const char* op_name);
|
| 132 |
+
|
| 133 |
+
TORCH_API std::vector<Dimname> compute_matmul_outnames(
|
| 134 |
+
const Tensor& self,
|
| 135 |
+
const Tensor& other);
|
| 136 |
+
|
| 137 |
+
TORCH_API std::vector<Dimname> compute_cdist_outnames(
|
| 138 |
+
const Tensor& self,
|
| 139 |
+
const Tensor& other);
|
| 140 |
+
|
| 141 |
+
TORCH_API std::vector<Dimname> compute_bmm_outnames(
|
| 142 |
+
const Tensor& result,
|
| 143 |
+
const Tensor& self,
|
| 144 |
+
const Tensor& other);
|
| 145 |
+
|
| 146 |
+
TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
|
| 147 |
+
TORCH_API std::vector<Dimname> compute_squeeze_outnames(
|
| 148 |
+
const Tensor& tensor,
|
| 149 |
+
std::bitset<dim_bitset_size> dims);
|
| 150 |
+
|
| 151 |
+
std::vector<Dimname> compute_diagonal_outnames(
|
| 152 |
+
const Tensor& tensor,
|
| 153 |
+
int64_t dim1,
|
| 154 |
+
int64_t dim2);
|
| 155 |
+
|
| 156 |
+
// TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
|
| 157 |
+
|
| 158 |
+
TORCH_API TensorImpl* propagate_names_if_nonempty(
|
| 159 |
+
TensorImpl* result,
|
| 160 |
+
DimnameList maybe_names,
|
| 161 |
+
bool validate_names = false);
|
| 162 |
+
|
| 163 |
+
TORCH_API TensorImpl* propagate_names(
|
| 164 |
+
TensorImpl* result,
|
| 165 |
+
DimnameList names,
|
| 166 |
+
bool validate_names = false);
|
| 167 |
+
|
| 168 |
+
TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
|
| 169 |
+
|
| 170 |
+
TORCH_API inline void propagate_names(
|
| 171 |
+
const TensorBase& result,
|
| 172 |
+
DimnameList names,
|
| 173 |
+
bool validate_names = false) {
|
| 174 |
+
propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
TORCH_API inline void propagate_names_if_nonempty(
|
| 178 |
+
const TensorBase& result,
|
| 179 |
+
DimnameList names,
|
| 180 |
+
bool validate_names = false) {
|
| 181 |
+
propagate_names_if_nonempty(
|
| 182 |
+
result.unsafeGetTensorImpl(), names, validate_names);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
TORCH_API inline void propagate_names(
|
| 186 |
+
const TensorBase& result,
|
| 187 |
+
const TensorBase& src) {
|
| 188 |
+
propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
// result = m1 @ m2 + bias
|
| 192 |
+
TORCH_API std::vector<Dimname> propagate_names_for_addmm(
|
| 193 |
+
const Tensor& m1,
|
| 194 |
+
const Tensor& m2,
|
| 195 |
+
const Tensor& bias);
|
| 196 |
+
|
| 197 |
+
TORCH_API std::vector<Dimname> propagate_names_for_addmv(
|
| 198 |
+
const Tensor& mat,
|
| 199 |
+
const Tensor& vec,
|
| 200 |
+
const Tensor& bias);
|
| 201 |
+
|
| 202 |
+
TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
|
| 203 |
+
|
| 204 |
+
TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
|
| 205 |
+
const Tensor& result,
|
| 206 |
+
const Tensor& self,
|
| 207 |
+
const Tensor& other,
|
| 208 |
+
const Tensor& bias);
|
| 209 |
+
|
| 210 |
+
TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
|
| 211 |
+
|
| 212 |
+
} // namespace namedinference
|
| 213 |
+
|
| 214 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/NativeFunctions.h
ADDED
|
@@ -0,0 +1,1344 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunctions.h
|
| 4 |
+
|
| 5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
| 6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
| 7 |
+
meaning the file will need to be re-compiled every time an operator \
|
| 8 |
+
is changed or added. Consider if your change would be better placed in \
|
| 9 |
+
another file, or if a more specific header might achieve the same goal. \
|
| 10 |
+
See NOTE: [Tensor vs. TensorBase]
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 14 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
| 15 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
| 16 |
+
Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
|
| 17 |
+
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
| 18 |
+
#endif
|
| 19 |
+
|
| 20 |
+
#include <c10/core/Scalar.h>
|
| 21 |
+
#include <c10/core/Storage.h>
|
| 22 |
+
#include <c10/core/TensorOptions.h>
|
| 23 |
+
#include <c10/util/Deprecated.h>
|
| 24 |
+
#include <optional>
|
| 25 |
+
#include <c10/core/QScheme.h>
|
| 26 |
+
#include <ATen/core/Reduction.h>
|
| 27 |
+
#include <ATen/core/Tensor.h>
|
| 28 |
+
#include <tuple>
|
| 29 |
+
#include <vector>
|
| 30 |
+
|
| 31 |
+
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
|
| 32 |
+
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
|
| 33 |
+
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
|
| 34 |
+
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
|
| 35 |
+
#include <ATen/ops/_add_batch_dim_native.h>
|
| 36 |
+
#include <ATen/ops/_add_relu_native.h>
|
| 37 |
+
#include <ATen/ops/_addmm_activation_native.h>
|
| 38 |
+
#include <ATen/ops/_aminmax_native.h>
|
| 39 |
+
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
|
| 40 |
+
#include <ATen/ops/_amp_update_scale_native.h>
|
| 41 |
+
#include <ATen/ops/_assert_async_native.h>
|
| 42 |
+
#include <ATen/ops/_assert_scalar_native.h>
|
| 43 |
+
#include <ATen/ops/_assert_tensor_metadata_native.h>
|
| 44 |
+
#include <ATen/ops/_autocast_to_full_precision_native.h>
|
| 45 |
+
#include <ATen/ops/_autocast_to_reduced_precision_native.h>
|
| 46 |
+
#include <ATen/ops/_backward_native.h>
|
| 47 |
+
#include <ATen/ops/_batch_norm_impl_index_native.h>
|
| 48 |
+
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
|
| 49 |
+
#include <ATen/ops/_batch_norm_no_update_native.h>
|
| 50 |
+
#include <ATen/ops/_batch_norm_with_update_native.h>
|
| 51 |
+
#include <ATen/ops/_cast_Byte_native.h>
|
| 52 |
+
#include <ATen/ops/_cast_Char_native.h>
|
| 53 |
+
#include <ATen/ops/_cast_Double_native.h>
|
| 54 |
+
#include <ATen/ops/_cast_Float_native.h>
|
| 55 |
+
#include <ATen/ops/_cast_Half_native.h>
|
| 56 |
+
#include <ATen/ops/_cast_Int_native.h>
|
| 57 |
+
#include <ATen/ops/_cast_Long_native.h>
|
| 58 |
+
#include <ATen/ops/_cast_Short_native.h>
|
| 59 |
+
#include <ATen/ops/_cdist_backward_native.h>
|
| 60 |
+
#include <ATen/ops/_cdist_forward_native.h>
|
| 61 |
+
#include <ATen/ops/_cholesky_solve_helper_native.h>
|
| 62 |
+
#include <ATen/ops/_choose_qparams_per_tensor_native.h>
|
| 63 |
+
#include <ATen/ops/_chunk_cat_native.h>
|
| 64 |
+
#include <ATen/ops/_coalesce_native.h>
|
| 65 |
+
#include <ATen/ops/_coalesced_native.h>
|
| 66 |
+
#include <ATen/ops/_compute_linear_combination_native.h>
|
| 67 |
+
#include <ATen/ops/_conj_native.h>
|
| 68 |
+
#include <ATen/ops/_conj_copy_native.h>
|
| 69 |
+
#include <ATen/ops/_conj_physical_native.h>
|
| 70 |
+
#include <ATen/ops/_conv_depthwise2d_native.h>
|
| 71 |
+
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
|
| 72 |
+
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
|
| 73 |
+
#include <ATen/ops/_convert_weight_to_int4pack_native.h>
|
| 74 |
+
#include <ATen/ops/_convolution_native.h>
|
| 75 |
+
#include <ATen/ops/_convolution_double_backward_native.h>
|
| 76 |
+
#include <ATen/ops/_convolution_mode_native.h>
|
| 77 |
+
#include <ATen/ops/_copy_from_native.h>
|
| 78 |
+
#include <ATen/ops/_copy_from_and_resize_native.h>
|
| 79 |
+
#include <ATen/ops/_cslt_compress_native.h>
|
| 80 |
+
#include <ATen/ops/_cslt_sparse_mm_native.h>
|
| 81 |
+
#include <ATen/ops/_cslt_sparse_mm_search_native.h>
|
| 82 |
+
#include <ATen/ops/_ctc_loss_native.h>
|
| 83 |
+
#include <ATen/ops/_ctc_loss_backward_native.h>
|
| 84 |
+
#include <ATen/ops/_cudnn_ctc_loss_native.h>
|
| 85 |
+
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
|
| 86 |
+
#include <ATen/ops/_cudnn_rnn_native.h>
|
| 87 |
+
#include <ATen/ops/_cudnn_rnn_backward_native.h>
|
| 88 |
+
#include <ATen/ops/_cudnn_rnn_flatten_weight_native.h>
|
| 89 |
+
#include <ATen/ops/_cufft_clear_plan_cache_native.h>
|
| 90 |
+
#include <ATen/ops/_cufft_get_plan_cache_max_size_native.h>
|
| 91 |
+
#include <ATen/ops/_cufft_get_plan_cache_size_native.h>
|
| 92 |
+
#include <ATen/ops/_cufft_set_plan_cache_max_size_native.h>
|
| 93 |
+
#include <ATen/ops/_cummax_helper_native.h>
|
| 94 |
+
#include <ATen/ops/_cummin_helper_native.h>
|
| 95 |
+
#include <ATen/ops/_debug_has_internal_overlap_native.h>
|
| 96 |
+
#include <ATen/ops/_dimI_native.h>
|
| 97 |
+
#include <ATen/ops/_dimV_native.h>
|
| 98 |
+
#include <ATen/ops/_dim_arange_native.h>
|
| 99 |
+
#include <ATen/ops/_dirichlet_grad_native.h>
|
| 100 |
+
#include <ATen/ops/_efficient_attention_backward_native.h>
|
| 101 |
+
#include <ATen/ops/_efficient_attention_forward_native.h>
|
| 102 |
+
#include <ATen/ops/_efficientzerotensor_native.h>
|
| 103 |
+
#include <ATen/ops/_embedding_bag_native.h>
|
| 104 |
+
#include <ATen/ops/_embedding_bag_backward_native.h>
|
| 105 |
+
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
|
| 106 |
+
#include <ATen/ops/_embedding_bag_forward_only_native.h>
|
| 107 |
+
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
|
| 108 |
+
#include <ATen/ops/_embedding_bag_sparse_backward_native.h>
|
| 109 |
+
#include <ATen/ops/_empty_affine_quantized_native.h>
|
| 110 |
+
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
|
| 111 |
+
#include <ATen/ops/_euclidean_dist_native.h>
|
| 112 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
|
| 113 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
|
| 114 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
|
| 115 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
|
| 116 |
+
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
|
| 117 |
+
#include <ATen/ops/_fft_c2c_native.h>
|
| 118 |
+
#include <ATen/ops/_fft_c2r_native.h>
|
| 119 |
+
#include <ATen/ops/_fft_r2c_native.h>
|
| 120 |
+
#include <ATen/ops/_fill_mem_eff_dropout_mask_native.h>
|
| 121 |
+
#include <ATen/ops/_flash_attention_backward_native.h>
|
| 122 |
+
#include <ATen/ops/_flash_attention_forward_native.h>
|
| 123 |
+
#include <ATen/ops/_foobar_native.h>
|
| 124 |
+
#include <ATen/ops/_foreach_abs_native.h>
|
| 125 |
+
#include <ATen/ops/_foreach_acos_native.h>
|
| 126 |
+
#include <ATen/ops/_foreach_add_native.h>
|
| 127 |
+
#include <ATen/ops/_foreach_addcdiv_native.h>
|
| 128 |
+
#include <ATen/ops/_foreach_addcmul_native.h>
|
| 129 |
+
#include <ATen/ops/_foreach_asin_native.h>
|
| 130 |
+
#include <ATen/ops/_foreach_atan_native.h>
|
| 131 |
+
#include <ATen/ops/_foreach_ceil_native.h>
|
| 132 |
+
#include <ATen/ops/_foreach_clamp_max_native.h>
|
| 133 |
+
#include <ATen/ops/_foreach_clamp_min_native.h>
|
| 134 |
+
#include <ATen/ops/_foreach_copy_native.h>
|
| 135 |
+
#include <ATen/ops/_foreach_cos_native.h>
|
| 136 |
+
#include <ATen/ops/_foreach_cosh_native.h>
|
| 137 |
+
#include <ATen/ops/_foreach_div_native.h>
|
| 138 |
+
#include <ATen/ops/_foreach_erf_native.h>
|
| 139 |
+
#include <ATen/ops/_foreach_erfc_native.h>
|
| 140 |
+
#include <ATen/ops/_foreach_exp_native.h>
|
| 141 |
+
#include <ATen/ops/_foreach_expm1_native.h>
|
| 142 |
+
#include <ATen/ops/_foreach_floor_native.h>
|
| 143 |
+
#include <ATen/ops/_foreach_frac_native.h>
|
| 144 |
+
#include <ATen/ops/_foreach_lerp_native.h>
|
| 145 |
+
#include <ATen/ops/_foreach_lgamma_native.h>
|
| 146 |
+
#include <ATen/ops/_foreach_log_native.h>
|
| 147 |
+
#include <ATen/ops/_foreach_log10_native.h>
|
| 148 |
+
#include <ATen/ops/_foreach_log1p_native.h>
|
| 149 |
+
#include <ATen/ops/_foreach_log2_native.h>
|
| 150 |
+
#include <ATen/ops/_foreach_max_native.h>
|
| 151 |
+
#include <ATen/ops/_foreach_maximum_native.h>
|
| 152 |
+
#include <ATen/ops/_foreach_minimum_native.h>
|
| 153 |
+
#include <ATen/ops/_foreach_mul_native.h>
|
| 154 |
+
#include <ATen/ops/_foreach_neg_native.h>
|
| 155 |
+
#include <ATen/ops/_foreach_norm_native.h>
|
| 156 |
+
#include <ATen/ops/_foreach_pow_native.h>
|
| 157 |
+
#include <ATen/ops/_foreach_reciprocal_native.h>
|
| 158 |
+
#include <ATen/ops/_foreach_round_native.h>
|
| 159 |
+
#include <ATen/ops/_foreach_sigmoid_native.h>
|
| 160 |
+
#include <ATen/ops/_foreach_sign_native.h>
|
| 161 |
+
#include <ATen/ops/_foreach_sin_native.h>
|
| 162 |
+
#include <ATen/ops/_foreach_sinh_native.h>
|
| 163 |
+
#include <ATen/ops/_foreach_sqrt_native.h>
|
| 164 |
+
#include <ATen/ops/_foreach_sub_native.h>
|
| 165 |
+
#include <ATen/ops/_foreach_tan_native.h>
|
| 166 |
+
#include <ATen/ops/_foreach_tanh_native.h>
|
| 167 |
+
#include <ATen/ops/_foreach_trunc_native.h>
|
| 168 |
+
#include <ATen/ops/_foreach_zero_native.h>
|
| 169 |
+
#include <ATen/ops/_functional_assert_async_native.h>
|
| 170 |
+
#include <ATen/ops/_functional_assert_scalar_native.h>
|
| 171 |
+
#include <ATen/ops/_functional_sym_constrain_range_native.h>
|
| 172 |
+
#include <ATen/ops/_functional_sym_constrain_range_for_size_native.h>
|
| 173 |
+
#include <ATen/ops/_fused_adagrad_native.h>
|
| 174 |
+
#include <ATen/ops/_fused_adam_native.h>
|
| 175 |
+
#include <ATen/ops/_fused_adamw_native.h>
|
| 176 |
+
#include <ATen/ops/_fused_dropout_native.h>
|
| 177 |
+
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
|
| 178 |
+
#include <ATen/ops/_fused_sdp_choice_native.h>
|
| 179 |
+
#include <ATen/ops/_fused_sgd_native.h>
|
| 180 |
+
#include <ATen/ops/_fw_primal_native.h>
|
| 181 |
+
#include <ATen/ops/_fw_primal_copy_native.h>
|
| 182 |
+
#include <ATen/ops/_gather_sparse_backward_native.h>
|
| 183 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h>
|
| 184 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h>
|
| 185 |
+
#include <ATen/ops/_has_compatible_shallow_copy_type_native.h>
|
| 186 |
+
#include <ATen/ops/_has_same_storage_numel_native.h>
|
| 187 |
+
#include <ATen/ops/_histogramdd_bin_edges_native.h>
|
| 188 |
+
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
|
| 189 |
+
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
|
| 190 |
+
#include <ATen/ops/_index_put_impl_native.h>
|
| 191 |
+
#include <ATen/ops/_indices_native.h>
|
| 192 |
+
#include <ATen/ops/_indices_copy_native.h>
|
| 193 |
+
#include <ATen/ops/_int_mm_native.h>
|
| 194 |
+
#include <ATen/ops/_is_all_true_native.h>
|
| 195 |
+
#include <ATen/ops/_is_any_true_native.h>
|
| 196 |
+
#include <ATen/ops/_is_zerotensor_native.h>
|
| 197 |
+
#include <ATen/ops/_jagged_to_padded_dense_forward_native.h>
|
| 198 |
+
#include <ATen/ops/_lazy_clone_native.h>
|
| 199 |
+
#include <ATen/ops/_linalg_check_errors_native.h>
|
| 200 |
+
#include <ATen/ops/_linalg_det_native.h>
|
| 201 |
+
#include <ATen/ops/_linalg_eigh_native.h>
|
| 202 |
+
#include <ATen/ops/_linalg_eigvals_native.h>
|
| 203 |
+
#include <ATen/ops/_linalg_slogdet_native.h>
|
| 204 |
+
#include <ATen/ops/_linalg_solve_ex_native.h>
|
| 205 |
+
#include <ATen/ops/_linalg_svd_native.h>
|
| 206 |
+
#include <ATen/ops/_local_scalar_dense_native.h>
|
| 207 |
+
#include <ATen/ops/_log_softmax_native.h>
|
| 208 |
+
#include <ATen/ops/_log_softmax_backward_data_native.h>
|
| 209 |
+
#include <ATen/ops/_logcumsumexp_native.h>
|
| 210 |
+
#include <ATen/ops/_lstm_mps_native.h>
|
| 211 |
+
#include <ATen/ops/_lu_with_info_native.h>
|
| 212 |
+
#include <ATen/ops/_make_dep_token_native.h>
|
| 213 |
+
#include <ATen/ops/_make_dual_native.h>
|
| 214 |
+
#include <ATen/ops/_make_dual_copy_native.h>
|
| 215 |
+
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
|
| 216 |
+
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
|
| 217 |
+
#include <ATen/ops/_masked_scale_native.h>
|
| 218 |
+
#include <ATen/ops/_masked_softmax_native.h>
|
| 219 |
+
#include <ATen/ops/_masked_softmax_backward_native.h>
|
| 220 |
+
#include <ATen/ops/_mixed_dtypes_linear_native.h>
|
| 221 |
+
#include <ATen/ops/_mkldnn_reshape_native.h>
|
| 222 |
+
#include <ATen/ops/_mkldnn_transpose_native.h>
|
| 223 |
+
#include <ATen/ops/_mps_convolution_native.h>
|
| 224 |
+
#include <ATen/ops/_mps_convolution_transpose_native.h>
|
| 225 |
+
#include <ATen/ops/_native_batch_norm_legit_native.h>
|
| 226 |
+
#include <ATen/ops/_native_batch_norm_legit_no_training_native.h>
|
| 227 |
+
#include <ATen/ops/_native_multi_head_attention_native.h>
|
| 228 |
+
#include <ATen/ops/_neg_view_native.h>
|
| 229 |
+
#include <ATen/ops/_neg_view_copy_native.h>
|
| 230 |
+
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_native.h>
|
| 231 |
+
#include <ATen/ops/_nested_from_padded_native.h>
|
| 232 |
+
#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
|
| 233 |
+
#include <ATen/ops/_nested_get_jagged_dummy_native.h>
|
| 234 |
+
#include <ATen/ops/_nested_get_lengths_native.h>
|
| 235 |
+
#include <ATen/ops/_nested_get_max_seqlen_native.h>
|
| 236 |
+
#include <ATen/ops/_nested_get_min_seqlen_native.h>
|
| 237 |
+
#include <ATen/ops/_nested_get_offsets_native.h>
|
| 238 |
+
#include <ATen/ops/_nested_get_ragged_idx_native.h>
|
| 239 |
+
#include <ATen/ops/_nested_get_values_native.h>
|
| 240 |
+
#include <ATen/ops/_nested_get_values_copy_native.h>
|
| 241 |
+
#include <ATen/ops/_nested_select_backward_native.h>
|
| 242 |
+
#include <ATen/ops/_nested_sum_backward_native.h>
|
| 243 |
+
#include <ATen/ops/_nested_tensor_from_mask_native.h>
|
| 244 |
+
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
|
| 245 |
+
#include <ATen/ops/_nested_tensor_from_tensor_list_native.h>
|
| 246 |
+
#include <ATen/ops/_nested_tensor_size_native.h>
|
| 247 |
+
#include <ATen/ops/_nested_tensor_softmax_with_shape_native.h>
|
| 248 |
+
#include <ATen/ops/_nested_tensor_storage_offsets_native.h>
|
| 249 |
+
#include <ATen/ops/_nested_tensor_strides_native.h>
|
| 250 |
+
#include <ATen/ops/_nested_view_from_buffer_native.h>
|
| 251 |
+
#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
|
| 252 |
+
#include <ATen/ops/_nested_view_from_jagged_native.h>
|
| 253 |
+
#include <ATen/ops/_nested_view_from_jagged_copy_native.h>
|
| 254 |
+
#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
|
| 255 |
+
#include <ATen/ops/_nnpack_available_native.h>
|
| 256 |
+
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
|
| 257 |
+
#include <ATen/ops/_nnz_native.h>
|
| 258 |
+
#include <ATen/ops/_pack_padded_sequence_native.h>
|
| 259 |
+
#include <ATen/ops/_pack_padded_sequence_backward_native.h>
|
| 260 |
+
#include <ATen/ops/_pad_circular_native.h>
|
| 261 |
+
#include <ATen/ops/_pad_enum_native.h>
|
| 262 |
+
#include <ATen/ops/_pad_packed_sequence_native.h>
|
| 263 |
+
#include <ATen/ops/_padded_dense_to_jagged_forward_native.h>
|
| 264 |
+
#include <ATen/ops/_pdist_backward_native.h>
|
| 265 |
+
#include <ATen/ops/_pdist_forward_native.h>
|
| 266 |
+
#include <ATen/ops/_pin_memory_native.h>
|
| 267 |
+
#include <ATen/ops/_prelu_kernel_native.h>
|
| 268 |
+
#include <ATen/ops/_prelu_kernel_backward_native.h>
|
| 269 |
+
#include <ATen/ops/_print_native.h>
|
| 270 |
+
#include <ATen/ops/_propagate_xla_data_native.h>
|
| 271 |
+
#include <ATen/ops/_remove_batch_dim_native.h>
|
| 272 |
+
#include <ATen/ops/_reshape_alias_native.h>
|
| 273 |
+
#include <ATen/ops/_reshape_alias_copy_native.h>
|
| 274 |
+
#include <ATen/ops/_reshape_copy_native.h>
|
| 275 |
+
#include <ATen/ops/_reshape_from_tensor_native.h>
|
| 276 |
+
#include <ATen/ops/_resize_output_native.h>
|
| 277 |
+
#include <ATen/ops/_rowwise_prune_native.h>
|
| 278 |
+
#include <ATen/ops/_safe_softmax_native.h>
|
| 279 |
+
#include <ATen/ops/_sample_dirichlet_native.h>
|
| 280 |
+
#include <ATen/ops/_saturate_weight_to_fp16_native.h>
|
| 281 |
+
#include <ATen/ops/_scaled_dot_product_attention_math_native.h>
|
| 282 |
+
#include <ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h>
|
| 283 |
+
#include <ATen/ops/_scaled_dot_product_cudnn_attention_native.h>
|
| 284 |
+
#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward_native.h>
|
| 285 |
+
#include <ATen/ops/_scaled_dot_product_efficient_attention_native.h>
|
| 286 |
+
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_native.h>
|
| 287 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_native.h>
|
| 288 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_backward_native.h>
|
| 289 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h>
|
| 290 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h>
|
| 291 |
+
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_native.h>
|
| 292 |
+
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward_native.h>
|
| 293 |
+
#include <ATen/ops/_scaled_mm_native.h>
|
| 294 |
+
#include <ATen/ops/_segment_reduce_backward_native.h>
|
| 295 |
+
#include <ATen/ops/_shape_as_tensor_native.h>
|
| 296 |
+
#include <ATen/ops/_slow_conv2d_backward_native.h>
|
| 297 |
+
#include <ATen/ops/_slow_conv2d_forward_native.h>
|
| 298 |
+
#include <ATen/ops/_sobol_engine_draw_native.h>
|
| 299 |
+
#include <ATen/ops/_sobol_engine_ff_native.h>
|
| 300 |
+
#include <ATen/ops/_sobol_engine_initialize_state_native.h>
|
| 301 |
+
#include <ATen/ops/_sobol_engine_scramble_native.h>
|
| 302 |
+
#include <ATen/ops/_softmax_native.h>
|
| 303 |
+
#include <ATen/ops/_softmax_backward_data_native.h>
|
| 304 |
+
#include <ATen/ops/_sparse_addmm_native.h>
|
| 305 |
+
#include <ATen/ops/_sparse_broadcast_to_native.h>
|
| 306 |
+
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
|
| 307 |
+
#include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h>
|
| 308 |
+
#include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h>
|
| 309 |
+
#include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h>
|
| 310 |
+
#include <ATen/ops/_sparse_compressed_tensor_with_dims_native.h>
|
| 311 |
+
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
|
| 312 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
|
| 313 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
|
| 314 |
+
#include <ATen/ops/_sparse_csc_tensor_unsafe_native.h>
|
| 315 |
+
#include <ATen/ops/_sparse_csr_prod_native.h>
|
| 316 |
+
#include <ATen/ops/_sparse_csr_sum_native.h>
|
| 317 |
+
#include <ATen/ops/_sparse_csr_tensor_unsafe_native.h>
|
| 318 |
+
#include <ATen/ops/_sparse_log_softmax_native.h>
|
| 319 |
+
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
|
| 320 |
+
#include <ATen/ops/_sparse_mask_projection_native.h>
|
| 321 |
+
#include <ATen/ops/_sparse_mm_native.h>
|
| 322 |
+
#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
|
| 323 |
+
#include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h>
|
| 324 |
+
#include <ATen/ops/_sparse_semi_structured_addmm_native.h>
|
| 325 |
+
#include <ATen/ops/_sparse_semi_structured_apply_native.h>
|
| 326 |
+
#include <ATen/ops/_sparse_semi_structured_apply_dense_native.h>
|
| 327 |
+
#include <ATen/ops/_sparse_semi_structured_linear_native.h>
|
| 328 |
+
#include <ATen/ops/_sparse_semi_structured_mm_native.h>
|
| 329 |
+
#include <ATen/ops/_sparse_semi_structured_tile_native.h>
|
| 330 |
+
#include <ATen/ops/_sparse_softmax_native.h>
|
| 331 |
+
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
|
| 332 |
+
#include <ATen/ops/_sparse_sparse_matmul_native.h>
|
| 333 |
+
#include <ATen/ops/_sparse_sum_native.h>
|
| 334 |
+
#include <ATen/ops/_sparse_sum_backward_native.h>
|
| 335 |
+
#include <ATen/ops/_spdiags_native.h>
|
| 336 |
+
#include <ATen/ops/_spsolve_native.h>
|
| 337 |
+
#include <ATen/ops/_stack_native.h>
|
| 338 |
+
#include <ATen/ops/_standard_gamma_native.h>
|
| 339 |
+
#include <ATen/ops/_standard_gamma_grad_native.h>
|
| 340 |
+
#include <ATen/ops/_test_ambiguous_defaults_native.h>
|
| 341 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
|
| 342 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h>
|
| 343 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
|
| 344 |
+
#include <ATen/ops/_test_check_tensor_native.h>
|
| 345 |
+
#include <ATen/ops/_test_functorch_fallback_native.h>
|
| 346 |
+
#include <ATen/ops/_test_optional_filled_intlist_native.h>
|
| 347 |
+
#include <ATen/ops/_test_optional_floatlist_native.h>
|
| 348 |
+
#include <ATen/ops/_test_optional_intlist_native.h>
|
| 349 |
+
#include <ATen/ops/_test_parallel_materialize_native.h>
|
| 350 |
+
#include <ATen/ops/_test_serialization_subcmul_native.h>
|
| 351 |
+
#include <ATen/ops/_test_string_default_native.h>
|
| 352 |
+
#include <ATen/ops/_test_warn_in_autograd_native.h>
|
| 353 |
+
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h>
|
| 354 |
+
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h>
|
| 355 |
+
#include <ATen/ops/_thnn_fused_gru_cell_native.h>
|
| 356 |
+
#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
|
| 357 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_native.h>
|
| 358 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h>
|
| 359 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h>
|
| 360 |
+
#include <ATen/ops/_to_copy_native.h>
|
| 361 |
+
#include <ATen/ops/_to_cpu_native.h>
|
| 362 |
+
#include <ATen/ops/_to_dense_native.h>
|
| 363 |
+
#include <ATen/ops/_to_sparse_native.h>
|
| 364 |
+
#include <ATen/ops/_to_sparse_bsc_native.h>
|
| 365 |
+
#include <ATen/ops/_to_sparse_bsr_native.h>
|
| 366 |
+
#include <ATen/ops/_to_sparse_csc_native.h>
|
| 367 |
+
#include <ATen/ops/_to_sparse_csr_native.h>
|
| 368 |
+
#include <ATen/ops/_to_sparse_semi_structured_native.h>
|
| 369 |
+
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
|
| 370 |
+
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
|
| 371 |
+
#include <ATen/ops/_trilinear_native.h>
|
| 372 |
+
#include <ATen/ops/_triton_multi_head_attention_native.h>
|
| 373 |
+
#include <ATen/ops/_triton_scaled_dot_attention_native.h>
|
| 374 |
+
#include <ATen/ops/_unique_native.h>
|
| 375 |
+
#include <ATen/ops/_unique2_native.h>
|
| 376 |
+
#include <ATen/ops/_unpack_dual_native.h>
|
| 377 |
+
#include <ATen/ops/_unsafe_index_native.h>
|
| 378 |
+
#include <ATen/ops/_unsafe_index_put_native.h>
|
| 379 |
+
#include <ATen/ops/_unsafe_masked_index_native.h>
|
| 380 |
+
#include <ATen/ops/_unsafe_masked_index_put_accumulate_native.h>
|
| 381 |
+
#include <ATen/ops/_unsafe_view_native.h>
|
| 382 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
|
| 383 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
|
| 384 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
|
| 385 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
|
| 386 |
+
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
|
| 387 |
+
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
|
| 388 |
+
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
|
| 389 |
+
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
|
| 390 |
+
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
|
| 391 |
+
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
|
| 392 |
+
#include <ATen/ops/_use_cudnn_ctc_loss_native.h>
|
| 393 |
+
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h>
|
| 394 |
+
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
|
| 395 |
+
#include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h>
|
| 396 |
+
#include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h>
|
| 397 |
+
#include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h>
|
| 398 |
+
#include <ATen/ops/_validate_sparse_coo_tensor_args_native.h>
|
| 399 |
+
#include <ATen/ops/_validate_sparse_csc_tensor_args_native.h>
|
| 400 |
+
#include <ATen/ops/_validate_sparse_csr_tensor_args_native.h>
|
| 401 |
+
#include <ATen/ops/_values_native.h>
|
| 402 |
+
#include <ATen/ops/_values_copy_native.h>
|
| 403 |
+
#include <ATen/ops/_version_native.h>
|
| 404 |
+
#include <ATen/ops/_weight_int4pack_mm_native.h>
|
| 405 |
+
#include <ATen/ops/_weight_int8pack_mm_native.h>
|
| 406 |
+
#include <ATen/ops/_weight_norm_native.h>
|
| 407 |
+
#include <ATen/ops/_weight_norm_differentiable_backward_native.h>
|
| 408 |
+
#include <ATen/ops/_weight_norm_interface_native.h>
|
| 409 |
+
#include <ATen/ops/_weight_norm_interface_backward_native.h>
|
| 410 |
+
#include <ATen/ops/_wrapped_linear_prepack_native.h>
|
| 411 |
+
#include <ATen/ops/_wrapped_quantized_linear_prepacked_native.h>
|
| 412 |
+
#include <ATen/ops/abs_native.h>
|
| 413 |
+
#include <ATen/ops/absolute_native.h>
|
| 414 |
+
#include <ATen/ops/acos_native.h>
|
| 415 |
+
#include <ATen/ops/acosh_native.h>
|
| 416 |
+
#include <ATen/ops/adaptive_avg_pool1d_native.h>
|
| 417 |
+
#include <ATen/ops/adaptive_avg_pool2d_native.h>
|
| 418 |
+
#include <ATen/ops/adaptive_avg_pool3d_native.h>
|
| 419 |
+
#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
|
| 420 |
+
#include <ATen/ops/adaptive_max_pool1d_native.h>
|
| 421 |
+
#include <ATen/ops/adaptive_max_pool2d_native.h>
|
| 422 |
+
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
|
| 423 |
+
#include <ATen/ops/adaptive_max_pool3d_native.h>
|
| 424 |
+
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
|
| 425 |
+
#include <ATen/ops/add_native.h>
|
| 426 |
+
#include <ATen/ops/addbmm_native.h>
|
| 427 |
+
#include <ATen/ops/addcdiv_native.h>
|
| 428 |
+
#include <ATen/ops/addcmul_native.h>
|
| 429 |
+
#include <ATen/ops/addmm_native.h>
|
| 430 |
+
#include <ATen/ops/addmv_native.h>
|
| 431 |
+
#include <ATen/ops/addr_native.h>
|
| 432 |
+
#include <ATen/ops/adjoint_native.h>
|
| 433 |
+
#include <ATen/ops/affine_grid_generator_native.h>
|
| 434 |
+
#include <ATen/ops/affine_grid_generator_backward_native.h>
|
| 435 |
+
#include <ATen/ops/alias_native.h>
|
| 436 |
+
#include <ATen/ops/alias_copy_native.h>
|
| 437 |
+
#include <ATen/ops/align_as_native.h>
|
| 438 |
+
#include <ATen/ops/align_tensors_native.h>
|
| 439 |
+
#include <ATen/ops/align_to_native.h>
|
| 440 |
+
#include <ATen/ops/all_native.h>
|
| 441 |
+
#include <ATen/ops/allclose_native.h>
|
| 442 |
+
#include <ATen/ops/alpha_dropout_native.h>
|
| 443 |
+
#include <ATen/ops/amax_native.h>
|
| 444 |
+
#include <ATen/ops/amin_native.h>
|
| 445 |
+
#include <ATen/ops/aminmax_native.h>
|
| 446 |
+
#include <ATen/ops/and_native.h>
|
| 447 |
+
#include <ATen/ops/angle_native.h>
|
| 448 |
+
#include <ATen/ops/any_native.h>
|
| 449 |
+
#include <ATen/ops/arange_native.h>
|
| 450 |
+
#include <ATen/ops/arccos_native.h>
|
| 451 |
+
#include <ATen/ops/arccosh_native.h>
|
| 452 |
+
#include <ATen/ops/arcsin_native.h>
|
| 453 |
+
#include <ATen/ops/arcsinh_native.h>
|
| 454 |
+
#include <ATen/ops/arctan_native.h>
|
| 455 |
+
#include <ATen/ops/arctan2_native.h>
|
| 456 |
+
#include <ATen/ops/arctanh_native.h>
|
| 457 |
+
#include <ATen/ops/argmax_native.h>
|
| 458 |
+
#include <ATen/ops/argmin_native.h>
|
| 459 |
+
#include <ATen/ops/argsort_native.h>
|
| 460 |
+
#include <ATen/ops/argwhere_native.h>
|
| 461 |
+
#include <ATen/ops/as_strided_native.h>
|
| 462 |
+
#include <ATen/ops/as_strided_copy_native.h>
|
| 463 |
+
#include <ATen/ops/as_strided_scatter_native.h>
|
| 464 |
+
#include <ATen/ops/asin_native.h>
|
| 465 |
+
#include <ATen/ops/asinh_native.h>
|
| 466 |
+
#include <ATen/ops/atan_native.h>
|
| 467 |
+
#include <ATen/ops/atan2_native.h>
|
| 468 |
+
#include <ATen/ops/atanh_native.h>
|
| 469 |
+
#include <ATen/ops/atleast_1d_native.h>
|
| 470 |
+
#include <ATen/ops/atleast_2d_native.h>
|
| 471 |
+
#include <ATen/ops/atleast_3d_native.h>
|
| 472 |
+
#include <ATen/ops/avg_pool1d_native.h>
|
| 473 |
+
#include <ATen/ops/avg_pool2d_native.h>
|
| 474 |
+
#include <ATen/ops/avg_pool2d_backward_native.h>
|
| 475 |
+
#include <ATen/ops/avg_pool3d_native.h>
|
| 476 |
+
#include <ATen/ops/avg_pool3d_backward_native.h>
|
| 477 |
+
#include <ATen/ops/baddbmm_native.h>
|
| 478 |
+
#include <ATen/ops/bartlett_window_native.h>
|
| 479 |
+
#include <ATen/ops/batch_norm_native.h>
|
| 480 |
+
#include <ATen/ops/batch_norm_backward_native.h>
|
| 481 |
+
#include <ATen/ops/batch_norm_backward_elemt_native.h>
|
| 482 |
+
#include <ATen/ops/batch_norm_backward_reduce_native.h>
|
| 483 |
+
#include <ATen/ops/batch_norm_elemt_native.h>
|
| 484 |
+
#include <ATen/ops/batch_norm_gather_stats_native.h>
|
| 485 |
+
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
|
| 486 |
+
#include <ATen/ops/batch_norm_stats_native.h>
|
| 487 |
+
#include <ATen/ops/batch_norm_update_stats_native.h>
|
| 488 |
+
#include <ATen/ops/bernoulli_native.h>
|
| 489 |
+
#include <ATen/ops/bilinear_native.h>
|
| 490 |
+
#include <ATen/ops/binary_cross_entropy_native.h>
|
| 491 |
+
#include <ATen/ops/binary_cross_entropy_backward_native.h>
|
| 492 |
+
#include <ATen/ops/binary_cross_entropy_with_logits_native.h>
|
| 493 |
+
#include <ATen/ops/bincount_native.h>
|
| 494 |
+
#include <ATen/ops/binomial_native.h>
|
| 495 |
+
#include <ATen/ops/bitwise_and_native.h>
|
| 496 |
+
#include <ATen/ops/bitwise_left_shift_native.h>
|
| 497 |
+
#include <ATen/ops/bitwise_not_native.h>
|
| 498 |
+
#include <ATen/ops/bitwise_or_native.h>
|
| 499 |
+
#include <ATen/ops/bitwise_right_shift_native.h>
|
| 500 |
+
#include <ATen/ops/bitwise_xor_native.h>
|
| 501 |
+
#include <ATen/ops/blackman_window_native.h>
|
| 502 |
+
#include <ATen/ops/block_diag_native.h>
|
| 503 |
+
#include <ATen/ops/bmm_native.h>
|
| 504 |
+
#include <ATen/ops/broadcast_tensors_native.h>
|
| 505 |
+
#include <ATen/ops/broadcast_to_native.h>
|
| 506 |
+
#include <ATen/ops/bucketize_native.h>
|
| 507 |
+
#include <ATen/ops/can_cast_native.h>
|
| 508 |
+
#include <ATen/ops/cartesian_prod_native.h>
|
| 509 |
+
#include <ATen/ops/cat_native.h>
|
| 510 |
+
#include <ATen/ops/cauchy_native.h>
|
| 511 |
+
#include <ATen/ops/ccol_indices_native.h>
|
| 512 |
+
#include <ATen/ops/ccol_indices_copy_native.h>
|
| 513 |
+
#include <ATen/ops/cdist_native.h>
|
| 514 |
+
#include <ATen/ops/ceil_native.h>
|
| 515 |
+
#include <ATen/ops/celu_native.h>
|
| 516 |
+
#include <ATen/ops/chain_matmul_native.h>
|
| 517 |
+
#include <ATen/ops/chalf_native.h>
|
| 518 |
+
#include <ATen/ops/channel_shuffle_native.h>
|
| 519 |
+
#include <ATen/ops/cholesky_native.h>
|
| 520 |
+
#include <ATen/ops/cholesky_inverse_native.h>
|
| 521 |
+
#include <ATen/ops/cholesky_solve_native.h>
|
| 522 |
+
#include <ATen/ops/choose_qparams_optimized_native.h>
|
| 523 |
+
#include <ATen/ops/chunk_native.h>
|
| 524 |
+
#include <ATen/ops/clamp_native.h>
|
| 525 |
+
#include <ATen/ops/clamp_max_native.h>
|
| 526 |
+
#include <ATen/ops/clamp_min_native.h>
|
| 527 |
+
#include <ATen/ops/clip_native.h>
|
| 528 |
+
#include <ATen/ops/clone_native.h>
|
| 529 |
+
#include <ATen/ops/coalesce_native.h>
|
| 530 |
+
#include <ATen/ops/col2im_native.h>
|
| 531 |
+
#include <ATen/ops/col_indices_native.h>
|
| 532 |
+
#include <ATen/ops/col_indices_copy_native.h>
|
| 533 |
+
#include <ATen/ops/column_stack_native.h>
|
| 534 |
+
#include <ATen/ops/combinations_native.h>
|
| 535 |
+
#include <ATen/ops/complex_native.h>
|
| 536 |
+
#include <ATen/ops/concat_native.h>
|
| 537 |
+
#include <ATen/ops/concatenate_native.h>
|
| 538 |
+
#include <ATen/ops/conj_native.h>
|
| 539 |
+
#include <ATen/ops/conj_physical_native.h>
|
| 540 |
+
#include <ATen/ops/constant_pad_nd_native.h>
|
| 541 |
+
#include <ATen/ops/contiguous_native.h>
|
| 542 |
+
#include <ATen/ops/conv1d_native.h>
|
| 543 |
+
#include <ATen/ops/conv2d_native.h>
|
| 544 |
+
#include <ATen/ops/conv3d_native.h>
|
| 545 |
+
#include <ATen/ops/conv_depthwise3d_native.h>
|
| 546 |
+
#include <ATen/ops/conv_tbc_native.h>
|
| 547 |
+
#include <ATen/ops/conv_tbc_backward_native.h>
|
| 548 |
+
#include <ATen/ops/conv_transpose1d_native.h>
|
| 549 |
+
#include <ATen/ops/conv_transpose2d_native.h>
|
| 550 |
+
#include <ATen/ops/conv_transpose3d_native.h>
|
| 551 |
+
#include <ATen/ops/convolution_native.h>
|
| 552 |
+
#include <ATen/ops/convolution_backward_native.h>
|
| 553 |
+
#include <ATen/ops/convolution_backward_overrideable_native.h>
|
| 554 |
+
#include <ATen/ops/convolution_overrideable_native.h>
|
| 555 |
+
#include <ATen/ops/copy_native.h>
|
| 556 |
+
#include <ATen/ops/copy_sparse_to_sparse_native.h>
|
| 557 |
+
#include <ATen/ops/copysign_native.h>
|
| 558 |
+
#include <ATen/ops/corrcoef_native.h>
|
| 559 |
+
#include <ATen/ops/cos_native.h>
|
| 560 |
+
#include <ATen/ops/cosh_native.h>
|
| 561 |
+
#include <ATen/ops/cosine_embedding_loss_native.h>
|
| 562 |
+
#include <ATen/ops/cosine_similarity_native.h>
|
| 563 |
+
#include <ATen/ops/count_nonzero_native.h>
|
| 564 |
+
#include <ATen/ops/cov_native.h>
|
| 565 |
+
#include <ATen/ops/cross_native.h>
|
| 566 |
+
#include <ATen/ops/cross_entropy_loss_native.h>
|
| 567 |
+
#include <ATen/ops/crow_indices_native.h>
|
| 568 |
+
#include <ATen/ops/crow_indices_copy_native.h>
|
| 569 |
+
#include <ATen/ops/ctc_loss_native.h>
|
| 570 |
+
#include <ATen/ops/cudnn_affine_grid_generator_native.h>
|
| 571 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward_native.h>
|
| 572 |
+
#include <ATen/ops/cudnn_batch_norm_native.h>
|
| 573 |
+
#include <ATen/ops/cudnn_batch_norm_backward_native.h>
|
| 574 |
+
#include <ATen/ops/cudnn_convolution_native.h>
|
| 575 |
+
#include <ATen/ops/cudnn_convolution_add_relu_native.h>
|
| 576 |
+
#include <ATen/ops/cudnn_convolution_relu_native.h>
|
| 577 |
+
#include <ATen/ops/cudnn_convolution_transpose_native.h>
|
| 578 |
+
#include <ATen/ops/cudnn_grid_sampler_native.h>
|
| 579 |
+
#include <ATen/ops/cudnn_grid_sampler_backward_native.h>
|
| 580 |
+
#include <ATen/ops/cudnn_is_acceptable_native.h>
|
| 581 |
+
#include <ATen/ops/cummax_native.h>
|
| 582 |
+
#include <ATen/ops/cummaxmin_backward_native.h>
|
| 583 |
+
#include <ATen/ops/cummin_native.h>
|
| 584 |
+
#include <ATen/ops/cumprod_native.h>
|
| 585 |
+
#include <ATen/ops/cumprod_backward_native.h>
|
| 586 |
+
#include <ATen/ops/cumsum_native.h>
|
| 587 |
+
#include <ATen/ops/cumulative_trapezoid_native.h>
|
| 588 |
+
#include <ATen/ops/data_native.h>
|
| 589 |
+
#include <ATen/ops/deg2rad_native.h>
|
| 590 |
+
#include <ATen/ops/dense_dim_native.h>
|
| 591 |
+
#include <ATen/ops/dequantize_native.h>
|
| 592 |
+
#include <ATen/ops/det_native.h>
|
| 593 |
+
#include <ATen/ops/detach_native.h>
|
| 594 |
+
#include <ATen/ops/detach_copy_native.h>
|
| 595 |
+
#include <ATen/ops/diag_native.h>
|
| 596 |
+
#include <ATen/ops/diag_embed_native.h>
|
| 597 |
+
#include <ATen/ops/diagflat_native.h>
|
| 598 |
+
#include <ATen/ops/diagonal_native.h>
|
| 599 |
+
#include <ATen/ops/diagonal_backward_native.h>
|
| 600 |
+
#include <ATen/ops/diagonal_copy_native.h>
|
| 601 |
+
#include <ATen/ops/diagonal_scatter_native.h>
|
| 602 |
+
#include <ATen/ops/diff_native.h>
|
| 603 |
+
#include <ATen/ops/digamma_native.h>
|
| 604 |
+
#include <ATen/ops/dist_native.h>
|
| 605 |
+
#include <ATen/ops/div_native.h>
|
| 606 |
+
#include <ATen/ops/divide_native.h>
|
| 607 |
+
#include <ATen/ops/dot_native.h>
|
| 608 |
+
#include <ATen/ops/dropout_native.h>
|
| 609 |
+
#include <ATen/ops/dsplit_native.h>
|
| 610 |
+
#include <ATen/ops/dstack_native.h>
|
| 611 |
+
#include <ATen/ops/einsum_native.h>
|
| 612 |
+
#include <ATen/ops/elu_native.h>
|
| 613 |
+
#include <ATen/ops/elu_backward_native.h>
|
| 614 |
+
#include <ATen/ops/embedding_native.h>
|
| 615 |
+
#include <ATen/ops/embedding_backward_native.h>
|
| 616 |
+
#include <ATen/ops/embedding_bag_native.h>
|
| 617 |
+
#include <ATen/ops/embedding_dense_backward_native.h>
|
| 618 |
+
#include <ATen/ops/embedding_renorm_native.h>
|
| 619 |
+
#include <ATen/ops/embedding_sparse_backward_native.h>
|
| 620 |
+
#include <ATen/ops/empty_native.h>
|
| 621 |
+
#include <ATen/ops/empty_like_native.h>
|
| 622 |
+
#include <ATen/ops/empty_permuted_native.h>
|
| 623 |
+
#include <ATen/ops/empty_quantized_native.h>
|
| 624 |
+
#include <ATen/ops/empty_strided_native.h>
|
| 625 |
+
#include <ATen/ops/eq_native.h>
|
| 626 |
+
#include <ATen/ops/equal_native.h>
|
| 627 |
+
#include <ATen/ops/erf_native.h>
|
| 628 |
+
#include <ATen/ops/erfc_native.h>
|
| 629 |
+
#include <ATen/ops/erfinv_native.h>
|
| 630 |
+
#include <ATen/ops/exp_native.h>
|
| 631 |
+
#include <ATen/ops/exp2_native.h>
|
| 632 |
+
#include <ATen/ops/expand_native.h>
|
| 633 |
+
#include <ATen/ops/expand_as_native.h>
|
| 634 |
+
#include <ATen/ops/expand_copy_native.h>
|
| 635 |
+
#include <ATen/ops/expm1_native.h>
|
| 636 |
+
#include <ATen/ops/exponential_native.h>
|
| 637 |
+
#include <ATen/ops/eye_native.h>
|
| 638 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_native.h>
|
| 639 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
|
| 640 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h>
|
| 641 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
|
| 642 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
|
| 643 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h>
|
| 644 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_native.h>
|
| 645 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h>
|
| 646 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_native.h>
|
| 647 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h>
|
| 648 |
+
#include <ATen/ops/fbgemm_linear_quantize_weight_native.h>
|
| 649 |
+
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h>
|
| 650 |
+
#include <ATen/ops/fbgemm_pack_quantized_matrix_native.h>
|
| 651 |
+
#include <ATen/ops/feature_alpha_dropout_native.h>
|
| 652 |
+
#include <ATen/ops/feature_dropout_native.h>
|
| 653 |
+
#include <ATen/ops/fft_fft_native.h>
|
| 654 |
+
#include <ATen/ops/fft_fft2_native.h>
|
| 655 |
+
#include <ATen/ops/fft_fftfreq_native.h>
|
| 656 |
+
#include <ATen/ops/fft_fftn_native.h>
|
| 657 |
+
#include <ATen/ops/fft_fftshift_native.h>
|
| 658 |
+
#include <ATen/ops/fft_hfft_native.h>
|
| 659 |
+
#include <ATen/ops/fft_hfft2_native.h>
|
| 660 |
+
#include <ATen/ops/fft_hfftn_native.h>
|
| 661 |
+
#include <ATen/ops/fft_ifft_native.h>
|
| 662 |
+
#include <ATen/ops/fft_ifft2_native.h>
|
| 663 |
+
#include <ATen/ops/fft_ifftn_native.h>
|
| 664 |
+
#include <ATen/ops/fft_ifftshift_native.h>
|
| 665 |
+
#include <ATen/ops/fft_ihfft_native.h>
|
| 666 |
+
#include <ATen/ops/fft_ihfft2_native.h>
|
| 667 |
+
#include <ATen/ops/fft_ihfftn_native.h>
|
| 668 |
+
#include <ATen/ops/fft_irfft_native.h>
|
| 669 |
+
#include <ATen/ops/fft_irfft2_native.h>
|
| 670 |
+
#include <ATen/ops/fft_irfftn_native.h>
|
| 671 |
+
#include <ATen/ops/fft_rfft_native.h>
|
| 672 |
+
#include <ATen/ops/fft_rfft2_native.h>
|
| 673 |
+
#include <ATen/ops/fft_rfftfreq_native.h>
|
| 674 |
+
#include <ATen/ops/fft_rfftn_native.h>
|
| 675 |
+
#include <ATen/ops/fill_native.h>
|
| 676 |
+
#include <ATen/ops/fill_diagonal_native.h>
|
| 677 |
+
#include <ATen/ops/fix_native.h>
|
| 678 |
+
#include <ATen/ops/flatten_native.h>
|
| 679 |
+
#include <ATen/ops/flatten_dense_tensors_native.h>
|
| 680 |
+
#include <ATen/ops/flip_native.h>
|
| 681 |
+
#include <ATen/ops/fliplr_native.h>
|
| 682 |
+
#include <ATen/ops/flipud_native.h>
|
| 683 |
+
#include <ATen/ops/float_power_native.h>
|
| 684 |
+
#include <ATen/ops/floor_native.h>
|
| 685 |
+
#include <ATen/ops/floor_divide_native.h>
|
| 686 |
+
#include <ATen/ops/fmax_native.h>
|
| 687 |
+
#include <ATen/ops/fmin_native.h>
|
| 688 |
+
#include <ATen/ops/fmod_native.h>
|
| 689 |
+
#include <ATen/ops/frac_native.h>
|
| 690 |
+
#include <ATen/ops/fractional_max_pool2d_native.h>
|
| 691 |
+
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
|
| 692 |
+
#include <ATen/ops/fractional_max_pool3d_native.h>
|
| 693 |
+
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
|
| 694 |
+
#include <ATen/ops/frexp_native.h>
|
| 695 |
+
#include <ATen/ops/frobenius_norm_native.h>
|
| 696 |
+
#include <ATen/ops/from_file_native.h>
|
| 697 |
+
#include <ATen/ops/full_native.h>
|
| 698 |
+
#include <ATen/ops/full_like_native.h>
|
| 699 |
+
#include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
|
| 700 |
+
#include <ATen/ops/gather_native.h>
|
| 701 |
+
#include <ATen/ops/gather_backward_native.h>
|
| 702 |
+
#include <ATen/ops/gcd_native.h>
|
| 703 |
+
#include <ATen/ops/ge_native.h>
|
| 704 |
+
#include <ATen/ops/gelu_native.h>
|
| 705 |
+
#include <ATen/ops/gelu_backward_native.h>
|
| 706 |
+
#include <ATen/ops/geometric_native.h>
|
| 707 |
+
#include <ATen/ops/geqrf_native.h>
|
| 708 |
+
#include <ATen/ops/ger_native.h>
|
| 709 |
+
#include <ATen/ops/glu_native.h>
|
| 710 |
+
#include <ATen/ops/glu_backward_native.h>
|
| 711 |
+
#include <ATen/ops/glu_backward_jvp_native.h>
|
| 712 |
+
#include <ATen/ops/glu_jvp_native.h>
|
| 713 |
+
#include <ATen/ops/gradient_native.h>
|
| 714 |
+
#include <ATen/ops/greater_native.h>
|
| 715 |
+
#include <ATen/ops/greater_equal_native.h>
|
| 716 |
+
#include <ATen/ops/grid_sampler_native.h>
|
| 717 |
+
#include <ATen/ops/grid_sampler_2d_native.h>
|
| 718 |
+
#include <ATen/ops/grid_sampler_2d_backward_native.h>
|
| 719 |
+
#include <ATen/ops/grid_sampler_3d_native.h>
|
| 720 |
+
#include <ATen/ops/grid_sampler_3d_backward_native.h>
|
| 721 |
+
#include <ATen/ops/group_norm_native.h>
|
| 722 |
+
#include <ATen/ops/gru_native.h>
|
| 723 |
+
#include <ATen/ops/gru_cell_native.h>
|
| 724 |
+
#include <ATen/ops/gt_native.h>
|
| 725 |
+
#include <ATen/ops/hamming_window_native.h>
|
| 726 |
+
#include <ATen/ops/hann_window_native.h>
|
| 727 |
+
#include <ATen/ops/hardshrink_native.h>
|
| 728 |
+
#include <ATen/ops/hardshrink_backward_native.h>
|
| 729 |
+
#include <ATen/ops/hardsigmoid_native.h>
|
| 730 |
+
#include <ATen/ops/hardsigmoid_backward_native.h>
|
| 731 |
+
#include <ATen/ops/hardswish_native.h>
|
| 732 |
+
#include <ATen/ops/hardswish_backward_native.h>
|
| 733 |
+
#include <ATen/ops/hardtanh_native.h>
|
| 734 |
+
#include <ATen/ops/hardtanh_backward_native.h>
|
| 735 |
+
#include <ATen/ops/heaviside_native.h>
|
| 736 |
+
#include <ATen/ops/hinge_embedding_loss_native.h>
|
| 737 |
+
#include <ATen/ops/histc_native.h>
|
| 738 |
+
#include <ATen/ops/histogram_native.h>
|
| 739 |
+
#include <ATen/ops/histogramdd_native.h>
|
| 740 |
+
#include <ATen/ops/hsplit_native.h>
|
| 741 |
+
#include <ATen/ops/hspmm_native.h>
|
| 742 |
+
#include <ATen/ops/hstack_native.h>
|
| 743 |
+
#include <ATen/ops/huber_loss_native.h>
|
| 744 |
+
#include <ATen/ops/huber_loss_backward_native.h>
|
| 745 |
+
#include <ATen/ops/hypot_native.h>
|
| 746 |
+
#include <ATen/ops/i0_native.h>
|
| 747 |
+
#include <ATen/ops/igamma_native.h>
|
| 748 |
+
#include <ATen/ops/igammac_native.h>
|
| 749 |
+
#include <ATen/ops/im2col_native.h>
|
| 750 |
+
#include <ATen/ops/imag_native.h>
|
| 751 |
+
#include <ATen/ops/index_native.h>
|
| 752 |
+
#include <ATen/ops/index_add_native.h>
|
| 753 |
+
#include <ATen/ops/index_copy_native.h>
|
| 754 |
+
#include <ATen/ops/index_fill_native.h>
|
| 755 |
+
#include <ATen/ops/index_put_native.h>
|
| 756 |
+
#include <ATen/ops/index_reduce_native.h>
|
| 757 |
+
#include <ATen/ops/index_select_native.h>
|
| 758 |
+
#include <ATen/ops/index_select_backward_native.h>
|
| 759 |
+
#include <ATen/ops/indices_native.h>
|
| 760 |
+
#include <ATen/ops/indices_copy_native.h>
|
| 761 |
+
#include <ATen/ops/infinitely_differentiable_gelu_backward_native.h>
|
| 762 |
+
#include <ATen/ops/inner_native.h>
|
| 763 |
+
#include <ATen/ops/instance_norm_native.h>
|
| 764 |
+
#include <ATen/ops/int_repr_native.h>
|
| 765 |
+
#include <ATen/ops/inverse_native.h>
|
| 766 |
+
#include <ATen/ops/is_coalesced_native.h>
|
| 767 |
+
#include <ATen/ops/is_complex_native.h>
|
| 768 |
+
#include <ATen/ops/is_conj_native.h>
|
| 769 |
+
#include <ATen/ops/is_distributed_native.h>
|
| 770 |
+
#include <ATen/ops/is_floating_point_native.h>
|
| 771 |
+
#include <ATen/ops/is_inference_native.h>
|
| 772 |
+
#include <ATen/ops/is_leaf_native.h>
|
| 773 |
+
#include <ATen/ops/is_neg_native.h>
|
| 774 |
+
#include <ATen/ops/is_nonzero_native.h>
|
| 775 |
+
#include <ATen/ops/is_pinned_native.h>
|
| 776 |
+
#include <ATen/ops/is_same_size_native.h>
|
| 777 |
+
#include <ATen/ops/is_set_to_native.h>
|
| 778 |
+
#include <ATen/ops/is_signed_native.h>
|
| 779 |
+
#include <ATen/ops/is_vulkan_available_native.h>
|
| 780 |
+
#include <ATen/ops/isclose_native.h>
|
| 781 |
+
#include <ATen/ops/isfinite_native.h>
|
| 782 |
+
#include <ATen/ops/isin_native.h>
|
| 783 |
+
#include <ATen/ops/isinf_native.h>
|
| 784 |
+
#include <ATen/ops/isnan_native.h>
|
| 785 |
+
#include <ATen/ops/isneginf_native.h>
|
| 786 |
+
#include <ATen/ops/isposinf_native.h>
|
| 787 |
+
#include <ATen/ops/isreal_native.h>
|
| 788 |
+
#include <ATen/ops/istft_native.h>
|
| 789 |
+
#include <ATen/ops/item_native.h>
|
| 790 |
+
#include <ATen/ops/kaiser_window_native.h>
|
| 791 |
+
#include <ATen/ops/kl_div_native.h>
|
| 792 |
+
#include <ATen/ops/kron_native.h>
|
| 793 |
+
#include <ATen/ops/kthvalue_native.h>
|
| 794 |
+
#include <ATen/ops/l1_loss_native.h>
|
| 795 |
+
#include <ATen/ops/layer_norm_native.h>
|
| 796 |
+
#include <ATen/ops/lcm_native.h>
|
| 797 |
+
#include <ATen/ops/ldexp_native.h>
|
| 798 |
+
#include <ATen/ops/le_native.h>
|
| 799 |
+
#include <ATen/ops/leaky_relu_native.h>
|
| 800 |
+
#include <ATen/ops/leaky_relu_backward_native.h>
|
| 801 |
+
#include <ATen/ops/lerp_native.h>
|
| 802 |
+
#include <ATen/ops/less_native.h>
|
| 803 |
+
#include <ATen/ops/less_equal_native.h>
|
| 804 |
+
#include <ATen/ops/lgamma_native.h>
|
| 805 |
+
#include <ATen/ops/lift_native.h>
|
| 806 |
+
#include <ATen/ops/lift_fresh_native.h>
|
| 807 |
+
#include <ATen/ops/lift_fresh_copy_native.h>
|
| 808 |
+
#include <ATen/ops/linalg_cholesky_native.h>
|
| 809 |
+
#include <ATen/ops/linalg_cholesky_ex_native.h>
|
| 810 |
+
#include <ATen/ops/linalg_cond_native.h>
|
| 811 |
+
#include <ATen/ops/linalg_cross_native.h>
|
| 812 |
+
#include <ATen/ops/linalg_det_native.h>
|
| 813 |
+
#include <ATen/ops/linalg_diagonal_native.h>
|
| 814 |
+
#include <ATen/ops/linalg_eig_native.h>
|
| 815 |
+
#include <ATen/ops/linalg_eigh_native.h>
|
| 816 |
+
#include <ATen/ops/linalg_eigvals_native.h>
|
| 817 |
+
#include <ATen/ops/linalg_eigvalsh_native.h>
|
| 818 |
+
#include <ATen/ops/linalg_householder_product_native.h>
|
| 819 |
+
#include <ATen/ops/linalg_inv_native.h>
|
| 820 |
+
#include <ATen/ops/linalg_inv_ex_native.h>
|
| 821 |
+
#include <ATen/ops/linalg_ldl_factor_native.h>
|
| 822 |
+
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
|
| 823 |
+
#include <ATen/ops/linalg_ldl_solve_native.h>
|
| 824 |
+
#include <ATen/ops/linalg_lstsq_native.h>
|
| 825 |
+
#include <ATen/ops/linalg_lu_native.h>
|
| 826 |
+
#include <ATen/ops/linalg_lu_factor_native.h>
|
| 827 |
+
#include <ATen/ops/linalg_lu_factor_ex_native.h>
|
| 828 |
+
#include <ATen/ops/linalg_lu_solve_native.h>
|
| 829 |
+
#include <ATen/ops/linalg_matmul_native.h>
|
| 830 |
+
#include <ATen/ops/linalg_matrix_exp_native.h>
|
| 831 |
+
#include <ATen/ops/linalg_matrix_norm_native.h>
|
| 832 |
+
#include <ATen/ops/linalg_matrix_power_native.h>
|
| 833 |
+
#include <ATen/ops/linalg_matrix_rank_native.h>
|
| 834 |
+
#include <ATen/ops/linalg_multi_dot_native.h>
|
| 835 |
+
#include <ATen/ops/linalg_norm_native.h>
|
| 836 |
+
#include <ATen/ops/linalg_pinv_native.h>
|
| 837 |
+
#include <ATen/ops/linalg_qr_native.h>
|
| 838 |
+
#include <ATen/ops/linalg_slogdet_native.h>
|
| 839 |
+
#include <ATen/ops/linalg_solve_native.h>
|
| 840 |
+
#include <ATen/ops/linalg_solve_ex_native.h>
|
| 841 |
+
#include <ATen/ops/linalg_solve_triangular_native.h>
|
| 842 |
+
#include <ATen/ops/linalg_svd_native.h>
|
| 843 |
+
#include <ATen/ops/linalg_svdvals_native.h>
|
| 844 |
+
#include <ATen/ops/linalg_tensorinv_native.h>
|
| 845 |
+
#include <ATen/ops/linalg_tensorsolve_native.h>
|
| 846 |
+
#include <ATen/ops/linalg_vander_native.h>
|
| 847 |
+
#include <ATen/ops/linalg_vecdot_native.h>
|
| 848 |
+
#include <ATen/ops/linalg_vector_norm_native.h>
|
| 849 |
+
#include <ATen/ops/linear_native.h>
|
| 850 |
+
#include <ATen/ops/linear_backward_native.h>
|
| 851 |
+
#include <ATen/ops/linspace_native.h>
|
| 852 |
+
#include <ATen/ops/log_native.h>
|
| 853 |
+
#include <ATen/ops/log10_native.h>
|
| 854 |
+
#include <ATen/ops/log1p_native.h>
|
| 855 |
+
#include <ATen/ops/log2_native.h>
|
| 856 |
+
#include <ATen/ops/log_normal_native.h>
|
| 857 |
+
#include <ATen/ops/log_sigmoid_native.h>
|
| 858 |
+
#include <ATen/ops/log_sigmoid_backward_native.h>
|
| 859 |
+
#include <ATen/ops/log_sigmoid_forward_native.h>
|
| 860 |
+
#include <ATen/ops/log_softmax_native.h>
|
| 861 |
+
#include <ATen/ops/logaddexp_native.h>
|
| 862 |
+
#include <ATen/ops/logaddexp2_native.h>
|
| 863 |
+
#include <ATen/ops/logcumsumexp_native.h>
|
| 864 |
+
#include <ATen/ops/logdet_native.h>
|
| 865 |
+
#include <ATen/ops/logical_and_native.h>
|
| 866 |
+
#include <ATen/ops/logical_not_native.h>
|
| 867 |
+
#include <ATen/ops/logical_or_native.h>
|
| 868 |
+
#include <ATen/ops/logical_xor_native.h>
|
| 869 |
+
#include <ATen/ops/logit_native.h>
|
| 870 |
+
#include <ATen/ops/logit_backward_native.h>
|
| 871 |
+
#include <ATen/ops/logspace_native.h>
|
| 872 |
+
#include <ATen/ops/logsumexp_native.h>
|
| 873 |
+
#include <ATen/ops/lshift_native.h>
|
| 874 |
+
#include <ATen/ops/lstm_native.h>
|
| 875 |
+
#include <ATen/ops/lstm_cell_native.h>
|
| 876 |
+
#include <ATen/ops/lstm_mps_backward_native.h>
|
| 877 |
+
#include <ATen/ops/lt_native.h>
|
| 878 |
+
#include <ATen/ops/lu_solve_native.h>
|
| 879 |
+
#include <ATen/ops/lu_unpack_native.h>
|
| 880 |
+
#include <ATen/ops/mH_native.h>
|
| 881 |
+
#include <ATen/ops/mT_native.h>
|
| 882 |
+
#include <ATen/ops/margin_ranking_loss_native.h>
|
| 883 |
+
#include <ATen/ops/masked_fill_native.h>
|
| 884 |
+
#include <ATen/ops/masked_scatter_native.h>
|
| 885 |
+
#include <ATen/ops/masked_scatter_backward_native.h>
|
| 886 |
+
#include <ATen/ops/masked_select_native.h>
|
| 887 |
+
#include <ATen/ops/masked_select_backward_native.h>
|
| 888 |
+
#include <ATen/ops/matmul_native.h>
|
| 889 |
+
#include <ATen/ops/matmul_backward_native.h>
|
| 890 |
+
#include <ATen/ops/matrix_H_native.h>
|
| 891 |
+
#include <ATen/ops/matrix_exp_native.h>
|
| 892 |
+
#include <ATen/ops/matrix_exp_backward_native.h>
|
| 893 |
+
#include <ATen/ops/matrix_power_native.h>
|
| 894 |
+
#include <ATen/ops/max_native.h>
|
| 895 |
+
#include <ATen/ops/max_pool1d_native.h>
|
| 896 |
+
#include <ATen/ops/max_pool1d_with_indices_native.h>
|
| 897 |
+
#include <ATen/ops/max_pool2d_native.h>
|
| 898 |
+
#include <ATen/ops/max_pool2d_backward_native.h>
|
| 899 |
+
#include <ATen/ops/max_pool2d_with_indices_native.h>
|
| 900 |
+
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
|
| 901 |
+
#include <ATen/ops/max_pool3d_native.h>
|
| 902 |
+
#include <ATen/ops/max_pool3d_with_indices_native.h>
|
| 903 |
+
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
|
| 904 |
+
#include <ATen/ops/max_unpool2d_native.h>
|
| 905 |
+
#include <ATen/ops/max_unpool3d_native.h>
|
| 906 |
+
#include <ATen/ops/maximum_native.h>
|
| 907 |
+
#include <ATen/ops/mean_native.h>
|
| 908 |
+
#include <ATen/ops/median_native.h>
|
| 909 |
+
#include <ATen/ops/meshgrid_native.h>
|
| 910 |
+
#include <ATen/ops/min_native.h>
|
| 911 |
+
#include <ATen/ops/minimum_native.h>
|
| 912 |
+
#include <ATen/ops/miopen_batch_norm_native.h>
|
| 913 |
+
#include <ATen/ops/miopen_batch_norm_backward_native.h>
|
| 914 |
+
#include <ATen/ops/miopen_convolution_native.h>
|
| 915 |
+
#include <ATen/ops/miopen_convolution_add_relu_native.h>
|
| 916 |
+
#include <ATen/ops/miopen_convolution_relu_native.h>
|
| 917 |
+
#include <ATen/ops/miopen_convolution_transpose_native.h>
|
| 918 |
+
#include <ATen/ops/miopen_depthwise_convolution_native.h>
|
| 919 |
+
#include <ATen/ops/miopen_rnn_native.h>
|
| 920 |
+
#include <ATen/ops/miopen_rnn_backward_native.h>
|
| 921 |
+
#include <ATen/ops/mish_native.h>
|
| 922 |
+
#include <ATen/ops/mish_backward_native.h>
|
| 923 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_native.h>
|
| 924 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
|
| 925 |
+
#include <ATen/ops/mkldnn_convolution_native.h>
|
| 926 |
+
#include <ATen/ops/mkldnn_linear_native.h>
|
| 927 |
+
#include <ATen/ops/mkldnn_linear_backward_native.h>
|
| 928 |
+
#include <ATen/ops/mkldnn_linear_backward_input_native.h>
|
| 929 |
+
#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
|
| 930 |
+
#include <ATen/ops/mkldnn_max_pool2d_native.h>
|
| 931 |
+
#include <ATen/ops/mkldnn_max_pool2d_backward_native.h>
|
| 932 |
+
#include <ATen/ops/mkldnn_max_pool3d_native.h>
|
| 933 |
+
#include <ATen/ops/mkldnn_max_pool3d_backward_native.h>
|
| 934 |
+
#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
|
| 935 |
+
#include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h>
|
| 936 |
+
#include <ATen/ops/mkldnn_rnn_layer_native.h>
|
| 937 |
+
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
|
| 938 |
+
#include <ATen/ops/mm_native.h>
|
| 939 |
+
#include <ATen/ops/mode_native.h>
|
| 940 |
+
#include <ATen/ops/moveaxis_native.h>
|
| 941 |
+
#include <ATen/ops/movedim_native.h>
|
| 942 |
+
#include <ATen/ops/mps_convolution_backward_native.h>
|
| 943 |
+
#include <ATen/ops/mps_convolution_transpose_backward_native.h>
|
| 944 |
+
#include <ATen/ops/mse_loss_native.h>
|
| 945 |
+
#include <ATen/ops/mse_loss_backward_native.h>
|
| 946 |
+
#include <ATen/ops/msort_native.h>
|
| 947 |
+
#include <ATen/ops/mul_native.h>
|
| 948 |
+
#include <ATen/ops/multi_margin_loss_native.h>
|
| 949 |
+
#include <ATen/ops/multi_margin_loss_backward_native.h>
|
| 950 |
+
#include <ATen/ops/multilabel_margin_loss_native.h>
|
| 951 |
+
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
|
| 952 |
+
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
|
| 953 |
+
#include <ATen/ops/multinomial_native.h>
|
| 954 |
+
#include <ATen/ops/multiply_native.h>
|
| 955 |
+
#include <ATen/ops/mv_native.h>
|
| 956 |
+
#include <ATen/ops/mvlgamma_native.h>
|
| 957 |
+
#include <ATen/ops/nan_to_num_native.h>
|
| 958 |
+
#include <ATen/ops/nanmean_native.h>
|
| 959 |
+
#include <ATen/ops/nanmedian_native.h>
|
| 960 |
+
#include <ATen/ops/nanquantile_native.h>
|
| 961 |
+
#include <ATen/ops/nansum_native.h>
|
| 962 |
+
#include <ATen/ops/narrow_native.h>
|
| 963 |
+
#include <ATen/ops/narrow_copy_native.h>
|
| 964 |
+
#include <ATen/ops/native_batch_norm_native.h>
|
| 965 |
+
#include <ATen/ops/native_batch_norm_backward_native.h>
|
| 966 |
+
#include <ATen/ops/native_channel_shuffle_native.h>
|
| 967 |
+
#include <ATen/ops/native_dropout_native.h>
|
| 968 |
+
#include <ATen/ops/native_dropout_backward_native.h>
|
| 969 |
+
#include <ATen/ops/native_group_norm_native.h>
|
| 970 |
+
#include <ATen/ops/native_group_norm_backward_native.h>
|
| 971 |
+
#include <ATen/ops/native_layer_norm_native.h>
|
| 972 |
+
#include <ATen/ops/native_layer_norm_backward_native.h>
|
| 973 |
+
#include <ATen/ops/native_norm_native.h>
|
| 974 |
+
#include <ATen/ops/ne_native.h>
|
| 975 |
+
#include <ATen/ops/neg_native.h>
|
| 976 |
+
#include <ATen/ops/negative_native.h>
|
| 977 |
+
#include <ATen/ops/nested_to_padded_tensor_native.h>
|
| 978 |
+
#include <ATen/ops/new_empty_native.h>
|
| 979 |
+
#include <ATen/ops/new_empty_strided_native.h>
|
| 980 |
+
#include <ATen/ops/new_full_native.h>
|
| 981 |
+
#include <ATen/ops/new_ones_native.h>
|
| 982 |
+
#include <ATen/ops/new_zeros_native.h>
|
| 983 |
+
#include <ATen/ops/nextafter_native.h>
|
| 984 |
+
#include <ATen/ops/nll_loss_native.h>
|
| 985 |
+
#include <ATen/ops/nll_loss2d_native.h>
|
| 986 |
+
#include <ATen/ops/nll_loss2d_backward_native.h>
|
| 987 |
+
#include <ATen/ops/nll_loss2d_forward_native.h>
|
| 988 |
+
#include <ATen/ops/nll_loss_backward_native.h>
|
| 989 |
+
#include <ATen/ops/nll_loss_forward_native.h>
|
| 990 |
+
#include <ATen/ops/nll_loss_nd_native.h>
|
| 991 |
+
#include <ATen/ops/nonzero_native.h>
|
| 992 |
+
#include <ATen/ops/nonzero_numpy_native.h>
|
| 993 |
+
#include <ATen/ops/nonzero_static_native.h>
|
| 994 |
+
#include <ATen/ops/norm_native.h>
|
| 995 |
+
#include <ATen/ops/norm_except_dim_native.h>
|
| 996 |
+
#include <ATen/ops/normal_native.h>
|
| 997 |
+
#include <ATen/ops/not_equal_native.h>
|
| 998 |
+
#include <ATen/ops/nuclear_norm_native.h>
|
| 999 |
+
#include <ATen/ops/numpy_T_native.h>
|
| 1000 |
+
#include <ATen/ops/one_hot_native.h>
|
| 1001 |
+
#include <ATen/ops/ones_native.h>
|
| 1002 |
+
#include <ATen/ops/ones_like_native.h>
|
| 1003 |
+
#include <ATen/ops/or_native.h>
|
| 1004 |
+
#include <ATen/ops/orgqr_native.h>
|
| 1005 |
+
#include <ATen/ops/ormqr_native.h>
|
| 1006 |
+
#include <ATen/ops/outer_native.h>
|
| 1007 |
+
#include <ATen/ops/output_nr_native.h>
|
| 1008 |
+
#include <ATen/ops/pad_native.h>
|
| 1009 |
+
#include <ATen/ops/pad_sequence_native.h>
|
| 1010 |
+
#include <ATen/ops/pairwise_distance_native.h>
|
| 1011 |
+
#include <ATen/ops/pdist_native.h>
|
| 1012 |
+
#include <ATen/ops/permute_native.h>
|
| 1013 |
+
#include <ATen/ops/permute_copy_native.h>
|
| 1014 |
+
#include <ATen/ops/pin_memory_native.h>
|
| 1015 |
+
#include <ATen/ops/pinverse_native.h>
|
| 1016 |
+
#include <ATen/ops/pixel_shuffle_native.h>
|
| 1017 |
+
#include <ATen/ops/pixel_unshuffle_native.h>
|
| 1018 |
+
#include <ATen/ops/poisson_native.h>
|
| 1019 |
+
#include <ATen/ops/poisson_nll_loss_native.h>
|
| 1020 |
+
#include <ATen/ops/polar_native.h>
|
| 1021 |
+
#include <ATen/ops/polygamma_native.h>
|
| 1022 |
+
#include <ATen/ops/positive_native.h>
|
| 1023 |
+
#include <ATen/ops/pow_native.h>
|
| 1024 |
+
#include <ATen/ops/prelu_native.h>
|
| 1025 |
+
#include <ATen/ops/prod_native.h>
|
| 1026 |
+
#include <ATen/ops/promote_types_native.h>
|
| 1027 |
+
#include <ATen/ops/put_native.h>
|
| 1028 |
+
#include <ATen/ops/q_per_channel_axis_native.h>
|
| 1029 |
+
#include <ATen/ops/q_per_channel_scales_native.h>
|
| 1030 |
+
#include <ATen/ops/q_per_channel_zero_points_native.h>
|
| 1031 |
+
#include <ATen/ops/q_scale_native.h>
|
| 1032 |
+
#include <ATen/ops/q_zero_point_native.h>
|
| 1033 |
+
#include <ATen/ops/qr_native.h>
|
| 1034 |
+
#include <ATen/ops/qscheme_native.h>
|
| 1035 |
+
#include <ATen/ops/quantile_native.h>
|
| 1036 |
+
#include <ATen/ops/quantize_per_channel_native.h>
|
| 1037 |
+
#include <ATen/ops/quantize_per_tensor_native.h>
|
| 1038 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
|
| 1039 |
+
#include <ATen/ops/quantized_batch_norm_native.h>
|
| 1040 |
+
#include <ATen/ops/quantized_gru_cell_native.h>
|
| 1041 |
+
#include <ATen/ops/quantized_lstm_cell_native.h>
|
| 1042 |
+
#include <ATen/ops/quantized_max_pool1d_native.h>
|
| 1043 |
+
#include <ATen/ops/quantized_max_pool2d_native.h>
|
| 1044 |
+
#include <ATen/ops/quantized_max_pool3d_native.h>
|
| 1045 |
+
#include <ATen/ops/quantized_rnn_relu_cell_native.h>
|
| 1046 |
+
#include <ATen/ops/quantized_rnn_tanh_cell_native.h>
|
| 1047 |
+
#include <ATen/ops/rad2deg_native.h>
|
| 1048 |
+
#include <ATen/ops/rand_native.h>
|
| 1049 |
+
#include <ATen/ops/rand_like_native.h>
|
| 1050 |
+
#include <ATen/ops/randint_native.h>
|
| 1051 |
+
#include <ATen/ops/randint_like_native.h>
|
| 1052 |
+
#include <ATen/ops/randn_native.h>
|
| 1053 |
+
#include <ATen/ops/randn_like_native.h>
|
| 1054 |
+
#include <ATen/ops/random_native.h>
|
| 1055 |
+
#include <ATen/ops/randperm_native.h>
|
| 1056 |
+
#include <ATen/ops/range_native.h>
|
| 1057 |
+
#include <ATen/ops/ravel_native.h>
|
| 1058 |
+
#include <ATen/ops/real_native.h>
|
| 1059 |
+
#include <ATen/ops/reciprocal_native.h>
|
| 1060 |
+
#include <ATen/ops/record_stream_native.h>
|
| 1061 |
+
#include <ATen/ops/refine_names_native.h>
|
| 1062 |
+
#include <ATen/ops/reflection_pad1d_native.h>
|
| 1063 |
+
#include <ATen/ops/reflection_pad1d_backward_native.h>
|
| 1064 |
+
#include <ATen/ops/reflection_pad2d_native.h>
|
| 1065 |
+
#include <ATen/ops/reflection_pad2d_backward_native.h>
|
| 1066 |
+
#include <ATen/ops/reflection_pad3d_native.h>
|
| 1067 |
+
#include <ATen/ops/reflection_pad3d_backward_native.h>
|
| 1068 |
+
#include <ATen/ops/relu_native.h>
|
| 1069 |
+
#include <ATen/ops/relu6_native.h>
|
| 1070 |
+
#include <ATen/ops/remainder_native.h>
|
| 1071 |
+
#include <ATen/ops/rename_native.h>
|
| 1072 |
+
#include <ATen/ops/renorm_native.h>
|
| 1073 |
+
#include <ATen/ops/repeat_native.h>
|
| 1074 |
+
#include <ATen/ops/repeat_interleave_native.h>
|
| 1075 |
+
#include <ATen/ops/replication_pad1d_native.h>
|
| 1076 |
+
#include <ATen/ops/replication_pad1d_backward_native.h>
|
| 1077 |
+
#include <ATen/ops/replication_pad2d_native.h>
|
| 1078 |
+
#include <ATen/ops/replication_pad2d_backward_native.h>
|
| 1079 |
+
#include <ATen/ops/replication_pad3d_native.h>
|
| 1080 |
+
#include <ATen/ops/replication_pad3d_backward_native.h>
|
| 1081 |
+
#include <ATen/ops/requires_grad_native.h>
|
| 1082 |
+
#include <ATen/ops/reshape_native.h>
|
| 1083 |
+
#include <ATen/ops/reshape_as_native.h>
|
| 1084 |
+
#include <ATen/ops/resize_native.h>
|
| 1085 |
+
#include <ATen/ops/resize_as_native.h>
|
| 1086 |
+
#include <ATen/ops/resize_as_sparse_native.h>
|
| 1087 |
+
#include <ATen/ops/resolve_conj_native.h>
|
| 1088 |
+
#include <ATen/ops/resolve_neg_native.h>
|
| 1089 |
+
#include <ATen/ops/result_type_native.h>
|
| 1090 |
+
#include <ATen/ops/retain_grad_native.h>
|
| 1091 |
+
#include <ATen/ops/retains_grad_native.h>
|
| 1092 |
+
#include <ATen/ops/rms_norm_native.h>
|
| 1093 |
+
#include <ATen/ops/rnn_relu_native.h>
|
| 1094 |
+
#include <ATen/ops/rnn_relu_cell_native.h>
|
| 1095 |
+
#include <ATen/ops/rnn_tanh_native.h>
|
| 1096 |
+
#include <ATen/ops/rnn_tanh_cell_native.h>
|
| 1097 |
+
#include <ATen/ops/roll_native.h>
|
| 1098 |
+
#include <ATen/ops/rot90_native.h>
|
| 1099 |
+
#include <ATen/ops/round_native.h>
|
| 1100 |
+
#include <ATen/ops/row_indices_native.h>
|
| 1101 |
+
#include <ATen/ops/row_indices_copy_native.h>
|
| 1102 |
+
#include <ATen/ops/row_stack_native.h>
|
| 1103 |
+
#include <ATen/ops/rrelu_native.h>
|
| 1104 |
+
#include <ATen/ops/rrelu_with_noise_native.h>
|
| 1105 |
+
#include <ATen/ops/rrelu_with_noise_backward_native.h>
|
| 1106 |
+
#include <ATen/ops/rshift_native.h>
|
| 1107 |
+
#include <ATen/ops/rsqrt_native.h>
|
| 1108 |
+
#include <ATen/ops/rsub_native.h>
|
| 1109 |
+
#include <ATen/ops/scalar_tensor_native.h>
|
| 1110 |
+
#include <ATen/ops/scaled_dot_product_attention_native.h>
|
| 1111 |
+
#include <ATen/ops/scatter_native.h>
|
| 1112 |
+
#include <ATen/ops/scatter_add_native.h>
|
| 1113 |
+
#include <ATen/ops/scatter_reduce_native.h>
|
| 1114 |
+
#include <ATen/ops/searchsorted_native.h>
|
| 1115 |
+
#include <ATen/ops/segment_reduce_native.h>
|
| 1116 |
+
#include <ATen/ops/select_native.h>
|
| 1117 |
+
#include <ATen/ops/select_backward_native.h>
|
| 1118 |
+
#include <ATen/ops/select_copy_native.h>
|
| 1119 |
+
#include <ATen/ops/select_scatter_native.h>
|
| 1120 |
+
#include <ATen/ops/selu_native.h>
|
| 1121 |
+
#include <ATen/ops/set_native.h>
|
| 1122 |
+
#include <ATen/ops/set_data_native.h>
|
| 1123 |
+
#include <ATen/ops/sgn_native.h>
|
| 1124 |
+
#include <ATen/ops/sigmoid_native.h>
|
| 1125 |
+
#include <ATen/ops/sigmoid_backward_native.h>
|
| 1126 |
+
#include <ATen/ops/sign_native.h>
|
| 1127 |
+
#include <ATen/ops/signbit_native.h>
|
| 1128 |
+
#include <ATen/ops/silu_native.h>
|
| 1129 |
+
#include <ATen/ops/silu_backward_native.h>
|
| 1130 |
+
#include <ATen/ops/sin_native.h>
|
| 1131 |
+
#include <ATen/ops/sinc_native.h>
|
| 1132 |
+
#include <ATen/ops/sinh_native.h>
|
| 1133 |
+
#include <ATen/ops/size_native.h>
|
| 1134 |
+
#include <ATen/ops/slice_native.h>
|
| 1135 |
+
#include <ATen/ops/slice_backward_native.h>
|
| 1136 |
+
#include <ATen/ops/slice_copy_native.h>
|
| 1137 |
+
#include <ATen/ops/slice_inverse_native.h>
|
| 1138 |
+
#include <ATen/ops/slice_scatter_native.h>
|
| 1139 |
+
#include <ATen/ops/slogdet_native.h>
|
| 1140 |
+
#include <ATen/ops/slow_conv3d_native.h>
|
| 1141 |
+
#include <ATen/ops/slow_conv3d_forward_native.h>
|
| 1142 |
+
#include <ATen/ops/slow_conv_dilated2d_native.h>
|
| 1143 |
+
#include <ATen/ops/slow_conv_dilated3d_native.h>
|
| 1144 |
+
#include <ATen/ops/slow_conv_transpose2d_native.h>
|
| 1145 |
+
#include <ATen/ops/slow_conv_transpose3d_native.h>
|
| 1146 |
+
#include <ATen/ops/smm_native.h>
|
| 1147 |
+
#include <ATen/ops/smooth_l1_loss_native.h>
|
| 1148 |
+
#include <ATen/ops/smooth_l1_loss_backward_native.h>
|
| 1149 |
+
#include <ATen/ops/soft_margin_loss_native.h>
|
| 1150 |
+
#include <ATen/ops/soft_margin_loss_backward_native.h>
|
| 1151 |
+
#include <ATen/ops/softmax_native.h>
|
| 1152 |
+
#include <ATen/ops/softplus_native.h>
|
| 1153 |
+
#include <ATen/ops/softplus_backward_native.h>
|
| 1154 |
+
#include <ATen/ops/softshrink_native.h>
|
| 1155 |
+
#include <ATen/ops/softshrink_backward_native.h>
|
| 1156 |
+
#include <ATen/ops/sort_native.h>
|
| 1157 |
+
#include <ATen/ops/sparse_bsc_tensor_native.h>
|
| 1158 |
+
#include <ATen/ops/sparse_bsr_tensor_native.h>
|
| 1159 |
+
#include <ATen/ops/sparse_compressed_tensor_native.h>
|
| 1160 |
+
#include <ATen/ops/sparse_coo_tensor_native.h>
|
| 1161 |
+
#include <ATen/ops/sparse_csc_tensor_native.h>
|
| 1162 |
+
#include <ATen/ops/sparse_csr_tensor_native.h>
|
| 1163 |
+
#include <ATen/ops/sparse_dim_native.h>
|
| 1164 |
+
#include <ATen/ops/sparse_mask_native.h>
|
| 1165 |
+
#include <ATen/ops/sparse_resize_native.h>
|
| 1166 |
+
#include <ATen/ops/sparse_resize_and_clear_native.h>
|
| 1167 |
+
#include <ATen/ops/sparse_sampled_addmm_native.h>
|
| 1168 |
+
#include <ATen/ops/special_airy_ai_native.h>
|
| 1169 |
+
#include <ATen/ops/special_bessel_j0_native.h>
|
| 1170 |
+
#include <ATen/ops/special_bessel_j1_native.h>
|
| 1171 |
+
#include <ATen/ops/special_bessel_y0_native.h>
|
| 1172 |
+
#include <ATen/ops/special_bessel_y1_native.h>
|
| 1173 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
|
| 1174 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
|
| 1175 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
|
| 1176 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
|
| 1177 |
+
#include <ATen/ops/special_digamma_native.h>
|
| 1178 |
+
#include <ATen/ops/special_entr_native.h>
|
| 1179 |
+
#include <ATen/ops/special_erf_native.h>
|
| 1180 |
+
#include <ATen/ops/special_erfc_native.h>
|
| 1181 |
+
#include <ATen/ops/special_erfcx_native.h>
|
| 1182 |
+
#include <ATen/ops/special_erfinv_native.h>
|
| 1183 |
+
#include <ATen/ops/special_exp2_native.h>
|
| 1184 |
+
#include <ATen/ops/special_expit_native.h>
|
| 1185 |
+
#include <ATen/ops/special_expm1_native.h>
|
| 1186 |
+
#include <ATen/ops/special_gammainc_native.h>
|
| 1187 |
+
#include <ATen/ops/special_gammaincc_native.h>
|
| 1188 |
+
#include <ATen/ops/special_gammaln_native.h>
|
| 1189 |
+
#include <ATen/ops/special_hermite_polynomial_h_native.h>
|
| 1190 |
+
#include <ATen/ops/special_hermite_polynomial_he_native.h>
|
| 1191 |
+
#include <ATen/ops/special_i0_native.h>
|
| 1192 |
+
#include <ATen/ops/special_i0e_native.h>
|
| 1193 |
+
#include <ATen/ops/special_i1_native.h>
|
| 1194 |
+
#include <ATen/ops/special_i1e_native.h>
|
| 1195 |
+
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
|
| 1196 |
+
#include <ATen/ops/special_legendre_polynomial_p_native.h>
|
| 1197 |
+
#include <ATen/ops/special_log1p_native.h>
|
| 1198 |
+
#include <ATen/ops/special_log_ndtr_native.h>
|
| 1199 |
+
#include <ATen/ops/special_log_softmax_native.h>
|
| 1200 |
+
#include <ATen/ops/special_logit_native.h>
|
| 1201 |
+
#include <ATen/ops/special_logsumexp_native.h>
|
| 1202 |
+
#include <ATen/ops/special_modified_bessel_i0_native.h>
|
| 1203 |
+
#include <ATen/ops/special_modified_bessel_i1_native.h>
|
| 1204 |
+
#include <ATen/ops/special_modified_bessel_k0_native.h>
|
| 1205 |
+
#include <ATen/ops/special_modified_bessel_k1_native.h>
|
| 1206 |
+
#include <ATen/ops/special_multigammaln_native.h>
|
| 1207 |
+
#include <ATen/ops/special_ndtr_native.h>
|
| 1208 |
+
#include <ATen/ops/special_ndtri_native.h>
|
| 1209 |
+
#include <ATen/ops/special_polygamma_native.h>
|
| 1210 |
+
#include <ATen/ops/special_psi_native.h>
|
| 1211 |
+
#include <ATen/ops/special_round_native.h>
|
| 1212 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
|
| 1213 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
|
| 1214 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
|
| 1215 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
|
| 1216 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
|
| 1217 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
|
| 1218 |
+
#include <ATen/ops/special_sinc_native.h>
|
| 1219 |
+
#include <ATen/ops/special_softmax_native.h>
|
| 1220 |
+
#include <ATen/ops/special_spherical_bessel_j0_native.h>
|
| 1221 |
+
#include <ATen/ops/special_xlog1py_native.h>
|
| 1222 |
+
#include <ATen/ops/special_xlogy_native.h>
|
| 1223 |
+
#include <ATen/ops/special_zeta_native.h>
|
| 1224 |
+
#include <ATen/ops/split_native.h>
|
| 1225 |
+
#include <ATen/ops/split_copy_native.h>
|
| 1226 |
+
#include <ATen/ops/split_with_sizes_native.h>
|
| 1227 |
+
#include <ATen/ops/split_with_sizes_copy_native.h>
|
| 1228 |
+
#include <ATen/ops/sqrt_native.h>
|
| 1229 |
+
#include <ATen/ops/square_native.h>
|
| 1230 |
+
#include <ATen/ops/squeeze_native.h>
|
| 1231 |
+
#include <ATen/ops/squeeze_copy_native.h>
|
| 1232 |
+
#include <ATen/ops/sspaddmm_native.h>
|
| 1233 |
+
#include <ATen/ops/stack_native.h>
|
| 1234 |
+
#include <ATen/ops/std_native.h>
|
| 1235 |
+
#include <ATen/ops/std_mean_native.h>
|
| 1236 |
+
#include <ATen/ops/stft_native.h>
|
| 1237 |
+
#include <ATen/ops/stride_native.h>
|
| 1238 |
+
#include <ATen/ops/sub_native.h>
|
| 1239 |
+
#include <ATen/ops/subtract_native.h>
|
| 1240 |
+
#include <ATen/ops/sum_native.h>
|
| 1241 |
+
#include <ATen/ops/sum_to_size_native.h>
|
| 1242 |
+
#include <ATen/ops/svd_native.h>
|
| 1243 |
+
#include <ATen/ops/swapaxes_native.h>
|
| 1244 |
+
#include <ATen/ops/swapdims_native.h>
|
| 1245 |
+
#include <ATen/ops/sym_constrain_range_native.h>
|
| 1246 |
+
#include <ATen/ops/sym_constrain_range_for_size_native.h>
|
| 1247 |
+
#include <ATen/ops/sym_numel_native.h>
|
| 1248 |
+
#include <ATen/ops/sym_size_native.h>
|
| 1249 |
+
#include <ATen/ops/sym_storage_offset_native.h>
|
| 1250 |
+
#include <ATen/ops/sym_stride_native.h>
|
| 1251 |
+
#include <ATen/ops/t_native.h>
|
| 1252 |
+
#include <ATen/ops/t_copy_native.h>
|
| 1253 |
+
#include <ATen/ops/take_native.h>
|
| 1254 |
+
#include <ATen/ops/take_along_dim_native.h>
|
| 1255 |
+
#include <ATen/ops/tan_native.h>
|
| 1256 |
+
#include <ATen/ops/tanh_native.h>
|
| 1257 |
+
#include <ATen/ops/tanh_backward_native.h>
|
| 1258 |
+
#include <ATen/ops/tensor_split_native.h>
|
| 1259 |
+
#include <ATen/ops/tensordot_native.h>
|
| 1260 |
+
#include <ATen/ops/thnn_conv2d_native.h>
|
| 1261 |
+
#include <ATen/ops/threshold_native.h>
|
| 1262 |
+
#include <ATen/ops/threshold_backward_native.h>
|
| 1263 |
+
#include <ATen/ops/tile_native.h>
|
| 1264 |
+
#include <ATen/ops/to_native.h>
|
| 1265 |
+
#include <ATen/ops/to_dense_native.h>
|
| 1266 |
+
#include <ATen/ops/to_dense_backward_native.h>
|
| 1267 |
+
#include <ATen/ops/to_mkldnn_native.h>
|
| 1268 |
+
#include <ATen/ops/to_mkldnn_backward_native.h>
|
| 1269 |
+
#include <ATen/ops/to_padded_tensor_native.h>
|
| 1270 |
+
#include <ATen/ops/to_sparse_native.h>
|
| 1271 |
+
#include <ATen/ops/to_sparse_bsc_native.h>
|
| 1272 |
+
#include <ATen/ops/to_sparse_bsr_native.h>
|
| 1273 |
+
#include <ATen/ops/to_sparse_csc_native.h>
|
| 1274 |
+
#include <ATen/ops/to_sparse_csr_native.h>
|
| 1275 |
+
#include <ATen/ops/topk_native.h>
|
| 1276 |
+
#include <ATen/ops/trace_native.h>
|
| 1277 |
+
#include <ATen/ops/trace_backward_native.h>
|
| 1278 |
+
#include <ATen/ops/transpose_native.h>
|
| 1279 |
+
#include <ATen/ops/transpose_copy_native.h>
|
| 1280 |
+
#include <ATen/ops/trapezoid_native.h>
|
| 1281 |
+
#include <ATen/ops/trapz_native.h>
|
| 1282 |
+
#include <ATen/ops/triangular_solve_native.h>
|
| 1283 |
+
#include <ATen/ops/tril_native.h>
|
| 1284 |
+
#include <ATen/ops/tril_indices_native.h>
|
| 1285 |
+
#include <ATen/ops/triplet_margin_loss_native.h>
|
| 1286 |
+
#include <ATen/ops/triu_native.h>
|
| 1287 |
+
#include <ATen/ops/triu_indices_native.h>
|
| 1288 |
+
#include <ATen/ops/true_divide_native.h>
|
| 1289 |
+
#include <ATen/ops/trunc_native.h>
|
| 1290 |
+
#include <ATen/ops/type_as_native.h>
|
| 1291 |
+
#include <ATen/ops/unbind_native.h>
|
| 1292 |
+
#include <ATen/ops/unbind_copy_native.h>
|
| 1293 |
+
#include <ATen/ops/unflatten_native.h>
|
| 1294 |
+
#include <ATen/ops/unflatten_dense_tensors_native.h>
|
| 1295 |
+
#include <ATen/ops/unfold_native.h>
|
| 1296 |
+
#include <ATen/ops/unfold_backward_native.h>
|
| 1297 |
+
#include <ATen/ops/unfold_copy_native.h>
|
| 1298 |
+
#include <ATen/ops/uniform_native.h>
|
| 1299 |
+
#include <ATen/ops/unique_consecutive_native.h>
|
| 1300 |
+
#include <ATen/ops/unique_dim_native.h>
|
| 1301 |
+
#include <ATen/ops/unique_dim_consecutive_native.h>
|
| 1302 |
+
#include <ATen/ops/unsafe_chunk_native.h>
|
| 1303 |
+
#include <ATen/ops/unsafe_split_native.h>
|
| 1304 |
+
#include <ATen/ops/unsafe_split_with_sizes_native.h>
|
| 1305 |
+
#include <ATen/ops/unsqueeze_native.h>
|
| 1306 |
+
#include <ATen/ops/unsqueeze_copy_native.h>
|
| 1307 |
+
#include <ATen/ops/upsample_bicubic2d_native.h>
|
| 1308 |
+
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
|
| 1309 |
+
#include <ATen/ops/upsample_bilinear2d_native.h>
|
| 1310 |
+
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
|
| 1311 |
+
#include <ATen/ops/upsample_linear1d_native.h>
|
| 1312 |
+
#include <ATen/ops/upsample_linear1d_backward_native.h>
|
| 1313 |
+
#include <ATen/ops/upsample_nearest1d_native.h>
|
| 1314 |
+
#include <ATen/ops/upsample_nearest1d_backward_native.h>
|
| 1315 |
+
#include <ATen/ops/upsample_nearest2d_native.h>
|
| 1316 |
+
#include <ATen/ops/upsample_nearest2d_backward_native.h>
|
| 1317 |
+
#include <ATen/ops/upsample_nearest3d_native.h>
|
| 1318 |
+
#include <ATen/ops/upsample_nearest3d_backward_native.h>
|
| 1319 |
+
#include <ATen/ops/upsample_trilinear3d_native.h>
|
| 1320 |
+
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
|
| 1321 |
+
#include <ATen/ops/value_selecting_reduction_backward_native.h>
|
| 1322 |
+
#include <ATen/ops/values_native.h>
|
| 1323 |
+
#include <ATen/ops/values_copy_native.h>
|
| 1324 |
+
#include <ATen/ops/vander_native.h>
|
| 1325 |
+
#include <ATen/ops/var_native.h>
|
| 1326 |
+
#include <ATen/ops/var_mean_native.h>
|
| 1327 |
+
#include <ATen/ops/vdot_native.h>
|
| 1328 |
+
#include <ATen/ops/view_native.h>
|
| 1329 |
+
#include <ATen/ops/view_as_native.h>
|
| 1330 |
+
#include <ATen/ops/view_as_complex_native.h>
|
| 1331 |
+
#include <ATen/ops/view_as_complex_copy_native.h>
|
| 1332 |
+
#include <ATen/ops/view_as_real_native.h>
|
| 1333 |
+
#include <ATen/ops/view_as_real_copy_native.h>
|
| 1334 |
+
#include <ATen/ops/view_copy_native.h>
|
| 1335 |
+
#include <ATen/ops/vsplit_native.h>
|
| 1336 |
+
#include <ATen/ops/vstack_native.h>
|
| 1337 |
+
#include <ATen/ops/where_native.h>
|
| 1338 |
+
#include <ATen/ops/xlogy_native.h>
|
| 1339 |
+
#include <ATen/ops/xor_native.h>
|
| 1340 |
+
#include <ATen/ops/zero_native.h>
|
| 1341 |
+
#include <ATen/ops/zeros_native.h>
|
| 1342 |
+
#include <ATen/ops/zeros_like_native.h>
|
| 1343 |
+
|
| 1344 |
+
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/MemoryOverlap.h>
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <c10/core/DispatchKey.h>
|
| 5 |
+
#include <c10/core/DispatchKeySet.h>
|
| 6 |
+
#include <c10/core/MemoryFormat.h>
|
| 7 |
+
#include <c10/core/TensorImpl.h>
|
| 8 |
+
#include <c10/util/ArrayRef.h>
|
| 9 |
+
#include <c10/util/Exception.h>
|
| 10 |
+
#include <c10/util/Metaprogramming.h>
|
| 11 |
+
#include <c10/util/irange.h>
|
| 12 |
+
|
| 13 |
+
namespace at::native {
|
| 14 |
+
struct NestedTensorImpl;
|
| 15 |
+
inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt);
|
| 16 |
+
int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor);
|
| 17 |
+
at::Tensor construct_nested_strides(const at::Tensor& nested_size);
|
| 18 |
+
at::Tensor construct_offsets(const at::Tensor& nested_size);
|
| 19 |
+
|
| 20 |
+
// TensorImpl backing a NestedTensor: a list of tensors of the same dim that
// may differ in size, stored in one flat buffer. Per-component metadata lives
// in three plain tensors: nested_sizes_ (ntensors x dim size matrix),
// nested_strides_ (same shape), and storage_offsets_ (per-component start
// offset, in elements, into the buffer).
struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
  // Fully explicit constructor: caller supplies storage, dispatch keys,
  // dtype, and all three metadata tensors.
  explicit NestedTensorImpl(
      Storage storage,
      c10::DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);

  // Construct from a flat buffer tensor plus explicit metadata.
  explicit NestedTensorImpl(
      const at::Tensor& buffer,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);
  // assume contiguous, `nested_strides` and `offsets`
  // can be inferred from `nested_sizes`
  explicit NestedTensorImpl(
      const at::Tensor& buffer,
      const at::Tensor& nested_sizes);

  // This constructor is used when creating view tensors from nested tensors
  explicit NestedTensorImpl(
      c10::TensorImpl::ImplType impl_type,
      const at::Tensor& base_tensor,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);

  // TODO: don't expose private implementation details like this; in
  // particular, resizing this tensor will mess up our dim() and
  // callers cannot fix it.
  const Tensor& get_nested_sizes() const {
    return nested_sizes_;
  }
  // TODO: don't expose private implementation details like this
  const Tensor& get_nested_strides() const {
    return nested_strides_;
  }
  // Per-component element offsets into the flat buffer (int64 tensor).
  const Tensor& get_storage_offsets() const {
    return storage_offsets_;
  }
  // Returns nullopt if the ith dimension is irregular. The ith dimension
  // of a NestedTensor is regular if the unbound tensors match in
  // size at the (i-1)th dimension.
  std::optional<int64_t> opt_size(int64_t d) const;

  // Like opt_size(), but errors (TORCH_CHECK) instead of returning nullopt
  // when dimension `d` is irregular.
  int64_t size(int64_t d) const {
    std::optional<int64_t> optional_size = this->opt_size(d);
    TORCH_CHECK(
        optional_size.has_value(),
        "Given dimension ",
        d,
        " is irregular and does not have a size.");
    return *optional_size;
  }
  /**
   * Return a view of the nested tensor as a 1 dimensional contiguous tensor.
   *
   * The buffer tensor created by this function shares the same storage_impl as
   * the original nested tensor, and therefore can be seen as a view.
   *
   * @return A newly constructed view tensor
   */
  at::Tensor get_buffer() const {
    TORCH_CHECK(
        nested_tensor_impl_is_contiguous(this),
        "NestedTensor must be contiguous to get buffer.");
    return get_unsafe_storage_as_tensor();
  }
  /**
   * If possible use get_buffer() instead. This function returns the storage
   * as a tensor directly, which is not safe to use in general. If using this
   * function, the caller must ensure to account for nested_sizes,
   * nested_strides and storage_offsets.
   *
   * @return A newly constructed view tensor
   */
  at::Tensor get_unsafe_storage_as_tensor() const {
    auto buffer_key_set_ = generate_buffer_key_set();
    const auto buffer_size = get_buffer_size();
    // Build a 1-D dense view over the whole storage; VIEW impl type keeps it
    // aliasing the nested tensor's storage rather than owning a copy.
    auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
        c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
    buffer_tensor_impl->set_sizes_contiguous(
        c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
    return Tensor(buffer_tensor_impl);
  }

  // Number of elements the underlying storage can hold (bytes / itemsize).
  size_t get_buffer_size() const {
    return storage_.nbytes() / data_type_.itemsize();
  }

 protected:
  const char* tensorimpl_type_name() const override;

  // TODO: numel_custom and is_contiguous_custom can be profitably overridden
  // with real implementations
  int64_t numel_custom() const override;
  c10::SymInt sym_numel_custom() const override;
  bool is_contiguous_custom(MemoryFormat) const override;
  int64_t size_custom(int64_t d) const override {
    return this->size(d);
  }
  c10::SymInt sym_size_custom(int64_t d) const override {
    return c10::SymInt{this->size(d)};
  }
  IntArrayRef sizes_custom() const override;
  c10::SymIntArrayRef sym_sizes_custom() const override;
  IntArrayRef strides_custom() const override;
  c10::SymIntArrayRef sym_strides_custom() const override;

  // this one is real
  int64_t dim_custom() const override;

  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override;

  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override;

  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    copy_tensor_metadata(
        /*src_impl=*/impl.get(),
        /*dest_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
  }

 private:
  // Must be called after any changes to our dim() to sync the state
  // to TensorImpl.
  void refresh_dim();

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::Tensor nested_sizes_, nested_strides_;
  // The starting positions of the underlying tensors in contiguous buffer
  // i.e. the buffer memory offsets to get the underlying tensors
  // The reason to keep this metadata is that, without strong enough constraint
  // it cannot be derived from `nested_sizes_`
  // and `nested_strides_`:
  // 1. when buffer has blanks, e.g. [tensor1, blank, tensor2]
  //    this can happen e.g. after slicing a nested tensor
  // 2. when multiple tensors share a same memory
  // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2]
  // Some strong enough constraints are:
  // 1. every underlying tensor is contiguous in memory
  //    && nesting in ascending order
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::Tensor storage_offsets_;
  // NOTE: -1 here means the size is missing
  // Optional to allow it to be computed lazily from nested.
  // TODO: maybe we can remove this metadata since
  // we can compute it from `nested_sizes_`
  mutable std::optional<std::vector<int64_t>> opt_sizes_;

  // Shared implementation behind both shallow_copy_and_detach overloads
  // (lvalue and rvalue version counters).
  template <typename VariableVersion>
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
      VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const;

  /**
   * Generates a non-nested key_set from a nested tensor.
   *
   * For many nested tensor kernel implementations a buffer tensor
   * is generated and redispatched to a non-nested kernel this function
   * generates the key set used by that buffer tensor
   *
   * @return Appropriate key set for non-nested tensor
   */
  inline c10::DispatchKeySet generate_buffer_key_set() const {
    auto buffer_key_set = this->key_set();
    const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset);
    // Remove nested tensor specific keys
    buffer_key_set = buffer_key_set -
        c10::DispatchKeySet{
            c10::DispatchKey::NestedTensor,
            c10::DispatchKey::AutogradNestedTensor};

    // Add dense tensor specific keys
    buffer_key_set =
        buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense};
    // Re-add the generic Autograd key only if the nested tensor had any
    // autograd key to begin with.
    buffer_key_set = Autograd
        ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set
        : buffer_key_set;

    return buffer_key_set;
  }
};
|
| 209 |
+
|
| 210 |
+
// Returns the tensor's impl downcast to NestedTensorImpl*, or nullptr when
// the tensor is not nested. Non-throwing counterpart of
// get_nested_tensor_impl().
inline NestedTensorImpl* get_nested_tensor_impl_or_null(
    const at::Tensor& tensor) {
  return tensor.is_nested()
      ? static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl())
      : nullptr;
}
|
| 217 |
+
|
| 218 |
+
// Downcasts the tensor's impl to NestedTensorImpl*.
// Errors via TORCH_CHECK when the tensor is not nested; use
// get_nested_tensor_impl_or_null() if a nullptr result is acceptable.
inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) {
  TORCH_CHECK(
      tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor.");
  return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
}
|
| 223 |
+
|
| 224 |
+
// True iff the nested tensor's components are laid out back-to-back in the
// buffer with no gaps, no overlap, and no reordering, and every component is
// itself contiguous. The metadata tensors are walked row by row: row i of the
// size/stride matrices describes component i.
inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
  int64_t ntensors = nt->size(0);
  if (ntensors == 0) {
    // An empty nested tensor is trivially contiguous.
    return true;
  }
  const Tensor &sizemat = nt->get_nested_sizes(),
               &stridemat = nt->get_nested_strides();
  const int64_t* offsets_ptr =
      nt->get_storage_offsets().const_data_ptr<int64_t>();
  int64_t orig_dim = sizemat.size(1);
  // nesting scalars
  if (orig_dim == 0) {
    // each scalar must be contiguous
    // if there is blank memory between underlying scalars
    for (int64_t i = 0; i < ntensors; i++) {
      if (offsets_ptr[i] != i) {
        return false;
      }
    }
  }
  // nesting tensors
  else {
    // if any underlying tensor is non-contiguous
    const int64_t *sizemat_ptr = sizemat.const_data_ptr<int64_t>(),
                  *stridemat_ptr = stridemat.const_data_ptr<int64_t>();
    for (int64_t i = 0; i < ntensors; i++) {
      // Innermost stride must be 1, and each outer stride must equal the
      // product of all inner sizes (standard row-major contiguity check).
      if (stridemat_ptr[orig_dim - 1] != 1) {
        return false;
      }
      int64_t product = sizemat_ptr[orig_dim - 1];
      for (int64_t j = orig_dim - 2; j >= 0; j--) {
        if (stridemat_ptr[j] != product) {
          return false;
        }
        product *= sizemat_ptr[j];
      }
      // Advance to the next component's row in the metadata matrices.
      sizemat_ptr += orig_dim;
      stridemat_ptr += orig_dim;
    }
    // if there is blank memory between underlying tensors
    if (offsets_ptr[0] != 0) {
      return false;
    }
    sizemat_ptr = sizemat.const_data_ptr<int64_t>();
    stridemat_ptr = stridemat.const_data_ptr<int64_t>();
    for (int64_t i = 1; i < ntensors; i++) {
      // Component i must start exactly where component i-1 ends:
      // previous offset + outermost size * outermost stride.
      if (offsets_ptr[i] !=
          offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) {
        return false;
      }
      sizemat_ptr += orig_dim;
      stridemat_ptr += orig_dim;
    }
  }
  // everything is fine
  return true;
}
|
| 281 |
+
|
| 282 |
+
// Convenience accessor for the nested-size matrix of a nested `tensor`.
// Errors (through get_nested_tensor_impl) when `tensor` is not nested.
inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) {
  return get_nested_tensor_impl(tensor)->get_nested_sizes();
}
|
| 285 |
+
|
| 286 |
+
} // namespace at::native
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ScalarType.h>
|
| 4 |
+
#include <c10/util/BFloat16.h>
|
| 5 |
+
#include <c10/util/Exception.h>
|
| 6 |
+
#include <c10/util/Float8_e4m3fn.h>
|
| 7 |
+
#include <c10/util/Float8_e4m3fnuz.h>
|
| 8 |
+
#include <c10/util/Float8_e5m2.h>
|
| 9 |
+
#include <c10/util/Float8_e5m2fnuz.h>
|
| 10 |
+
#include <c10/util/Half.h>
|
| 11 |
+
|
| 12 |
+
namespace at {
|
| 13 |
+
|
| 14 |
+
// For FP16 or BFloat16 inputs, ops should perform internal math in FP32.
// Primary template: by default a type's math type is itself. Specializations
// below widen every reduced-precision floating type (Half, BFloat16, the
// Float8 variants, and complex<Half>) to a float-based math type.
template <typename scalar_t>
struct OpMathType {
  using type = scalar_t;
};
template <>
struct OpMathType<at::Half> {
  using type = float;
};
template <>
struct OpMathType<at::BFloat16> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e5m2> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e4m3fn> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e5m2fnuz> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e4m3fnuz> {
  using type = float;
};
template <>
struct OpMathType<c10::complex<Half>> {
  using type = c10::complex<float>;
};

// Alias template shorthand: at::opmath_type<T> is the accumulation/math type
// to use for element type T.
template <typename T>
using opmath_type = typename OpMathType<T>::type;
|
| 50 |
+
|
| 51 |
+
// NOTE(review): an anonymous namespace in a header gives every translation
// unit its own copy of this function — presumably intentional upstream to
// keep it internal; confirm before changing linkage.
namespace {

// Runtime counterpart of at::opmath_type<T>: maps a ScalarType to the
// ScalarType ops should use for internal math. Asserts on any ScalarType
// outside AT_FORALL_SCALAR_TYPES_WITH_COMPLEX.
inline c10::ScalarType toOpMathType(const c10::ScalarType type) {
  switch (type) {
    // Expand one `case` per scalar type, each returning the ScalarType of
    // the corresponding compile-time opmath_type.
#define DEFINE_CASE(scalar_t, TypeNum) \
  case ScalarType::TypeNum:            \
    return CppTypeToScalarType<at::opmath_type<scalar_t>>::value;

    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
#undef DEFINE_CASE

    default:
      TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
  }
}

} // namespace
|
| 68 |
+
|
| 69 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/MemoryFormat.h>
|
| 4 |
+
#include <c10/core/SymIntArrayRef.h>
|
| 5 |
+
#include <c10/core/TensorImpl.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
// An "Opaque" TensorImpl -- there are no strides and (for now)
|
| 11 |
+
// even data() is not supported (thus no pointer arithmetic).
|
| 12 |
+
|
| 13 |
+
// NOTE: We could allow data() in the future, but would have to ensure pointer
|
| 14 |
+
// arithmetic code is properly guarded.
|
| 15 |
+
//
|
| 16 |
+
// NOTE: This does not support resize_ (and other metadata-changing ops) because
|
| 17 |
+
// of `shallow_copy_and_detach`. We would need to define an interface to
|
| 18 |
+
// "shallow copy" in order to add support.
|
| 19 |
+
|
| 20 |
+
// TensorImpl for backends whose data lives behind an opaque handle (no
// storage, no strides, no data()). OpaqueHandle is the backend-specific
// payload; metadata ops that would require storage are disabled and error.
template <typename OpaqueHandle>
struct TORCH_API OpaqueTensorImpl : public TensorImpl {
  // public constructor for now...
  OpaqueTensorImpl(
      at::DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      c10::Device device,
      OpaqueHandle opaque_handle,
      c10::IntArrayRef sizes,
      bool is_non_overlapping_and_dense = true)
      : TensorImpl(key_set, data_type, device),
        opaque_handle_(std::move(opaque_handle)) {
    // No Storage backs this impl: any storage() access must throw.
    set_storage_access_should_throw();
    set_custom_sizes_strides(SizesStridesPolicy::CustomStrides);
    sizes_and_strides_.set_sizes(sizes);
    refresh_numel();
    // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
    is_non_overlapping_and_dense_ = is_non_overlapping_and_dense;
  }

  // Destructor doesn't call release_resources because it's
  // unnecessary; don't forget to change that if needed!
  void release_resources() override {
    TensorImpl::release_resources();
    opaque_handle_ = {};
  }

  void set_size(int64_t dim, int64_t new_size) override {
    AT_ERROR("opaque tensors do not have set_size");
  }

  void set_stride(int64_t dim, int64_t new_stride) override {
    AT_ERROR("opaque tensors do not have set_stride");
  }

  void set_storage_offset(int64_t storage_offset) override {
    AT_ERROR("opaque tensors do not have set_storage_offset");
  }

#ifdef DEBUG
  bool has_storage() const override {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        !storage_, "OpaqueTensorImpl assumes that storage_ is never set");
    return false;
  }
#endif

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
        key_set(),
        dtype(),
        device(),
        opaque_handle_,
        sizes_and_strides_.sizes_arrayref());
    copy_tensor_metadata(
        /*src_opaque_impl=*/this,
        /*dest_opaque_impl=*/impl.get(),
        /*version_counter=*/version_counter,
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    return impl;
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
        key_set(),
        dtype(),
        device(),
        opaque_handle_,
        sizes_and_strides_.sizes_arrayref());
    copy_tensor_metadata(
        /*src_opaque_impl=*/this,
        /*dest_opaque_impl=*/impl.get(),
        /*version_counter=*/std::move(version_counter),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    return impl;
  }

  /**
   * Shallow-copies data from another TensorImpl into this TensorImpl.
   *
   * For why this function doesn't check this TensorImpl's
   * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
   */
  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
    auto opaque_impl =
        static_cast<const OpaqueTensorImpl<OpaqueHandle>*>(impl.get());
    copy_tensor_metadata(
        /*src_impl=*/opaque_impl,
        /*dest_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
    refresh_numel();
  }

  // Read-only access to the backend payload.
  const OpaqueHandle& opaque_handle() const {
    return opaque_handle_;
  }

  // Mutable access to the backend payload; caller is responsible for keeping
  // the impl's metadata consistent with any change.
  OpaqueHandle& unsafe_opaque_handle() {
    return opaque_handle_;
  }

 protected:
  /**
   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
   * storage_offset) from one TensorImpl to another TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
   * [ TensorImpl Shallow-Copying ].
   */
  static void copy_tensor_metadata(
      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(
        src_opaque_impl,
        dest_opaque_impl,
        version_counter,
        allow_tensor_metadata_change);

    // OpaqueTensorImpl-specific fields.
    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
  }

  // Rvalue-version-counter overload of the above.
  static void copy_tensor_metadata(
      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(
        src_opaque_impl,
        dest_opaque_impl,
        std::move(version_counter),
        allow_tensor_metadata_change);

    // OpaqueTensorImpl-specific fields.
    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
  }

 private:
  const char* tensorimpl_type_name() const override {
    return "OpaqueTensorImpl";
  }

  OpaqueHandle opaque_handle_;
};
|
| 186 |
+
|
| 187 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Parallel.h>
|
| 4 |
+
#include <c10/core/thread_pool.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
// c10::ThreadPool whose worker threads are initialized for ATen use: each
// worker is named "PTThreadPool" and runs at::init_num_threads() before
// processing tasks.
class TORCH_API PTThreadPool : public c10::ThreadPool {
 public:
  // numa_node_id == -1 means no NUMA binding is requested for the pool.
  explicit PTThreadPool(int pool_size, int numa_node_id = -1)
      : c10::ThreadPool(pool_size, numa_node_id, []() {
          c10::setThreadName("PTThreadPool");
          at::init_num_threads();
        }) {}
};
|
| 16 |
+
|
| 17 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/PadNd.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/Exception.h>
|
| 3 |
+
#include <c10/util/string_view.h>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
|
| 7 |
+
// How out-of-range positions are filled by the N-d padding operators.
enum class padding_mode {
  reflect, // mirror values across the edge, excluding the edge element
  replicate, // repeat the edge value
  circular, // wrap around to the opposite edge
  constant, // fill with a constant value
};
|
| 13 |
+
|
| 14 |
+
// Maps a padding_mode enumerator to its canonical lowercase name.
// Reports an error via TORCH_CHECK for any value outside the enum.
static inline c10::string_view padding_mode_string(padding_mode m) {
  if (m == padding_mode::reflect) {
    return "reflect";
  }
  if (m == padding_mode::replicate) {
    return "replicate";
  }
  if (m == padding_mode::circular) {
    return "circular";
  }
  if (m == padding_mode::constant) {
    return "constant";
  }
  TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
}
|
| 27 |
+
|
| 28 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/Exception.h>
|
| 4 |
+
#include <c10/util/ParallelGuard.h>
|
| 5 |
+
#include <c10/util/SmallVector.h>
|
| 6 |
+
|
| 7 |
+
namespace at {
|
| 8 |
+
|
| 9 |
+
// Applies f(begin, end) over [begin, end), possibly split into chunks run on
// worker threads. Falls back to a single sequential call when intra-op
// parallelism is compiled out (no INTRA_OP_PARALLEL), when the range is
// smaller than grain_size, when already inside a parallel region, or when
// only one thread is available.
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(grain_size >= 0);
  if (begin >= end) {
    // Empty range: nothing to do.
    return;
  }

#ifdef INTRA_OP_PARALLEL
  at::internal::lazy_init_num_threads();
  const auto numiter = end - begin;
  const bool use_parallel =
      (numiter > grain_size && numiter > 1 && !at::in_parallel_region() &&
       at::get_num_threads() > 1);
  if (!use_parallel) {
    // Sequential path: still set thread id 0 and mark the parallel guard so
    // f observes the same environment as it would on a worker thread.
    internal::ThreadIdGuard tid_guard(0);
    c10::ParallelGuard guard(true);
    f(begin, end);
    return;
  }

  internal::invoke_parallel(
      begin, end, grain_size, [&](int64_t begin, int64_t end) {
        c10::ParallelGuard guard(true);
        f(begin, end);
      });
#else
  internal::ThreadIdGuard tid_guard(0);
  c10::ParallelGuard guard(true);
  f(begin, end);
#endif
}
|
| 44 |
+
|
| 45 |
+
// Parallel reduction over [begin, end): each chunk computes a partial result
// via f(chunk_begin, chunk_end, ident), and the per-thread partials are
// combined sequentially with sf. `ident` must be the identity of sf (it seeds
// every partial and the final combine). Falls back to a single f call under
// the same conditions as parallel_for.
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf) {
  TORCH_CHECK(grain_size >= 0);
  if (begin >= end) {
    // Empty range reduces to the identity.
    return ident;
  }

#ifdef INTRA_OP_PARALLEL
  at::internal::lazy_init_num_threads();
  const auto max_threads = at::get_num_threads();
  const bool use_parallel =
      ((end - begin) > grain_size && !at::in_parallel_region() &&
       max_threads > 1);
  if (!use_parallel) {
    internal::ThreadIdGuard tid_guard(0);
    c10::ParallelGuard guard(true);
    return f(begin, end, ident);
  }

  // One slot per thread; each worker writes only results[its own tid], so no
  // synchronization is needed on the vector.
  c10::SmallVector<scalar_t, 64> results(max_threads, ident);
  internal::invoke_parallel(
      begin,
      end,
      grain_size,
      [&](const int64_t my_begin, const int64_t my_end) {
        const auto tid = at::get_thread_num();
        c10::ParallelGuard guard(true);
        results[tid] = f(my_begin, my_end, ident);
      });

  // Sequential combine of the per-thread partials (slots of idle threads
  // still hold ident, which sf must leave unchanged).
  scalar_t result = ident;
  for (auto partial_result : results) {
    result = sf(result, partial_result);
  }
  return result;
#else
  internal::ThreadIdGuard tid_guard(0);
  c10::ParallelGuard guard(true);
  return f(begin, end, ident);
#endif
}
|
| 92 |
+
|
| 93 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Parallel.h
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/Config.h>
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <functional>
|
| 5 |
+
#include <string>
|
| 6 |
+
|
| 7 |
+
namespace at {
|
| 8 |
+
|
| 9 |
+
// Ceiling division for non-negative work sizes: the number of chunks of
// size y needed to cover x elements (e.g. divup(10, 3) == 4).
inline int64_t divup(int64_t x, int64_t y) {
  const int64_t biased = x + y - 1;
  return biased / y;
}
|
| 12 |
+
|
| 13 |
+
// Called during new thread initialization; sets up per-thread threading
// state (implementation lives in the backend-specific .cpp).
TORCH_API void init_num_threads();

// Sets the number of threads to be used in a parallel region
TORCH_API void set_num_threads(int);

// Returns the maximum number of threads that may be used in a parallel region
TORCH_API int get_num_threads();

// Returns the current thread number (starting from 0)
// in the current parallel region, or 0 in the sequential region
TORCH_API int get_thread_num();

// Checks whether the calling code runs inside a parallel region
TORCH_API bool in_parallel_region();
|
| 28 |
+
|
| 29 |
+
namespace internal {
|
| 30 |
+
|
| 31 |
+
// Initialise num_threads lazily at first parallel call
|
| 32 |
+
inline void lazy_init_num_threads() {
|
| 33 |
+
thread_local bool init = false;
|
| 34 |
+
if (C10_UNLIKELY(!init)) {
|
| 35 |
+
at::init_num_threads();
|
| 36 |
+
init = true;
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
TORCH_API void set_thread_num(int);
|
| 41 |
+
|
| 42 |
+
class TORCH_API ThreadIdGuard {
|
| 43 |
+
public:
|
| 44 |
+
ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
|
| 45 |
+
set_thread_num(new_id);
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
~ThreadIdGuard() {
|
| 49 |
+
set_thread_num(old_id_);
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
private:
|
| 53 |
+
int old_id_;
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
} // namespace internal
|
| 57 |
+
|
| 58 |
+
/*
|
| 59 |
+
parallel_for
|
| 60 |
+
|
| 61 |
+
begin: index at which to start applying user function
|
| 62 |
+
|
| 63 |
+
end: index at which to stop applying user function
|
| 64 |
+
|
| 65 |
+
grain_size: number of elements per chunk. impacts the degree of parallelization
|
| 66 |
+
|
| 67 |
+
f: user function applied in parallel to the chunks, signature:
|
| 68 |
+
void f(int64_t begin, int64_t end)
|
| 69 |
+
|
| 70 |
+
Warning: parallel_for does NOT copy thread local
|
| 71 |
+
states from the current thread to the worker threads.
|
| 72 |
+
This means for example that Tensor operations CANNOT be used in the
|
| 73 |
+
body of your function, only data pointers.
|
| 74 |
+
*/
|
| 75 |
+
template <class F>
|
| 76 |
+
inline void parallel_for(
|
| 77 |
+
const int64_t begin,
|
| 78 |
+
const int64_t end,
|
| 79 |
+
const int64_t grain_size,
|
| 80 |
+
const F& f);
|
| 81 |
+
|
| 82 |
+
/*
|
| 83 |
+
parallel_reduce
|
| 84 |
+
|
| 85 |
+
begin: index at which to start applying reduction
|
| 86 |
+
|
| 87 |
+
end: index at which to stop applying reduction
|
| 88 |
+
|
| 89 |
+
grain_size: number of elements per chunk. impacts number of elements in
|
| 90 |
+
intermediate results tensor and degree of parallelization.
|
| 91 |
+
|
| 92 |
+
ident: identity for binary combination function sf. sf(ident, x) needs to return
|
| 93 |
+
x.
|
| 94 |
+
|
| 95 |
+
f: function for reduction over a chunk. f needs to be of signature scalar_t
|
| 96 |
+
f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
|
| 97 |
+
|
| 98 |
+
sf: function to combine two partial results. sf needs to be of signature
|
| 99 |
+
scalar_t sf(scalar_t x, scalar_t y)
|
| 100 |
+
|
| 101 |
+
For example, you might have a tensor of 10000 entries and want to sum together
|
| 102 |
+
all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
|
| 103 |
+
an intermediate result tensor with 4 elements. Then it will execute the function
|
| 104 |
+
"f" you provide and pass the beginning and end index of these chunks, so
|
| 105 |
+
0-2499, 2500-4999, etc. and the combination identity. It will then write out
|
| 106 |
+
the result from each of these chunks into the intermediate result tensor. After
|
| 107 |
+
that it'll reduce the partial results from each chunk into a single number using
|
| 108 |
+
the combination function sf and the identity ident. For a total summation this
|
| 109 |
+
would be "+" and 0 respectively. This is similar to tbb's approach [1], where
|
| 110 |
+
you need to provide a function to accumulate a subrange, a function to combine
|
| 111 |
+
two partial results and an identity.
|
| 112 |
+
|
| 113 |
+
Warning: parallel_reduce does NOT copy thread local
|
| 114 |
+
states from the current thread to the worker threads.
|
| 115 |
+
This means for example that Tensor operations CANNOT be used in the
|
| 116 |
+
body of your function, only data pointers.
|
| 117 |
+
|
| 118 |
+
[1] https://software.intel.com/en-us/node/506154
|
| 119 |
+
*/
|
| 120 |
+
template <class scalar_t, class F, class SF>
|
| 121 |
+
inline scalar_t parallel_reduce(
|
| 122 |
+
const int64_t begin,
|
| 123 |
+
const int64_t end,
|
| 124 |
+
const int64_t grain_size,
|
| 125 |
+
const scalar_t ident,
|
| 126 |
+
const F& f,
|
| 127 |
+
const SF& sf);
|
| 128 |
+
|
| 129 |
+
// Returns a detailed string describing parallelization settings
|
| 130 |
+
TORCH_API std::string get_parallel_info();
|
| 131 |
+
|
| 132 |
+
// Sets number of threads used for inter-op parallelism
|
| 133 |
+
TORCH_API void set_num_interop_threads(int);
|
| 134 |
+
|
| 135 |
+
// Returns the number of threads used for inter-op parallelism
|
| 136 |
+
TORCH_API int get_num_interop_threads();
|
| 137 |
+
|
| 138 |
+
// Launches inter-op parallel task
|
| 139 |
+
TORCH_API void launch(std::function<void()> func);
|
| 140 |
+
namespace internal {
|
| 141 |
+
void launch_no_thread_state(std::function<void()> fn);
|
| 142 |
+
} // namespace internal
|
| 143 |
+
|
| 144 |
+
// Launches intra-op parallel task
|
| 145 |
+
TORCH_API void intraop_launch(std::function<void()> func);
|
| 146 |
+
|
| 147 |
+
// Returns number of intra-op threads used by default
|
| 148 |
+
TORCH_API int intraop_default_num_threads();
|
| 149 |
+
|
| 150 |
+
} // namespace at
|
| 151 |
+
|
| 152 |
+
#if AT_PARALLEL_OPENMP
|
| 153 |
+
#include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
|
| 154 |
+
#elif AT_PARALLEL_NATIVE
|
| 155 |
+
#include <ATen/ParallelNative.h> // IWYU pragma: keep
|
| 156 |
+
#endif
|
| 157 |
+
|
| 158 |
+
#include <ATen/Parallel-inl.h> // IWYU pragma: keep
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
#include <functional>
|
| 6 |
+
|
| 7 |
+
namespace at {
|
| 8 |
+
|
| 9 |
+
// Launches intra-op parallel task, returns a future
|
| 10 |
+
TORCH_API c10::intrusive_ptr<c10::ivalue::Future> intraop_launch_future(
|
| 11 |
+
std::function<void()> func);
|
| 12 |
+
|
| 13 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/Exception.h>
|
| 4 |
+
|
| 5 |
+
#define INTRA_OP_PARALLEL
|
| 6 |
+
|
| 7 |
+
namespace at::internal {
|
| 8 |
+
|
| 9 |
+
TORCH_API void invoke_parallel(
|
| 10 |
+
const int64_t begin,
|
| 11 |
+
const int64_t end,
|
| 12 |
+
const int64_t grain_size,
|
| 13 |
+
const std::function<void(int64_t, int64_t)>& f);
|
| 14 |
+
|
| 15 |
+
} // namespace at::internal
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Scalar.h
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Scalar.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ScalarOps.h
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <c10/core/Scalar.h>
|
| 5 |
+
|
| 6 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 7 |
+
#include <ATen/Functions.h>
|
| 8 |
+
#else
|
| 9 |
+
#include <ATen/ops/scalar_tensor.h>
|
| 10 |
+
#endif
|
| 11 |
+
|
| 12 |
+
namespace at::detail {
|
| 13 |
+
// When filling a number to 1-element CPU tensor, we want to skip
|
| 14 |
+
// everything but manipulate data ptr directly.
|
| 15 |
+
// Ideally this fast pass should be implemented in TensorIterator,
|
| 16 |
+
// but we also want to skip compute_types which in not avoidable
|
| 17 |
+
// in TensorIterator for now.
|
| 18 |
+
Tensor& scalar_fill(Tensor& self, const Scalar& value);
|
| 19 |
+
TORCH_API Tensor scalar_tensor_static(
|
| 20 |
+
const Scalar& s,
|
| 21 |
+
std::optional<ScalarType> dtype_opt,
|
| 22 |
+
std::optional<Device> device_opt);
|
| 23 |
+
} // namespace at::detail
|
| 24 |
+
|
| 25 |
+
// This is in the c10 namespace because we use ADL to find the functions in it.
|
| 26 |
+
namespace c10 {
|
| 27 |
+
|
| 28 |
+
// FIXME: this should be (and was) Scalar::toTensor, but there is currently no
|
| 29 |
+
// way to implement this without going through Derived Types (which are not part
|
| 30 |
+
// of core).
|
| 31 |
+
// FIXME: this should be (and was) Scalar::toTensor, but there is currently no
// way to implement this without going through Derived Types (which are not
// part of core).
inline at::Tensor scalar_to_tensor(
    const Scalar& s,
    const Device device = at::kCPU) {
  // Non-CPU devices go through the generic scalar_tensor factory.
  if (device != at::kCPU) {
    return at::scalar_tensor(s, at::device(device).dtype(s.type()));
  }
  // This is the fast track we have for CPU scalar tensors.
  return at::detail::scalar_tensor_static(s, s.type(), at::kCPU);
}
|
| 40 |
+
|
| 41 |
+
} // namespace c10
|
| 42 |
+
|
| 43 |
+
namespace at::native {
|
| 44 |
+
|
| 45 |
+
// Builds a 0-dim tensor from `scalar` and tags it as a "wrapped number",
// i.e. a tensor that originated from a C++/Python scalar rather than a
// user-provided tensor (affects type promotion downstream).
inline Tensor wrapped_scalar_tensor(
    const Scalar& scalar,
    const Device device = at::kCPU) {
  Tensor result = scalar_to_tensor(scalar, device);
  result.unsafeGetTensorImpl()->set_wrapped_number(true);
  return result;
}
|
| 52 |
+
|
| 53 |
+
} // namespace at::native
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Export.h>
|
| 4 |
+
#include <cstdint>
|
| 5 |
+
|
| 6 |
+
// A simple thread local enumeration, used to link forward and backward pass
|
| 7 |
+
// ops and is used by autograd and observers framework
|
| 8 |
+
namespace at::sequence_number {
|
| 9 |
+
|
| 10 |
+
TORCH_API uint64_t peek();
|
| 11 |
+
TORCH_API uint64_t get_and_increment();
|
| 12 |
+
|
| 13 |
+
} // namespace at::sequence_number
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/Storage.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/core/Storage.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/StorageUtils.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Storage.h>
|
| 4 |
+
#include <c10/core/StorageImpl.h>
|
| 5 |
+
#include <c10/util/intrusive_ptr.h>
|
| 6 |
+
|
| 7 |
+
namespace at {
|
| 8 |
+
|
| 9 |
+
class TensorBase;
|
| 10 |
+
|
| 11 |
+
// Here we define a series of utils to create/manipulate ATen backed
|
| 12 |
+
// c10 storage implementations.
|
| 13 |
+
|
| 14 |
+
/**
|
| 15 |
+
* Create a new shared memory storage impl managed by file descriptor
|
| 16 |
+
*
|
| 17 |
+
* @param size size in bytes
|
| 18 |
+
*/
|
| 19 |
+
C10_EXPORT c10::intrusive_ptr<c10::StorageImpl> new_shm_fd_storage(size_t size);
|
| 20 |
+
|
| 21 |
+
/**
|
| 22 |
+
* Copy src to dst
|
| 23 |
+
* Caller must guarantee the validness of the storage objects
|
| 24 |
+
* during the entire copy process, esp. when it's async.
|
| 25 |
+
*
|
| 26 |
+
* This can probably live in c10 namespace later if needed,
|
| 27 |
+
* but for now keep it in at to keep implementation simple.
|
| 28 |
+
*
|
| 29 |
+
* @param dst dst tensor
|
| 30 |
+
* @param src src tensor
|
| 31 |
+
* @param non_blocking (default false) whether this operation blocks caller
|
| 32 |
+
*/
|
| 33 |
+
C10_EXPORT void storage_copy(
|
| 34 |
+
c10::Storage& dst,
|
| 35 |
+
const c10::Storage& src,
|
| 36 |
+
bool non_blocking = false);
|
| 37 |
+
|
| 38 |
+
/**
|
| 39 |
+
* In place change the storage to shm based.
|
| 40 |
+
*
|
| 41 |
+
* This is only applicable to CPU tensors not already shared.
|
| 42 |
+
* Otherwise, it's a no op to mirror the THP tensor behavior:
|
| 43 |
+
* https://pytorch.org/docs/stable/generated/torch.Tensor.share_memory_.html
|
| 44 |
+
*
|
| 45 |
+
* @param t a tensor
|
| 46 |
+
*/
|
| 47 |
+
C10_EXPORT void share_memory_(TensorBase& t);
|
| 48 |
+
|
| 49 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h
ADDED
|
@@ -0,0 +1,737 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ExpandUtils.h>
|
| 4 |
+
#include <ATen/ScalarOps.h>
|
| 5 |
+
#include <ATen/core/Tensor.h>
|
| 6 |
+
#include <ATen/core/TensorBody.h>
|
| 7 |
+
#include <c10/core/SymInt.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
|
| 11 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 12 |
+
#include <ATen/Functions.h>
|
| 13 |
+
#include <ATen/NativeFunctions.h>
|
| 14 |
+
#else
|
| 15 |
+
#include <ATen/ops/alias.h>
|
| 16 |
+
#include <ATen/ops/empty.h>
|
| 17 |
+
#include <ATen/ops/scalar_tensor.h>
|
| 18 |
+
#include <ATen/ops/zeros.h>
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
#include <ATen/core/List.h>
|
| 22 |
+
|
| 23 |
+
#include <utility>
|
| 24 |
+
|
| 25 |
+
namespace at::indexing {
|
| 26 |
+
|
| 27 |
+
constexpr int64_t INDEX_MIN = c10::SymInt::min_representable_int();
|
| 28 |
+
constexpr int64_t INDEX_MAX = -(INDEX_MIN + 1);
|
| 29 |
+
|
| 30 |
+
enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor };
|
| 31 |
+
|
| 32 |
+
constexpr std::nullopt_t None = std::nullopt;
|
| 33 |
+
|
| 34 |
+
// Tag type used to spell the Python `Ellipsis` (`...`) index in C++; the
// canonical instance is the `at::indexing::Ellipsis` constant declared below.
struct TORCH_API EllipsisIndexType final {
  EllipsisIndexType() = default;
};
|
| 37 |
+
TORCH_API extern const EllipsisIndexType Ellipsis;
|
| 38 |
+
|
| 39 |
+
// C++ analogue of a Python slice `start:stop:step`. Omitted (nullopt) fields
// are normalized at construction time:
//   * step defaults to 1 and must be non-zero,
//   * start defaults to 0 (or INDEX_MAX when stepping backwards),
//   * stop defaults to INDEX_MAX (or INDEX_MIN when stepping backwards).
struct TORCH_API Slice final {
 public:
  Slice(
      std::optional<c10::SymInt> start_index = std::nullopt,
      std::optional<c10::SymInt> stop_index = std::nullopt,
      std::optional<c10::SymInt> step_index = std::nullopt) {
    // Normalize step first, since the start/stop defaults depend on its sign.
    if (!step_index.has_value()) {
      step_ = c10::SymInt(1);
    } else {
      step_ = std::move(step_index).value();
    }

    TORCH_CHECK_VALUE(
        step_.sym_ne(0).expect_true(__FILE__, __LINE__),
        "slice step cannot be zero");

    // Defaults mirror Python slicing: forward slices start at 0, backward
    // slices start at the end of the dimension.
    if (!start_index.has_value()) {
      start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0);
    } else {
      start_ = std::move(start_index).value();
    }

    if (!stop_index.has_value()) {
      stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX);
    } else {
      stop_ = std::move(stop_index).value();
    }
  }

  // Normalized start bound.
  inline c10::SymInt start() const {
    return start_;
  }

  // Normalized stop bound.
  inline c10::SymInt stop() const {
    return stop_;
  }

  // Normalized (non-zero) step.
  inline c10::SymInt step() const {
    return step_;
  }

 private:
  c10::SymInt start_;
  c10::SymInt stop_;
  c10::SymInt step_;
};
|
| 85 |
+
|
| 86 |
+
TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
|
| 87 |
+
|
| 88 |
+
// `at::indexing::TensorIndex` is used for converting C++ tensor indices such as
|
| 89 |
+
// `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}`
|
| 90 |
+
// into its equivalent `std::vector<TensorIndex>`, so that further tensor
|
| 91 |
+
// indexing operations can be performed using the supplied indices.
|
| 92 |
+
//
|
| 93 |
+
// There is one-to-one correspondence between Python and C++ tensor index types:
|
| 94 |
+
// Python | C++
|
| 95 |
+
// -----------------------------------------------------
|
| 96 |
+
// `None` | `at::indexing::None`
|
| 97 |
+
// `Ellipsis` | `at::indexing::Ellipsis`
|
| 98 |
+
// `...` | `"..."`
|
| 99 |
+
// `123` | `123`
|
| 100 |
+
// `True` / `False` | `true` / `false`
|
| 101 |
+
// `:` | `Slice()` / `Slice(None, None)`
|
| 102 |
+
// `::` | `Slice()` / `Slice(None, None, None)`
|
| 103 |
+
// `1:` | `Slice(1, None)`
|
| 104 |
+
// `1::` | `Slice(1, None, None)`
|
| 105 |
+
// `:3` | `Slice(None, 3)`
|
| 106 |
+
// `:3:` | `Slice(None, 3, None)`
|
| 107 |
+
// `::2` | `Slice(None, None, 2)`
|
| 108 |
+
// `1:3` | `Slice(1, 3)`
|
| 109 |
+
// `1::2` | `Slice(1, None, 2)`
|
| 110 |
+
// `:3:2` | `Slice(None, 3, 2)`
|
| 111 |
+
// `1:3:2` | `Slice(1, 3, 2)`
|
| 112 |
+
// `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
|
| 113 |
+
// Tagged union over the six supported C++ index forms (see the Python/C++
// correspondence table above). Exactly one alternative is active, recorded
// in `type_`; the accessors below do not check the tag, so callers are
// expected to query `is_*()` first.
struct TORCH_API TensorIndex final {
  // Case 1: `at::indexing::None`
  TensorIndex(std::nullopt_t) : type_(TensorIndexType::None) {}

  // Case 2: "..." / `at::indexing::Ellipsis`
  TensorIndex(at::indexing::EllipsisIndexType)
      : type_(TensorIndexType::Ellipsis) {}
  // Only the literal string "..." is accepted as a string-spelled ellipsis.
  TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
    TORCH_CHECK_VALUE(
        strcmp(str, "...") == 0,
        "Expected \"...\" to represent an ellipsis index, but got \"",
        str,
        "\"");
  }

  // Case 3: (Sym) Integer value
  TensorIndex(SymInt integer)
      : integer_(std::move(integer)), type_(TensorIndexType::SymInt) {}
  TensorIndex(int64_t integer) : TensorIndex(SymInt(integer)) {}
  TensorIndex(int integer) : TensorIndex(SymInt(integer)) {}

  // Case 4: Boolean value
  // (templated so that only a genuine `bool` matches, not types merely
  // convertible to bool such as pointers)
  template <class T, class = std::enable_if_t<std::is_same_v<bool, T>>>
  TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {}

  // Case 5: Slice represented in `at::indexing::Slice` form
  TensorIndex(Slice slice)
      : slice_(std::move(slice)), type_(TensorIndexType::Slice) {}

  // Case 6: Tensor value
  TensorIndex(Tensor tensor)
      : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {}

  inline bool is_none() const {
    return type_ == TensorIndexType::None;
  }

  inline bool is_ellipsis() const {
    return type_ == TensorIndexType::Ellipsis;
  }

  inline bool is_integer() const {
    return type_ == TensorIndexType::SymInt;
  }

  inline SymInt integer() const {
    return integer_;
  }

  inline bool is_boolean() const {
    return type_ == TensorIndexType::Boolean;
  }

  inline bool boolean() const {
    return boolean_;
  }

  inline bool is_slice() const {
    return type_ == TensorIndexType::Slice;
  }

  inline const Slice& slice() const {
    return slice_;
  }

  inline bool is_tensor() const {
    return type_ == TensorIndexType::Tensor;
  }

  inline const Tensor& tensor() const {
    return tensor_;
  }

 private:
  SymInt integer_ = 0;
  bool boolean_ = false;
  Slice slice_;
  Tensor tensor_;
  TensorIndexType type_;
};
|
| 193 |
+
|
| 194 |
+
TORCH_API std::ostream& operator<<(
|
| 195 |
+
std::ostream& stream,
|
| 196 |
+
const TensorIndex& tensor_index);
|
| 197 |
+
TORCH_API std::ostream& operator<<(
|
| 198 |
+
std::ostream& stream,
|
| 199 |
+
const std::vector<TensorIndex>& tensor_indices);
|
| 200 |
+
|
| 201 |
+
namespace impl {
|
| 202 |
+
// Applies `start:stop:step` along dimension `dim` of `self`.
// When the slice provably covers the whole dimension (start == 0,
// stop == length, step == 1) and slice optimization is not disabled,
// returns `self` unchanged instead of creating a sliced view.
inline Tensor applySlice(
    const Tensor& self,
    int64_t dim,
    c10::SymInt start,
    c10::SymInt stop,
    c10::SymInt step,
    bool disable_slice_optimization,
    const at::Device& self_device,
    const std::optional<SymIntArrayRef>& self_sizes) {
  // TODO: implement negative step
  TORCH_CHECK_VALUE(
      step.sym_gt(0).expect_true(__FILE__, __LINE__),
      "step must be greater than zero");

  // See NOTE [nested tensor size for indexing]
  if (self_sizes.has_value()) {
    // Skip this optimization if we are tracing, as the trace may be polymorphic
    // over the shape of the `self` tensor, and we still want to record
    // the slice.
    SymInt length = (self_device == at::kCPU || self_device == at::kCUDA)
        ? (*self_sizes)[dim]
        : self.sym_size(dim);
    if (!disable_slice_optimization &&
        TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) &&
        TORCH_GUARD_SIZE_OBLIVIOUS(length.sym_eq(stop)) && step == 1) {
      return self;
    }
  }
  return self.slice_symint(
      dim, std::move(start), std::move(stop), std::move(step));
}
|
| 233 |
+
|
| 234 |
+
// Selects position `index` along dimension `dim` of `self` (the equivalent
// of Python's `t[..., i, ...]` for an integer i), bounds-checking against
// `self_sizes` when available. `real_dim` is only used in the error message.
inline Tensor applySelect(
    const Tensor& self,
    int64_t dim,
    SymInt index,
    int64_t real_dim,
    const at::Device& /*self_device*/,
    const std::optional<SymIntArrayRef>& self_sizes) {
  // See NOTE [nested tensor size for indexing]
  if (self_sizes.has_value()) {
    auto maybe_index = index.maybe_as_int();
    if (maybe_index.has_value()) {
      // Integer-indexing a 0-dim tensor is a user error; point them at item().
      TORCH_CHECK_INDEX(
          !(maybe_index.value() == 0 && dim == 0 && self_sizes->empty()),
          "invalid index of a 0-dim tensor. ",
          "Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number");
    }

    auto size = (*self_sizes)[dim];
    // Note: `size >= -index` is not equivalent to `size > -1 - index` if index
    // is INT64_MIN For std::numeric_limits<int64_t>::min() result of unary
    // minus is undefined by the standard but in practice is equal to self. On
    // the other hand, indexing wrapping is valid for all negative int64_t
    // values, as x[INT64_MIN] is the same as x[INT64_MAX]
    TORCH_CHECK_INDEX(
        size > -1 - index && size > index,
        "index ",
        index,
        " is out of bounds for dimension ",
        real_dim,
        " with size ",
        size);
  }

  // if the index is negative, do not normalize it because that would fix the
  // index on the current tensor size in the tracer. aten::select also works on
  // negative indices
  return self.select_symint(dim, std::move(index));
}
|
| 272 |
+
|
| 273 |
+
// booleans add a dimension of size 1. true indexes this dimension as if 0:,
// false as empty.
inline Tensor boolToIndexingTensorCPUOrCUDA(const Tensor& self, bool value) {
  const auto long_opts = self.options().dtype(kLong);
  if (!value) {
    return at::empty({0}, long_opts);
  }
  return at::empty({1}, long_opts).fill_(0.);
}
|
| 282 |
+
|
| 283 |
+
// booleans add a dimension of size 1. true indexes this dimension as if 0:,
// false as empty. (Non-CPU/CUDA variant: uses at::zeros instead of
// empty+fill_.)
inline Tensor boolToIndexingTensorNonNativeDeviceType(
    const Tensor& self,
    bool value) {
  const auto long_opts = self.options().dtype(kLong);
  return value ? at::zeros({1}, long_opts) : at::empty({0}, long_opts);
}
|
| 294 |
+
|
| 295 |
+
// Dispatches boolean-index materialization to the CPU/CUDA path or the
// generic device-agnostic path.
inline Tensor boolToIndexingTensor(
    const Tensor& self,
    bool value,
    const at::Device& self_device) {
  const bool native = self_device == at::kCPU || self_device == at::kCUDA;
  return native ? boolToIndexingTensorCPUOrCUDA(self, value)
                : boolToIndexingTensorNonNativeDeviceType(self, value);
}
|
| 305 |
+
|
| 306 |
+
// Generic scalar -> 0-dim tensor conversion for devices without the static
// CPU fast path; simply forwards to the scalar_tensor factory.
inline Tensor scalarToTensorNonNativeDeviceType(
    const Scalar& v,
    const TensorOptions& options) {
  return at::scalar_tensor(v, options);
}
|
| 311 |
+
|
| 312 |
+
inline void recordTensorIndex(
|
| 313 |
+
const Tensor& tensor,
|
| 314 |
+
std::vector<Tensor>& outIndices,
|
| 315 |
+
int64_t* dim_ptr) {
|
| 316 |
+
// TODO: check scalarType
|
| 317 |
+
outIndices.resize(*dim_ptr + 1);
|
| 318 |
+
outIndices[*dim_ptr] = tensor;
|
| 319 |
+
(*dim_ptr)++;
|
| 320 |
+
};
|
| 321 |
+
|
| 322 |
+
// Moves a vector of index tensors into the c10::List<optional<Tensor>>
// representation consumed by the index / index_put_ operators.
inline c10::List<::std::optional<Tensor>> typeConvertIndices(
    const Tensor& /*self*/,
    std::vector<Tensor>&& indices) {
  c10::List<::std::optional<Tensor>> converted_inds;
  converted_inds.reserve(indices.size());
  for (auto& index : indices) {
    converted_inds.push_back(std::move(index));
  }
  return converted_inds;
}
|
| 332 |
+
|
| 333 |
+
// NOTE: Why do we mirror instead of replace the `count_specified_dimensions`
|
| 334 |
+
// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because
|
| 335 |
+
// `count_specified_dimensions` is on the hot path of Python tensor multi-dim
|
| 336 |
+
// indexing (i.e. it's called by `applySlicing` which is called by
|
| 337 |
+
// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more
|
| 338 |
+
// than one dimension). If we were to merge the Python/C++
|
| 339 |
+
// `count_specified_dimensions` function, on the Python side we would have to
|
| 340 |
+
// construct a `std::vector` container to be consumed by the C++
|
| 341 |
+
// `count_specified_dimensions` function, which adds 100s of nanoseconds
|
| 342 |
+
// overhead and is undesirable.
|
| 343 |
+
// Count the number of indexed dimensions (everything but ellipsis and None).
// A byte/bool mask tensor consumes one dimension per mask dimension; any
// other tensor, integer, or slice consumes exactly one.
inline int64_t count_specified_dimensions(
    const ArrayRef<TensorIndex>& indices) {
  int64_t count = 0;
  for (const auto& obj : indices) {
    if (obj.is_tensor()) {
      const auto& tensor = obj.tensor();
      const auto dtype = tensor.scalar_type();
      count += (dtype == kByte || dtype == kBool) ? tensor.dim() : 1;
    } else if (!obj.is_none() && !obj.is_ellipsis() && !obj.is_boolean()) {
      count++;
    }
  }
  return count;
}
|
| 361 |
+
} // namespace impl
|
| 362 |
+
|
| 363 |
+
// NOTE: Many functions below are only for consumption from Python indexing
|
| 364 |
+
// implementation, they include:
|
| 365 |
+
//
|
| 366 |
+
// - `Tensor scalarToTensor(...)`
|
| 367 |
+
// - `IntArrayRef slicePrefix1sSize(...)`
|
| 368 |
+
// - `void copy_to(...)`
|
| 369 |
+
// - `Tensor handleDimInMultiDimIndexing(...)`
|
| 370 |
+
// - `Tensor dispatch_index(...)`
|
| 371 |
+
// - `Tensor dispatch_index_put_(...)`
|
| 372 |
+
// - `Tensor get_item(...)`
|
| 373 |
+
// - `void set_item(...)`
|
| 374 |
+
//
|
| 375 |
+
// The rest of the functions are in `at::indexing::impl` namespace, signifying
|
| 376 |
+
// that they shouldn't be used from Python indexing implementation.
|
| 377 |
+
// Converts a scalar to a 0-dim tensor, taking the static CPU fast path when
// the target device is CPU and the scalar is not symbolic.
inline Tensor scalarToTensor(
    const Scalar& v,
    const TensorOptions& options,
    const at::Device& self_device) {
  const bool cpu_fast_path = self_device == at::kCPU && !v.isSymbolic();
  if (!cpu_fast_path) {
    return impl::scalarToTensorNonNativeDeviceType(v, options);
  }
  return at::detail::scalar_tensor_static(
      v, options.dtype_opt()->toScalarType(), self_device);
}
|
| 388 |
+
|
| 389 |
+
// To match numpy semantics:
|
| 390 |
+
// As a special case for backwards compatibility,
|
| 391 |
+
// strip away unit dimensions from the left of 'src'
|
| 392 |
+
// To match numpy semantics:
// As a special case for backwards compatibility,
// strip away unit dimensions from the left of 'src'
inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) {
  // Advance past the leading run of hinted size-1 dims. Unbacked SymInt has
  // different behavior, but stopping at it is sound because failing to slice
  // will only ever cause an error, not divergent behavior.
  size_t first_non1 = 0;
  while (first_non1 < sizes.size() && sizes[first_non1].has_hint() &&
         sizes[first_non1] == 1) {
    ++first_non1;
  }
  return sizes.slice(first_non1);
}
|
| 406 |
+
|
| 407 |
+
inline void copy_to(const Tensor& dst, const Tensor& src) {
|
| 408 |
+
if (dst.sym_sizes().equals(src.sym_sizes())) {
|
| 409 |
+
// A shortcut to avoid generating hard-coded constant sizes during tracing.
|
| 410 |
+
// This is not a perfect solution: when src & dst have different shapes,
|
| 411 |
+
// constants will still appear. Users can workaround that case by
|
| 412 |
+
// dst[index..] = src.reshape(..)
|
| 413 |
+
dst.copy_(src);
|
| 414 |
+
return;
|
| 415 |
+
} else if (src.dim() == 0 && src.device().type() == at::kCPU) {
|
| 416 |
+
dst.fill_(src);
|
| 417 |
+
return;
|
| 418 |
+
}
|
| 419 |
+
auto src_view = src.view_symint(slicePrefix1sSize(src.sym_sizes()));
|
| 420 |
+
c10::MaybeOwned<Tensor> b_src = expand_inplace(dst, src_view, "setitem");
|
| 421 |
+
dst.copy_(*b_src);
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
|
| 425 |
+
// indexing functions from Python ]
|
| 426 |
+
// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
// indexing functions from Python ]
//
// Applies one TensorIndex of a multi-dimensional indexing expression to
// `prev_dim_result`. `*dim_ptr` tracks the current dimension in the partially
// indexed result and is advanced as dimensions are consumed; tensor
// ("advanced") indices are collected into `outIndices` for a later
// `index` / `index_put_` dispatch rather than applied immediately.
inline Tensor handleDimInMultiDimIndexing(
    const Tensor& prev_dim_result,
    const Tensor& original_tensor,
    const TensorIndex& index,
    int64_t* dim_ptr,
    int64_t* specified_dims_ptr,
    int64_t real_dim,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& original_tensor_device,
    const std::optional<SymIntArrayRef>& prev_dim_result_sizes) {
  if (index.is_integer()) {
    // Integer index selects along the current dim (rank drops by one, so
    // dim_ptr is NOT advanced).
    return impl::applySelect(
        prev_dim_result,
        *dim_ptr,
        index.integer(),
        real_dim,
        original_tensor_device,
        prev_dim_result_sizes);
  } else if (index.is_slice()) {
    // Slice keeps the dim, so advance dim_ptr afterwards.
    Tensor result = impl::applySlice(
        prev_dim_result,
        *dim_ptr,
        index.slice().start(),
        index.slice().stop(),
        index.slice().step(),
        /*disable_slice_optimization=*/disable_slice_optimization,
        original_tensor_device,
        prev_dim_result_sizes);
    (*dim_ptr)++;
    return result;
  } else if (index.is_ellipsis()) {
    // Ellipsis skips over every dim not consumed by the other indices.
    (*dim_ptr) += original_tensor.dim() - (*specified_dims_ptr);
    return prev_dim_result;
  } else if (index.is_none()) {
    // None inserts a new unit dim at the current position.
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    (*dim_ptr)++;
    return result;
  } else if (index.is_boolean()) {
    // A plain bool behaves like unsqueeze + a length-0/1 advanced index.
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    impl::recordTensorIndex(
        impl::boolToIndexingTensor(
            result, index.boolean(), original_tensor_device),
        outIndices,
        dim_ptr);
    return result;
  } else if (index.is_tensor()) {
    Tensor result = prev_dim_result;
    const Tensor& tensor = index.tensor();
    auto scalar_type = tensor.scalar_type();
    if (tensor.dim() == 0 &&
        at::isIntegralType(scalar_type, /*includeBool=*/true)) {
      if (scalar_type != at::kByte && scalar_type != at::kBool) {
        // A 0-dim integer tensor acts like a plain integer index.
        result = impl::applySelect(
            result,
            *dim_ptr,
            tensor.item<int64_t>(),
            real_dim,
            original_tensor_device,
            prev_dim_result_sizes);
      } else {
        // A 0-dim byte/bool tensor acts like a boolean index (see above).
        result = result.unsqueeze(*dim_ptr);
        if (scalar_type == at::kBool) {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<bool>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        } else {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<uint8_t>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        }
      }
    } else {
      // General tensor index: defer to the advanced-indexing dispatch.
      impl::recordTensorIndex(tensor, outIndices, dim_ptr);
    }
    return result;
  } else {
    TORCH_INTERNAL_ASSERT(false, "Invalid TensorIndex type");
  }
}
|
| 510 |
+
|
| 511 |
+
namespace impl {
|
| 512 |
+
// This mirrors `applySlicing` in
|
| 513 |
+
// torch/csrc/autograd/python_variable_indexing.cpp
|
| 514 |
+
inline Tensor applySlicing(
|
| 515 |
+
const Tensor& self,
|
| 516 |
+
const ArrayRef<TensorIndex>& indices,
|
| 517 |
+
std::vector<Tensor>& outIndices,
|
| 518 |
+
bool disable_slice_optimization,
|
| 519 |
+
const at::Device& self_device,
|
| 520 |
+
const std::optional<SymIntArrayRef>& self_sizes) {
|
| 521 |
+
int64_t dim = 0;
|
| 522 |
+
int64_t specified_dims = impl::count_specified_dimensions(indices);
|
| 523 |
+
|
| 524 |
+
// See NOTE [nested tensor size for indexing]
|
| 525 |
+
if (self_sizes.has_value()) {
|
| 526 |
+
TORCH_CHECK_INDEX(
|
| 527 |
+
specified_dims <= (int64_t)self_sizes->size(),
|
| 528 |
+
"too many indices for tensor of dimension ",
|
| 529 |
+
(int)self_sizes->size());
|
| 530 |
+
}
|
| 531 |
+
|
| 532 |
+
Tensor result = self;
|
| 533 |
+
for (const auto i : c10::irange(indices.size())) {
|
| 534 |
+
auto& obj = indices[i];
|
| 535 |
+
// See NOTE [nested tensor size for indexing]
|
| 536 |
+
std::optional<SymIntArrayRef> result_sizes = result.is_nested()
|
| 537 |
+
? std::optional<SymIntArrayRef>(std::nullopt)
|
| 538 |
+
: std::optional<SymIntArrayRef>(result.sym_sizes());
|
| 539 |
+
result = handleDimInMultiDimIndexing(
|
| 540 |
+
/*prev_dim_result=*/result,
|
| 541 |
+
/*original_tensor=*/self,
|
| 542 |
+
/*index=*/obj,
|
| 543 |
+
/*dim_ptr=*/&dim,
|
| 544 |
+
/*specified_dims_ptr=*/&specified_dims,
|
| 545 |
+
/*real_dim=*/static_cast<int64_t>(i),
|
| 546 |
+
/*outIndices=*/outIndices,
|
| 547 |
+
/*disable_slice_optimization=*/disable_slice_optimization,
|
| 548 |
+
/*original_tensor_device=*/self_device,
|
| 549 |
+
/*prev_dim_result_sizes=*/result_sizes);
|
| 550 |
+
}
|
| 551 |
+
return result;
|
| 552 |
+
}
|
| 553 |
+
} // namespace impl
|
| 554 |
+
|
| 555 |
+
// Dispatches advanced ("tensor") indexing: `self.index(indices)` after
// converting the collected indices to the dtype/device `index` expects.
inline Tensor dispatch_index(
    const Tensor& self,
    std::vector<Tensor>&& indices) {
  auto converted = impl::typeConvertIndices(self, std::move(indices));
  return self.index(converted);
}
|
| 560 |
+
|
| 561 |
+
// Dispatches advanced ("tensor") index assignment:
// `self.index_put_(indices, value)` after index type conversion.
inline Tensor dispatch_index_put_(
    Tensor& self,
    std::vector<Tensor>&& indices,
    const Tensor& value) {
  auto converted = impl::typeConvertIndices(self, std::move(indices));
  return self.index_put_(converted, value);
}
|
| 568 |
+
|
| 569 |
+
// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing
|
| 570 |
+
// functions from Python ]
|
| 571 |
+
//
|
| 572 |
+
// Question: When should we set `disable_slice_optimization` to `true` when
|
| 573 |
+
// calling C++ tensor indexing functions from Python indexing code?
|
| 574 |
+
//
|
| 575 |
+
// Answer: What "slice optimization" means: when we have a slicing expression
|
| 576 |
+
// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we
|
| 577 |
+
// would skip dispatching the actual slice call as an optimization. However,
|
| 578 |
+
// here are the cases where we DON'T want this optimization:
|
| 579 |
+
//
|
| 580 |
+
// 1. When we are doing 1-D slicing (e.g. `tensor[:]`).
|
| 581 |
+
// Reason: we always return a shallow copy for expressions such as
|
| 582 |
+
// `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:,
|
| 583 |
+
// :]`, we return an alias of `tensor` by doing the following:
|
| 584 |
+
// ```
|
| 585 |
+
// Tensor sliced = impl::applySlicing(self, indices, tensorIndices,
|
| 586 |
+
// disable_slice_optimization, self_device, self_sizes); if
|
| 587 |
+
// (tensorIndices.empty()) {
|
| 588 |
+
// if (sliced.is_same(self)) {
|
| 589 |
+
// // ensure we return a shallow copy for things like x[...]
|
| 590 |
+
// sliced = at::alias(sliced);
|
| 591 |
+
// }
|
| 592 |
+
// return sliced;
|
| 593 |
+
// }
|
| 594 |
+
// ```)
|
| 595 |
+
// 2. When we are doing JIT tracing.
|
| 596 |
+
// Reason: JIT tracing needs the `self.slice(...)` call to properly trace the
|
| 597 |
+
// slice operation.
|
| 598 |
+
|
| 599 |
+
// This mirrors `THPVariable_getitem` in
|
| 600 |
+
// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting
|
| 601 |
+
// `disable_slice_optimization` when calling C++ tensor indexing functions from
|
| 602 |
+
// Python ]
|
| 603 |
+
// This mirrors `THPVariable_getitem` in
// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting
// `disable_slice_optimization` when calling C++ tensor indexing functions from
// Python ]
//
// Implements `tensor[indices]` for the C++ API. Single simple indices take
// fast paths; everything else goes through impl::applySlicing, followed by an
// advanced-indexing dispatch when tensor indices were collected.
inline Tensor get_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  // NOTE [nested tensor size for indexing]
  // nested tensor does not have a size (yet) so for now we represent its size
  // as null may need to be changed after we reach a better solution for nested
  // tensor size
  std::optional<SymIntArrayRef> self_sizes = self.is_nested()
      ? std::optional<SymIntArrayRef>(std::nullopt)
      : std::optional<SymIntArrayRef>(self.sym_sizes());

  // handle simple types: integers, slices, none, ellipsis, bool
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_integer()) {
      return impl::applySelect(
          self, 0, index.integer(), 0, self_device, self_sizes);
    } else if (index.is_slice()) {
      // disable_slice_optimization is forced true here so `tensor[:]`
      // returns a fresh view rather than `self` itself (see NOTE above).
      return impl::applySlice(
          self,
          0,
          index.slice().start(),
          index.slice().stop(),
          index.slice().step(),
          /*disable_slice_optimization=*/true,
          self_device,
          self_sizes);
    } else if (index.is_none()) {
      return self.unsqueeze(0);
    } else if (index.is_ellipsis()) {
      // `tensor[...]` returns a shallow copy, never `self` itself.
      return at::alias(self);
    } else if (index.is_boolean()) {
      Tensor result = self.unsqueeze(0);
      return dispatch_index(
          result,
          std::vector<Tensor>{impl::boolToIndexingTensor(
              result, index.boolean(), self_device)});
    }
  }

  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    if (sliced.is_same(self)) {
      // ensure we return a shallow copy for things like x[...]
      sliced = at::alias(sliced);
    }
    return sliced;
  }

  // indexing by tensors ("advanced" indexing)
  return dispatch_index(sliced, std::move(tensorIndices));
}
|
| 664 |
+
|
| 665 |
+
// This mirrors `THPVariable_setitem` in
|
| 666 |
+
// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a
|
| 667 |
+
// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++
|
| 668 |
+
// tensor indexing functions from Python ]
|
| 669 |
+
// This mirrors `THPVariable_setitem` in
// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a
// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++
// tensor indexing functions from Python ]
//
// Implements `tensor[indices] = value` for the C++ API. Single simple
// indices take in-place copy fast paths; otherwise the target region is
// produced by impl::applySlicing and written via copy_ or index_put_.
inline void set_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    const Tensor& value,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  SymIntArrayRef self_sizes = self.sym_sizes();

  // handle simple types: integers, slices, ellipsis, bool
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_boolean() && !index.boolean()) {
      // do nothing for false (technically we should check the size, but we
      // don't have real 0-sized shapes.
      return;
    } else if (index.is_ellipsis()) {
      copy_to(self, value);
      return;
    } else if (index.is_none() || (index.is_boolean() && index.boolean())) {
      copy_to(self.unsqueeze(0), value);
      return;
    } else if (index.is_integer()) {
      copy_to(
          impl::applySelect(
              self, 0, index.integer(), 0, self_device, self_sizes),
          value);
      return;
    } else if (index.is_slice()) {
      copy_to(
          impl::applySlice(
              self,
              0,
              index.slice().start(),
              index.slice().stop(),
              index.slice().step(),
              /*disable_slice_optimization=*/disable_slice_optimization,
              self_device,
              self_sizes),
          value);
      return;
    }
  }

  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    // No advanced indices: a plain broadcasting copy into the sliced view.
    copy_to(sliced, value);
    return;
  }

  // Strip leading unit dims from the value (numpy semantics) before the
  // advanced index_put_ dispatch.
  SymIntArrayRef valueSizes = value.sym_sizes();
  SymIntArrayRef slicedValueSizes = slicePrefix1sSize(valueSizes);
  Tensor valuesSliced;
  if (!valueSizes.equals(slicedValueSizes)) {
    valuesSliced = value.view_symint(slicedValueSizes);
  } else {
    valuesSliced = value;
  }
  dispatch_index_put_(sliced, std::move(tensorIndices), valuesSliced);
  return;
}
|
| 736 |
+
|
| 737 |
+
} // namespace at::indexing
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h
ADDED
|
@@ -0,0 +1,1028 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/TensorMeta.h>
|
| 4 |
+
#include <ATen/core/Dimname.h>
|
| 5 |
+
#include <ATen/core/Range.h>
|
| 6 |
+
#include <ATen/core/TensorBase.h>
|
| 7 |
+
#include <c10/core/DynamicCast.h>
|
| 8 |
+
#include <c10/util/FunctionRef.h>
|
| 9 |
+
#include <c10/util/MaybeOwned.h>
|
| 10 |
+
#include <c10/util/SmallVector.h>
|
| 11 |
+
#include <c10/util/TypeCast.h>
|
| 12 |
+
#include <c10/util/irange.h>
|
| 13 |
+
|
| 14 |
+
#include <array>
|
| 15 |
+
#include <bitset>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
class Tensor;
|
| 19 |
+
class OptionalTensorRef;
|
| 20 |
+
using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
|
| 21 |
+
} // namespace at
|
| 22 |
+
|
| 23 |
+
// TensorIterator is a helper class for element-wise operations, such as
|
| 24 |
+
// arithmetic, comparisons, and trigonometric functions. It handles
|
| 25 |
+
// broadcasting and type conversions of operands.
|
| 26 |
+
//
|
| 27 |
+
// This is inspired by NumPy's Array Iterator API (NpyIter).
|
| 28 |
+
//
|
| 29 |
+
// The files Loops.h and Loops.cuh provide functions to build kernels that
|
| 30 |
+
// use TensorIterator.
|
| 31 |
+
//
|
| 32 |
+
// Example:
|
| 33 |
+
//
|
| 34 |
+
// auto iter = TensorIteratorConfig()
|
| 35 |
+
// .add_output(output)
|
| 36 |
+
// .add_input(input)
|
| 37 |
+
// .build()
|
| 38 |
+
//
|
| 39 |
+
// [MyKernel.cpp / MyKernel.cu]
|
| 40 |
+
// cpu_kernel(iter, [](float a, float b) {
|
| 41 |
+
// return a + b;
|
| 42 |
+
// });
|
| 43 |
+
//
|
| 44 |
+
// gpu_kernel(iter, []GPU_LAMBDA(float a, float b) -> float {
|
| 45 |
+
// return a + b;
|
| 46 |
+
// });
|
| 47 |
+
//
|
| 48 |
+
// Note [Order of Construction]
|
| 49 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 50 |
+
// When setting up the tensor iterator configuration, the output Tensors
|
| 51 |
+
// have to be added first via
|
| 52 |
+
// TensorIteratorConfig::add_owned_output(at::Tensor). After adding all outputs,
|
| 53 |
+
// the inputs can be added via
|
| 54 |
+
// TensorIteratorConfig::add_owned_input(at::Tensor).
|
| 55 |
+
// Adding another output after inputs have been added will rise an exception.
|
| 56 |
+
//
|
| 57 |
+
// Note [Common Dtype Computation]
|
| 58 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 59 |
+
// Some operations have a natural notion of a "common dtype" or
|
| 60 |
+
// "computation dtype" where all inputs are cast to one dtype, the
|
| 61 |
+
// operation is performed, and then the results are cast to all outputs.
|
| 62 |
+
//
|
| 63 |
+
// TensorIterator infers a common dtype if all inputs have the same dtype,
|
| 64 |
+
// and it computes one using type promotion rules on its inputs if
|
| 65 |
+
// promote_inputs_to_common_dtype_ is true. Attempting to query
|
| 66 |
+
// a common dtype otherwise will throw an exception.
|
| 67 |
+
//
|
| 68 |
+
// Note that the outputs are not considered when computing a common dtype.
|
| 69 |
+
|
| 70 |
+
namespace at {
|
| 71 |
+
|
| 72 |
+
namespace internal {
|
| 73 |
+
// This parameter is heuristically chosen to determine the minimum number of
|
| 74 |
+
// work that warrants parallelism. For example, when summing an array, it is
|
| 75 |
+
// deemed inefficient to parallelise over arrays shorter than 32768. Further,
|
| 76 |
+
// no parallel algorithm (such as parallel_reduce) should split work into
|
| 77 |
+
// smaller than GRAIN_SIZE chunks.
|
| 78 |
+
constexpr int64_t GRAIN_SIZE = 32768;
|
| 79 |
+
|
| 80 |
+
// Storage for a non-owning Tensor, without needing to include Tensor.h
class TORCH_API OpaqueOptionalTensorRef {
  // Raw byte storage sized and aligned like a TensorBase; the out-of-line
  // constructor/destructor (defined where Tensor.h is available) manage an
  // OptionalTensorRef placed inside this buffer. This keeps the full Tensor
  // definition out of this header.
  alignas(alignof(TensorBase)) std::array<char, sizeof(TensorBase)> data_{};

 public:
  OpaqueOptionalTensorRef();
  OpaqueOptionalTensorRef(const OpaqueOptionalTensorRef&) = default;
  OpaqueOptionalTensorRef& operator=(const OpaqueOptionalTensorRef&) = default;
  OpaqueOptionalTensorRef(OpaqueOptionalTensorRef&&) noexcept = default;
  OpaqueOptionalTensorRef& operator=(OpaqueOptionalTensorRef&&) noexcept =
      default;
  ~OpaqueOptionalTensorRef();

  // Reinterpret the opaque storage as the OptionalTensorRef living in it.
  OptionalTensorRef* get() {
    return reinterpret_cast<OptionalTensorRef*>(data_.data());
  }
  const OptionalTensorRef* get() const {
    return reinterpret_cast<const OptionalTensorRef*>(data_.data());
  }

  // Smart-pointer-style access to the stored OptionalTensorRef.
  OptionalTensorRef& operator*() {
    return *get();
  }
  const OptionalTensorRef& operator*() const {
    return *get();
  }
  OptionalTensorRef* operator->() {
    return get();
  }
  const OptionalTensorRef* operator->() const {
    return get();
  }

  const Tensor& getTensor() const;
};
|
| 115 |
+
} // namespace internal
|
| 116 |
+
|
| 117 |
+
// Describes one operand (input or output) of a TensorIterator: the tensor
// itself, its (possibly broadcast) strides, data pointer, and the dtype /
// device bookkeeping used during type promotion and output allocation.
struct TORCH_API OperandInfo {
  using StrideVector = SmallVector<int64_t, 6>;
  OperandInfo() = default;
  C10_ALWAYS_INLINE explicit OperandInfo(c10::MaybeOwned<TensorBase>&& t) {
    if (t->defined()) {
      device = t->device();
      target_dtype = t->scalar_type();
      current_dtype = target_dtype;
    }
    tensor(std::move(t));
    validate();
  }

  C10_ALWAYS_INLINE OperandInfo(const OperandInfo&) = default;
  C10_ALWAYS_INLINE OperandInfo& operator=(const OperandInfo&) = default;
  C10_ALWAYS_INLINE OperandInfo(OperandInfo&&) noexcept = default;
  C10_ALWAYS_INLINE OperandInfo& operator=(OperandInfo&&) noexcept = default;
  C10_ALWAYS_INLINE ~OperandInfo() = default;

  /// The data pointer. This may be different from tensor->data_ptr() if the
  /// iterator is split.
  void* data = nullptr;

  /// Stride after broadcasting. The stride is in bytes, not number of elements.
  StrideVector stride_bytes;

  /// The desired device and type for the operand. For inputs, this specifies
  /// that the input should be converted to this type if necessary. For outputs,
  /// this specifies which type to allocate. target_dtype and device are
  /// initialized with the dtype and device of the tensor but during type
  /// promotion target_dtype value can become different from tensor's dtype
  /// also, during type promotion target_dtype and device can be set for an
  /// undefined tensor so that tensor can be properly constructed later.
  std::optional<Device> device = std::nullopt;
  ScalarType target_dtype = ScalarType::Undefined;
  // Caches dtype of the tensor, because scalar_type is an expensive operation
  // If dtype of the tensor is changed (e.g. as a result of type promotion or in
  // allocate_outputs), this
  // value should be changed too.
  ScalarType current_dtype = ScalarType::Undefined;

  bool is_device_defined() const {
    return device.has_value();
  }
  bool is_type_defined() const {
    return target_dtype != ScalarType::Undefined;
  }
  TensorOptions options() const {
    return TensorOptions(target_dtype).device(device);
  }

  bool is_output = false;

  // will_resize is only for output tensor.
  // 1) Functional call(like torch.add(self, other)): output tensor is
  //    undefined, and pytorch creates a new tensor by using common shape
  //    and computed stride in TensorIterator;
  // 2) Inplace call(like torch.add_(self, other)): output tensor is same
  //    with input tensor, and can't to modify tensor's size and stride;
  // 3) Op call with output(like torch.add(self, other, out = output)):
  //    output tensor is defined, but tensor shape maybe different with common
  //    shape. If tensor shape is not same with common shape, this output
  //    tensor will be resized by using common shape and computed stride in
  //    TensorIterator. Otherwise can't modify tensor's size and stride.
  bool will_resize = false;

  bool is_read_write = false;

  bool is_const = false;

  // Only strided (dense) tensors are supported by TensorIterator.
  void validate() {
    TORCH_CHECK(
        !tensor_base_->defined() || tensor_base_->layout() == kStrided,
        "unsupported tensor layout: ",
        tensor_base_->layout());
  }

  /// The tensor operand. Note that the strides, data pointer, and
  /// other attributes may differ due to dimension reordering and
  /// coalescing.
  const Tensor& tensor() const {
    return tensor_storage_.getTensor();
  }
  const TensorBase& tensor_base() const {
    return *tensor_base_;
  }
  void tensor(c10::MaybeOwned<TensorBase>&& tensor);

  // Save the original tensor operand in cases when an output is modified
  // (e.g. if dtype is changed)
  const Tensor& original_tensor() const {
    return original_tensor_storage_.getTensor();
  }
  const TensorBase& original_tensor_base() const {
    return *original_tensor_base_;
  }

  // Set tensor to a new value, and store the old tensor value in
  // original_tensor Should only ever be called once for the lifetime of an
  // operand
  void exchange_tensor(c10::MaybeOwned<TensorBase>&& new_tensor);

  // Move original_tensor back into tensor, exchange_tensor must have been
  // called before
  void restore_original_tensor();

 private:
  c10::MaybeOwned<TensorBase> tensor_base_;
  c10::MaybeOwned<TensorBase> original_tensor_base_ =
      c10::MaybeOwned<TensorBase>::owned(std::in_place);

  // We store TensorBase visibly in the header to allow inline access.
  // However, we sometimes need a genuine `const Tensor &` for the
  // TensorIterator API. So, we also store a non-owning `Tensor`
  // object in these `_storage_` variables.
  internal::OpaqueOptionalTensorRef tensor_storage_;
  internal::OpaqueOptionalTensorRef original_tensor_storage_;
};
|
| 235 |
+
|
| 236 |
+
struct SplitUntil32Bit;
|
| 237 |
+
|
| 238 |
+
enum class FastSetupType : uint8_t {
|
| 239 |
+
NONE,
|
| 240 |
+
CONTIGUOUS,
|
| 241 |
+
CHANNELS_LAST,
|
| 242 |
+
NON_OVERLAPPING_DENSE
|
| 243 |
+
};
|
| 244 |
+
|
| 245 |
+
class TensorIteratorConfig;
|
| 246 |
+
struct TensorIterator;
|
| 247 |
+
|
| 248 |
+
struct TORCH_API TensorIteratorBase : public impl::MetaBase {
|
| 249 |
+
using DimMask = std::bitset<64>;
|
| 250 |
+
using PtrVector = SmallVector<char*, 4>;
|
| 251 |
+
using StrideVector = SmallVector<int64_t, 6>;
|
| 252 |
+
|
| 253 |
+
TensorIteratorBase();
|
| 254 |
+
void build(TensorIteratorConfig&);
|
| 255 |
+
|
| 256 |
+
// The inner-loop function operates on the fastest moving dimension. It
|
| 257 |
+
// implements element-wise operations in terms of 1-d strided tensors.
|
| 258 |
+
//
|
| 259 |
+
// Arguments:
|
| 260 |
+
// data: data pointers for each operand (length `ntensors`)
|
| 261 |
+
// strides: stride for each operand (length `ntensors`)
|
| 262 |
+
// size: size of inner loop
|
| 263 |
+
//
|
| 264 |
+
// The `size` often matches shape[0], but may be smaller due to
|
| 265 |
+
// parallelization of the inner loop.
|
| 266 |
+
using loop2d_t = c10::function_ref<
|
| 267 |
+
void(char** data, const int64_t* strides, int64_t size0, int64_t size1)>;
|
| 268 |
+
|
| 269 |
+
using loop_subiter_t = c10::function_ref<void(TensorIteratorBase& subiter)>;
|
| 270 |
+
|
| 271 |
+
void foreach_reduced_elt(loop_subiter_t loop, bool parallelize = true);
|
| 272 |
+
|
| 273 |
+
int ndim() const {
|
| 274 |
+
return static_cast<int>(shape_.size());
|
| 275 |
+
}
|
| 276 |
+
IntArrayRef shape() const {
|
| 277 |
+
return shape_;
|
| 278 |
+
}
|
| 279 |
+
int64_t numel() const;
|
| 280 |
+
int ntensors() const {
|
| 281 |
+
return static_cast<int>(operands_.size());
|
| 282 |
+
}
|
| 283 |
+
int noutputs() const {
|
| 284 |
+
return num_outputs_;
|
| 285 |
+
}
|
| 286 |
+
int ninputs() const {
|
| 287 |
+
return ntensors() - noutputs();
|
| 288 |
+
}
|
| 289 |
+
IntArrayRef view_offsets() const {
|
| 290 |
+
return view_offsets_;
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
/// number of elements in the output operand. this is the same as numel() for
|
| 294 |
+
/// operations that are not reductions.
|
| 295 |
+
int64_t num_output_elements() const;
|
| 296 |
+
|
| 297 |
+
/// number of reduced dimensions in a reduction operation
|
| 298 |
+
int num_reduce_dims() const;
|
| 299 |
+
|
| 300 |
+
/// 1-dimensional iteration and no buffering or type conversion
|
| 301 |
+
bool is_trivial_1d() const;
|
| 302 |
+
/// Reducible to 1-dimensional and all operands are contiguous
|
| 303 |
+
bool is_contiguous() const;
|
| 304 |
+
bool is_dim_reduced(int dim) const;
|
| 305 |
+
|
| 306 |
+
/// Accessors for each operand
|
| 307 |
+
IntArrayRef strides(int64_t arg) const {
|
| 308 |
+
return operands_[arg].stride_bytes;
|
| 309 |
+
}
|
| 310 |
+
void* data_ptr(int64_t arg) const;
|
| 311 |
+
ScalarType dtype(int64_t arg = 0) const {
|
| 312 |
+
return operands_[arg].current_dtype;
|
| 313 |
+
}
|
| 314 |
+
ScalarType common_dtype() const {
|
| 315 |
+
TORCH_INTERNAL_ASSERT(
|
| 316 |
+
common_dtype_ != ScalarType::Undefined,
|
| 317 |
+
"Queried for invalid common dtype!");
|
| 318 |
+
return common_dtype_;
|
| 319 |
+
}
|
| 320 |
+
ScalarType input_dtype(int64_t arg = 0) const {
|
| 321 |
+
return operands_[num_outputs_ + arg].current_dtype;
|
| 322 |
+
}
|
| 323 |
+
Device device(int64_t arg = 0) const {
|
| 324 |
+
return operands_[arg].device.value();
|
| 325 |
+
}
|
| 326 |
+
c10::DeviceType device_type(int64_t arg = 0) const {
|
| 327 |
+
return device(arg).type();
|
| 328 |
+
}
|
| 329 |
+
int64_t element_size(int64_t arg) const {
|
| 330 |
+
return static_cast<int64_t>(elementSize(dtype(arg)));
|
| 331 |
+
}
|
| 332 |
+
bool is_scalar(int64_t arg) const;
|
| 333 |
+
bool is_cpu_scalar(int64_t arg) const;
|
| 334 |
+
|
| 335 |
+
const TensorBase& tensor_base(int64_t arg) const {
|
| 336 |
+
return operands_[arg].tensor_base();
|
| 337 |
+
}
|
| 338 |
+
const Tensor& tensor(int64_t arg) const {
|
| 339 |
+
return operands_[arg].tensor();
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
const TensorBase& output_base(int64_t arg = 0) const {
|
| 343 |
+
AT_ASSERT(arg < num_outputs_);
|
| 344 |
+
return tensor_base(arg);
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
const Tensor& output(int64_t arg = 0) const {
|
| 348 |
+
AT_ASSERT(arg < num_outputs_);
|
| 349 |
+
return tensor(arg);
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
const TensorBase& input_base(int64_t arg = 0) const {
|
| 353 |
+
AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
|
| 354 |
+
return tensor_base(num_outputs_ + arg);
|
| 355 |
+
}
|
| 356 |
+
const Tensor& input(int64_t arg = 0) const {
|
| 357 |
+
AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
|
| 358 |
+
return tensor(num_outputs_ + arg);
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
// Copies from temporary outputs back to the original outputs
|
| 362 |
+
// NOTE: only used on CPU
|
| 363 |
+
void cast_outputs();
|
| 364 |
+
|
| 365 |
+
/// Removes an operand from this iterator
|
| 366 |
+
void remove_operand(int64_t arg);
|
| 367 |
+
/// Shrinks an iterated dimension
|
| 368 |
+
void narrow(int dim, int64_t start, int64_t size);
|
| 369 |
+
/// Narrows every dim after and including `start_dim` to size one.
|
| 370 |
+
void select_all_keeping_dim(int start_dim, IntArrayRef starts);
|
| 371 |
+
/// Replaces the data pointer for the operand at index `arg`.
|
| 372 |
+
/// The new pointer should have the same sizes, strides and dtype as the
|
| 373 |
+
/// original
|
| 374 |
+
void unsafe_replace_operand(int64_t arg, void* data);
|
| 375 |
+
|
| 376 |
+
/// Splits this TensorIterator into two iterators. Together they iterate over
|
| 377 |
+
/// the entire operation. Used by `with_32bit_indexing()`.
|
| 378 |
+
std::unique_ptr<TensorIterator> split(int dim);
|
| 379 |
+
|
| 380 |
+
/// Returns the dimension with the largest extent: (size[dim]-1) * stride[dim]
|
| 381 |
+
int get_dim_to_split() const;
|
| 382 |
+
|
| 383 |
+
template <typename T>
|
| 384 |
+
T scalar_value(int64_t arg) {
|
| 385 |
+
auto& op = operands_[arg];
|
| 386 |
+
return c10::fetch_and_cast<T>(op.tensor_base().scalar_type(), op.data);
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
/// Return scalar value from original_tensor_base if it is defined. When
|
| 390 |
+
/// common_dtype is Half, casting scalar input to common_dtype might overflow.
|
| 391 |
+
/// If the scalar is aleady given in the type of Half, then return scalar
|
| 392 |
+
/// value from tensor_base.
|
| 393 |
+
template <typename T>
|
| 394 |
+
T original_scalar_value(int64_t arg) {
|
| 395 |
+
auto& original_tensor_base = operands_[arg].original_tensor_base();
|
| 396 |
+
if (original_tensor_base.defined()) {
|
| 397 |
+
TORCH_INTERNAL_ASSERT(
|
| 398 |
+
original_tensor_base.scalar_type() != common_dtype());
|
| 399 |
+
return c10::fetch_and_cast<T>(
|
| 400 |
+
original_tensor_base.scalar_type(),
|
| 401 |
+
original_tensor_base.const_data_ptr());
|
| 402 |
+
} else {
|
| 403 |
+
return scalar_value<T>(arg);
|
| 404 |
+
}
|
| 405 |
+
}
|
| 406 |
+
|
| 407 |
+
private:
|
| 408 |
+
template <typename loop1d_t>
|
| 409 |
+
auto loop_2d_from_1d(const loop1d_t& loop) {
|
| 410 |
+
return
|
| 411 |
+
[loop, ntensor = ntensors()](
|
| 412 |
+
char** base, const int64_t* strides, int64_t size0, int64_t size1) {
|
| 413 |
+
PtrVector data(base, base + ntensor);
|
| 414 |
+
const int64_t* outer_strides = &strides[ntensor];
|
| 415 |
+
for (const auto i : c10::irange(size1)) {
|
| 416 |
+
if (i > 0) {
|
| 417 |
+
for (const auto arg : c10::irange(ntensor)) {
|
| 418 |
+
data[arg] += outer_strides[arg];
|
| 419 |
+
}
|
| 420 |
+
}
|
| 421 |
+
loop(data.data(), strides, size0);
|
| 422 |
+
}
|
| 423 |
+
};
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
public:
|
| 427 |
+
template <
|
| 428 |
+
typename loop1d_t,
|
| 429 |
+
std::enable_if_t<
|
| 430 |
+
std::is_convertible_v<
|
| 431 |
+
loop1d_t,
|
| 432 |
+
c10::function_ref<
|
| 433 |
+
void(char**, const int64_t* strides, int64_t size)>>,
|
| 434 |
+
int> = 0>
|
| 435 |
+
void for_each(loop1d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE) {
|
| 436 |
+
for_each(loop_2d_from_1d(loop), grain_size);
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
void for_each(loop2d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE);
|
| 440 |
+
|
| 441 |
+
void parallel_reduce(loop2d_t loop);
|
| 442 |
+
|
| 443 |
+
template <
|
| 444 |
+
typename loop1d_t,
|
| 445 |
+
std::enable_if_t<
|
| 446 |
+
std::is_convertible_v<
|
| 447 |
+
loop1d_t,
|
| 448 |
+
c10::function_ref<
|
| 449 |
+
void(char**, const int64_t* strides, int64_t size)>>,
|
| 450 |
+
int> = 0>
|
| 451 |
+
void serial_for_each(loop1d_t loop, Range range) {
|
| 452 |
+
serial_for_each(loop_2d_from_1d(loop), range);
|
| 453 |
+
}
|
| 454 |
+
|
| 455 |
+
void serial_for_each(loop2d_t loop, Range range) const;
|
| 456 |
+
|
| 457 |
+
/// Create a strides array for a Tensor with shape of this iterator. The
|
| 458 |
+
/// parameter `element_size` specifies the size of Tensor's data type in
|
| 459 |
+
/// bytes (e.g. `4` for `float`)
|
| 460 |
+
StrideVector compatible_stride(int64_t element_size) const;
|
| 461 |
+
|
| 462 |
+
/// Inverts the re-ordering done by reorder_dimensions. This can only be
|
| 463 |
+
/// called *before* coalesce_dimensions() is called.
|
| 464 |
+
DimVector invert_perm(IntArrayRef input) const;
|
| 465 |
+
|
| 466 |
+
/// Reapply same re-ordering as it is done by reorder_dimensions. This can
|
| 467 |
+
/// only be called *before* coalesce_dimensions() is called.
|
| 468 |
+
DimVector apply_perm_and_mul(IntArrayRef input, int mul) const;
|
| 469 |
+
|
| 470 |
+
/// Helper functions for CPU iteration
|
| 471 |
+
StrideVector get_dim_strides(int dim) const;
|
| 472 |
+
StrideVector get_strides() const;
|
| 473 |
+
StrideVector get_inner_strides() const {
|
| 474 |
+
return get_dim_strides(0);
|
| 475 |
+
}
|
| 476 |
+
PtrVector get_base_ptrs() const;
|
| 477 |
+
|
| 478 |
+
// Helper functions for advanced stride manipulations (e.g. torch.flip)
|
| 479 |
+
void _unsafe_set_arg_strides(const int64_t arg, IntArrayRef strides) {
|
| 480 |
+
operands_[arg].stride_bytes = strides;
|
| 481 |
+
}
|
| 482 |
+
void _unsafe_set_arg_data(const int64_t arg, void* data) {
|
| 483 |
+
operands_[arg].data = data;
|
| 484 |
+
}
|
| 485 |
+
|
| 486 |
+
// Helper functions for custom device, custom device can get OperandInfo and
|
| 487 |
+
// NameVector in their side.
|
| 488 |
+
const OperandInfo& operand(int arg = 0) const {
|
| 489 |
+
return operands_[arg];
|
| 490 |
+
}
|
| 491 |
+
OperandInfo& operand(int arg = 0) {
|
| 492 |
+
return operands_[arg];
|
| 493 |
+
}
|
| 494 |
+
NameVector& get_dim_names() {
|
| 495 |
+
return names_;
|
| 496 |
+
}
|
| 497 |
+
const NameVector& get_dim_names() const {
|
| 498 |
+
return names_;
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
/// true if the stride computation can use 32-bit arithmetic. Used by GPU
|
| 502 |
+
/// kernels
|
| 503 |
+
bool can_use_32bit_indexing() const;
|
| 504 |
+
|
| 505 |
+
/// An "iteratable" object that recursively splits this iterator into
|
| 506 |
+
/// sub-iterators that can use 32-bit indexing.
|
| 507 |
+
SplitUntil32Bit with_32bit_indexing() const;
|
| 508 |
+
|
| 509 |
+
/// If the kernel should accumulate into the output. Only relevant for CUDA
|
| 510 |
+
/// reductions.
|
| 511 |
+
bool should_accumulate() const {
|
| 512 |
+
return accumulate_;
|
| 513 |
+
}
|
| 514 |
+
|
| 515 |
+
/// Whether this iterator produces the actual output,
|
| 516 |
+
/// as opposed to something that will be accumulated further. Only relevant
|
| 517 |
+
/// for CUDA reductions.
|
| 518 |
+
bool is_final_output() const {
|
| 519 |
+
return final_output_;
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
bool has_contiguous_first_dim() const {
|
| 523 |
+
if (ndim() == 0) {
|
| 524 |
+
return true;
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
int num_tensors = ntensors();
|
| 528 |
+
for (const auto i : c10::irange(num_tensors)) {
|
| 529 |
+
if (strides(i)[0] != element_size(i)) {
|
| 530 |
+
return false;
|
| 531 |
+
}
|
| 532 |
+
}
|
| 533 |
+
return true;
|
| 534 |
+
}
|
| 535 |
+
|
| 536 |
+
void set_output_raw_strided(
|
| 537 |
+
int64_t output_idx,
|
| 538 |
+
IntArrayRef sizes,
|
| 539 |
+
IntArrayRef strides,
|
| 540 |
+
TensorOptions options,
|
| 541 |
+
DimnameList names) override;
|
| 542 |
+
|
| 543 |
+
#define TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, maybestatic) \
|
| 544 |
+
maybestatic void methodname( \
|
| 545 |
+
TensorBase&& out, const TensorBase& a, const TensorBase& b) = delete; \
|
| 546 |
+
maybestatic void methodname( \
|
| 547 |
+
const TensorBase& out, TensorBase&& a, const TensorBase& b) = delete; \
|
| 548 |
+
maybestatic void methodname( \
|
| 549 |
+
const TensorBase& out, const TensorBase& a, TensorBase&& b) = delete; \
|
| 550 |
+
maybestatic void methodname( \
|
| 551 |
+
TensorBase&& out, TensorBase&& a, const TensorBase& b) = delete; \
|
| 552 |
+
maybestatic void methodname( \
|
| 553 |
+
TensorBase&& out, const TensorBase& a, TensorBase&& b) = delete; \
|
| 554 |
+
maybestatic void methodname( \
|
| 555 |
+
const TensorBase& out, TensorBase&& a, TensorBase&& b) = delete; \
|
| 556 |
+
maybestatic void methodname( \
|
| 557 |
+
TensorBase&& out, TensorBase&& a, TensorBase&& b) = delete;
|
| 558 |
+
|
| 559 |
+
#define TORCH_DISALLOW_TEMPORARIES(methodname) \
|
| 560 |
+
TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, )
|
| 561 |
+
|
| 562 |
+
void build_binary_float_op(
|
| 563 |
+
const TensorBase& out,
|
| 564 |
+
const TensorBase& a,
|
| 565 |
+
const TensorBase& b);
|
| 566 |
+
void build_borrowing_binary_float_op(
|
| 567 |
+
const TensorBase& out,
|
| 568 |
+
const TensorBase& a,
|
| 569 |
+
const TensorBase& b);
|
| 570 |
+
TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_float_op)
|
| 571 |
+
void build_binary_op(
|
| 572 |
+
const TensorBase& out,
|
| 573 |
+
const TensorBase& a,
|
| 574 |
+
const TensorBase& b);
|
| 575 |
+
void build_borrowing_binary_op(
|
| 576 |
+
const TensorBase& out,
|
| 577 |
+
const TensorBase& a,
|
| 578 |
+
const TensorBase& b);
|
| 579 |
+
TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_op)
|
| 580 |
+
void build_unary_float_op(const TensorBase& out, const TensorBase& a);
|
| 581 |
+
void build_borrowing_unary_float_op(
|
| 582 |
+
const TensorBase& out,
|
| 583 |
+
const TensorBase& a);
|
| 584 |
+
TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_float_op)
|
| 585 |
+
void build_unary_op(const TensorBase& out, const TensorBase& a);
|
| 586 |
+
// Odd special case needed for pow. Has to borrow the output because
|
| 587 |
+
// it's a structured kernel, but the argument is potentially a copy.
|
| 588 |
+
void build_output_borrowing_argument_owning_unary_op(
|
| 589 |
+
const TensorBase& out,
|
| 590 |
+
const TensorBase& a);
|
| 591 |
+
void build_borrowing_unary_op(const TensorBase& out, const TensorBase& a);
|
| 592 |
+
TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_op)
|
| 593 |
+
void build_borrowing_unary_force_boolean_op(
|
| 594 |
+
const TensorBase& out,
|
| 595 |
+
const TensorBase& a);
|
| 596 |
+
TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_force_boolean_op)
|
| 597 |
+
void build_comparison_op(
|
| 598 |
+
const TensorBase& out,
|
| 599 |
+
const TensorBase& a,
|
| 600 |
+
const TensorBase& b);
|
| 601 |
+
void build_borrowing_comparison_op(
|
| 602 |
+
const TensorBase& out,
|
| 603 |
+
const TensorBase& a,
|
| 604 |
+
const TensorBase& b);
|
| 605 |
+
TORCH_DISALLOW_TEMPORARIES(build_borrowing_comparison_op)
|
| 606 |
+
// Another special case: we need to own the second argument for comparison
|
| 607 |
+
// ops.
|
| 608 |
+
void build_borrowing_except_last_argument_comparison_op(
|
| 609 |
+
const TensorBase& out,
|
| 610 |
+
const TensorBase& a,
|
| 611 |
+
const TensorBase& b);
|
| 612 |
+
void build_ternary_op(
|
| 613 |
+
const TensorBase& out,
|
| 614 |
+
const TensorBase& a,
|
| 615 |
+
const TensorBase& b,
|
| 616 |
+
const TensorBase& c);
|
| 617 |
+
|
| 618 |
+
#undef TORCH_DISALLOW_TEMPORARIES
|
| 619 |
+
protected:
|
| 620 |
+
// Mutable reference as it moves tensors out of TensorIteratorConfig
|
| 621 |
+
void populate_operands(TensorIteratorConfig&);
|
| 622 |
+
void mark_outputs();
|
| 623 |
+
void mark_resize_outputs(const TensorIteratorConfig&);
|
| 624 |
+
void compute_mem_overlaps(const TensorIteratorConfig&);
|
| 625 |
+
void compute_shape(const TensorIteratorConfig&);
|
| 626 |
+
void compute_strides(const TensorIteratorConfig&);
|
| 627 |
+
void reorder_dimensions();
|
| 628 |
+
void permute_dimensions(IntArrayRef perm);
|
| 629 |
+
void compute_types(const TensorIteratorConfig&);
|
| 630 |
+
ScalarType compute_common_dtype();
|
| 631 |
+
void allocate_or_resize_outputs();
|
| 632 |
+
bool fast_set_up(const TensorIteratorConfig&);
|
| 633 |
+
FastSetupType compute_fast_setup_type(const TensorIteratorConfig&);
|
| 634 |
+
void compute_names(const TensorIteratorConfig&);
|
| 635 |
+
void propagate_names_to_outputs();
|
| 636 |
+
void coalesce_dimensions();
|
| 637 |
+
|
| 638 |
+
protected:
|
| 639 |
+
/// Records the "computation" shape of the output tensor. The computation
|
| 640 |
+
/// shape is different from the regular shape in a few ways:
|
| 641 |
+
///
|
| 642 |
+
/// - The shape may be permuted (via permute_dimensions) so that we
|
| 643 |
+
/// process the dimensions in the most computationally efficient order
|
| 644 |
+
/// (rather than the logical order given to us by the users.)
|
| 645 |
+
/// - The shape may have adjacent dimensions collapsed (via
|
| 646 |
+
/// coalesce_dimensions) so that we minimize the number of
|
| 647 |
+
/// dimensions we have to explicitly iterate over. For example,
|
| 648 |
+
/// a pointwise operation on a contiguous tensor "computationally"
|
| 649 |
+
/// consists of only a single dimension.
|
| 650 |
+
///
|
| 651 |
+
/// In other words, the computation shape is the output shape as it
|
| 652 |
+
/// actually matters for implementing the kernel, but not necessarily the
|
| 653 |
+
/// output shape that the user will see in the end.
|
| 654 |
+
///
|
| 655 |
+
/// The lifecycle of mutations to shape_ in TensorIterator:
|
| 656 |
+
/// - declare_static_shape() sets an initial shape explicitly
|
| 657 |
+
/// provided by user, otherwise
|
| 658 |
+
/// - compute_shape() computes the true (non-computational) shape
|
| 659 |
+
/// specified by the user.
|
| 660 |
+
/// - reorder_dimensions() reorders dimensions to improve coalescing.
|
| 661 |
+
/// - coalesce_dimensions() then coalesces adjacent dimensions when
|
| 662 |
+
/// possible.
|
| 663 |
+
///
|
| 664 |
+
/// The shape may also be further modified if we create sub-TensorIterators,
|
| 665 |
+
/// e.g., via narrow or select_all_keeping_dim.
|
| 666 |
+
DimVector shape_;
|
| 667 |
+
|
| 668 |
+
/// Temporarily records the permutation computed by reorder_dimensions.
|
| 669 |
+
/// This permutation maps the computation output dimension (dim) to
|
| 670 |
+
/// the original true output dimension (perm_[dim]). It is used by
|
| 671 |
+
/// invert_perm to undo the permutation. After coalesce_dimensions is
|
| 672 |
+
/// called, the permutation is no longer valid (as, in general, there
|
| 673 |
+
/// is no permutation that will make computation dimensions to
|
| 674 |
+
/// output dimensions); methods that manipulate perm_ are obligated
|
| 675 |
+
/// to test that !has_coalesced_dimensions
|
| 676 |
+
DimVector perm_;
|
| 677 |
+
|
| 678 |
+
/// Has coalesce_dimensions() (or any moral equivalent, e.g., fast_build())
|
| 679 |
+
/// been called? This is SOLELY used to check validity of perm_.
|
| 680 |
+
bool has_coalesced_dimensions_ = false;
|
| 681 |
+
|
| 682 |
+
/// Whether iteration must be fixed. This disables dimension permuting and
|
| 683 |
+
/// also changes how for_each divides work among threads.
|
| 684 |
+
bool enforce_linear_iteration_ = false;
|
| 685 |
+
|
| 686 |
+
/// The index offsets into the original tensors for each dimension.
|
| 687 |
+
/// This is only non-zero when you narrow() a TensorIterator (e.g.,
|
| 688 |
+
/// when you make sub-TensorIterators).
|
| 689 |
+
DimVector view_offsets_;
|
| 690 |
+
|
| 691 |
+
/// The computed names of the output tensor. Computed by compute_names()
|
| 692 |
+
NameVector names_;
|
| 693 |
+
|
| 694 |
+
/// The operands of the TensorIterator: both the inputs and outputs. The
|
| 695 |
+
/// outputs MUST come first in the operands_ list. There is always an
|
| 696 |
+
/// operand for each output of the TensorIterator, even if TensorIterator
|
| 697 |
+
/// will ultimately be responsible for allocating the output; in those
|
| 698 |
+
/// cases, tensor is simply undefined (and will be populated later
|
| 699 |
+
/// during build()).
|
| 700 |
+
///
|
| 701 |
+
/// This list is initially populated prior to build(), but build() mutates
|
| 702 |
+
/// OperandInfo to populate more information.
|
| 703 |
+
SmallVector<OperandInfo, 4> operands_;
|
| 704 |
+
|
| 705 |
+
/// Number of outputs in operands_ (the length of the outputs prefix
|
| 706 |
+
/// in operands_).
|
| 707 |
+
int num_outputs_ = 0;
|
| 708 |
+
|
| 709 |
+
/// Whether or not all operands have the same shape and are 1d+. Having all
|
| 710 |
+
/// the same shape affects whether or not the iterator is eligible for fast
|
| 711 |
+
/// setup.
|
| 712 |
+
bool all_ops_same_shape_ = false;
|
| 713 |
+
/// Whether or not all operands are 0d, this affects type promotion
|
| 714 |
+
bool all_ops_are_scalars_ = false;
|
| 715 |
+
|
| 716 |
+
/// The "computation" dtype of TensorIterator, specifying what the dtype
|
| 717 |
+
/// we will do the internal computation in TensorIterator. Typically,
|
| 718 |
+
/// this matches the dtype of the output tensors, but not always!
|
| 719 |
+
ScalarType common_dtype_ = ScalarType::Undefined;
|
| 720 |
+
|
| 721 |
+
/// This is currently defined as kCPU, or the device of the first non-CPU
|
| 722 |
+
/// tensor argument. See TensorIteratorBase::compute_types for details.
|
| 723 |
+
Device common_device_ = kCPU;
|
| 724 |
+
|
| 725 |
+
/// Set by split(), see should_accumulate() and is_final_output()
|
| 726 |
+
bool accumulate_ = false;
|
| 727 |
+
bool final_output_ = true;
|
| 728 |
+
|
| 729 |
+
// From TensorIteratorConfig
|
| 730 |
+
bool is_reduction_ = false;
|
| 731 |
+
|
| 732 |
+
/// Set by populate_operands(), says if we're handling meta tensors
|
| 733 |
+
bool is_meta_ = false;
|
| 734 |
+
};
|
| 735 |
+
|
| 736 |
+
struct TORCH_API TensorIterator final : public TensorIteratorBase {
|
| 737 |
+
TensorIterator() : TensorIteratorBase() {}
|
| 738 |
+
// Slicing is OK, TensorIterator guaranteed NOT to have any fields
|
| 739 |
+
TensorIterator(const TensorIteratorBase& iter) : TensorIteratorBase(iter) {}
|
| 740 |
+
|
| 741 |
+
#define TORCH_DISALLOW_TEMPORARIES(methodname) \
|
| 742 |
+
TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, static)
|
| 743 |
+
|
| 744 |
+
static TensorIterator binary_float_op(
|
| 745 |
+
TensorBase& out,
|
| 746 |
+
const TensorBase& a,
|
| 747 |
+
const TensorBase& b);
|
| 748 |
+
static TensorIterator binary_op(
|
| 749 |
+
TensorBase& out,
|
| 750 |
+
const TensorBase& a,
|
| 751 |
+
const TensorBase& b);
|
| 752 |
+
static TensorIterator borrowing_binary_op(
|
| 753 |
+
const TensorBase& out,
|
| 754 |
+
const TensorBase& a,
|
| 755 |
+
const TensorBase& b);
|
| 756 |
+
TORCH_DISALLOW_TEMPORARIES(borrowing_binary_op)
|
| 757 |
+
static TensorIterator comparison_op(
|
| 758 |
+
TensorBase& out,
|
| 759 |
+
const TensorBase& a,
|
| 760 |
+
const TensorBase& b);
|
| 761 |
+
static TensorIterator unary_op(TensorBase& out, const TensorBase& a);
|
| 762 |
+
static TensorIterator unary_float_op(TensorBase& out, const TensorBase& a);
|
| 763 |
+
static TensorIterator nullary_op(TensorBase& out);
|
| 764 |
+
static TensorIterator borrowing_nullary_op(const TensorBase& out);
|
| 765 |
+
static TensorIterator borrowing_nullary_op(TensorBase&& out) = delete;
|
| 766 |
+
static TensorIterator reduce_op(TensorBase& out, const TensorBase& a);
|
| 767 |
+
static TensorIterator reduce_op(
|
| 768 |
+
TensorBase& out1,
|
| 769 |
+
TensorBase& out2,
|
| 770 |
+
const TensorBase& a);
|
| 771 |
+
#undef TORCH_DISALLOW_TEMPORARIES
|
| 772 |
+
#undef TORCH_DISALLOW_TEMPORARIES_IMPL
|
| 773 |
+
|
| 774 |
+
const Tensor& maybe_get_output(int64_t output_idx) override;
|
| 775 |
+
void set_output_raw_strided(
|
| 776 |
+
int64_t output_idx,
|
| 777 |
+
IntArrayRef sizes,
|
| 778 |
+
IntArrayRef strides,
|
| 779 |
+
TensorOptions options,
|
| 780 |
+
DimnameList names) override;
|
| 781 |
+
};
|
| 782 |
+
|
| 783 |
+
class TORCH_API TensorIteratorConfig final {
|
| 784 |
+
public:
|
| 785 |
+
friend struct TensorIteratorBase;
|
| 786 |
+
friend struct TensorIterator;
|
| 787 |
+
|
| 788 |
+
TensorIteratorConfig() = default;
|
| 789 |
+
|
| 790 |
+
C10_DISABLE_COPY_AND_ASSIGN(TensorIteratorConfig);
|
| 791 |
+
|
| 792 |
+
/// Construction
|
| 793 |
+
// Stores input/output Tensors without incrementing the reference count.
|
| 794 |
+
// Important: the outputs have to be added before the inputs.
|
| 795 |
+
TensorIteratorConfig& add_output(const TensorBase& output) {
|
| 796 |
+
return add_borrowed_output(output);
|
| 797 |
+
}
|
| 798 |
+
TensorIteratorConfig& add_input(const TensorBase& input) {
|
| 799 |
+
return add_borrowed_input(input);
|
| 800 |
+
}
|
| 801 |
+
TensorIteratorConfig& add_const_input(const TensorBase& input) {
|
| 802 |
+
return add_borrowed_const_input(input);
|
| 803 |
+
}
|
| 804 |
+
|
| 805 |
+
// Borrowing from temporaries is unlikely to go well.
|
| 806 |
+
TensorIteratorConfig& add_output(TensorBase&& output) = delete;
|
| 807 |
+
TensorIteratorConfig& add_input(TensorBase&& input) = delete;
|
| 808 |
+
TensorIteratorConfig& add_const_input(TensorBase&& input) = delete;
|
| 809 |
+
|
| 810 |
+
// Stores input/output Tensors while incrementing the reference count.
|
| 811 |
+
// Note that add_{in,out}put are nearly always what you
|
| 812 |
+
// want, and the exception (adding an unnamed temporary) won't
|
| 813 |
+
// compile.
|
| 814 |
+
TensorIteratorConfig& add_owned_output(const TensorBase& output);
|
| 815 |
+
TensorIteratorConfig& add_owned_input(const TensorBase& input);
|
| 816 |
+
TensorIteratorConfig& add_owned_const_input(const TensorBase& input);
|
| 817 |
+
|
| 818 |
+
// Advanced API: stores input/output Tensors without incrementing
|
| 819 |
+
// the reference count. The caller must ensure that these Tensors
|
| 820 |
+
// live at least as long as this TensorIteratorConfig and any
|
| 821 |
+
// TensorIteratorBase built from this TensorIteratorConfig.
|
| 822 |
+
// Important: the outputs have to be added before the inputs.
|
| 823 |
+
TensorIteratorConfig& add_borrowed_output(const TensorBase& output);
|
| 824 |
+
TensorIteratorConfig& add_borrowed_input(const TensorBase& input);
|
| 825 |
+
TensorIteratorConfig& add_borrowed_const_input(const TensorBase& input);
|
| 826 |
+
|
| 827 |
+
// Borrowing from temporaries is unlikely to go well.
|
| 828 |
+
TensorIteratorConfig& add_borrowed_output(TensorBase&& output) = delete;
|
| 829 |
+
TensorIteratorConfig& add_borrowed_input(TensorBase&& input) = delete;
|
| 830 |
+
TensorIteratorConfig& add_borrowed_const_input(TensorBase&& input) = delete;
|
| 831 |
+
|
| 832 |
+
// Sets the check_mem_overlap_ flag, which is true by default.
|
| 833 |
+
// If true, inputs are checked for partial overlap with the outputs and
|
| 834 |
+
// outputs are checked for internal overlap (e.g. broadcasted views). An error
|
| 835 |
+
// is raised if unacceptable overlap is detected.
|
| 836 |
+
// If you're migrating an existing operator to using TensorIterator, please
|
| 837 |
+
// consider if the previous implementation checked memory overlap. If it did
|
| 838 |
+
// not, and if the operator is idempotent (for example, Tensor.fill_(0)), then
|
| 839 |
+
// checking memory overlap is BC-breaking. Please don't check memory overlap
|
| 840 |
+
// in that case.
|
| 841 |
+
TensorIteratorConfig& set_check_mem_overlap(bool check_mem_overlap) {
|
| 842 |
+
check_mem_overlap_ = check_mem_overlap;
|
| 843 |
+
return *this;
|
| 844 |
+
}
|
| 845 |
+
|
| 846 |
+
// Sets the check_all_same_dtype_ flag, which is true by default
|
| 847 |
+
// If true, checks that all inputs and defined outputs have the same dtype
|
| 848 |
+
// Setting either of promote_inputs_to_common_dtype_
|
| 849 |
+
// or cast_common_dtype_to_outputs_ to true will set
|
| 850 |
+
// check_all_same_dtype_ to false.
|
| 851 |
+
TensorIteratorConfig& check_all_same_dtype(const bool _check_all_same_dtype) {
|
| 852 |
+
check_all_same_dtype_ = _check_all_same_dtype;
|
| 853 |
+
return *this;
|
| 854 |
+
}
|
| 855 |
+
|
| 856 |
+
// Sets the check_all_same_device_ flag, which is true by default
|
| 857 |
+
// If true, all operands must be on the same device, with the possible
|
| 858 |
+
// exception of CPU scalars, which can be passed to some CUDA kernels
|
| 859 |
+
// as kernel arguments.
|
| 860 |
+
TensorIteratorConfig& check_all_same_device(
|
| 861 |
+
const bool _check_all_same_device) {
|
| 862 |
+
check_all_same_device_ = _check_all_same_device;
|
| 863 |
+
return *this;
|
| 864 |
+
}
|
| 865 |
+
|
| 866 |
+
// Sets the enforce_safe_casting_to_output_ flag, which is false by default
|
| 867 |
+
// If true, the iterator's "common dtype" must be computable
|
| 868 |
+
// (see the [Common Dtype Computation] note) and
|
| 869 |
+
// canCast(common dtype, output dtype) must be true for all outputs.
|
| 870 |
+
TensorIteratorConfig& enforce_safe_casting_to_output(
|
| 871 |
+
const bool _enforce_safe_casting_to_output) {
|
| 872 |
+
enforce_safe_casting_to_output_ = _enforce_safe_casting_to_output;
|
| 873 |
+
return *this;
|
| 874 |
+
}
|
| 875 |
+
|
| 876 |
+
// Sets the enforce_linear_iteration_ flag, which is false by default.
|
| 877 |
+
// If true, iteration goes in the same order as a C-contiguous tensor
|
| 878 |
+
// is layed out in memory. i.e. last dimension iterates fastest.
|
| 879 |
+
//
|
| 880 |
+
// This iteration order can be less efficient and may even prevent
|
| 881 |
+
// vectorization. So only use if the correctness of your kernel depends on it.
|
| 882 |
+
TensorIteratorConfig& enforce_linear_iteration(
|
| 883 |
+
const bool _enforce_linear_iteration = true) {
|
| 884 |
+
enforce_linear_iteration_ = _enforce_linear_iteration;
|
| 885 |
+
return *this;
|
| 886 |
+
}
|
| 887 |
+
|
| 888 |
+
// Sets the promote_inputs_to_common_dtype_ flag, which is false by default
|
| 889 |
+
// If true, the iterator's "common dtype" is always computed (see the
|
| 890 |
+
// [Common Dtype Computation] note) and, on the CPU, temporary copies of
|
| 891 |
+
// the inputs in the common dtype are passed as the actual inputs to
|
| 892 |
+
// the operation.
|
| 893 |
+
// Setting this flag to true sets check_all_same_dtype_ to false.
|
| 894 |
+
TensorIteratorConfig& promote_inputs_to_common_dtype(
|
| 895 |
+
const bool _promote_inputs_to_common_dtype) {
|
| 896 |
+
promote_inputs_to_common_dtype_ = _promote_inputs_to_common_dtype;
|
| 897 |
+
if (_promote_inputs_to_common_dtype) {
|
| 898 |
+
check_all_same_dtype_ = false;
|
| 899 |
+
}
|
| 900 |
+
return *this;
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
// Sets the promote_integer_inputs_to_float_ flag, which is false by default
|
| 904 |
+
// NOTE: If set to true, the promote_inputs_to_common_dtype_ must also be
|
| 905 |
+
// true. If true, if the iterator's "common dtype" is an integral type
|
| 906 |
+
// (including bool)
|
| 907 |
+
// then it is changed to the default float scalar type.
|
| 908 |
+
TensorIteratorConfig& promote_integer_inputs_to_float(
|
| 909 |
+
const bool _promote_integer_inputs_to_float) {
|
| 910 |
+
promote_integer_inputs_to_float_ = _promote_integer_inputs_to_float;
|
| 911 |
+
TORCH_INTERNAL_ASSERT(
|
| 912 |
+
!promote_integer_inputs_to_float_ || promote_inputs_to_common_dtype_);
|
| 913 |
+
return *this;
|
| 914 |
+
}
|
| 915 |
+
|
| 916 |
+
TensorIteratorConfig& is_reduction(const bool _is_reduction) {
|
| 917 |
+
is_reduction_ = _is_reduction;
|
| 918 |
+
return *this;
|
| 919 |
+
}
|
| 920 |
+
|
| 921 |
+
TensorIteratorConfig& allow_cpu_scalars(const bool _allow_cpu_scalars) {
|
| 922 |
+
allow_cpu_scalars_ = _allow_cpu_scalars;
|
| 923 |
+
return *this;
|
| 924 |
+
}
|
| 925 |
+
|
| 926 |
+
// Sets the cast_common_dtype_to_outputs_ flag, which is false by default
|
| 927 |
+
// If true, the iterator's "common dtype" must be computatable
|
| 928 |
+
// (see the [Common Dtype Computation] note) and, on the CPU, temporary
|
| 929 |
+
// copies of the outputs are passed as the actual output to the operation.
|
| 930 |
+
// These temporaries are then copied to the original outputs after
|
| 931 |
+
// the operation is performed (see cast_outputs()).
|
| 932 |
+
// Setting this flag to true sets check_all_same_dtype_ to false.
|
| 933 |
+
TensorIteratorConfig& cast_common_dtype_to_outputs(
|
| 934 |
+
const bool _cast_common_dtype_to_outputs) {
|
| 935 |
+
cast_common_dtype_to_outputs_ = _cast_common_dtype_to_outputs;
|
| 936 |
+
if (_cast_common_dtype_to_outputs) {
|
| 937 |
+
check_all_same_dtype_ = false;
|
| 938 |
+
}
|
| 939 |
+
return *this;
|
| 940 |
+
}
|
| 941 |
+
|
| 942 |
+
TensorIteratorConfig& resize_outputs(bool resize_outputs) {
|
| 943 |
+
resize_outputs_ = resize_outputs;
|
| 944 |
+
return *this;
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
// Bypass output dtype/device computation and fix the dtype/device as
|
| 948 |
+
// specified here.
|
| 949 |
+
TensorIteratorConfig& declare_static_dtype_and_device(
|
| 950 |
+
ScalarType dtype,
|
| 951 |
+
Device device);
|
| 952 |
+
TensorIteratorConfig& declare_static_dtype(ScalarType dtype);
|
| 953 |
+
TensorIteratorConfig& declare_static_device(Device device);
|
| 954 |
+
TensorIteratorConfig& declare_static_shape(IntArrayRef shape);
|
| 955 |
+
TensorIteratorConfig& declare_static_shape(
|
| 956 |
+
IntArrayRef shape,
|
| 957 |
+
IntArrayRef squash_dims);
|
| 958 |
+
|
| 959 |
+
// It would be better if this was && qualified, but this would be at the cost
|
| 960 |
+
// of a lot of boilerplate above
|
| 961 |
+
TensorIterator build() {
|
| 962 |
+
TensorIterator iter;
|
| 963 |
+
iter.build(*this);
|
| 964 |
+
return iter;
|
| 965 |
+
}
|
| 966 |
+
|
| 967 |
+
private:
|
| 968 |
+
bool is_tensor_const(size_t idx);
|
| 969 |
+
|
| 970 |
+
SmallVector<c10::MaybeOwned<TensorBase>, 4> tensors_;
|
| 971 |
+
int num_outputs_ = 0;
|
| 972 |
+
int num_inputs_ = 0;
|
| 973 |
+
|
| 974 |
+
std::optional<DimVector> static_shape_ = std::nullopt;
|
| 975 |
+
std::optional<ScalarType> static_dtype_ = std::nullopt;
|
| 976 |
+
std::optional<Device> static_device_ = std::nullopt;
|
| 977 |
+
bool check_mem_overlap_ = true;
|
| 978 |
+
bool allow_cpu_scalars_ = false;
|
| 979 |
+
bool is_reduction_ = false;
|
| 980 |
+
bool resize_outputs_ = true;
|
| 981 |
+
bool check_all_same_dtype_ = true;
|
| 982 |
+
bool check_all_same_device_ = true;
|
| 983 |
+
bool enforce_safe_casting_to_output_ = false;
|
| 984 |
+
bool enforce_linear_iteration_ = false;
|
| 985 |
+
bool promote_inputs_to_common_dtype_ = false;
|
| 986 |
+
bool promote_integer_inputs_to_float_ = false;
|
| 987 |
+
bool cast_common_dtype_to_outputs_ = false;
|
| 988 |
+
|
| 989 |
+
SmallVector<size_t, 4> const_tensor_indices_;
|
| 990 |
+
};
|
| 991 |
+
|
| 992 |
+
/// A container-like struct that acts as if it contains splits of a
|
| 993 |
+
/// TensorIterator that can use 32-bit indexing. Taken together the splits cover
|
| 994 |
+
/// the original TensorIterator.
|
| 995 |
+
struct TORCH_API SplitUntil32Bit {
|
| 996 |
+
struct TORCH_API iterator {
|
| 997 |
+
iterator() = default;
|
| 998 |
+
iterator(const TensorIteratorBase& iter);
|
| 999 |
+
iterator(iterator&&) = default;
|
| 1000 |
+
|
| 1001 |
+
// Guaranteed to be a TensorIterator proper!
|
| 1002 |
+
TensorIterator& operator*() const;
|
| 1003 |
+
iterator& operator++();
|
| 1004 |
+
bool operator==(const iterator& other) const {
|
| 1005 |
+
// two iterators are equal if they are the same object or they're both
|
| 1006 |
+
// empty
|
| 1007 |
+
return this == &other || (vec.empty() && other.vec.empty());
|
| 1008 |
+
}
|
| 1009 |
+
// needed for C++11 range-based for loop
|
| 1010 |
+
bool operator!=(const iterator& other) const {
|
| 1011 |
+
return !(*this == other);
|
| 1012 |
+
}
|
| 1013 |
+
|
| 1014 |
+
/// stack of TensorIterators to be split
|
| 1015 |
+
std::vector<std::unique_ptr<TensorIterator>> vec;
|
| 1016 |
+
};
|
| 1017 |
+
|
| 1018 |
+
SplitUntil32Bit(const TensorIteratorBase& iter) : iter(iter) {}
|
| 1019 |
+
|
| 1020 |
+
iterator begin() const;
|
| 1021 |
+
iterator end() const;
|
| 1022 |
+
|
| 1023 |
+
private:
|
| 1024 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 1025 |
+
const TensorIteratorBase& iter;
|
| 1026 |
+
};
|
| 1027 |
+
|
| 1028 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/DimVector.h>
|
| 4 |
+
#include <ATen/core/Dimname.h>
|
| 5 |
+
#include <c10/core/TensorOptions.h>
|
| 6 |
+
#include <c10/util/strides.h>
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
class Tensor;
|
| 11 |
+
|
| 12 |
+
namespace impl {
|
| 13 |
+
|
| 14 |
+
// Use this to define the prototype for a meta function. There are two
|
| 15 |
+
// versions; one that takes one argument (just the operator name), or FUNC2
|
| 16 |
+
// variant that takes two arguments (operator name and overload name).
|
| 17 |
+
//
|
| 18 |
+
// Example usage:
|
| 19 |
+
//
|
| 20 |
+
// TORCH_META_FUNC2(add, Tensor) (
|
| 21 |
+
// const Tensor& self, const Tensor& other
|
| 22 |
+
// ) {
|
| 23 |
+
// ... compute sizes and options ...
|
| 24 |
+
// set_output(sizes, options);
|
| 25 |
+
// }
|
| 26 |
+
//
|
| 27 |
+
#define TORCH_META_FUNC(name) void structured_##name::meta
|
| 28 |
+
#define TORCH_META_FUNC2(name, overload) \
|
| 29 |
+
void structured_##name##_##overload::meta
|
| 30 |
+
|
| 31 |
+
// These are versions of TORCH_META_FUNC(2) that include a precompute_out struct
|
| 32 |
+
// as a return value. They should be used when the kernel in question has
|
| 33 |
+
// precomputed values declared in native_functions.yaml and the corresponding
|
| 34 |
+
// implementation should return an instance of the aforementioned struct.
|
| 35 |
+
#define TORCH_PRECOMPUTE_META_FUNC(name) \
|
| 36 |
+
structured_##name::meta_return_ty structured_##name::meta
|
| 37 |
+
#define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \
|
| 38 |
+
structured_##name##_##overload::meta_return_ty \
|
| 39 |
+
structured_##name##_##overload::meta
|
| 40 |
+
|
| 41 |
+
// Use this to create a precompute struct in a meta function.
|
| 42 |
+
#define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<>
|
| 43 |
+
#define TORCH_PRECOMPUTE_STRUCT2(name, overload) \
|
| 44 |
+
structured_##name##_##overload::precompute_out<>
|
| 45 |
+
|
| 46 |
+
// Use this to define the prototype for an implementation. This takes only
|
| 47 |
+
// one argument, which is the name of the dispatch key entry you're
|
| 48 |
+
// implementing.
|
| 49 |
+
//
|
| 50 |
+
// Example usage:
|
| 51 |
+
//
|
| 52 |
+
// TORCH_IMPL_FUNC(add_cpu) (
|
| 53 |
+
// Tensor& result, const Tensor& self, const Tensor& other
|
| 54 |
+
// ) {
|
| 55 |
+
// ... do the actual implementation ...
|
| 56 |
+
// }
|
| 57 |
+
//
|
| 58 |
+
#define TORCH_IMPL_FUNC(name) void structured_##name::impl
|
| 59 |
+
|
| 60 |
+
// Base class for all structured kernel classes. The set_output virtual
|
| 61 |
+
// method is varied depending whether or not the operator is
|
| 62 |
+
// functional/out/inplace, and could also be specialized for CPU/CUDA/etc
|
| 63 |
+
// (although presently it isn't).
|
| 64 |
+
//
|
| 65 |
+
// A notable subclass of this interface is TensorIteratorBase.
|
| 66 |
+
struct TORCH_API MetaBase {
|
| 67 |
+
MetaBase() = default;
|
| 68 |
+
MetaBase(const MetaBase&) = default;
|
| 69 |
+
MetaBase& operator=(const MetaBase&) = default;
|
| 70 |
+
MetaBase(MetaBase&&) noexcept = default;
|
| 71 |
+
MetaBase& operator=(MetaBase&&) noexcept = default;
|
| 72 |
+
virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;
|
| 73 |
+
|
| 74 |
+
// Note: [set_output_*]
|
| 75 |
+
// See: https://github.com/pytorch/pytorch/issues/69813
|
| 76 |
+
// Whenever defining the output properties in the META function of a
|
| 77 |
+
// structured kernel (what was usually done with `set_output`), use one of
|
| 78 |
+
// these 3 variants, instead. In order to decide which variant to use, check
|
| 79 |
+
// the following decision tree:
|
| 80 |
+
//
|
| 81 |
+
// - Can the kernel you are going to implement support output tensors
|
| 82 |
+
// with arbitrary strides?
|
| 83 |
+
// |
|
| 84 |
+
// -- YES: `set_output_raw_strided`
|
| 85 |
+
// |
|
| 86 |
+
// -- NO: Should the output tensor strides be contiguous?
|
| 87 |
+
// |
|
| 88 |
+
// -- YES: `set_output_contiguous`
|
| 89 |
+
// |
|
| 90 |
+
// -- NO: `set_output_strided`
|
| 91 |
+
//
|
| 92 |
+
// Use this function whenever the kernel requires specific strides for the
|
| 93 |
+
// output. If `strides` does not match the given output strides, proxy outputs
|
| 94 |
+
// will be created and passed to the IMPL function.
|
| 95 |
+
virtual void set_output_strided(
|
| 96 |
+
int64_t output_idx [[maybe_unused]],
|
| 97 |
+
IntArrayRef sizes [[maybe_unused]],
|
| 98 |
+
IntArrayRef strides [[maybe_unused]],
|
| 99 |
+
TensorOptions options [[maybe_unused]],
|
| 100 |
+
DimnameList names [[maybe_unused]] = {}) {
|
| 101 |
+
TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
// Use this function whenever the kernel knows how to handle arbitrary strided
|
| 105 |
+
// outputs. This function has the same behavior as the old `set_output`: it
|
| 106 |
+
// will only re-stride if the given output was resized.
|
| 107 |
+
virtual void set_output_raw_strided(
|
| 108 |
+
int64_t output_idx [[maybe_unused]],
|
| 109 |
+
IntArrayRef sizes [[maybe_unused]],
|
| 110 |
+
IntArrayRef strides_hint [[maybe_unused]],
|
| 111 |
+
TensorOptions options [[maybe_unused]],
|
| 112 |
+
DimnameList names [[maybe_unused]] = {}) {
|
| 113 |
+
TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
// Use this function if the kernel requires contiguous strides.
|
| 117 |
+
// Alias for `set_output_strided`, but with contiguous strides.
|
| 118 |
+
void set_output_contiguous(
|
| 119 |
+
int64_t output_idx,
|
| 120 |
+
IntArrayRef sizes,
|
| 121 |
+
TensorOptions options,
|
| 122 |
+
DimnameList names = {}) {
|
| 123 |
+
auto strides = c10::contiguous_strides(sizes);
|
| 124 |
+
set_output_strided(output_idx, sizes, strides, options, names);
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
// Returns a reference to an undefined tensor if there is no presupplied
|
| 128 |
+
// output
|
| 129 |
+
const Tensor& maybe_get_output() {
|
| 130 |
+
return maybe_get_output(0);
|
| 131 |
+
}
|
| 132 |
+
virtual ~MetaBase() = default;
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
} // namespace impl
|
| 136 |
+
|
| 137 |
+
} // namespace at
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/WrapDimUtils.h>
|
| 4 |
+
|
| 5 |
+
namespace at::namedinference {
|
| 6 |
+
|
| 7 |
+
// TensorName and TensorNames are wrappers around Dimname and DimnameList
|
| 8 |
+
// that contain helper functions to make writing name inference rules easier.
|
| 9 |
+
//
|
| 10 |
+
// A TensorName represents a Dimname associated with some DimnameList (from a
|
| 11 |
+
// Tensor). This encapsulates all the information that is needed to check if
|
| 12 |
+
// names *match* and to *unify* names.
|
| 13 |
+
//
|
| 14 |
+
// Definition: Two names in two tensors *match* if they are equal, or if at
|
| 15 |
+
// least one of them is a wildcard that can be *refined* to the other name.
|
| 16 |
+
//
|
| 17 |
+
// Definition: unify(name, other) fails if the names do not match. Otherwise,
|
| 18 |
+
// it returns the most refined of name and other.
|
| 19 |
+
//
|
| 20 |
+
// Here is an example of checking if two names match.
|
| 21 |
+
// tensor: Tensor[A, None]
|
| 22 |
+
// other: Tensor[A]
|
| 23 |
+
//
|
| 24 |
+
// Let's say we wish to check if tensor.names[-1] matches other.names[-1].
|
| 25 |
+
// None (in tensor) cannot match A (in other) because if the None were refined
|
| 26 |
+
// to A, `tensor` would have duplicate names [A, A]. Therefore we need to check
|
| 27 |
+
// tensor.names [A, None] for the existence of A.
|
| 28 |
+
struct TORCH_API TensorName {
|
| 29 |
+
explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
|
| 30 |
+
: origin_(origin),
|
| 31 |
+
name_(origin[maybe_wrap_dim(
|
| 32 |
+
origin_idx,
|
| 33 |
+
static_cast<int64_t>(origin.size()))]),
|
| 34 |
+
origin_idx_(origin_idx) {}
|
| 35 |
+
|
| 36 |
+
// op_name is only used for error reporting.
|
| 37 |
+
const TensorName& unify(const TensorName& other, const char* op_name) const;
|
| 38 |
+
Dimname toDimname() const;
|
| 39 |
+
|
| 40 |
+
private:
|
| 41 |
+
ArrayRef<Dimname> origin_;
|
| 42 |
+
Dimname name_;
|
| 43 |
+
int origin_idx_; // A named tensor can have at most 64 dims.
|
| 44 |
+
|
| 45 |
+
TORCH_API friend std::ostream& operator<<(
|
| 46 |
+
std::ostream& out,
|
| 47 |
+
const TensorName& tensorname);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
using TensorNameVec = SmallVector<TensorName, 10>;
|
| 51 |
+
|
| 52 |
+
struct TORCH_API TensorNames {
|
| 53 |
+
explicit TensorNames(ArrayRef<Dimname> names);
|
| 54 |
+
|
| 55 |
+
// Create TensorNames from names[start:end]. Each individual TensorName stores
|
| 56 |
+
// `names`, NOT names[start:end], because the original tensor's names are
|
| 57 |
+
// `names`.
|
| 58 |
+
explicit TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end);
|
| 59 |
+
|
| 60 |
+
// op_name is only used for error reporting.
|
| 61 |
+
TensorNames& unifyFromRightInplace(
|
| 62 |
+
const TensorNames& other,
|
| 63 |
+
const char* op_name = "unify");
|
| 64 |
+
void checkUnique(const char* op_name) const;
|
| 65 |
+
|
| 66 |
+
void append(TensorName name);
|
| 67 |
+
std::vector<Dimname> toDimnameVec() const;
|
| 68 |
+
|
| 69 |
+
private:
|
| 70 |
+
explicit TensorNames(TensorNameVec&& names) : names_(std::move(names)){};
|
| 71 |
+
|
| 72 |
+
TensorNameVec names_;
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
} // namespace at::namedinference
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/core/TensorOptions.h>
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SafePyObject.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
#include <unordered_map>
|
| 6 |
+
|
| 7 |
+
namespace at::impl {
|
| 8 |
+
|
| 9 |
+
struct TORCH_API ThreadLocalPythonObjects {
|
| 10 |
+
static void set(const std::string& key, std::shared_ptr<SafePyObject> value);
|
| 11 |
+
static const std::shared_ptr<SafePyObject>& get(const std::string& key);
|
| 12 |
+
static bool contains(const std::string& key);
|
| 13 |
+
|
| 14 |
+
static const ThreadLocalPythonObjects& get_state();
|
| 15 |
+
static void set_state(ThreadLocalPythonObjects state);
|
| 16 |
+
|
| 17 |
+
private:
|
| 18 |
+
std::unordered_map<std::string, std::shared_ptr<c10::SafePyObject>> obj_dict_;
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
} // namespace at::impl
|
mplug_owl2/lib/python3.10/site-packages/torch/include/ATen/TypeDefault.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Dimname.h>
|
| 4 |
+
#include <c10/core/MemoryFormat.h>
|
| 5 |
+
#include <c10/core/QScheme.h>
|
| 6 |
+
#include <c10/core/Scalar.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/macros/Export.h>
|
| 9 |
+
#include <c10/util/ArrayRef.h>
|
| 10 |
+
#include <c10/util/intrusive_ptr.h>
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
struct Storage;
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
class Tensor;
|
| 19 |
+
using TensorList = ArrayRef<Tensor>;
|
| 20 |
+
|
| 21 |
+
class Context;
|
| 22 |
+
struct Generator;
|
| 23 |
+
|
| 24 |
+
struct Quantizer;
|
| 25 |
+
// This is temporary typedef to enable Quantizer in aten native function API
|
| 26 |
+
// we'll remove them when we are actually exposing Quantizer class
|
| 27 |
+
// to frontend
|
| 28 |
+
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
|
| 29 |
+
|
| 30 |
+
} // namespace at
|