Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h +49 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h +94 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h +533 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h +29 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h +41 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h +808 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/PadNd.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h +83 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h +0 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h +144 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h +731 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h +987 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h +2 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/record_function.h +741 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h +33 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAllocatorConfig.h +116 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h +450 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h +98 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h +158 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAException.h +102 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAFunctions.h +118 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGraphsC10Utils.h +92 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGuard.h +305 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMacros.h +44 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMathCompat.h +156 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMiscFunctions.h +14 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAStream.h +273 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/driver_api.h +49 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h +211 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h +13 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/cuda_cmake_macros.h +6 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h +174 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/ArrayRef.h +371 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h +343 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h +17 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h +67 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/DimVector.h +16 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h +29 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Flags.h +226 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h +247 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h +283 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz-inl.h +87 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Load.h +38 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h +233 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/Metaprogramming.h +226 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h +231 -0
- videollama2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h +153 -0
.gitattributes
CHANGED
|
@@ -1414,3 +1414,4 @@ vllm/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gn
|
|
| 1414 |
vllm/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1415 |
parrot/lib/python3.10/site-packages/numpy/ma/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1416 |
parrot/lib/python3.10/site-packages/numpy/linalg/_umath_linalg.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1414 |
vllm/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1415 |
parrot/lib/python3.10/site-packages/numpy/ma/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1416 |
parrot/lib/python3.10/site-packages/numpy/linalg/_umath_linalg.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1417 |
+
vllm/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Generator.h>
|
| 4 |
+
#include <ATen/core/MT19937RNGEngine.h>
|
| 5 |
+
#include <c10/core/GeneratorImpl.h>
|
| 6 |
+
#include <c10/util/Optional.h>
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
|
| 11 |
+
// Constructors
|
| 12 |
+
CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
|
| 13 |
+
~CPUGeneratorImpl() override = default;
|
| 14 |
+
|
| 15 |
+
// CPUGeneratorImpl methods
|
| 16 |
+
std::shared_ptr<CPUGeneratorImpl> clone() const;
|
| 17 |
+
void set_current_seed(uint64_t seed) override;
|
| 18 |
+
void set_offset(uint64_t offset) override;
|
| 19 |
+
uint64_t get_offset() const override;
|
| 20 |
+
uint64_t current_seed() const override;
|
| 21 |
+
uint64_t seed() override;
|
| 22 |
+
void set_state(const c10::TensorImpl& new_state) override;
|
| 23 |
+
c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
|
| 24 |
+
static c10::DeviceType device_type();
|
| 25 |
+
uint32_t random();
|
| 26 |
+
uint64_t random64();
|
| 27 |
+
c10::optional<float> next_float_normal_sample();
|
| 28 |
+
c10::optional<double> next_double_normal_sample();
|
| 29 |
+
void set_next_float_normal_sample(c10::optional<float> randn);
|
| 30 |
+
void set_next_double_normal_sample(c10::optional<double> randn);
|
| 31 |
+
at::mt19937 engine();
|
| 32 |
+
void set_engine(at::mt19937 engine);
|
| 33 |
+
|
| 34 |
+
private:
|
| 35 |
+
CPUGeneratorImpl* clone_impl() const override;
|
| 36 |
+
at::mt19937 engine_;
|
| 37 |
+
c10::optional<float> next_float_normal_sample_;
|
| 38 |
+
c10::optional<double> next_double_normal_sample_;
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
namespace detail {
|
| 42 |
+
|
| 43 |
+
TORCH_API const Generator& getDefaultCPUGenerator();
|
| 44 |
+
TORCH_API Generator
|
| 45 |
+
createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
|
| 46 |
+
|
| 47 |
+
} // namespace detail
|
| 48 |
+
|
| 49 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/util/Exception.h>
|
| 2 |
+
#include <utility>
|
| 3 |
+
|
| 4 |
+
namespace at {
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
[collapse dims] Updates sizes, and strides to reflect a "collapse" of
|
| 8 |
+
the info, possibly excluding the optional excludeDim. A "collapsed" version
|
| 9 |
+
of the info is the fewest dims that order the tensor's elements in the same
|
| 10 |
+
way as the original info. If excludeDim is specified, the collapse is the
|
| 11 |
+
fewest dims that order the tensor's elements as the original and preserve the
|
| 12 |
+
excluded dimension, unless the tensor collapses to a point.
|
| 13 |
+
|
| 14 |
+
This function returns a pair of values.
|
| 15 |
+
|
| 16 |
+
1) The (new) index of the preserved dimension if excludeDim is
|
| 17 |
+
specified. 0 if the tensor is collapsed to a point. -1
|
| 18 |
+
otherwise.
|
| 19 |
+
|
| 20 |
+
2) The new number of dimensions.
|
| 21 |
+
*/
|
| 22 |
+
template <typename T>
|
| 23 |
+
inline std::pair<int64_t, int64_t> collapse_dims(
|
| 24 |
+
T* sizes,
|
| 25 |
+
T* strides,
|
| 26 |
+
int64_t dims,
|
| 27 |
+
const int excludeDim = -1) {
|
| 28 |
+
TORCH_CHECK(
|
| 29 |
+
excludeDim >= -1 && excludeDim < dims,
|
| 30 |
+
"expected excluded dim between -1 and dims - 1");
|
| 31 |
+
|
| 32 |
+
int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
|
| 33 |
+
int64_t newIndex = -1;
|
| 34 |
+
int64_t oldIndex = 0;
|
| 35 |
+
int64_t remappedExcludedDim = -1;
|
| 36 |
+
|
| 37 |
+
while (oldIndex < dims) {
|
| 38 |
+
// Finds a dimension to collapse into
|
| 39 |
+
for (; oldIndex < stopDim; ++oldIndex) {
|
| 40 |
+
if (sizes[oldIndex] == 1) {
|
| 41 |
+
continue;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
++newIndex;
|
| 45 |
+
sizes[newIndex] = sizes[oldIndex];
|
| 46 |
+
strides[newIndex] = strides[oldIndex];
|
| 47 |
+
++oldIndex;
|
| 48 |
+
break;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
// Collapses dims
|
| 52 |
+
for (; oldIndex < stopDim; ++oldIndex) {
|
| 53 |
+
if (sizes[oldIndex] == 1) {
|
| 54 |
+
continue;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
|
| 58 |
+
sizes[newIndex] *= sizes[oldIndex];
|
| 59 |
+
strides[newIndex] = strides[oldIndex];
|
| 60 |
+
} else {
|
| 61 |
+
++newIndex;
|
| 62 |
+
sizes[newIndex] = sizes[oldIndex];
|
| 63 |
+
strides[newIndex] = strides[oldIndex];
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
// Handles excludeDim being set (oldIndex == excludeDim)
|
| 68 |
+
if (oldIndex != dims) {
|
| 69 |
+
// Preserves excluded dimension
|
| 70 |
+
++newIndex;
|
| 71 |
+
sizes[newIndex] = sizes[oldIndex];
|
| 72 |
+
strides[newIndex] = strides[oldIndex];
|
| 73 |
+
remappedExcludedDim = newIndex;
|
| 74 |
+
|
| 75 |
+
// Restarts iteration after excludeDim
|
| 76 |
+
++oldIndex;
|
| 77 |
+
stopDim = dims;
|
| 78 |
+
}
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
// Handles special case of all dims size 1
|
| 82 |
+
if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
|
| 83 |
+
dims = 1;
|
| 84 |
+
sizes[0] = 1;
|
| 85 |
+
strides[0] = 1;
|
| 86 |
+
|
| 87 |
+
return std::pair<int64_t, int64_t>(0, 1);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
dims = newIndex + 1;
|
| 91 |
+
return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h
ADDED
|
@@ -0,0 +1,533 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 12 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
| 13 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
| 14 |
+
Consider including a specific operator from \
|
| 15 |
+
<ATen/ops/{my_operator}_compositeexplicitautograd_dispatch.h>. \
|
| 16 |
+
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h>
|
| 20 |
+
#include <ATen/ops/_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 21 |
+
#include <ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h>
|
| 22 |
+
#include <ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h>
|
| 23 |
+
#include <ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h>
|
| 24 |
+
#include <ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h>
|
| 25 |
+
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h>
|
| 26 |
+
#include <ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h>
|
| 27 |
+
#include <ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h>
|
| 28 |
+
#include <ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h>
|
| 29 |
+
#include <ATen/ops/_cholesky_solve_helper_compositeexplicitautograd_dispatch.h>
|
| 30 |
+
#include <ATen/ops/_coalesce_compositeexplicitautograd_dispatch.h>
|
| 31 |
+
#include <ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h>
|
| 32 |
+
#include <ATen/ops/_conj_compositeexplicitautograd_dispatch.h>
|
| 33 |
+
#include <ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h>
|
| 34 |
+
#include <ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h>
|
| 35 |
+
#include <ATen/ops/_convolution_compositeexplicitautograd_dispatch.h>
|
| 36 |
+
#include <ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h>
|
| 37 |
+
#include <ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h>
|
| 38 |
+
#include <ATen/ops/_ctc_loss_compositeexplicitautograd_dispatch.h>
|
| 39 |
+
#include <ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 40 |
+
#include <ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h>
|
| 41 |
+
#include <ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h>
|
| 42 |
+
#include <ATen/ops/_cudnn_rnn_compositeexplicitautograd_dispatch.h>
|
| 43 |
+
#include <ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h>
|
| 44 |
+
#include <ATen/ops/_cudnn_rnn_flatten_weight_compositeexplicitautograd_dispatch.h>
|
| 45 |
+
#include <ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h>
|
| 46 |
+
#include <ATen/ops/_efficientzerotensor_compositeexplicitautograd_dispatch.h>
|
| 47 |
+
#include <ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h>
|
| 48 |
+
#include <ATen/ops/_embedding_bag_dense_backward_compositeexplicitautograd_dispatch.h>
|
| 49 |
+
#include <ATen/ops/_embedding_bag_forward_only_compositeexplicitautograd_dispatch.h>
|
| 50 |
+
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_compositeexplicitautograd_dispatch.h>
|
| 51 |
+
#include <ATen/ops/_empty_affine_quantized_compositeexplicitautograd_dispatch.h>
|
| 52 |
+
#include <ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h>
|
| 53 |
+
#include <ATen/ops/_euclidean_dist_compositeexplicitautograd_dispatch.h>
|
| 54 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h>
|
| 55 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h>
|
| 56 |
+
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_compositeexplicitautograd_dispatch.h>
|
| 57 |
+
#include <ATen/ops/_foobar_compositeexplicitautograd_dispatch.h>
|
| 58 |
+
#include <ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h>
|
| 59 |
+
#include <ATen/ops/_foreach_acos_compositeexplicitautograd_dispatch.h>
|
| 60 |
+
#include <ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h>
|
| 61 |
+
#include <ATen/ops/_foreach_addcdiv_compositeexplicitautograd_dispatch.h>
|
| 62 |
+
#include <ATen/ops/_foreach_addcmul_compositeexplicitautograd_dispatch.h>
|
| 63 |
+
#include <ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h>
|
| 64 |
+
#include <ATen/ops/_foreach_atan_compositeexplicitautograd_dispatch.h>
|
| 65 |
+
#include <ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h>
|
| 66 |
+
#include <ATen/ops/_foreach_clamp_max_compositeexplicitautograd_dispatch.h>
|
| 67 |
+
#include <ATen/ops/_foreach_clamp_min_compositeexplicitautograd_dispatch.h>
|
| 68 |
+
#include <ATen/ops/_foreach_copy_compositeexplicitautograd_dispatch.h>
|
| 69 |
+
#include <ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h>
|
| 70 |
+
#include <ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h>
|
| 71 |
+
#include <ATen/ops/_foreach_div_compositeexplicitautograd_dispatch.h>
|
| 72 |
+
#include <ATen/ops/_foreach_erf_compositeexplicitautograd_dispatch.h>
|
| 73 |
+
#include <ATen/ops/_foreach_erfc_compositeexplicitautograd_dispatch.h>
|
| 74 |
+
#include <ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h>
|
| 75 |
+
#include <ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h>
|
| 76 |
+
#include <ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h>
|
| 77 |
+
#include <ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h>
|
| 78 |
+
#include <ATen/ops/_foreach_lerp_compositeexplicitautograd_dispatch.h>
|
| 79 |
+
#include <ATen/ops/_foreach_lgamma_compositeexplicitautograd_dispatch.h>
|
| 80 |
+
#include <ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h>
|
| 81 |
+
#include <ATen/ops/_foreach_log10_compositeexplicitautograd_dispatch.h>
|
| 82 |
+
#include <ATen/ops/_foreach_log1p_compositeexplicitautograd_dispatch.h>
|
| 83 |
+
#include <ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h>
|
| 84 |
+
#include <ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h>
|
| 85 |
+
#include <ATen/ops/_foreach_minimum_compositeexplicitautograd_dispatch.h>
|
| 86 |
+
#include <ATen/ops/_foreach_mul_compositeexplicitautograd_dispatch.h>
|
| 87 |
+
#include <ATen/ops/_foreach_neg_compositeexplicitautograd_dispatch.h>
|
| 88 |
+
#include <ATen/ops/_foreach_norm_compositeexplicitautograd_dispatch.h>
|
| 89 |
+
#include <ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h>
|
| 90 |
+
#include <ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h>
|
| 91 |
+
#include <ATen/ops/_foreach_round_compositeexplicitautograd_dispatch.h>
|
| 92 |
+
#include <ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h>
|
| 93 |
+
#include <ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h>
|
| 94 |
+
#include <ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h>
|
| 95 |
+
#include <ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h>
|
| 96 |
+
#include <ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h>
|
| 97 |
+
#include <ATen/ops/_foreach_sub_compositeexplicitautograd_dispatch.h>
|
| 98 |
+
#include <ATen/ops/_foreach_tan_compositeexplicitautograd_dispatch.h>
|
| 99 |
+
#include <ATen/ops/_foreach_tanh_compositeexplicitautograd_dispatch.h>
|
| 100 |
+
#include <ATen/ops/_foreach_trunc_compositeexplicitautograd_dispatch.h>
|
| 101 |
+
#include <ATen/ops/_foreach_zero_compositeexplicitautograd_dispatch.h>
|
| 102 |
+
#include <ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h>
|
| 103 |
+
#include <ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
|
| 104 |
+
#include <ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h>
|
| 105 |
+
#include <ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h>
|
| 106 |
+
#include <ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h>
|
| 107 |
+
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_compositeexplicitautograd_dispatch.h>
|
| 108 |
+
#include <ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h>
|
| 109 |
+
#include <ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h>
|
| 110 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h>
|
| 111 |
+
#include <ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h>
|
| 112 |
+
#include <ATen/ops/_histogramdd_bin_edges_compositeexplicitautograd_dispatch.h>
|
| 113 |
+
#include <ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h>
|
| 114 |
+
#include <ATen/ops/_histogramdd_from_bin_tensors_compositeexplicitautograd_dispatch.h>
|
| 115 |
+
#include <ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h>
|
| 116 |
+
#include <ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 117 |
+
#include <ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h>
|
| 118 |
+
#include <ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h>
|
| 119 |
+
#include <ATen/ops/_linalg_check_errors_compositeexplicitautograd_dispatch.h>
|
| 120 |
+
#include <ATen/ops/_lstm_mps_compositeexplicitautograd_dispatch.h>
|
| 121 |
+
#include <ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h>
|
| 122 |
+
#include <ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h>
|
| 123 |
+
#include <ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h>
|
| 124 |
+
#include <ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h>
|
| 125 |
+
#include <ATen/ops/_masked_scale_compositeexplicitautograd_dispatch.h>
|
| 126 |
+
#include <ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h>
|
| 127 |
+
#include <ATen/ops/_masked_softmax_backward_compositeexplicitautograd_dispatch.h>
|
| 128 |
+
#include <ATen/ops/_mkldnn_reshape_compositeexplicitautograd_dispatch.h>
|
| 129 |
+
#include <ATen/ops/_mkldnn_transpose_compositeexplicitautograd_dispatch.h>
|
| 130 |
+
#include <ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h>
|
| 131 |
+
#include <ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
| 132 |
+
#include <ATen/ops/_native_batch_norm_legit_compositeexplicitautograd_dispatch.h>
|
| 133 |
+
#include <ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h>
|
| 134 |
+
#include <ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h>
|
| 135 |
+
#include <ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h>
|
| 136 |
+
#include <ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h>
|
| 137 |
+
#include <ATen/ops/_nested_from_padded_compositeexplicitautograd_dispatch.h>
|
| 138 |
+
#include <ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h>
|
| 139 |
+
#include <ATen/ops/_nested_tensor_from_mask_compositeexplicitautograd_dispatch.h>
|
| 140 |
+
#include <ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h>
|
| 141 |
+
#include <ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h>
|
| 142 |
+
#include <ATen/ops/_nested_tensor_storage_offsets_compositeexplicitautograd_dispatch.h>
|
| 143 |
+
#include <ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h>
|
| 144 |
+
#include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h>
|
| 145 |
+
#include <ATen/ops/_new_zeros_with_same_feature_meta_compositeexplicitautograd_dispatch.h>
|
| 146 |
+
#include <ATen/ops/_nnpack_spatial_convolution_compositeexplicitautograd_dispatch.h>
|
| 147 |
+
#include <ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h>
|
| 148 |
+
#include <ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h>
|
| 149 |
+
#include <ATen/ops/_pdist_forward_compositeexplicitautograd_dispatch.h>
|
| 150 |
+
#include <ATen/ops/_pin_memory_compositeexplicitautograd_dispatch.h>
|
| 151 |
+
#include <ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h>
|
| 152 |
+
#include <ATen/ops/_reshape_copy_compositeexplicitautograd_dispatch.h>
|
| 153 |
+
#include <ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h>
|
| 154 |
+
#include <ATen/ops/_sample_dirichlet_compositeexplicitautograd_dispatch.h>
|
| 155 |
+
#include <ATen/ops/_segment_reduce_backward_compositeexplicitautograd_dispatch.h>
|
| 156 |
+
#include <ATen/ops/_slow_conv2d_backward_compositeexplicitautograd_dispatch.h>
|
| 157 |
+
#include <ATen/ops/_sparse_addmm_compositeexplicitautograd_dispatch.h>
|
| 158 |
+
#include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautograd_dispatch.h>
|
| 159 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_compositeexplicitautograd_dispatch.h>
|
| 160 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h>
|
| 161 |
+
#include <ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h>
|
| 162 |
+
#include <ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h>
|
| 163 |
+
#include <ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h>
|
| 164 |
+
#include <ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h>
|
| 165 |
+
#include <ATen/ops/_sparse_mask_projection_compositeexplicitautograd_dispatch.h>
|
| 166 |
+
#include <ATen/ops/_sparse_softmax_compositeexplicitautograd_dispatch.h>
|
| 167 |
+
#include <ATen/ops/_sparse_softmax_backward_data_compositeexplicitautograd_dispatch.h>
|
| 168 |
+
#include <ATen/ops/_sparse_sparse_matmul_compositeexplicitautograd_dispatch.h>
|
| 169 |
+
#include <ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h>
|
| 170 |
+
#include <ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h>
|
| 171 |
+
#include <ATen/ops/_spdiags_compositeexplicitautograd_dispatch.h>
|
| 172 |
+
#include <ATen/ops/_stack_compositeexplicitautograd_dispatch.h>
|
| 173 |
+
#include <ATen/ops/_standard_gamma_compositeexplicitautograd_dispatch.h>
|
| 174 |
+
#include <ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h>
|
| 175 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_compositeexplicitautograd_dispatch.h>
|
| 176 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_compositeexplicitautograd_dispatch.h>
|
| 177 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h>
|
| 178 |
+
#include <ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h>
|
| 179 |
+
#include <ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h>
|
| 180 |
+
#include <ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h>
|
| 181 |
+
#include <ATen/ops/_test_optional_intlist_compositeexplicitautograd_dispatch.h>
|
| 182 |
+
#include <ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h>
|
| 183 |
+
#include <ATen/ops/_thnn_fused_gru_cell_compositeexplicitautograd_dispatch.h>
|
| 184 |
+
#include <ATen/ops/_thnn_fused_gru_cell_backward_compositeexplicitautograd_dispatch.h>
|
| 185 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_compositeexplicitautograd_dispatch.h>
|
| 186 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_compositeexplicitautograd_dispatch.h>
|
| 187 |
+
#include <ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h>
|
| 188 |
+
#include <ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h>
|
| 189 |
+
#include <ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h>
|
| 190 |
+
#include <ATen/ops/_to_sparse_bsc_compositeexplicitautograd_dispatch.h>
|
| 191 |
+
#include <ATen/ops/_to_sparse_bsr_compositeexplicitautograd_dispatch.h>
|
| 192 |
+
#include <ATen/ops/_to_sparse_csc_compositeexplicitautograd_dispatch.h>
|
| 193 |
+
#include <ATen/ops/_to_sparse_csr_compositeexplicitautograd_dispatch.h>
|
| 194 |
+
#include <ATen/ops/_transform_bias_rescale_qkv_compositeexplicitautograd_dispatch.h>
|
| 195 |
+
#include <ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h>
|
| 196 |
+
#include <ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h>
|
| 197 |
+
#include <ATen/ops/_triton_multi_head_attention_compositeexplicitautograd_dispatch.h>
|
| 198 |
+
#include <ATen/ops/_triton_scaled_dot_attention_compositeexplicitautograd_dispatch.h>
|
| 199 |
+
#include <ATen/ops/_unique_compositeexplicitautograd_dispatch.h>
|
| 200 |
+
#include <ATen/ops/_unique2_compositeexplicitautograd_dispatch.h>
|
| 201 |
+
#include <ATen/ops/_unsafe_index_compositeexplicitautograd_dispatch.h>
|
| 202 |
+
#include <ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h>
|
| 203 |
+
#include <ATen/ops/_unsafe_view_compositeexplicitautograd_dispatch.h>
|
| 204 |
+
#include <ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h>
|
| 205 |
+
#include <ATen/ops/_weight_norm_interface_compositeexplicitautograd_dispatch.h>
|
| 206 |
+
#include <ATen/ops/_weight_norm_interface_backward_compositeexplicitautograd_dispatch.h>
|
| 207 |
+
#include <ATen/ops/abs_compositeexplicitautograd_dispatch.h>
|
| 208 |
+
#include <ATen/ops/add_compositeexplicitautograd_dispatch.h>
|
| 209 |
+
#include <ATen/ops/addr_compositeexplicitautograd_dispatch.h>
|
| 210 |
+
#include <ATen/ops/affine_grid_generator_compositeexplicitautograd_dispatch.h>
|
| 211 |
+
#include <ATen/ops/alias_compositeexplicitautograd_dispatch.h>
|
| 212 |
+
#include <ATen/ops/alias_copy_compositeexplicitautograd_dispatch.h>
|
| 213 |
+
#include <ATen/ops/all_compositeexplicitautograd_dispatch.h>
|
| 214 |
+
#include <ATen/ops/allclose_compositeexplicitautograd_dispatch.h>
|
| 215 |
+
#include <ATen/ops/any_compositeexplicitautograd_dispatch.h>
|
| 216 |
+
#include <ATen/ops/arange_compositeexplicitautograd_dispatch.h>
|
| 217 |
+
#include <ATen/ops/argsort_compositeexplicitautograd_dispatch.h>
|
| 218 |
+
#include <ATen/ops/as_strided_copy_compositeexplicitautograd_dispatch.h>
|
| 219 |
+
#include <ATen/ops/as_strided_scatter_compositeexplicitautograd_dispatch.h>
|
| 220 |
+
#include <ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h>
|
| 221 |
+
#include <ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h>
|
| 222 |
+
#include <ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h>
|
| 223 |
+
#include <ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h>
|
| 224 |
+
#include <ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h>
|
| 225 |
+
#include <ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h>
|
| 226 |
+
#include <ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h>
|
| 227 |
+
#include <ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h>
|
| 228 |
+
#include <ATen/ops/binary_cross_entropy_with_logits_compositeexplicitautograd_dispatch.h>
|
| 229 |
+
#include <ATen/ops/bincount_compositeexplicitautograd_dispatch.h>
|
| 230 |
+
#include <ATen/ops/binomial_compositeexplicitautograd_dispatch.h>
|
| 231 |
+
#include <ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h>
|
| 232 |
+
#include <ATen/ops/bitwise_left_shift_compositeexplicitautograd_dispatch.h>
|
| 233 |
+
#include <ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h>
|
| 234 |
+
#include <ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h>
|
| 235 |
+
#include <ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h>
|
| 236 |
+
#include <ATen/ops/blackman_window_compositeexplicitautograd_dispatch.h>
|
| 237 |
+
#include <ATen/ops/block_diag_compositeexplicitautograd_dispatch.h>
|
| 238 |
+
#include <ATen/ops/bucketize_compositeexplicitautograd_dispatch.h>
|
| 239 |
+
#include <ATen/ops/cauchy_compositeexplicitautograd_dispatch.h>
|
| 240 |
+
#include <ATen/ops/ccol_indices_compositeexplicitautograd_dispatch.h>
|
| 241 |
+
#include <ATen/ops/ccol_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 242 |
+
#include <ATen/ops/celu_compositeexplicitautograd_dispatch.h>
|
| 243 |
+
#include <ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h>
|
| 244 |
+
#include <ATen/ops/cholesky_solve_compositeexplicitautograd_dispatch.h>
|
| 245 |
+
#include <ATen/ops/clone_compositeexplicitautograd_dispatch.h>
|
| 246 |
+
#include <ATen/ops/col_indices_compositeexplicitautograd_dispatch.h>
|
| 247 |
+
#include <ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 248 |
+
#include <ATen/ops/complex_compositeexplicitautograd_dispatch.h>
|
| 249 |
+
#include <ATen/ops/conj_physical_compositeexplicitautograd_dispatch.h>
|
| 250 |
+
#include <ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h>
|
| 251 |
+
#include <ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h>
|
| 252 |
+
#include <ATen/ops/conv_tbc_compositeexplicitautograd_dispatch.h>
|
| 253 |
+
#include <ATen/ops/convolution_compositeexplicitautograd_dispatch.h>
|
| 254 |
+
#include <ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h>
|
| 255 |
+
#include <ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h>
|
| 256 |
+
#include <ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h>
|
| 257 |
+
#include <ATen/ops/copy_compositeexplicitautograd_dispatch.h>
|
| 258 |
+
#include <ATen/ops/copy_sparse_to_sparse_compositeexplicitautograd_dispatch.h>
|
| 259 |
+
#include <ATen/ops/copysign_compositeexplicitautograd_dispatch.h>
|
| 260 |
+
#include <ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h>
|
| 261 |
+
#include <ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h>
|
| 262 |
+
#include <ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 263 |
+
#include <ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h>
|
| 264 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h>
|
| 265 |
+
#include <ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h>
|
| 266 |
+
#include <ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 267 |
+
#include <ATen/ops/cudnn_convolution_compositeexplicitautograd_dispatch.h>
|
| 268 |
+
#include <ATen/ops/cudnn_convolution_add_relu_compositeexplicitautograd_dispatch.h>
|
| 269 |
+
#include <ATen/ops/cudnn_convolution_relu_compositeexplicitautograd_dispatch.h>
|
| 270 |
+
#include <ATen/ops/cudnn_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
| 271 |
+
#include <ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h>
|
| 272 |
+
#include <ATen/ops/cudnn_grid_sampler_backward_compositeexplicitautograd_dispatch.h>
|
| 273 |
+
#include <ATen/ops/cummax_compositeexplicitautograd_dispatch.h>
|
| 274 |
+
#include <ATen/ops/cummin_compositeexplicitautograd_dispatch.h>
|
| 275 |
+
#include <ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h>
|
| 276 |
+
#include <ATen/ops/dequantize_compositeexplicitautograd_dispatch.h>
|
| 277 |
+
#include <ATen/ops/detach_compositeexplicitautograd_dispatch.h>
|
| 278 |
+
#include <ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h>
|
| 279 |
+
#include <ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h>
|
| 280 |
+
#include <ATen/ops/diagonal_compositeexplicitautograd_dispatch.h>
|
| 281 |
+
#include <ATen/ops/diagonal_backward_compositeexplicitautograd_dispatch.h>
|
| 282 |
+
#include <ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h>
|
| 283 |
+
#include <ATen/ops/diagonal_scatter_compositeexplicitautograd_dispatch.h>
|
| 284 |
+
#include <ATen/ops/dist_compositeexplicitautograd_dispatch.h>
|
| 285 |
+
#include <ATen/ops/div_compositeexplicitautograd_dispatch.h>
|
| 286 |
+
#include <ATen/ops/dot_compositeexplicitautograd_dispatch.h>
|
| 287 |
+
#include <ATen/ops/embedding_compositeexplicitautograd_dispatch.h>
|
| 288 |
+
#include <ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h>
|
| 289 |
+
#include <ATen/ops/embedding_renorm_compositeexplicitautograd_dispatch.h>
|
| 290 |
+
#include <ATen/ops/empty_compositeexplicitautograd_dispatch.h>
|
| 291 |
+
#include <ATen/ops/empty_like_compositeexplicitautograd_dispatch.h>
|
| 292 |
+
#include <ATen/ops/empty_permuted_compositeexplicitautograd_dispatch.h>
|
| 293 |
+
#include <ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h>
|
| 294 |
+
#include <ATen/ops/empty_strided_compositeexplicitautograd_dispatch.h>
|
| 295 |
+
#include <ATen/ops/expand_compositeexplicitautograd_dispatch.h>
|
| 296 |
+
#include <ATen/ops/expand_copy_compositeexplicitautograd_dispatch.h>
|
| 297 |
+
#include <ATen/ops/exponential_compositeexplicitautograd_dispatch.h>
|
| 298 |
+
#include <ATen/ops/eye_compositeexplicitautograd_dispatch.h>
|
| 299 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h>
|
| 300 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h>
|
| 301 |
+
#include <ATen/ops/fft_fftfreq_compositeexplicitautograd_dispatch.h>
|
| 302 |
+
#include <ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h>
|
| 303 |
+
#include <ATen/ops/fill_compositeexplicitautograd_dispatch.h>
|
| 304 |
+
#include <ATen/ops/flip_compositeexplicitautograd_dispatch.h>
|
| 305 |
+
#include <ATen/ops/floor_divide_compositeexplicitautograd_dispatch.h>
|
| 306 |
+
#include <ATen/ops/fmod_compositeexplicitautograd_dispatch.h>
|
| 307 |
+
#include <ATen/ops/frexp_compositeexplicitautograd_dispatch.h>
|
| 308 |
+
#include <ATen/ops/from_file_compositeexplicitautograd_dispatch.h>
|
| 309 |
+
#include <ATen/ops/full_compositeexplicitautograd_dispatch.h>
|
| 310 |
+
#include <ATen/ops/full_like_compositeexplicitautograd_dispatch.h>
|
| 311 |
+
#include <ATen/ops/geometric_compositeexplicitautograd_dispatch.h>
|
| 312 |
+
#include <ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h>
|
| 313 |
+
#include <ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h>
|
| 314 |
+
#include <ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h>
|
| 315 |
+
#include <ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h>
|
| 316 |
+
#include <ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h>
|
| 317 |
+
#include <ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h>
|
| 318 |
+
#include <ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h>
|
| 319 |
+
#include <ATen/ops/hann_window_compositeexplicitautograd_dispatch.h>
|
| 320 |
+
#include <ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h>
|
| 321 |
+
#include <ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 322 |
+
#include <ATen/ops/index_fill_compositeexplicitautograd_dispatch.h>
|
| 323 |
+
#include <ATen/ops/index_put_compositeexplicitautograd_dispatch.h>
|
| 324 |
+
#include <ATen/ops/indices_compositeexplicitautograd_dispatch.h>
|
| 325 |
+
#include <ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h>
|
| 326 |
+
#include <ATen/ops/int_repr_compositeexplicitautograd_dispatch.h>
|
| 327 |
+
#include <ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h>
|
| 328 |
+
#include <ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h>
|
| 329 |
+
#include <ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h>
|
| 330 |
+
#include <ATen/ops/isinf_compositeexplicitautograd_dispatch.h>
|
| 331 |
+
#include <ATen/ops/isnan_compositeexplicitautograd_dispatch.h>
|
| 332 |
+
#include <ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h>
|
| 333 |
+
#include <ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h>
|
| 334 |
+
#include <ATen/ops/lift_compositeexplicitautograd_dispatch.h>
|
| 335 |
+
#include <ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h>
|
| 336 |
+
#include <ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h>
|
| 337 |
+
#include <ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h>
|
| 338 |
+
#include <ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h>
|
| 339 |
+
#include <ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h>
|
| 340 |
+
#include <ATen/ops/linear_compositeexplicitautograd_dispatch.h>
|
| 341 |
+
#include <ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h>
|
| 342 |
+
#include <ATen/ops/linspace_compositeexplicitautograd_dispatch.h>
|
| 343 |
+
#include <ATen/ops/log_normal_compositeexplicitautograd_dispatch.h>
|
| 344 |
+
#include <ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h>
|
| 345 |
+
#include <ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h>
|
| 346 |
+
#include <ATen/ops/logical_and_compositeexplicitautograd_dispatch.h>
|
| 347 |
+
#include <ATen/ops/logical_not_compositeexplicitautograd_dispatch.h>
|
| 348 |
+
#include <ATen/ops/logical_or_compositeexplicitautograd_dispatch.h>
|
| 349 |
+
#include <ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h>
|
| 350 |
+
#include <ATen/ops/logspace_compositeexplicitautograd_dispatch.h>
|
| 351 |
+
#include <ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h>
|
| 352 |
+
#include <ATen/ops/lshift_compositeexplicitautograd_dispatch.h>
|
| 353 |
+
#include <ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h>
|
| 354 |
+
#include <ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h>
|
| 355 |
+
#include <ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h>
|
| 356 |
+
#include <ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h>
|
| 357 |
+
#include <ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h>
|
| 358 |
+
#include <ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 359 |
+
#include <ATen/ops/mean_compositeexplicitautograd_dispatch.h>
|
| 360 |
+
#include <ATen/ops/median_compositeexplicitautograd_dispatch.h>
|
| 361 |
+
#include <ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h>
|
| 362 |
+
#include <ATen/ops/miopen_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 363 |
+
#include <ATen/ops/miopen_convolution_compositeexplicitautograd_dispatch.h>
|
| 364 |
+
#include <ATen/ops/miopen_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
| 365 |
+
#include <ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h>
|
| 366 |
+
#include <ATen/ops/miopen_rnn_compositeexplicitautograd_dispatch.h>
|
| 367 |
+
#include <ATen/ops/miopen_rnn_backward_compositeexplicitautograd_dispatch.h>
|
| 368 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 369 |
+
#include <ATen/ops/mkldnn_convolution_compositeexplicitautograd_dispatch.h>
|
| 370 |
+
#include <ATen/ops/mkldnn_linear_compositeexplicitautograd_dispatch.h>
|
| 371 |
+
#include <ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h>
|
| 372 |
+
#include <ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h>
|
| 373 |
+
#include <ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h>
|
| 374 |
+
#include <ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h>
|
| 375 |
+
#include <ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
| 376 |
+
#include <ATen/ops/mkldnn_max_pool3d_compositeexplicitautograd_dispatch.h>
|
| 377 |
+
#include <ATen/ops/mkldnn_max_pool3d_backward_compositeexplicitautograd_dispatch.h>
|
| 378 |
+
#include <ATen/ops/mkldnn_reorder_conv2d_weight_compositeexplicitautograd_dispatch.h>
|
| 379 |
+
#include <ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h>
|
| 380 |
+
#include <ATen/ops/mkldnn_rnn_layer_compositeexplicitautograd_dispatch.h>
|
| 381 |
+
#include <ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h>
|
| 382 |
+
#include <ATen/ops/mode_compositeexplicitautograd_dispatch.h>
|
| 383 |
+
#include <ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h>
|
| 384 |
+
#include <ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h>
|
| 385 |
+
#include <ATen/ops/mul_compositeexplicitautograd_dispatch.h>
|
| 386 |
+
#include <ATen/ops/mv_compositeexplicitautograd_dispatch.h>
|
| 387 |
+
#include <ATen/ops/mvlgamma_compositeexplicitautograd_dispatch.h>
|
| 388 |
+
#include <ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h>
|
| 389 |
+
#include <ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h>
|
| 390 |
+
#include <ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 391 |
+
#include <ATen/ops/native_dropout_compositeexplicitautograd_dispatch.h>
|
| 392 |
+
#include <ATen/ops/native_dropout_backward_compositeexplicitautograd_dispatch.h>
|
| 393 |
+
#include <ATen/ops/native_group_norm_compositeexplicitautograd_dispatch.h>
|
| 394 |
+
#include <ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 395 |
+
#include <ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h>
|
| 396 |
+
#include <ATen/ops/native_layer_norm_backward_compositeexplicitautograd_dispatch.h>
|
| 397 |
+
#include <ATen/ops/native_norm_compositeexplicitautograd_dispatch.h>
|
| 398 |
+
#include <ATen/ops/new_empty_compositeexplicitautograd_dispatch.h>
|
| 399 |
+
#include <ATen/ops/new_empty_strided_compositeexplicitautograd_dispatch.h>
|
| 400 |
+
#include <ATen/ops/new_full_compositeexplicitautograd_dispatch.h>
|
| 401 |
+
#include <ATen/ops/new_ones_compositeexplicitautograd_dispatch.h>
|
| 402 |
+
#include <ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h>
|
| 403 |
+
#include <ATen/ops/norm_compositeexplicitautograd_dispatch.h>
|
| 404 |
+
#include <ATen/ops/normal_compositeexplicitautograd_dispatch.h>
|
| 405 |
+
#include <ATen/ops/ones_compositeexplicitautograd_dispatch.h>
|
| 406 |
+
#include <ATen/ops/ones_like_compositeexplicitautograd_dispatch.h>
|
| 407 |
+
#include <ATen/ops/permute_compositeexplicitautograd_dispatch.h>
|
| 408 |
+
#include <ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h>
|
| 409 |
+
#include <ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h>
|
| 410 |
+
#include <ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h>
|
| 411 |
+
#include <ATen/ops/poisson_compositeexplicitautograd_dispatch.h>
|
| 412 |
+
#include <ATen/ops/polar_compositeexplicitautograd_dispatch.h>
|
| 413 |
+
#include <ATen/ops/polygamma_compositeexplicitautograd_dispatch.h>
|
| 414 |
+
#include <ATen/ops/prod_compositeexplicitautograd_dispatch.h>
|
| 415 |
+
#include <ATen/ops/put_compositeexplicitautograd_dispatch.h>
|
| 416 |
+
#include <ATen/ops/q_per_channel_scales_compositeexplicitautograd_dispatch.h>
|
| 417 |
+
#include <ATen/ops/q_per_channel_zero_points_compositeexplicitautograd_dispatch.h>
|
| 418 |
+
#include <ATen/ops/quantize_per_channel_compositeexplicitautograd_dispatch.h>
|
| 419 |
+
#include <ATen/ops/quantize_per_tensor_compositeexplicitautograd_dispatch.h>
|
| 420 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h>
|
| 421 |
+
#include <ATen/ops/quantized_batch_norm_compositeexplicitautograd_dispatch.h>
|
| 422 |
+
#include <ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h>
|
| 423 |
+
#include <ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h>
|
| 424 |
+
#include <ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h>
|
| 425 |
+
#include <ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h>
|
| 426 |
+
#include <ATen/ops/rand_compositeexplicitautograd_dispatch.h>
|
| 427 |
+
#include <ATen/ops/rand_like_compositeexplicitautograd_dispatch.h>
|
| 428 |
+
#include <ATen/ops/randint_compositeexplicitautograd_dispatch.h>
|
| 429 |
+
#include <ATen/ops/randint_like_compositeexplicitautograd_dispatch.h>
|
| 430 |
+
#include <ATen/ops/randn_compositeexplicitautograd_dispatch.h>
|
| 431 |
+
#include <ATen/ops/randn_like_compositeexplicitautograd_dispatch.h>
|
| 432 |
+
#include <ATen/ops/random_compositeexplicitautograd_dispatch.h>
|
| 433 |
+
#include <ATen/ops/randperm_compositeexplicitautograd_dispatch.h>
|
| 434 |
+
#include <ATen/ops/range_compositeexplicitautograd_dispatch.h>
|
| 435 |
+
#include <ATen/ops/relu_compositeexplicitautograd_dispatch.h>
|
| 436 |
+
#include <ATen/ops/remainder_compositeexplicitautograd_dispatch.h>
|
| 437 |
+
#include <ATen/ops/repeat_compositeexplicitautograd_dispatch.h>
|
| 438 |
+
#include <ATen/ops/repeat_interleave_compositeexplicitautograd_dispatch.h>
|
| 439 |
+
#include <ATen/ops/resize_compositeexplicitautograd_dispatch.h>
|
| 440 |
+
#include <ATen/ops/resize_as_compositeexplicitautograd_dispatch.h>
|
| 441 |
+
#include <ATen/ops/resize_as_sparse_compositeexplicitautograd_dispatch.h>
|
| 442 |
+
#include <ATen/ops/roll_compositeexplicitautograd_dispatch.h>
|
| 443 |
+
#include <ATen/ops/rot90_compositeexplicitautograd_dispatch.h>
|
| 444 |
+
#include <ATen/ops/row_indices_compositeexplicitautograd_dispatch.h>
|
| 445 |
+
#include <ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h>
|
| 446 |
+
#include <ATen/ops/rrelu_with_noise_backward_compositeexplicitautograd_dispatch.h>
|
| 447 |
+
#include <ATen/ops/rshift_compositeexplicitautograd_dispatch.h>
|
| 448 |
+
#include <ATen/ops/rsub_compositeexplicitautograd_dispatch.h>
|
| 449 |
+
#include <ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h>
|
| 450 |
+
#include <ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h>
|
| 451 |
+
#include <ATen/ops/select_compositeexplicitautograd_dispatch.h>
|
| 452 |
+
#include <ATen/ops/select_backward_compositeexplicitautograd_dispatch.h>
|
| 453 |
+
#include <ATen/ops/select_copy_compositeexplicitautograd_dispatch.h>
|
| 454 |
+
#include <ATen/ops/select_scatter_compositeexplicitautograd_dispatch.h>
|
| 455 |
+
#include <ATen/ops/set_compositeexplicitautograd_dispatch.h>
|
| 456 |
+
#include <ATen/ops/slice_compositeexplicitautograd_dispatch.h>
|
| 457 |
+
#include <ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h>
|
| 458 |
+
#include <ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h>
|
| 459 |
+
#include <ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h>
|
| 460 |
+
#include <ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h>
|
| 461 |
+
#include <ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h>
|
| 462 |
+
#include <ATen/ops/smooth_l1_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 463 |
+
#include <ATen/ops/soft_margin_loss_compositeexplicitautograd_dispatch.h>
|
| 464 |
+
#include <ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h>
|
| 465 |
+
#include <ATen/ops/softmax_compositeexplicitautograd_dispatch.h>
|
| 466 |
+
#include <ATen/ops/sort_compositeexplicitautograd_dispatch.h>
|
| 467 |
+
#include <ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h>
|
| 468 |
+
#include <ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h>
|
| 469 |
+
#include <ATen/ops/sparse_mask_compositeexplicitautograd_dispatch.h>
|
| 470 |
+
#include <ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h>
|
| 471 |
+
#include <ATen/ops/sparse_resize_and_clear_compositeexplicitautograd_dispatch.h>
|
| 472 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
|
| 473 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
|
| 474 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
|
| 475 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
|
| 476 |
+
#include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h>
|
| 477 |
+
#include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautograd_dispatch.h>
|
| 478 |
+
#include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautograd_dispatch.h>
|
| 479 |
+
#include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h>
|
| 480 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
|
| 481 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
|
| 482 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
|
| 483 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
|
| 484 |
+
#include <ATen/ops/special_xlog1py_compositeexplicitautograd_dispatch.h>
|
| 485 |
+
#include <ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h>
|
| 486 |
+
#include <ATen/ops/split_compositeexplicitautograd_dispatch.h>
|
| 487 |
+
#include <ATen/ops/split_copy_compositeexplicitautograd_dispatch.h>
|
| 488 |
+
#include <ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h>
|
| 489 |
+
#include <ATen/ops/split_with_sizes_copy_compositeexplicitautograd_dispatch.h>
|
| 490 |
+
#include <ATen/ops/squeeze_compositeexplicitautograd_dispatch.h>
|
| 491 |
+
#include <ATen/ops/squeeze_copy_compositeexplicitautograd_dispatch.h>
|
| 492 |
+
#include <ATen/ops/stack_compositeexplicitautograd_dispatch.h>
|
| 493 |
+
#include <ATen/ops/std_mean_compositeexplicitautograd_dispatch.h>
|
| 494 |
+
#include <ATen/ops/sub_compositeexplicitautograd_dispatch.h>
|
| 495 |
+
#include <ATen/ops/sum_compositeexplicitautograd_dispatch.h>
|
| 496 |
+
#include <ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h>
|
| 497 |
+
#include <ATen/ops/sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
|
| 498 |
+
#include <ATen/ops/t_compositeexplicitautograd_dispatch.h>
|
| 499 |
+
#include <ATen/ops/t_copy_compositeexplicitautograd_dispatch.h>
|
| 500 |
+
#include <ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h>
|
| 501 |
+
#include <ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h>
|
| 502 |
+
#include <ATen/ops/trace_compositeexplicitautograd_dispatch.h>
|
| 503 |
+
#include <ATen/ops/transpose_compositeexplicitautograd_dispatch.h>
|
| 504 |
+
#include <ATen/ops/transpose_copy_compositeexplicitautograd_dispatch.h>
|
| 505 |
+
#include <ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h>
|
| 506 |
+
#include <ATen/ops/triu_indices_compositeexplicitautograd_dispatch.h>
|
| 507 |
+
#include <ATen/ops/unbind_compositeexplicitautograd_dispatch.h>
|
| 508 |
+
#include <ATen/ops/unbind_copy_compositeexplicitautograd_dispatch.h>
|
| 509 |
+
#include <ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h>
|
| 510 |
+
#include <ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h>
|
| 511 |
+
#include <ATen/ops/uniform_compositeexplicitautograd_dispatch.h>
|
| 512 |
+
#include <ATen/ops/unique_consecutive_compositeexplicitautograd_dispatch.h>
|
| 513 |
+
#include <ATen/ops/unique_dim_compositeexplicitautograd_dispatch.h>
|
| 514 |
+
#include <ATen/ops/unique_dim_consecutive_compositeexplicitautograd_dispatch.h>
|
| 515 |
+
#include <ATen/ops/unsafe_split_compositeexplicitautograd_dispatch.h>
|
| 516 |
+
#include <ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h>
|
| 517 |
+
#include <ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h>
|
| 518 |
+
#include <ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h>
|
| 519 |
+
#include <ATen/ops/values_compositeexplicitautograd_dispatch.h>
|
| 520 |
+
#include <ATen/ops/values_copy_compositeexplicitautograd_dispatch.h>
|
| 521 |
+
#include <ATen/ops/var_mean_compositeexplicitautograd_dispatch.h>
|
| 522 |
+
#include <ATen/ops/vdot_compositeexplicitautograd_dispatch.h>
|
| 523 |
+
#include <ATen/ops/view_compositeexplicitautograd_dispatch.h>
|
| 524 |
+
#include <ATen/ops/view_as_complex_copy_compositeexplicitautograd_dispatch.h>
|
| 525 |
+
#include <ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h>
|
| 526 |
+
#include <ATen/ops/view_copy_compositeexplicitautograd_dispatch.h>
|
| 527 |
+
#include <ATen/ops/xlogy_compositeexplicitautograd_dispatch.h>
|
| 528 |
+
#include <ATen/ops/zero_compositeexplicitautograd_dispatch.h>
|
| 529 |
+
#include <ATen/ops/zeros_compositeexplicitautograd_dispatch.h>
|
| 530 |
+
#include <ATen/ops/zeros_like_compositeexplicitautograd_dispatch.h>
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/TensorBody.h>
|
| 2 |
+
|
| 3 |
+
// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
|
| 4 |
+
// Code introduced to avoid cyclic dependency in static dispatch is no longer
|
| 5 |
+
// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
|
| 6 |
+
// to Operators.cpp for supporting multiple backends with multiple kernels.
|
| 7 |
+
//
|
| 8 |
+
// Note [Avoiding Include Cycles In Static Dispatch]
|
| 9 |
+
// In order to avoid #include cycles in the static dispatch build, we've carefully split out
|
| 10 |
+
// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
|
| 11 |
+
//
|
| 12 |
+
// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
|
| 13 |
+
// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
|
| 14 |
+
// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
|
| 15 |
+
// directly inlined into TensorBody.h.
|
| 16 |
+
// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
|
| 17 |
+
// which include functions that have defaultable optional<Tensor> arguments.
|
| 18 |
+
// That requires knowing the full Tensor class definition.
|
| 19 |
+
//
|
| 20 |
+
// We break the cycle by doing the following:
|
| 21 |
+
// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
|
| 22 |
+
// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
|
| 23 |
+
// - CPUFunctions_inl.h includes everything else
|
| 24 |
+
// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
|
| 25 |
+
// and then it includes CPUFunctions_inl.h.
|
| 26 |
+
// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
|
| 27 |
+
// - This also means that static dispatch build, CPUFunctions.h only needs to
|
| 28 |
+
// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
|
| 29 |
+
#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h>
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/IListRef.h>
|
| 4 |
+
#include <ATen/core/Tensor.h>
|
| 5 |
+
#include <c10/core/DeviceGuard.h>
|
| 6 |
+
#include <c10/core/ScalarType.h> // TensorList whyyyyy
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
// Are you here because you're wondering why DeviceGuard(tensor) no
|
| 11 |
+
// longer works? For code organization reasons, we have temporarily(?)
|
| 12 |
+
// removed this constructor from DeviceGuard. The new way to
|
| 13 |
+
// spell it is:
|
| 14 |
+
//
|
| 15 |
+
// OptionalDeviceGuard guard(device_of(tensor));
|
| 16 |
+
|
| 17 |
+
/// Return the Device of a Tensor, if the Tensor is defined.
|
| 18 |
+
inline c10::optional<Device> device_of(const Tensor& t) {
|
| 19 |
+
if (t.defined()) {
|
| 20 |
+
return c10::make_optional(t.device());
|
| 21 |
+
} else {
|
| 22 |
+
return c10::nullopt;
|
| 23 |
+
}
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
|
| 27 |
+
return t.has_value() ? device_of(t.value()) : c10::nullopt;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
/// Return the Device of a TensorList, if the list is non-empty and
|
| 31 |
+
/// the first Tensor is defined. (This function implicitly assumes
|
| 32 |
+
/// that all tensors in the list have the same device.)
|
| 33 |
+
inline c10::optional<Device> device_of(ITensorListRef t) {
|
| 34 |
+
if (!t.empty()) {
|
| 35 |
+
return device_of(t.front());
|
| 36 |
+
} else {
|
| 37 |
+
return c10::nullopt;
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h
ADDED
|
@@ -0,0 +1,808 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/DeprecatedTypeProperties.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
#include <c10/util/Exception.h>
|
| 6 |
+
#include <c10/util/Half.h>
|
| 7 |
+
#include <c10/util/Metaprogramming.h>
|
| 8 |
+
#include <c10/util/complex.h>
|
| 9 |
+
#include <c10/util/string_view.h>
|
| 10 |
+
|
| 11 |
+
#ifdef __CUDACC__
|
| 12 |
+
#include <cuda.h> // For CUDA_VERSION
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
#ifdef TEMPLATE_SELECTIVE_BUILD
|
| 16 |
+
#include <ATen/selected_mobile_ops.h>
|
| 17 |
+
#else
|
| 18 |
+
namespace at {
|
| 19 |
+
/**
|
| 20 |
+
* The method should_include_kernel_dtype() returns true/false
|
| 21 |
+
* based on whether the switching code for a specific dtype should be
|
| 22 |
+
* included based on build time constants generated from tracing model
|
| 23 |
+
* execution. This method will be implmeneted via code-generation and
|
| 24 |
+
* included in this file when code-gen is ready.
|
| 25 |
+
*/
|
| 26 |
+
inline constexpr bool should_include_kernel_dtype(
|
| 27 |
+
const char* /*kernel_tag_str*/,
|
| 28 |
+
at::ScalarType /*scalar_type*/
|
| 29 |
+
) {
|
| 30 |
+
return true;
|
| 31 |
+
}
|
| 32 |
+
} // namespace at
|
| 33 |
+
#endif
|
| 34 |
+
|
| 35 |
+
/**
|
| 36 |
+
* In the Facebook internal build (using BUCK), this macro is enabled by
|
| 37 |
+
* passing in -c pt.enable_record_kernel_dtype=1 when building the tracer
|
| 38 |
+
* binary.
|
| 39 |
+
*/
|
| 40 |
+
#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE
|
| 41 |
+
namespace at {
|
| 42 |
+
namespace detail {
|
| 43 |
+
TORCH_API void record_kernel_function_dtype(std::string name);
|
| 44 |
+
}
|
| 45 |
+
} // namespace at
|
| 46 |
+
|
| 47 |
+
#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) \
|
| 48 |
+
at::detail::record_kernel_function_dtype( \
|
| 49 |
+
std::string(NAME) + "$" + toString(enum_type));
|
| 50 |
+
#else
|
| 51 |
+
#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type)
|
| 52 |
+
#endif
|
| 53 |
+
|
| 54 |
+
#define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) \
|
| 55 |
+
do { \
|
| 56 |
+
if constexpr (!at::should_include_kernel_dtype( \
|
| 57 |
+
at_dispatch_name, enum_type)) { \
|
| 58 |
+
AT_ERROR( \
|
| 59 |
+
"dtype '", \
|
| 60 |
+
toString(enum_type), \
|
| 61 |
+
"' not selected for kernel tag ", \
|
| 62 |
+
at_dispatch_name); \
|
| 63 |
+
} \
|
| 64 |
+
} while (0)
|
| 65 |
+
|
| 66 |
+
#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
|
| 67 |
+
case enum_type: { \
|
| 68 |
+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
|
| 69 |
+
using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
|
| 70 |
+
return __VA_ARGS__(); \
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
#define AT_DISPATCH_CASE(enum_type, ...) \
|
| 74 |
+
AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
|
| 75 |
+
|
| 76 |
+
#define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \
|
| 77 |
+
case enum_type: { \
|
| 78 |
+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
|
| 79 |
+
using scalar_t = scalar_type; \
|
| 80 |
+
using underlying_t C10_UNUSED = typename scalar_t::underlying; \
|
| 81 |
+
const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
|
| 82 |
+
const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
|
| 83 |
+
return __VA_ARGS__(); \
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
#define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 87 |
+
enum_type, scalar_type, bitwidth, qmin, qmax, ...) \
|
| 88 |
+
case enum_type: { \
|
| 89 |
+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
|
| 90 |
+
using scalar_t = scalar_type; \
|
| 91 |
+
using underlying_t C10_UNUSED = typename scalar_t::underlying; \
|
| 92 |
+
const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
|
| 93 |
+
const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
|
| 94 |
+
C10_UNUSED int bit_width = bitwidth; \
|
| 95 |
+
C10_UNUSED int64_t quant_min = qmin; \
|
| 96 |
+
C10_UNUSED int64_t quant_max = qmax; \
|
| 97 |
+
return __VA_ARGS__(); \
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
namespace detail {
|
| 101 |
+
|
| 102 |
+
inline at::ScalarType scalar_type(at::ScalarType s) {
|
| 103 |
+
return s;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
C10_DEPRECATED_MESSAGE(
|
| 107 |
+
"passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, "
|
| 108 |
+
"pass an at::ScalarType instead")
|
| 109 |
+
inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) {
|
| 110 |
+
return t.scalarType();
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
C10_DEPRECATED_MESSAGE(
|
| 114 |
+
"AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, "
|
| 115 |
+
"use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")
|
| 116 |
+
inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
|
| 117 |
+
|
| 118 |
+
C10_DEPRECATED_MESSAGE(
|
| 119 |
+
"AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, "
|
| 120 |
+
"use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...) "
|
| 121 |
+
"instead")
|
| 122 |
+
inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
|
| 123 |
+
|
| 124 |
+
} // namespace detail
|
| 125 |
+
|
| 126 |
+
// The AT_DISPATCH_* family of macros provides the ability to
|
| 127 |
+
// conveniently generate specializations of a kernel over all of the
|
| 128 |
+
// dtypes we care about in PyTorch. We call it "dispatch" because
|
| 129 |
+
// we are "dispatching" to the correct, dtype-specific kernel.
|
| 130 |
+
//
|
| 131 |
+
// A standard usage looks like:
|
| 132 |
+
//
|
| 133 |
+
// AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] {
|
| 134 |
+
// // Your code here, with 'scalar_t' now defined to
|
| 135 |
+
// // be the dtype in question
|
| 136 |
+
// });
|
| 137 |
+
//
|
| 138 |
+
// There are many variations of this macro, so it's important to
|
| 139 |
+
// understand exactly /which/ dtypes you want to get instantiated, as
|
| 140 |
+
// well as what the "default" set is.
|
| 141 |
+
//
|
| 142 |
+
// The default set of dtypes that are instantiated (e.g., by
|
| 143 |
+
// AT_DISPATCH_ALL_TYPES) are floating point types (float, double),
|
| 144 |
+
// and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t),
|
| 145 |
+
// but NOT booleans (bool), half-precision floats (Half) or
|
| 146 |
+
// complex number (c10::complex<float>, c10::complex<double>).
|
| 147 |
+
// This "cut" is somewhat historical (the default types are the
|
| 148 |
+
// ones that TH historically supported), but it also reflects the
|
| 149 |
+
// fact that the non-default types are "poorly" behaved (booleans
|
| 150 |
+
// are NOT integers mod 2, half precision operations ~essentially
|
| 151 |
+
// don't exist on CPU, complex numbers are an experimental application).
|
| 152 |
+
//
|
| 153 |
+
// Here are the questions you should generally ask to decide which
|
| 154 |
+
// dispatch you want:
|
| 155 |
+
//
|
| 156 |
+
// 1. Is this an integral or floating point specific operation?
|
| 157 |
+
// (If so, you'll want one of the FLOATING or INTEGRAL macros.)
|
| 158 |
+
//
|
| 159 |
+
// 2. Should half be supported? (If you're on CPU, the answer is almost
|
| 160 |
+
// definitely no. If you do want support, use one of the AND_HALF
|
| 161 |
+
// macros)
|
| 162 |
+
//
|
| 163 |
+
// Much rarer situations:
|
| 164 |
+
//
|
| 165 |
+
// 3. Should bool be supported? (You often have to write your kernel
|
| 166 |
+
// differently if arithmetic operations are involved.) If so,
|
| 167 |
+
// Use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool
|
| 168 |
+
//
|
| 169 |
+
// 4. Should complex be supported? The answer is almost always no,
|
| 170 |
+
// unless you are working on "generic" code that should work on
|
| 171 |
+
// all dtypes.
|
| 172 |
+
//
|
| 173 |
+
// Parameters:
|
| 174 |
+
// -----------
|
| 175 |
+
//
|
| 176 |
+
// 1. The NAME argument is a "tag" that is used to trace and then
|
| 177 |
+
// conditionally compile fragments of the case statements such
|
| 178 |
+
// that the kernel functions are specialized only for the dtypes
|
| 179 |
+
// that are needed. The NAME parameter *must* be a build time
|
| 180 |
+
// const char* (can't be std::string, etc...)
|
| 181 |
+
//
|
| 182 |
+
// Please ensure that the NAME is unique for every implementation
|
| 183 |
+
// or you run the risk of over-including code for the kernel
|
| 184 |
+
// functions. There is no risk of missing out on any code, so
|
| 185 |
+
// it's mostly a risk of a Type-2 error, and not a Type-1 error.
|
| 186 |
+
//
|
| 187 |
+
// Switch-like syntax:
|
| 188 |
+
// -------------------
|
| 189 |
+
// There is also a switch-case like syntax which is useful if a kernel
|
| 190 |
+
// needs to be specialized for particular scalar types
|
| 191 |
+
//
|
| 192 |
+
// AT_DISPATCH_SWITCH(self.scalar_type(), "op_name",
|
| 193 |
+
// AT_DISPATCH_CASE_INTEGRAL_TYPES([&] {
|
| 194 |
+
// op_integral<scalar_t>(iter);
|
| 195 |
+
// })
|
| 196 |
+
// AT_DISPATCH_CASE_FLOATING_TYPES([&] {
|
| 197 |
+
// op_floating<scalar_t>(iter);
|
| 198 |
+
// })
|
| 199 |
+
// AT_DISPATCH_CASE(kBool, [&] {
|
| 200 |
+
// op_bool(iter);
|
| 201 |
+
// })
|
| 202 |
+
// );
|
| 203 |
+
//
|
| 204 |
+
// For each AT_DISPATCH_FOO macro, there is a corresponding
|
| 205 |
+
// AT_DISPATCH_CASE_FOO macro which can be used inside of an
|
| 206 |
+
// AT_DISPATCH_SWITCH block.
|
| 207 |
+
|
| 208 |
+
// NB: the the_type variable is not used, but we have kept it for
|
| 209 |
+
// backwards compatibility. It's probably not used by anyone though;
|
| 210 |
+
// but we're just being safe (and it doesn't hurt.) Note we must
|
| 211 |
+
// use it to shut up warnings about unused store.
|
| 212 |
+
|
| 213 |
+
#define AT_DISPATCH_SWITCH(TYPE, NAME, ...) \
|
| 214 |
+
[&] { \
|
| 215 |
+
const auto& the_type = TYPE; \
|
| 216 |
+
constexpr const char* at_dispatch_name = NAME; \
|
| 217 |
+
/* don't use TYPE again in case it is an expensive or side-effect op */ \
|
| 218 |
+
at::ScalarType _st = ::detail::scalar_type(the_type); \
|
| 219 |
+
RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); \
|
| 220 |
+
switch (_st) { \
|
| 221 |
+
__VA_ARGS__ \
|
| 222 |
+
default: \
|
| 223 |
+
AT_ERROR( \
|
| 224 |
+
'"', \
|
| 225 |
+
at_dispatch_name, \
|
| 226 |
+
"\" not implemented for '", \
|
| 227 |
+
toString(_st), \
|
| 228 |
+
"'"); \
|
| 229 |
+
} \
|
| 230 |
+
}()
|
| 231 |
+
|
| 232 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES(...) \
|
| 233 |
+
AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
|
| 234 |
+
AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)
|
| 235 |
+
|
| 236 |
+
#define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
|
| 237 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
|
| 238 |
+
|
| 239 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) \
|
| 240 |
+
AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
|
| 241 |
+
AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
|
| 242 |
+
AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)
|
| 243 |
+
|
| 244 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \
|
| 245 |
+
AT_DISPATCH_SWITCH( \
|
| 246 |
+
TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__))
|
| 247 |
+
|
| 248 |
+
#define AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(...) \
|
| 249 |
+
AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
|
| 250 |
+
AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
|
| 251 |
+
|
| 252 |
+
#define AT_DISPATCH_REDUCED_FLOATING_TYPES(TYPE, NAME, ...) \
|
| 253 |
+
AT_DISPATCH_SWITCH( \
|
| 254 |
+
TYPE, NAME, AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__))
|
| 255 |
+
|
| 256 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) \
|
| 257 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 258 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 259 |
+
|
| 260 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 261 |
+
AT_DISPATCH_SWITCH( \
|
| 262 |
+
TYPE, \
|
| 263 |
+
NAME, \
|
| 264 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 265 |
+
|
| 266 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
|
| 267 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 268 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 269 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 270 |
+
|
| 271 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND2( \
|
| 272 |
+
SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 273 |
+
AT_DISPATCH_SWITCH( \
|
| 274 |
+
TYPE, \
|
| 275 |
+
NAME, \
|
| 276 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND2( \
|
| 277 |
+
SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 278 |
+
|
| 279 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
|
| 280 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 281 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 282 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 283 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 284 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 285 |
+
|
| 286 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND3( \
|
| 287 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 288 |
+
AT_DISPATCH_SWITCH( \
|
| 289 |
+
TYPE, \
|
| 290 |
+
NAME, \
|
| 291 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
|
| 292 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 293 |
+
|
| 294 |
+
#define AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
|
| 295 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
|
| 296 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 297 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 298 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 299 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 300 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
|
| 301 |
+
|
| 302 |
+
#define AT_DISPATCH_FLOATING_TYPES_AND4( \
|
| 303 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
|
| 304 |
+
AT_DISPATCH_SWITCH( \
|
| 305 |
+
TYPE, \
|
| 306 |
+
NAME, \
|
| 307 |
+
AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
|
| 308 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
|
| 309 |
+
|
| 310 |
+
#define AT_DISPATCH_CASE_COMPLEX_TYPES(...) \
|
| 311 |
+
AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) \
|
| 312 |
+
AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__)
|
| 313 |
+
|
| 314 |
+
#define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) \
|
| 315 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__))
|
| 316 |
+
|
| 317 |
+
#define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) \
|
| 318 |
+
AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) \
|
| 319 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 320 |
+
|
| 321 |
+
#define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 322 |
+
AT_DISPATCH_SWITCH( \
|
| 323 |
+
TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 324 |
+
|
| 325 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) \
|
| 326 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
|
| 327 |
+
AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)
|
| 328 |
+
|
| 329 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) \
|
| 330 |
+
AT_DISPATCH_SWITCH( \
|
| 331 |
+
TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__))
|
| 332 |
+
|
| 333 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) \
|
| 334 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 335 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 336 |
+
|
| 337 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( \
|
| 338 |
+
SCALARTYPE, TYPE, NAME, ...) \
|
| 339 |
+
AT_DISPATCH_SWITCH( \
|
| 340 |
+
TYPE, \
|
| 341 |
+
NAME, \
|
| 342 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( \
|
| 343 |
+
SCALARTYPE, __VA_ARGS__))
|
| 344 |
+
|
| 345 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
|
| 346 |
+
SCALARTYPE1, SCALARTYPE2, ...) \
|
| 347 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 348 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 349 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 350 |
+
|
| 351 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( \
|
| 352 |
+
SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 353 |
+
AT_DISPATCH_SWITCH( \
|
| 354 |
+
TYPE, \
|
| 355 |
+
NAME, \
|
| 356 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
|
| 357 |
+
SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 358 |
+
|
| 359 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
|
| 360 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 361 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 362 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 363 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 364 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 365 |
+
|
| 366 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( \
|
| 367 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 368 |
+
AT_DISPATCH_SWITCH( \
|
| 369 |
+
TYPE, \
|
| 370 |
+
NAME, \
|
| 371 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
|
| 372 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 373 |
+
|
| 374 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
|
| 375 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
|
| 376 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 377 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 378 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 379 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 380 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
|
| 381 |
+
|
| 382 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND4( \
|
| 383 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
|
| 384 |
+
AT_DISPATCH_SWITCH( \
|
| 385 |
+
TYPE, \
|
| 386 |
+
NAME, \
|
| 387 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
|
| 388 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
|
| 389 |
+
|
| 390 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
|
| 391 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
|
| 392 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 393 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 394 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 395 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 396 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 397 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)
|
| 398 |
+
|
| 399 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND5( \
|
| 400 |
+
SCALARTYPE1, \
|
| 401 |
+
SCALARTYPE2, \
|
| 402 |
+
SCALARTYPE3, \
|
| 403 |
+
SCALARTYPE4, \
|
| 404 |
+
SCALARTYPE5, \
|
| 405 |
+
TYPE, \
|
| 406 |
+
NAME, \
|
| 407 |
+
...) \
|
| 408 |
+
AT_DISPATCH_SWITCH( \
|
| 409 |
+
TYPE, \
|
| 410 |
+
NAME, \
|
| 411 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
|
| 412 |
+
SCALARTYPE1, \
|
| 413 |
+
SCALARTYPE2, \
|
| 414 |
+
SCALARTYPE3, \
|
| 415 |
+
SCALARTYPE4, \
|
| 416 |
+
SCALARTYPE5, \
|
| 417 |
+
__VA_ARGS__))
|
| 418 |
+
|
| 419 |
+
#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
|
| 420 |
+
SCALARTYPE1, \
|
| 421 |
+
SCALARTYPE2, \
|
| 422 |
+
SCALARTYPE3, \
|
| 423 |
+
SCALARTYPE4, \
|
| 424 |
+
SCALARTYPE5, \
|
| 425 |
+
SCALARTYPE6, \
|
| 426 |
+
...) \
|
| 427 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
|
| 428 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 429 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 430 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 431 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 432 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 433 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)
|
| 434 |
+
|
| 435 |
+
#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND6( \
|
| 436 |
+
SCALARTYPE1, \
|
| 437 |
+
SCALARTYPE2, \
|
| 438 |
+
SCALARTYPE3, \
|
| 439 |
+
SCALARTYPE4, \
|
| 440 |
+
SCALARTYPE5, \
|
| 441 |
+
SCALARTYPE6, \
|
| 442 |
+
TYPE, \
|
| 443 |
+
NAME, \
|
| 444 |
+
...) \
|
| 445 |
+
AT_DISPATCH_SWITCH( \
|
| 446 |
+
TYPE, \
|
| 447 |
+
NAME, \
|
| 448 |
+
AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
|
| 449 |
+
SCALARTYPE1, \
|
| 450 |
+
SCALARTYPE2, \
|
| 451 |
+
SCALARTYPE3, \
|
| 452 |
+
SCALARTYPE4, \
|
| 453 |
+
SCALARTYPE5, \
|
| 454 |
+
SCALARTYPE6, \
|
| 455 |
+
__VA_ARGS__))
|
| 456 |
+
|
| 457 |
+
#define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) \
|
| 458 |
+
AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
|
| 459 |
+
AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
|
| 460 |
+
AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
|
| 461 |
+
AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
|
| 462 |
+
AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__)
|
| 463 |
+
|
| 464 |
+
#define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
|
| 465 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))
|
| 466 |
+
|
| 467 |
+
#define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) \
|
| 468 |
+
AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
|
| 469 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 470 |
+
|
| 471 |
+
#define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 472 |
+
AT_DISPATCH_SWITCH( \
|
| 473 |
+
TYPE, \
|
| 474 |
+
NAME, \
|
| 475 |
+
AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 476 |
+
|
| 477 |
+
#define AT_DISPATCH_CASE_ALL_TYPES(...) \
|
| 478 |
+
AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
|
| 479 |
+
AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)
|
| 480 |
+
|
| 481 |
+
#define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \
|
| 482 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__))
|
| 483 |
+
|
| 484 |
+
#define AT_DISPATCH_CASE_QINT_TYPES(...) \
|
| 485 |
+
AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
|
| 486 |
+
AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) \
|
| 487 |
+
AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__)
|
| 488 |
+
|
| 489 |
+
#define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) \
|
| 490 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__))
|
| 491 |
+
|
| 492 |
+
#define AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, ...) \
|
| 493 |
+
AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__) \
|
| 494 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 495 |
+
|
| 496 |
+
#define AT_DISPATCH_QINT_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 497 |
+
AT_DISPATCH_SWITCH( \
|
| 498 |
+
TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 499 |
+
|
| 500 |
+
#define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) \
|
| 501 |
+
AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
|
| 502 |
+
AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__)
|
| 503 |
+
|
| 504 |
+
#define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) \
|
| 505 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__))
|
| 506 |
+
|
| 507 |
+
#define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) \
|
| 508 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 509 |
+
at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) \
|
| 510 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 511 |
+
at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) \
|
| 512 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 513 |
+
at::kQInt32, \
|
| 514 |
+
at::qint32, \
|
| 515 |
+
CHAR_BIT * sizeof(int), \
|
| 516 |
+
INT_MIN, \
|
| 517 |
+
INT_MAX, \
|
| 518 |
+
__VA_ARGS__) \
|
| 519 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 520 |
+
at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) \
|
| 521 |
+
AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
|
| 522 |
+
at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__)
|
| 523 |
+
|
| 524 |
+
#define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) \
|
| 525 |
+
AT_DISPATCH_SWITCH( \
|
| 526 |
+
TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__))
|
| 527 |
+
|
| 528 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) \
|
| 529 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 530 |
+
AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)
|
| 531 |
+
|
| 532 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) \
|
| 533 |
+
AT_DISPATCH_SWITCH( \
|
| 534 |
+
TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__))
|
| 535 |
+
|
| 536 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) \
|
| 537 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 538 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 539 |
+
|
| 540 |
+
#define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 541 |
+
AT_DISPATCH_SWITCH( \
|
| 542 |
+
TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__))
|
| 543 |
+
|
| 544 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) \
|
| 545 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 546 |
+
AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
|
| 547 |
+
|
| 548 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) \
|
| 549 |
+
AT_DISPATCH_SWITCH( \
|
| 550 |
+
TYPE, \
|
| 551 |
+
NAME, \
|
| 552 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__))
|
| 553 |
+
|
| 554 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
|
| 555 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 556 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 557 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 558 |
+
|
| 559 |
+
#define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 560 |
+
AT_DISPATCH_SWITCH( \
|
| 561 |
+
TYPE, \
|
| 562 |
+
NAME, \
|
| 563 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 564 |
+
|
| 565 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
|
| 566 |
+
SCALARTYPE1, SCALARTYPE2, ...) \
|
| 567 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 568 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 569 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
|
| 570 |
+
|
| 571 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( \
|
| 572 |
+
SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
|
| 573 |
+
AT_DISPATCH_SWITCH( \
|
| 574 |
+
TYPE, \
|
| 575 |
+
NAME, \
|
| 576 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
|
| 577 |
+
SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
|
| 578 |
+
|
| 579 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND3( \
|
| 580 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 581 |
+
AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
|
| 582 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 583 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 584 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 585 |
+
|
| 586 |
+
#define AT_DISPATCH_ALL_TYPES_AND3( \
|
| 587 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 588 |
+
AT_DISPATCH_SWITCH( \
|
| 589 |
+
TYPE, \
|
| 590 |
+
NAME, \
|
| 591 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND3( \
|
| 592 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 593 |
+
|
| 594 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
|
| 595 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
|
| 596 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 597 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 598 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 599 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
|
| 600 |
+
|
| 601 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( \
|
| 602 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
|
| 603 |
+
AT_DISPATCH_SWITCH( \
|
| 604 |
+
TYPE, \
|
| 605 |
+
NAME, \
|
| 606 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
|
| 607 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
|
| 608 |
+
|
| 609 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
|
| 610 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
|
| 611 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 612 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 613 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 614 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 615 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
|
| 616 |
+
|
| 617 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( \
|
| 618 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
|
| 619 |
+
AT_DISPATCH_SWITCH( \
|
| 620 |
+
TYPE, \
|
| 621 |
+
NAME, \
|
| 622 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
|
| 623 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
|
| 624 |
+
|
| 625 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
|
| 626 |
+
SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
|
| 627 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 628 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 629 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 630 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 631 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 632 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)
|
| 633 |
+
|
| 634 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND5( \
|
| 635 |
+
SCALARTYPE1, \
|
| 636 |
+
SCALARTYPE2, \
|
| 637 |
+
SCALARTYPE3, \
|
| 638 |
+
SCALARTYPE4, \
|
| 639 |
+
SCALARTYPE5, \
|
| 640 |
+
TYPE, \
|
| 641 |
+
NAME, \
|
| 642 |
+
...) \
|
| 643 |
+
AT_DISPATCH_SWITCH( \
|
| 644 |
+
TYPE, \
|
| 645 |
+
NAME, \
|
| 646 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
|
| 647 |
+
SCALARTYPE1, \
|
| 648 |
+
SCALARTYPE2, \
|
| 649 |
+
SCALARTYPE3, \
|
| 650 |
+
SCALARTYPE4, \
|
| 651 |
+
SCALARTYPE5, \
|
| 652 |
+
__VA_ARGS__))
|
| 653 |
+
|
| 654 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
|
| 655 |
+
SCALARTYPE1, \
|
| 656 |
+
SCALARTYPE2, \
|
| 657 |
+
SCALARTYPE3, \
|
| 658 |
+
SCALARTYPE4, \
|
| 659 |
+
SCALARTYPE5, \
|
| 660 |
+
SCALARTYPE6, \
|
| 661 |
+
...) \
|
| 662 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 663 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 664 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 665 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 666 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 667 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 668 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)
|
| 669 |
+
|
| 670 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND6( \
|
| 671 |
+
SCALARTYPE1, \
|
| 672 |
+
SCALARTYPE2, \
|
| 673 |
+
SCALARTYPE3, \
|
| 674 |
+
SCALARTYPE4, \
|
| 675 |
+
SCALARTYPE5, \
|
| 676 |
+
SCALARTYPE6, \
|
| 677 |
+
TYPE, \
|
| 678 |
+
NAME, \
|
| 679 |
+
...) \
|
| 680 |
+
AT_DISPATCH_SWITCH( \
|
| 681 |
+
TYPE, \
|
| 682 |
+
NAME, \
|
| 683 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
|
| 684 |
+
SCALARTYPE1, \
|
| 685 |
+
SCALARTYPE2, \
|
| 686 |
+
SCALARTYPE3, \
|
| 687 |
+
SCALARTYPE4, \
|
| 688 |
+
SCALARTYPE5, \
|
| 689 |
+
SCALARTYPE6, \
|
| 690 |
+
__VA_ARGS__))
|
| 691 |
+
|
| 692 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
|
| 693 |
+
SCALARTYPE1, \
|
| 694 |
+
SCALARTYPE2, \
|
| 695 |
+
SCALARTYPE3, \
|
| 696 |
+
SCALARTYPE4, \
|
| 697 |
+
SCALARTYPE5, \
|
| 698 |
+
SCALARTYPE6, \
|
| 699 |
+
SCALARTYPE7, \
|
| 700 |
+
...) \
|
| 701 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 702 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 703 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 704 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 705 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 706 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 707 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
|
| 708 |
+
AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__)
|
| 709 |
+
|
| 710 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND7( \
|
| 711 |
+
SCALARTYPE1, \
|
| 712 |
+
SCALARTYPE2, \
|
| 713 |
+
SCALARTYPE3, \
|
| 714 |
+
SCALARTYPE4, \
|
| 715 |
+
SCALARTYPE5, \
|
| 716 |
+
SCALARTYPE6, \
|
| 717 |
+
SCALARTYPE7, \
|
| 718 |
+
TYPE, \
|
| 719 |
+
NAME, \
|
| 720 |
+
...) \
|
| 721 |
+
AT_DISPATCH_SWITCH( \
|
| 722 |
+
TYPE, \
|
| 723 |
+
NAME, \
|
| 724 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
|
| 725 |
+
SCALARTYPE1, \
|
| 726 |
+
SCALARTYPE2, \
|
| 727 |
+
SCALARTYPE3, \
|
| 728 |
+
SCALARTYPE4, \
|
| 729 |
+
SCALARTYPE5, \
|
| 730 |
+
SCALARTYPE6, \
|
| 731 |
+
SCALARTYPE7, \
|
| 732 |
+
__VA_ARGS__))
|
| 733 |
+
|
| 734 |
+
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
|
| 735 |
+
SCALARTYPE1, \
|
| 736 |
+
SCALARTYPE2, \
|
| 737 |
+
SCALARTYPE3, \
|
| 738 |
+
SCALARTYPE4, \
|
| 739 |
+
SCALARTYPE5, \
|
| 740 |
+
SCALARTYPE6, \
|
| 741 |
+
SCALARTYPE7, \
|
| 742 |
+
SCALARTYPE8, \
|
| 743 |
+
...) \
|
| 744 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
|
| 745 |
+
AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
|
| 746 |
+
AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
|
| 747 |
+
AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
|
| 748 |
+
AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
|
| 749 |
+
AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
|
| 750 |
+
AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
|
| 751 |
+
AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__) \
|
| 752 |
+
AT_DISPATCH_CASE(SCALARTYPE8, __VA_ARGS__)
|
| 753 |
+
|
| 754 |
+
#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND8( \
|
| 755 |
+
SCALARTYPE1, \
|
| 756 |
+
SCALARTYPE2, \
|
| 757 |
+
SCALARTYPE3, \
|
| 758 |
+
SCALARTYPE4, \
|
| 759 |
+
SCALARTYPE5, \
|
| 760 |
+
SCALARTYPE6, \
|
| 761 |
+
SCALARTYPE7, \
|
| 762 |
+
SCALARTYPE8, \
|
| 763 |
+
TYPE, \
|
| 764 |
+
NAME, \
|
| 765 |
+
...) \
|
| 766 |
+
AT_DISPATCH_SWITCH( \
|
| 767 |
+
TYPE, \
|
| 768 |
+
NAME, \
|
| 769 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
|
| 770 |
+
SCALARTYPE1, \
|
| 771 |
+
SCALARTYPE2, \
|
| 772 |
+
SCALARTYPE3, \
|
| 773 |
+
SCALARTYPE4, \
|
| 774 |
+
SCALARTYPE5, \
|
| 775 |
+
SCALARTYPE6, \
|
| 776 |
+
SCALARTYPE7, \
|
| 777 |
+
SCALARTYPE8, \
|
| 778 |
+
__VA_ARGS__))
|
| 779 |
+
|
| 780 |
+
#define AT_DISPATCH_CASE_BIT_TYPES(...) \
|
| 781 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits1x8, __VA_ARGS__) \
|
| 782 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits2x4, __VA_ARGS__) \
|
| 783 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits4x2, __VA_ARGS__) \
|
| 784 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits8, __VA_ARGS__) \
|
| 785 |
+
AT_DISPATCH_CASE(at::ScalarType::Bits16, __VA_ARGS__)
|
| 786 |
+
|
| 787 |
+
#define AT_DISPATCH_BIT_TYPES(TYPE, NAME, ...) \
|
| 788 |
+
AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_BIT_TYPES(__VA_ARGS__))
|
| 789 |
+
|
| 790 |
+
#define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) \
|
| 791 |
+
AT_DISPATCH_SWITCH( \
|
| 792 |
+
TYPE, \
|
| 793 |
+
NAME, \
|
| 794 |
+
AT_PRIVATE_CASE_TYPE_USING_HINT( \
|
| 795 |
+
at::ScalarType::Int, index_t, __VA_ARGS__) \
|
| 796 |
+
AT_PRIVATE_CASE_TYPE_USING_HINT( \
|
| 797 |
+
at::ScalarType::Long, index_t, __VA_ARGS__))
|
| 798 |
+
|
| 799 |
+
// ----------------------------------------------------------------------------
|
| 800 |
+
// DEPRECATED MACROS, DON'T USE THESE
|
| 801 |
+
// ----------------------------------------------------------------------------
|
| 802 |
+
|
| 803 |
+
#define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \
|
| 804 |
+
detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); \
|
| 805 |
+
AT_DISPATCH_SWITCH( \
|
| 806 |
+
TYPE, \
|
| 807 |
+
NAME, \
|
| 808 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__))
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/ATen.h>
|
| 3 |
+
#include <ATen/core/op_registration/op_registration.h>
|
| 4 |
+
#include <torch/library.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
// If an operator doesn't have a batching rule implemented then we fallback
|
| 9 |
+
// to this implementation. The fallback only works on out-of-place operators
|
| 10 |
+
// that return only tensors with new memory. (e.g., no in-place operators, no
|
| 11 |
+
// view operations).
|
| 12 |
+
//
|
| 13 |
+
// The fallback effectively takes all of the BatchedTensors in `stack`, slices
|
| 14 |
+
// them, and runs `op` on all of the corresponding slices to produce slices
|
| 15 |
+
// of the outputs. The output slices then get `torch.stack`ed to create the
|
| 16 |
+
// final returns.
|
| 17 |
+
//
|
| 18 |
+
// The performance of the fallback is not very good because it introduces an
|
| 19 |
+
// extra copy from stacking the sliced outputs. Because of this, we prefer to
|
| 20 |
+
// write batching rules for operators whenever possible.
|
| 21 |
+
void batchedTensorForLoopFallback(
|
| 22 |
+
const c10::OperatorHandle& op,
|
| 23 |
+
torch::jit::Stack* stack);
|
| 24 |
+
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/PadNd.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/Exception.h>
|
| 3 |
+
#include <c10/util/string_view.h>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
|
| 7 |
+
enum class padding_mode {
|
| 8 |
+
reflect,
|
| 9 |
+
replicate,
|
| 10 |
+
circular,
|
| 11 |
+
constant,
|
| 12 |
+
};
|
| 13 |
+
|
| 14 |
+
static inline c10::string_view padding_mode_string(padding_mode m) {
|
| 15 |
+
switch (m) {
|
| 16 |
+
case padding_mode::reflect:
|
| 17 |
+
return "reflect";
|
| 18 |
+
case padding_mode::replicate:
|
| 19 |
+
return "replicate";
|
| 20 |
+
case padding_mode::circular:
|
| 21 |
+
return "circular";
|
| 22 |
+
case padding_mode::constant:
|
| 23 |
+
return "constant";
|
| 24 |
+
}
|
| 25 |
+
TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/Exception.h>
|
| 4 |
+
#include <c10/util/SmallVector.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
template <class F>
|
| 9 |
+
inline void parallel_for(
|
| 10 |
+
const int64_t begin,
|
| 11 |
+
const int64_t end,
|
| 12 |
+
const int64_t grain_size,
|
| 13 |
+
const F& f) {
|
| 14 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(grain_size >= 0);
|
| 15 |
+
if (begin >= end) {
|
| 16 |
+
return;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
#ifdef INTRA_OP_PARALLEL
|
| 20 |
+
at::internal::lazy_init_num_threads();
|
| 21 |
+
const auto numiter = end - begin;
|
| 22 |
+
const bool use_parallel =
|
| 23 |
+
(numiter > grain_size && numiter > 1 && !at::in_parallel_region() &&
|
| 24 |
+
at::get_num_threads() > 1);
|
| 25 |
+
if (!use_parallel) {
|
| 26 |
+
internal::ThreadIdGuard tid_guard(0);
|
| 27 |
+
f(begin, end);
|
| 28 |
+
return;
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
internal::invoke_parallel(begin, end, grain_size, f);
|
| 32 |
+
#else
|
| 33 |
+
internal::ThreadIdGuard tid_guard(0);
|
| 34 |
+
f(begin, end);
|
| 35 |
+
#endif
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
template <class scalar_t, class F, class SF>
|
| 39 |
+
inline scalar_t parallel_reduce(
|
| 40 |
+
const int64_t begin,
|
| 41 |
+
const int64_t end,
|
| 42 |
+
const int64_t grain_size,
|
| 43 |
+
const scalar_t ident,
|
| 44 |
+
const F& f,
|
| 45 |
+
const SF& sf) {
|
| 46 |
+
TORCH_CHECK(grain_size >= 0);
|
| 47 |
+
if (begin >= end) {
|
| 48 |
+
return ident;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
#ifdef INTRA_OP_PARALLEL
|
| 52 |
+
at::internal::lazy_init_num_threads();
|
| 53 |
+
const auto max_threads = at::get_num_threads();
|
| 54 |
+
const bool use_parallel =
|
| 55 |
+
((end - begin) > grain_size && !at::in_parallel_region() &&
|
| 56 |
+
max_threads > 1);
|
| 57 |
+
if (!use_parallel) {
|
| 58 |
+
internal::ThreadIdGuard tid_guard(0);
|
| 59 |
+
return f(begin, end, ident);
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
c10::SmallVector<scalar_t, 64> results(max_threads, ident);
|
| 63 |
+
internal::invoke_parallel(
|
| 64 |
+
begin,
|
| 65 |
+
end,
|
| 66 |
+
grain_size,
|
| 67 |
+
[&](const int64_t my_begin, const int64_t my_end) {
|
| 68 |
+
const auto tid = at::get_thread_num();
|
| 69 |
+
results[tid] = f(my_begin, my_end, ident);
|
| 70 |
+
});
|
| 71 |
+
|
| 72 |
+
scalar_t result = ident;
|
| 73 |
+
for (auto partial_result : results) {
|
| 74 |
+
result = sf(result, partial_result);
|
| 75 |
+
}
|
| 76 |
+
return result;
|
| 77 |
+
#else
|
| 78 |
+
internal::ThreadIdGuard tid_guard(0);
|
| 79 |
+
return f(begin, end, ident);
|
| 80 |
+
#endif
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/TensorBase.h>
|
| 4 |
+
#include <c10/core/WrapDimMinimal.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
// Return if the tensor geometry represented by `sizes` and `strides` is
|
| 9 |
+
// contiguous Although we cache is_contiguous in tensor now, this is till useful
|
| 10 |
+
// because it allows checking if a particular geometry is contiguous without
|
| 11 |
+
// explicitly constructing a tensor, e.g., when you want to choose a kernel
|
| 12 |
+
// strategy based on whether a subgeometry is contiguous.
|
| 13 |
+
TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
|
| 14 |
+
|
| 15 |
+
struct TORCH_API TensorGeometry {
|
| 16 |
+
TensorGeometry() = default;
|
| 17 |
+
|
| 18 |
+
explicit TensorGeometry(c10::SymIntArrayRef sizes)
|
| 19 |
+
: sizes_(sizes.vec()),
|
| 20 |
+
strides_(sizes.size()),
|
| 21 |
+
has_symbolic_sizes_strides_(
|
| 22 |
+
!c10::asIntArrayRefSlowOpt(sizes).has_value()) {
|
| 23 |
+
int64_t dim = sizes.size();
|
| 24 |
+
c10::SymInt expected_stride = 1;
|
| 25 |
+
for (int64_t i = dim - 1; i >= 0; i--) {
|
| 26 |
+
strides_[i] = expected_stride;
|
| 27 |
+
expected_stride *= sizes_[i];
|
| 28 |
+
}
|
| 29 |
+
numel_ = expected_stride;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
explicit TensorGeometry(const TensorBase& t)
|
| 33 |
+
: sizes_(t.sym_sizes().vec()),
|
| 34 |
+
strides_(t.sym_strides().vec()),
|
| 35 |
+
storage_offset_(t.sym_storage_offset()),
|
| 36 |
+
numel_(t.sym_numel()),
|
| 37 |
+
has_symbolic_sizes_strides_(
|
| 38 |
+
t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {}
|
| 39 |
+
|
| 40 |
+
// true if the tensor is contiguous
|
| 41 |
+
bool is_contiguous() const;
|
| 42 |
+
|
| 43 |
+
int64_t dim() const {
|
| 44 |
+
return sizes_.size();
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
int64_t size(int64_t dim) const {
|
| 48 |
+
TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
|
| 49 |
+
dim = c10::maybe_wrap_dim(dim, this->dim());
|
| 50 |
+
return sizes_.at(static_cast<size_t>(dim)).as_int_unchecked();
|
| 51 |
+
}
|
| 52 |
+
c10::IntArrayRef sizes() const {
|
| 53 |
+
TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
|
| 54 |
+
return c10::asIntArrayRefUnchecked(sizes_);
|
| 55 |
+
}
|
| 56 |
+
int64_t stride(int64_t dim) const {
|
| 57 |
+
TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
|
| 58 |
+
dim = c10::maybe_wrap_dim(dim, this->dim());
|
| 59 |
+
return strides_.at(static_cast<size_t>(dim)).as_int_unchecked();
|
| 60 |
+
}
|
| 61 |
+
c10::IntArrayRef strides() const {
|
| 62 |
+
TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
|
| 63 |
+
return c10::asIntArrayRefUnchecked(strides_);
|
| 64 |
+
}
|
| 65 |
+
int64_t storage_offset() const {
|
| 66 |
+
TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
|
| 67 |
+
return storage_offset_.as_int_unchecked();
|
| 68 |
+
}
|
| 69 |
+
int64_t numel() const {
|
| 70 |
+
TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
|
| 71 |
+
return numel_.as_int_unchecked();
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
c10::SymInt sym_size(int64_t dim) const {
|
| 75 |
+
dim = c10::maybe_wrap_dim(dim, this->dim());
|
| 76 |
+
return sizes_.at(static_cast<size_t>(dim));
|
| 77 |
+
}
|
| 78 |
+
c10::SymIntArrayRef sym_sizes() const {
|
| 79 |
+
return sizes_;
|
| 80 |
+
}
|
| 81 |
+
c10::SymInt sym_stride(int64_t dim) const {
|
| 82 |
+
dim = c10::maybe_wrap_dim(dim, this->dim());
|
| 83 |
+
return strides_.at(static_cast<size_t>(dim));
|
| 84 |
+
}
|
| 85 |
+
c10::SymIntArrayRef sym_strides() const {
|
| 86 |
+
return strides_;
|
| 87 |
+
}
|
| 88 |
+
c10::SymInt sym_storage_offset() const {
|
| 89 |
+
return storage_offset_;
|
| 90 |
+
}
|
| 91 |
+
c10::SymInt sym_numel() const {
|
| 92 |
+
return numel_;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
TensorGeometry transpose(int64_t dim0, int64_t dim1) {
|
| 96 |
+
TensorGeometry r = *this; // copy
|
| 97 |
+
TORCH_CHECK(
|
| 98 |
+
dim0 < dim(),
|
| 99 |
+
"transpose: dim0=",
|
| 100 |
+
dim0,
|
| 101 |
+
" out of range (dim=",
|
| 102 |
+
dim(),
|
| 103 |
+
")")
|
| 104 |
+
TORCH_CHECK(
|
| 105 |
+
dim1 < dim(),
|
| 106 |
+
"transpose: dim1=",
|
| 107 |
+
dim1,
|
| 108 |
+
" out of range (dim=",
|
| 109 |
+
dim(),
|
| 110 |
+
")")
|
| 111 |
+
std::swap(r.sizes_[dim0], r.sizes_[dim1]);
|
| 112 |
+
std::swap(r.strides_[dim0], r.strides_[dim1]);
|
| 113 |
+
return r;
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
std::vector<c10::SymInt>& mutable_sizes() {
|
| 117 |
+
return sizes_;
|
| 118 |
+
}
|
| 119 |
+
std::vector<c10::SymInt>& mutable_strides() {
|
| 120 |
+
return strides_;
|
| 121 |
+
}
|
| 122 |
+
c10::SymInt& mutable_storage_offset() {
|
| 123 |
+
return storage_offset_;
|
| 124 |
+
}
|
| 125 |
+
void recompute() {
|
| 126 |
+
// recalculate numel after a change
|
| 127 |
+
c10::SymInt numel = 1;
|
| 128 |
+
for (const auto& i : sizes_) {
|
| 129 |
+
numel = numel * i;
|
| 130 |
+
}
|
| 131 |
+
numel_ = std::move(numel);
|
| 132 |
+
has_symbolic_sizes_strides_ =
|
| 133 |
+
!c10::asIntArrayRefSlowOpt(sizes_).has_value();
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
private:
|
| 137 |
+
std::vector<c10::SymInt> sizes_;
|
| 138 |
+
std::vector<c10::SymInt> strides_;
|
| 139 |
+
c10::SymInt storage_offset_;
|
| 140 |
+
c10::SymInt numel_;
|
| 141 |
+
bool has_symbolic_sizes_strides_{false};
|
| 142 |
+
};
|
| 143 |
+
|
| 144 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h
ADDED
|
@@ -0,0 +1,731 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ExpandUtils.h>
|
| 4 |
+
#include <ATen/ScalarOps.h>
|
| 5 |
+
#include <ATen/core/Tensor.h>
|
| 6 |
+
#include <ATen/core/TensorBody.h>
|
| 7 |
+
#include <c10/core/SymInt.h>
|
| 8 |
+
#include <c10/util/Optional.h>
|
| 9 |
+
#include <c10/util/irange.h>
|
| 10 |
+
|
| 11 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 12 |
+
#include <ATen/Functions.h>
|
| 13 |
+
#include <ATen/NativeFunctions.h>
|
| 14 |
+
#else
|
| 15 |
+
#include <ATen/ops/alias.h>
|
| 16 |
+
#include <ATen/ops/empty.h>
|
| 17 |
+
#include <ATen/ops/scalar_tensor.h>
|
| 18 |
+
#include <ATen/ops/zeros.h>
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
#include <ATen/core/List.h>
|
| 22 |
+
|
| 23 |
+
#include <utility>
|
| 24 |
+
|
| 25 |
+
namespace at::indexing {
|
| 26 |
+
|
| 27 |
+
const int64_t INDEX_MIN = c10::SymInt::min_representable_int();
|
| 28 |
+
const int64_t INDEX_MAX = -(INDEX_MIN + 1);
|
| 29 |
+
|
| 30 |
+
enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor };
|
| 31 |
+
|
| 32 |
+
constexpr c10::nullopt_t None = c10::nullopt;
|
| 33 |
+
|
| 34 |
+
struct TORCH_API EllipsisIndexType final {
|
| 35 |
+
EllipsisIndexType() = default;
|
| 36 |
+
};
|
| 37 |
+
TORCH_API extern const EllipsisIndexType Ellipsis;
|
| 38 |
+
|
| 39 |
+
struct TORCH_API Slice final {
|
| 40 |
+
public:
|
| 41 |
+
Slice(
|
| 42 |
+
c10::optional<c10::SymInt> start_index = c10::nullopt,
|
| 43 |
+
c10::optional<c10::SymInt> stop_index = c10::nullopt,
|
| 44 |
+
c10::optional<c10::SymInt> step_index = c10::nullopt) {
|
| 45 |
+
if (!step_index.has_value()) {
|
| 46 |
+
step_ = c10::SymInt(1);
|
| 47 |
+
} else {
|
| 48 |
+
step_ = std::move(step_index).value();
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
TORCH_CHECK_VALUE(step_ != 0, "slice step cannot be zero");
|
| 52 |
+
|
| 53 |
+
if (!start_index.has_value()) {
|
| 54 |
+
start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0);
|
| 55 |
+
} else {
|
| 56 |
+
start_ = std::move(start_index).value();
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
if (!stop_index.has_value()) {
|
| 60 |
+
stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX);
|
| 61 |
+
} else {
|
| 62 |
+
stop_ = std::move(stop_index).value();
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
inline c10::SymInt start() const {
|
| 67 |
+
return start_;
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
inline c10::SymInt stop() const {
|
| 71 |
+
return stop_;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
inline c10::SymInt step() const {
|
| 75 |
+
return step_;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
private:
|
| 79 |
+
c10::SymInt start_;
|
| 80 |
+
c10::SymInt stop_;
|
| 81 |
+
c10::SymInt step_;
|
| 82 |
+
};
|
| 83 |
+
|
| 84 |
+
TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
|
| 85 |
+
|
| 86 |
+
// `at::indexing::TensorIndex` is used for converting C++ tensor indices such as
|
| 87 |
+
// `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}`
|
| 88 |
+
// into its equivalent `std::vector<TensorIndex>`, so that further tensor
|
| 89 |
+
// indexing operations can be performed using the supplied indices.
|
| 90 |
+
//
|
| 91 |
+
// There is one-to-one correspondence between Python and C++ tensor index types:
|
| 92 |
+
// Python | C++
|
| 93 |
+
// -----------------------------------------------------
|
| 94 |
+
// `None` | `at::indexing::None`
|
| 95 |
+
// `Ellipsis` | `at::indexing::Ellipsis`
|
| 96 |
+
// `...` | `"..."`
|
| 97 |
+
// `123` | `123`
|
| 98 |
+
// `True` / `False` | `true` / `false`
|
| 99 |
+
// `:` | `Slice()` / `Slice(None, None)`
|
| 100 |
+
// `::` | `Slice()` / `Slice(None, None, None)`
|
| 101 |
+
// `1:` | `Slice(1, None)`
|
| 102 |
+
// `1::` | `Slice(1, None, None)`
|
| 103 |
+
// `:3` | `Slice(None, 3)`
|
| 104 |
+
// `:3:` | `Slice(None, 3, None)`
|
| 105 |
+
// `::2` | `Slice(None, None, 2)`
|
| 106 |
+
// `1:3` | `Slice(1, 3)`
|
| 107 |
+
// `1::2` | `Slice(1, None, 2)`
|
| 108 |
+
// `:3:2` | `Slice(None, 3, 2)`
|
| 109 |
+
// `1:3:2` | `Slice(1, 3, 2)`
|
| 110 |
+
// `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
|
| 111 |
+
// A single index expression for one dimension of a tensor, mirroring the
// index kinds allowed by Python's `Tensor.__getitem__`: `None`, `...`
// (ellipsis), a (symbolic) integer, a boolean, a slice, or a tensor.
// Implemented as a tagged union: `type_` records which of the value
// members below is active.
struct TORCH_API TensorIndex final {
  // Case 1: `at::indexing::None`
  TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {}

  // Case 2: "..." / `at::indexing::Ellipsis`
  TensorIndex(at::indexing::EllipsisIndexType)
      : type_(TensorIndexType::Ellipsis) {}
  // String form: only the literal "..." is accepted; anything else is a
  // user error reported via TORCH_CHECK_VALUE.
  TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
    TORCH_CHECK_VALUE(
        strcmp(str, "...") == 0,
        "Expected \"...\" to represent an ellipsis index, but got \"",
        str,
        "\"");
  }

  // Case 3: (Sym) Integer value
  TensorIndex(SymInt integer)
      : integer_(std::move(integer)), type_(TensorIndexType::SymInt) {}
  TensorIndex(int64_t integer) : TensorIndex(SymInt(integer)) {}
  TensorIndex(int integer) : TensorIndex(SymInt(integer)) {}

  // Case 4: Boolean value
  // Constrained to exactly `bool` via enable_if so that other integral
  // types keep selecting the integer overloads above.
  template <
      class T,
      class = typename std::enable_if<std::is_same<bool, T>::value>::type>
  TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {}

  // Case 5: Slice represented in `at::indexing::Slice` form
  TensorIndex(Slice slice)
      : slice_(std::move(slice)), type_(TensorIndexType::Slice) {}

  // Case 6: Tensor value
  TensorIndex(Tensor tensor)
      : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {}

  // Discriminator queries: exactly one of these returns true for a
  // given instance.
  inline bool is_none() const {
    return type_ == TensorIndexType::None;
  }

  inline bool is_ellipsis() const {
    return type_ == TensorIndexType::Ellipsis;
  }

  inline bool is_integer() const {
    return type_ == TensorIndexType::SymInt;
  }

  // Value accessors: only meaningful when the matching `is_*()` query
  // returns true; otherwise they return the default-initialized member.
  inline SymInt integer() const {
    return integer_;
  }

  inline bool is_boolean() const {
    return type_ == TensorIndexType::Boolean;
  }

  inline bool boolean() const {
    return boolean_;
  }

  inline bool is_slice() const {
    return type_ == TensorIndexType::Slice;
  }

  inline const Slice& slice() const {
    return slice_;
  }

  inline bool is_tensor() const {
    return type_ == TensorIndexType::Tensor;
  }

  inline const Tensor& tensor() const {
    return tensor_;
  }

 private:
  SymInt integer_ = 0;
  bool boolean_ = false;
  Slice slice_;
  Tensor tensor_;
  TensorIndexType type_;
};
|
| 193 |
+
|
| 194 |
+
TORCH_API std::ostream& operator<<(
|
| 195 |
+
std::ostream& stream,
|
| 196 |
+
const TensorIndex& tensor_index);
|
| 197 |
+
TORCH_API std::ostream& operator<<(
|
| 198 |
+
std::ostream& stream,
|
| 199 |
+
const std::vector<TensorIndex>& tensor_indices);
|
| 200 |
+
|
| 201 |
+
namespace impl {
|
| 202 |
+
// Applies `start:stop:step` slicing along dimension `dim` of `self`.
// When the slice covers the whole dimension (start==0, stop==length,
// step==1) and optimization is not disabled, returns `self` unchanged
// instead of dispatching an aten slice call.
static inline Tensor applySlice(
    const Tensor& self,
    int64_t dim,
    c10::SymInt start,
    c10::SymInt stop,
    c10::SymInt step,
    bool disable_slice_optimization,
    const at::Device& self_device,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  // TODO: implement negative step
  TORCH_CHECK_VALUE(step > 0, "step must be greater than zero");

  // See NOTE [nested tensor size for indexing]
  // `self_sizes` is nullopt for nested tensors, in which case the
  // full-slice shortcut below is skipped entirely.
  if (self_sizes.has_value()) {
    // Skip this optimization if we are tracing, as the trace may be polymorphic
    // over the shape of the `self` tensor, and we still want to record
    // the slice.
    SymInt length = (self_device == at::kCPU || self_device == at::kCUDA)
        ? (*self_sizes)[dim]
        : self.sym_size(dim);
    // NOTE: the short-circuit order here is deliberate — when the
    // optimization is disabled, no SymInt comparisons are evaluated.
    if (!disable_slice_optimization && start == 0 && length == stop &&
        step == 1) {
      return self;
    }
  }
  return self.slice_symint(
      dim, std::move(start), std::move(stop), std::move(step));
}
|
| 230 |
+
|
| 231 |
+
// Applies integer indexing (`self[index]`) along dimension `dim`,
// removing that dimension. `real_dim` is the dimension's position in
// the original (pre-indexing) tensor and is used only for error
// messages.
static inline Tensor applySelect(
    const Tensor& self,
    int64_t dim,
    SymInt index,
    int64_t real_dim,
    const at::Device& /*self_device*/,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  // See NOTE [nested tensor size for indexing]
  // Bounds checking is skipped for nested tensors (self_sizes == nullopt).
  if (self_sizes.has_value()) {
    auto maybe_index = index.maybe_as_int();
    if (maybe_index.has_value()) {
      // 0-dim tensors cannot be indexed at all — mirror Python's error.
      TORCH_CHECK_INDEX(
          !(maybe_index.value() == 0 && dim == 0 && self_sizes->empty()),
          "invalid index of a 0-dim tensor. ",
          "Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number");
    }

    auto size = (*self_sizes)[dim];
    // Valid indices are in [-size, size): `size >= -index` checks the
    // negative bound, `size > index` the positive one.
    TORCH_CHECK_INDEX(
        size >= -index && size > index,
        "index ",
        index,
        " is out of bounds for dimension ",
        real_dim,
        " with size ",
        size);
  }

  // if the index is negative, do not normalize it because that would fix the
  // index on the current tensor size in the tracer. aten::select also works on
  // negative indices
  return self.select_symint(dim, std::move(index));
}
|
| 264 |
+
|
| 265 |
+
// Builds the index tensor that represents a boolean index on a CPU/CUDA
// tensor. A boolean adds a dimension of size 1: `true` indexes it as if
// `0:` (a single zero index), `false` as an empty selection.
static inline Tensor boolToIndexingTensorCPUOrCUDA(
    const Tensor& self,
    bool value) {
  const auto long_opts = self.options().dtype(kLong);
  if (!value) {
    // Empty index tensor -> selects nothing along the new dimension.
    return at::empty({0}, {}, long_opts);
  }
  // One zero index -> keeps the (unsqueezed) dimension.
  return at::empty({1}, {}, long_opts).fill_(0.);
}
|
| 276 |
+
|
| 277 |
+
// Same contract as boolToIndexingTensorCPUOrCUDA, but for devices other
// than CPU/CUDA: `true` yields a single zero index, `false` an empty
// index tensor.
static inline Tensor boolToIndexingTensorNonNativeDeviceType(
    const Tensor& self,
    bool value) {
  const auto long_opts = self.options().dtype(kLong);
  return value ? at::zeros({1}, {}, long_opts)
               : at::empty({0}, {}, long_opts);
}
|
| 288 |
+
|
| 289 |
+
// Converts a boolean index into an index tensor, dispatching on device:
// CPU/CUDA take the native path, everything else the generic one.
static inline Tensor boolToIndexingTensor(
    const Tensor& self,
    bool value,
    const at::Device& self_device) {
  const bool native_device =
      self_device == at::kCPU || self_device == at::kCUDA;
  return native_device
      ? boolToIndexingTensorCPUOrCUDA(self, value)
      : boolToIndexingTensorNonNativeDeviceType(self, value);
}
|
| 299 |
+
|
| 300 |
+
// Wraps a Scalar into a 0-dim tensor via the generic factory; used for
// devices that cannot take the static CPU scalar-tensor fast path (see
// scalarToTensor below).
static inline Tensor scalarToTensorNonNativeDeviceType(
    const Scalar& v,
    const TensorOptions& options) {
  return at::scalar_tensor(v, options);
}
|
| 305 |
+
|
| 306 |
+
static inline void recordTensorIndex(
|
| 307 |
+
const Tensor& tensor,
|
| 308 |
+
std::vector<Tensor>& outIndices,
|
| 309 |
+
int64_t* dim_ptr) {
|
| 310 |
+
// TODO: check scalarType
|
| 311 |
+
outIndices.resize(*dim_ptr + 1);
|
| 312 |
+
outIndices[*dim_ptr] = tensor;
|
| 313 |
+
(*dim_ptr)++;
|
| 314 |
+
};
|
| 315 |
+
|
| 316 |
+
// Converts the gathered per-dimension index tensors (possibly
// undefined) into the optional-list form expected by `Tensor::index`
// and `Tensor::index_put_`.
static inline c10::List<c10::optional<Tensor>> typeConvertIndices(
    const Tensor& /*self*/,
    std::vector<Tensor>&& indices) {
  c10::List<c10::optional<Tensor>> converted_inds;
  converted_inds.reserve(indices.size());
  // `indices` is an rvalue we own, so iterate by mutable reference and
  // actually move each element (std::move on a const reference would
  // silently degrade to a copy).
  for (auto& i : indices) {
    converted_inds.push_back(std::move(i));
  }
  return converted_inds;
}
|
| 326 |
+
|
| 327 |
+
// NOTE: Why do we mirror instead of replace the `count_specified_dimensions`
|
| 328 |
+
// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because
|
| 329 |
+
// `count_specified_dimensions` is on the hot path of Python tensor multi-dim
|
| 330 |
+
// indexing (i.e. it's called by `applySlicing` which is called by
|
| 331 |
+
// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more
|
| 332 |
+
// than one dimension). If we were to merge the Python/C++
|
| 333 |
+
// `count_specified_dimensions` function, on the Python side we would have to
|
| 334 |
+
// construct a `std::vector` container to be consumed by the C++
|
| 335 |
+
// `count_specified_dimensions` function, which adds 100s of nanoseconds
|
| 336 |
+
// overhead and is undesirable.
|
| 337 |
+
static inline int64_t count_specified_dimensions(
|
| 338 |
+
const ArrayRef<TensorIndex>& indices) {
|
| 339 |
+
// Count the number of indexed dimensions (everything but ellipsis and None)
|
| 340 |
+
int64_t count = 0;
|
| 341 |
+
for (auto& obj : indices) {
|
| 342 |
+
if (obj.is_tensor()) {
|
| 343 |
+
auto& tensor = obj.tensor();
|
| 344 |
+
if (tensor.scalar_type() == kByte || tensor.scalar_type() == kBool) {
|
| 345 |
+
count += tensor.dim();
|
| 346 |
+
} else {
|
| 347 |
+
count++;
|
| 348 |
+
}
|
| 349 |
+
} else if (!obj.is_none() && !obj.is_ellipsis() && !obj.is_boolean()) {
|
| 350 |
+
count++;
|
| 351 |
+
}
|
| 352 |
+
}
|
| 353 |
+
return count;
|
| 354 |
+
}
|
| 355 |
+
} // namespace impl
|
| 356 |
+
|
| 357 |
+
// NOTE: Many functions below are only for consumption from Python indexing
|
| 358 |
+
// implementation, they include:
|
| 359 |
+
//
|
| 360 |
+
// - `Tensor scalarToTensor(...)`
|
| 361 |
+
// - `IntArrayRef slicePrefix1sSize(...)`
|
| 362 |
+
// - `void copy_to(...)`
|
| 363 |
+
// - `Tensor handleDimInMultiDimIndexing(...)`
|
| 364 |
+
// - `Tensor dispatch_index(...)`
|
| 365 |
+
// - `Tensor dispatch_index_put_(...)`
|
| 366 |
+
// - `Tensor get_item(...)`
|
| 367 |
+
// - `void set_item(...)`
|
| 368 |
+
//
|
| 369 |
+
// The rest of the functions are in `at::indexing::impl` namespace, signifying
|
| 370 |
+
// that they shouldn't be used from Python indexing implementation.
|
| 371 |
+
// Wraps a Scalar into a 0-dim tensor. A concrete (non-symbolic) scalar
// destined for CPU can use the static scalar-tensor fast path; all
// other cases go through the generic factory.
static inline Tensor scalarToTensor(
    const Scalar& v,
    const TensorOptions& options,
    const at::Device& self_device) {
  const bool cpu_fast_path = self_device == at::kCPU && !v.isSymbolic();
  if (!cpu_fast_path) {
    return impl::scalarToTensorNonNativeDeviceType(v, options);
  }
  return at::detail::scalar_tensor_static(
      v, options.dtype_opt()->toScalarType(), self_device);
}
|
| 382 |
+
|
| 383 |
+
// To match numpy semantics:
|
| 384 |
+
// As a special case for backwards compatibility,
|
| 385 |
+
// strip away unit dimensions from the left of 'src'
|
| 386 |
+
// To match numpy semantics:
// As a special case for backwards compatibility,
// strip away unit dimensions from the left of 'src'
// Returns the suffix of `sizes` starting at the first entry that is not
// a (hinted) 1.
static inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) {
  size_t first_non1_src = sizes.size();
  for (const auto i : c10::irange(sizes.size())) {
    // Unbacked SymInt has different behavior, but this is sound because
    // failing to slice will only ever cause an error, not divergent
    // behavior
    // (`has_hint()` is checked first so unbacked sizes stop the scan
    // without being compared to 1).
    if (!sizes[i].has_hint() || sizes[i] != 1) {
      first_non1_src = i;
      break;
    }
  }

  return sizes.slice(first_non1_src);
}
|
| 400 |
+
|
| 401 |
+
// Copies `src` into `dst` (the view produced by an indexing
// expression), broadcasting `src` to `dst`'s shape when necessary.
static inline void copy_to(const Tensor& dst, const Tensor& src) {
  if (dst.sym_sizes().equals(src.sym_sizes())) {
    // A shortcut to avoid generating hard-coded constant sizes during tracing.
    // This is not a perfect solution: when src & dst have different shapes,
    // constants will still appear. Users can workaround that case by
    // dst[index..] = src.reshape(..)
    dst.copy_(src);
    return;
  } else if (src.dim() == 0 && src.device().type() == at::kCPU) {
    // 0-dim CPU source: a fill is cheaper than expand + copy.
    dst.fill_(src);
    return;
  }
  // numpy compat: strip leading size-1 dims of `src` before broadcasting
  // it into `dst` (see slicePrefix1sSize).
  auto src_view = src.view_symint(slicePrefix1sSize(src.sym_sizes()));
  c10::MaybeOwned<Tensor> b_src = expand_inplace(dst, src_view, "setitem");
  dst.copy_(*b_src);
}
|
| 417 |
+
|
| 418 |
+
// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
|
| 419 |
+
// indexing functions from Python ]
|
| 420 |
+
// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
// indexing functions from Python ]
// Applies a single `TensorIndex` from a multi-dimensional indexing
// expression to `prev_dim_result` (the tensor produced by the indices
// handled so far). `dim_ptr` is the current dimension cursor and
// `specified_dims_ptr` the count of dimension-consuming indices (used
// to expand `...`). Tensor and boolean indices are not applied here;
// they are recorded into `outIndices` for one combined advanced-indexing
// dispatch later.
static inline Tensor handleDimInMultiDimIndexing(
    const Tensor& prev_dim_result,
    const Tensor& original_tensor,
    const TensorIndex& index,
    int64_t* dim_ptr,
    int64_t* specified_dims_ptr,
    int64_t real_dim,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& original_tensor_device,
    const c10::optional<SymIntArrayRef>& prev_dim_result_sizes) {
  if (index.is_integer()) {
    // Integer index: select removes the dimension, so the cursor does
    // not advance.
    return impl::applySelect(
        prev_dim_result,
        *dim_ptr,
        index.integer(),
        real_dim,
        original_tensor_device,
        prev_dim_result_sizes);
  } else if (index.is_slice()) {
    Tensor result = impl::applySlice(
        prev_dim_result,
        *dim_ptr,
        index.slice().start(),
        index.slice().stop(),
        index.slice().step(),
        /*disable_slice_optimization=*/disable_slice_optimization,
        original_tensor_device,
        prev_dim_result_sizes);
    (*dim_ptr)++;
    return result;
  } else if (index.is_ellipsis()) {
    // `...` stands for full slices over all remaining unspecified
    // dimensions; just skip the cursor past them.
    (*dim_ptr) += original_tensor.dim() - (*specified_dims_ptr);
    return prev_dim_result;
  } else if (index.is_none()) {
    // `None` inserts a new dimension of size 1 at the cursor.
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    (*dim_ptr)++;
    return result;
  } else if (index.is_boolean()) {
    // A boolean adds a size-1 dimension and records an index tensor that
    // either keeps it (true) or empties it (false).
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    impl::recordTensorIndex(
        impl::boolToIndexingTensor(
            result, index.boolean(), original_tensor_device),
        outIndices,
        dim_ptr);
    return result;
  } else if (index.is_tensor()) {
    Tensor result = prev_dim_result;
    const Tensor& tensor = index.tensor();
    auto scalar_type = tensor.scalar_type();
    if (tensor.dim() == 0 &&
        at::isIntegralType(scalar_type, /*includeBool=*/true)) {
      // 0-dim integer tensors behave like plain integer indices;
      // 0-dim byte/bool tensors behave like boolean indices.
      if (scalar_type != at::kByte && scalar_type != at::kBool) {
        result = impl::applySelect(
            result,
            *dim_ptr,
            tensor.item<int64_t>(),
            real_dim,
            original_tensor_device,
            prev_dim_result_sizes);
      } else {
        result = result.unsqueeze(*dim_ptr);
        if (scalar_type == at::kBool) {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<bool>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        } else {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<uint8_t>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        }
      }
    } else {
      // General advanced indexing: defer to one combined index() call.
      impl::recordTensorIndex(tensor, outIndices, dim_ptr);
    }
    return result;
  } else {
    TORCH_INTERNAL_ASSERT(false, "Invalid TensorIndex type");
  }
}
|
| 504 |
+
|
| 505 |
+
namespace impl {
|
| 506 |
+
// This mirrors `applySlicing` in
|
| 507 |
+
// torch/csrc/autograd/python_variable_indexing.cpp
|
| 508 |
+
// This mirrors `applySlicing` in
// torch/csrc/autograd/python_variable_indexing.cpp
// Applies every index in `indices` in order, producing the resulting
// sliced/viewed tensor; tensor/boolean indices are accumulated into
// `outIndices` for a later advanced-indexing dispatch.
static inline Tensor applySlicing(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& self_device,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  int64_t dim = 0;
  int64_t specified_dims = impl::count_specified_dimensions(indices);

  // See NOTE [nested tensor size for indexing]
  // Nested tensors (self_sizes == nullopt) skip the too-many-indices
  // check.
  if (self_sizes.has_value()) {
    TORCH_CHECK_INDEX(
        specified_dims <= (int64_t)self_sizes->size(),
        "too many indices for tensor of dimension ",
        (int)self_sizes->size());
  }

  Tensor result = self;
  for (const auto i : c10::irange(indices.size())) {
    auto& obj = indices[i];
    // See NOTE [nested tensor size for indexing]
    c10::optional<SymIntArrayRef> result_sizes = result.is_nested()
        ? c10::optional<SymIntArrayRef>(c10::nullopt)
        : c10::optional<SymIntArrayRef>(result.sym_sizes());
    result = handleDimInMultiDimIndexing(
        /*prev_dim_result=*/result,
        /*original_tensor=*/self,
        /*index=*/obj,
        /*dim=*/&dim,
        /*specified_dims_ptr=*/&specified_dims,
        /*real_dim=*/i,
        /*outIndices=*/outIndices,
        /*disable_slice_optimization=*/disable_slice_optimization,
        /*original_tensor_device=*/self_device,
        /*prev_dim_result_sizes=*/result_sizes);
  }
  return result;
}
|
| 547 |
+
} // namespace impl
|
| 548 |
+
|
| 549 |
+
// Dispatches an advanced ("tensor") indexing read: converts the
// gathered index tensors to optional-list form and calls Tensor::index.
static inline Tensor dispatch_index(
    const Tensor& self,
    std::vector<Tensor>&& indices) {
  auto converted = impl::typeConvertIndices(self, std::move(indices));
  return self.index(converted);
}
|
| 554 |
+
|
| 555 |
+
// Dispatches an advanced ("tensor") indexing write — the equivalent of
// `self[indices] = value` — via Tensor::index_put_.
static inline Tensor dispatch_index_put_(
    Tensor& self,
    std::vector<Tensor>&& indices,
    const Tensor& value) {
  auto converted = impl::typeConvertIndices(self, std::move(indices));
  return self.index_put_(converted, value);
}
|
| 562 |
+
|
| 563 |
+
// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing
|
| 564 |
+
// functions from Python ]
|
| 565 |
+
//
|
| 566 |
+
// Question: When should we set `disable_slice_optimization` to `true` when
|
| 567 |
+
// calling C++ tensor indexing functions from Python indexing code?
|
| 568 |
+
//
|
| 569 |
+
// Answer: What "slice optimization" means: when we have a slicing expression
|
| 570 |
+
// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we
|
| 571 |
+
// would skip dispatching the actual slice call as an optimization. However,
|
| 572 |
+
// here are the cases where we DON'T want this optimization:
|
| 573 |
+
//
|
| 574 |
+
// 1. When we are doing 1-D slicing (e.g. `tensor[:]`).
|
| 575 |
+
// Reason: we always return a shallow copy for expressions such as
|
| 576 |
+
// `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:,
|
| 577 |
+
// :]`, we return an alias of `tensor` by doing the following:
|
| 578 |
+
// ```
|
| 579 |
+
// Tensor sliced = impl::applySlicing(self, indices, tensorIndices,
|
| 580 |
+
// disable_slice_optimization, self_device, self_sizes); if
|
| 581 |
+
// (tensorIndices.empty()) {
|
| 582 |
+
// if (sliced.is_same(self)) {
|
| 583 |
+
// // ensure we return a shallow copy for things like x[...]
|
| 584 |
+
// sliced = at::alias(sliced);
|
| 585 |
+
// }
|
| 586 |
+
// return sliced;
|
| 587 |
+
// }
|
| 588 |
+
// ```)
|
| 589 |
+
// 2. When we are doing JIT tracing.
|
| 590 |
+
// Reason: JIT tracing needs the `self.slice(...)` call to properly trace the
|
| 591 |
+
// slice operation.
|
| 592 |
+
|
| 593 |
+
// This mirrors `THPVariable_getitem` in
|
| 594 |
+
// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting
|
| 595 |
+
// `disable_slice_optimization` when calling C++ tensor indexing functions from
|
| 596 |
+
// Python ]
|
| 597 |
+
// This mirrors `THPVariable_getitem` in
// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting
// `disable_slice_optimization` when calling C++ tensor indexing functions from
// Python ]
// Implements `tensor.index({...})`: applies the full index list and
// returns the resulting view (or advanced-indexing result).
static inline Tensor get_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  // NOTE [nested tensor size for indexing]
  // nested tensor does not have a size (yet) so for now we represent its size
  // as null may need to be changed after we reach a better solution for nested
  // tensor size
  c10::optional<SymIntArrayRef> self_sizes = self.is_nested()
      ? c10::optional<SymIntArrayRef>(c10::nullopt)
      : c10::optional<SymIntArrayRef>(self.sym_sizes());

  // handle simple types: integers, slices, none, ellipsis, bool
  // Fast path: a single non-tensor index avoids the general machinery.
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_integer()) {
      return impl::applySelect(
          self, 0, index.integer(), 0, self_device, self_sizes);
    } else if (index.is_slice()) {
      // 1-D slicing always dispatches the slice (optimization disabled)
      // so `tensor[:]` returns a shallow copy, not `tensor` itself.
      return impl::applySlice(
          self,
          0,
          index.slice().start(),
          index.slice().stop(),
          index.slice().step(),
          /*disable_slice_optimization=*/true,
          self_device,
          self_sizes);
    } else if (index.is_none()) {
      return self.unsqueeze(0);
    } else if (index.is_ellipsis()) {
      // `tensor[...]` returns an alias, never `tensor` itself.
      return at::alias(self);
    } else if (index.is_boolean()) {
      Tensor result = self.unsqueeze(0);
      return dispatch_index(
          result,
          std::vector<Tensor>{impl::boolToIndexingTensor(
              result, index.boolean(), self_device)});
    }
  }

  // General path: apply all indices, deferring tensor indices.
  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    if (sliced.is_same(self)) {
      // ensure we return a shallow copy for things like x[...]
      sliced = at::alias(sliced);
    }
    return sliced;
  }

  // indexing by tensors ("advanced" indexing)
  return dispatch_index(sliced, std::move(tensorIndices));
}
|
| 658 |
+
|
| 659 |
+
// This mirrors `THPVariable_setitem` in
|
| 660 |
+
// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a
|
| 661 |
+
// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++
|
| 662 |
+
// tensor indexing functions from Python ]
|
| 663 |
+
// This mirrors `THPVariable_setitem` in
// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a
// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++
// tensor indexing functions from Python ]
// Implements `tensor.index_put_({...}, value)`: assigns `value` into
// the region of `self` selected by `indices`.
static inline void set_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    const Tensor& value,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  SymIntArrayRef self_sizes = self.sym_sizes();

  // handle simple types: integers, slices, ellipsis, bool
  // Fast path: a single non-tensor index maps directly to a copy/fill.
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_boolean() && !index.boolean()) {
      // do nothing for false (technically we should check the size, but we
      // don't have real 0-sized shapes.
      return;
    } else if (index.is_ellipsis()) {
      copy_to(self, value);
      return;
    } else if (index.is_none() || (index.is_boolean() && index.boolean())) {
      // `None` / `True` both address the whole tensor through an added
      // leading dimension.
      copy_to(self.unsqueeze(0), value);
      return;
    } else if (index.is_integer()) {
      copy_to(
          impl::applySelect(
              self, 0, index.integer(), 0, self_device, self_sizes),
          value);
      return;
    } else if (index.is_slice()) {
      copy_to(
          impl::applySlice(
              self,
              0,
              index.slice().start(),
              index.slice().stop(),
              index.slice().step(),
              /*disable_slice_optimization=*/disable_slice_optimization,
              self_device,
              self_sizes),
          value);
      return;
    }
  }

  // General path: slice first, then (if needed) advanced index_put_.
  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    copy_to(sliced, value);
    return;
  }

  // numpy compat: strip leading size-1 dims from `value` before the
  // index_put_ broadcast (see slicePrefix1sSize).
  SymIntArrayRef valueSizes = value.sym_sizes();
  SymIntArrayRef slicedValueSizes = slicePrefix1sSize(valueSizes);
  Tensor valuesSliced;
  if (!valueSizes.equals(slicedValueSizes)) {
    valuesSliced = value.view_symint(slicedValueSizes);
  } else {
    valuesSliced = value;
  }
  dispatch_index_put_(sliced, std::move(tensorIndices), valuesSliced);
  return;
}
|
| 730 |
+
|
| 731 |
+
} // namespace at::indexing
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h
ADDED
|
@@ -0,0 +1,987 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/TensorMeta.h>
|
| 4 |
+
#include <ATen/core/Dimname.h>
|
| 5 |
+
#include <ATen/core/Range.h>
|
| 6 |
+
#include <ATen/core/TensorBase.h>
|
| 7 |
+
#include <c10/core/DynamicCast.h>
|
| 8 |
+
#include <c10/util/FunctionRef.h>
|
| 9 |
+
#include <c10/util/MaybeOwned.h>
|
| 10 |
+
#include <c10/util/SmallVector.h>
|
| 11 |
+
#include <c10/util/TypeCast.h>
|
| 12 |
+
#include <c10/util/irange.h>
|
| 13 |
+
|
| 14 |
+
#include <array>
|
| 15 |
+
#include <bitset>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
class Tensor;
|
| 19 |
+
class OptionalTensorRef;
|
| 20 |
+
using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
|
| 21 |
+
} // namespace at
|
| 22 |
+
|
| 23 |
+
// TensorIterator is a helper class for element-wise operations, such as
|
| 24 |
+
// arithmetic, comparisons, and trigonometric functions. It handles
|
| 25 |
+
// broadcasting and type conversions of operands.
|
| 26 |
+
//
|
| 27 |
+
// This is inspired by NumPy's Array Iterator API (NpyIter).
|
| 28 |
+
//
|
| 29 |
+
// The files Loops.h and Loops.cuh provide functions to build kernels that
|
| 30 |
+
// use TensorIterator.
|
| 31 |
+
//
|
| 32 |
+
// Example:
|
| 33 |
+
//
|
| 34 |
+
// auto iter = TensorIteratorConfig()
|
| 35 |
+
// .add_output(output)
|
| 36 |
+
// .add_input(input)
|
| 37 |
+
// .build()
|
| 38 |
+
//
|
| 39 |
+
// [MyKernel.cpp / MyKernel.cu]
|
| 40 |
+
// cpu_kernel(iter, [](float a, float b) {
|
| 41 |
+
// return a + b;
|
| 42 |
+
// });
|
| 43 |
+
//
|
| 44 |
+
// gpu_kernel(iter, []GPU_LAMBDA(float a, float b) -> float {
|
| 45 |
+
// return a + b;
|
| 46 |
+
// });
|
| 47 |
+
//
|
| 48 |
+
// Note [Order of Construction]
|
| 49 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 50 |
+
// When setting up the tensor iterator configuration, the output Tensors
|
| 51 |
+
// have to be added first via
|
| 52 |
+
// TensorIteratorConfig::add_owned_output(at::Tensor). After adding all outputs,
|
| 53 |
+
// the inputs can be added via
|
| 54 |
+
// TensorIteratorConfig::add_owned_input(at::Tensor).
|
| 55 |
+
// Adding another output after inputs have been added will raise an exception.
|
| 56 |
+
//
|
| 57 |
+
// Note [Common Dtype Computation]
|
| 58 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 59 |
+
// Some operations have a natural notion of a "common dtype" or
|
| 60 |
+
// "computation dtype" where all inputs are cast to one dtype, the
|
| 61 |
+
// operation is performed, and then the results are cast to all outputs.
|
| 62 |
+
//
|
| 63 |
+
// TensorIterator infers a common dtype if all inputs have the same dtype,
|
| 64 |
+
// and it computes one using type promotion rules on its inputs if
|
| 65 |
+
// promote_inputs_to_common_dtype_ is true. Attempting to query
|
| 66 |
+
// a common dtype otherwise will throw an exception.
|
| 67 |
+
//
|
| 68 |
+
// Note that the outputs are not considered when computing a common dtype.
|
| 69 |
+
|
| 70 |
+
namespace at {
|
| 71 |
+
|
| 72 |
+
namespace internal {
|
| 73 |
+
// This parameter is heuristically chosen to determine the minimum amount of
// work that warrants parallelism. For example, when summing an array, it is
// deemed inefficient to parallelise over arrays shorter than 32768. Further,
// no parallel algorithm (such as parallel_reduce) should split work into
// chunks smaller than GRAIN_SIZE.
constexpr int64_t GRAIN_SIZE = 32768;
|
| 79 |
+
|
| 80 |
+
// Storage for a non-owning Tensor, without needing to include Tensor.h
//
// An OptionalTensorRef is stored in-place inside the raw, suitably aligned
// byte buffer `data_`; the constructor/destructor (defined out of line, where
// Tensor.h is available) are responsible for constructing and destroying the
// object inside that buffer. This keeps this header free of a Tensor.h
// dependency while still allowing inline access via get().
class TORCH_API OpaqueOptionalTensorRef {
  // Raw storage for one OptionalTensorRef; aligned/sized via TensorBase,
  // which is assumed to have the same layout (checked in the .cpp).
  alignas(alignof(TensorBase)) std::array<char, sizeof(TensorBase)> data_;

 public:
  OpaqueOptionalTensorRef();
  OpaqueOptionalTensorRef(const OpaqueOptionalTensorRef&) = default;
  OpaqueOptionalTensorRef& operator=(const OpaqueOptionalTensorRef&) = default;
  OpaqueOptionalTensorRef(OpaqueOptionalTensorRef&&) noexcept = default;
  OpaqueOptionalTensorRef& operator=(OpaqueOptionalTensorRef&&) noexcept =
      default;
  ~OpaqueOptionalTensorRef();

  // Access the OptionalTensorRef living inside data_.
  OptionalTensorRef* get() {
    return reinterpret_cast<OptionalTensorRef*>(data_.data());
  }
  const OptionalTensorRef* get() const {
    return reinterpret_cast<const OptionalTensorRef*>(data_.data());
  }

  // Smart-pointer-style convenience accessors delegating to get().
  OptionalTensorRef& operator*() {
    return *get();
  }
  const OptionalTensorRef& operator*() const {
    return *get();
  }
  OptionalTensorRef* operator->() {
    return get();
  }
  const OptionalTensorRef* operator->() const {
    return get();
  }

  // Returns the referenced Tensor (defined out of line, needs Tensor.h).
  const Tensor& getTensor() const;
};
|
| 115 |
+
} // namespace internal
|
| 116 |
+
|
| 117 |
+
/// Metadata for a single input or output operand of a TensorIterator:
/// the (possibly borrowed) tensor itself, its data pointer and byte strides
/// after broadcasting, and dtype/device bookkeeping used for type promotion.
struct TORCH_API OperandInfo {
  using StrideVector = SmallVector<int64_t, 6>;
  OperandInfo() = default;
  C10_ALWAYS_INLINE explicit OperandInfo(c10::MaybeOwned<TensorBase>&& t) {
    // Seed device/dtype bookkeeping from the tensor when it is defined;
    // undefined tensors (outputs to be allocated later) leave the defaults.
    if (t->defined()) {
      device = t->device();
      target_dtype = t->scalar_type();
      current_dtype = target_dtype;
    }
    tensor(std::move(t));
    validate();
  }

  C10_ALWAYS_INLINE OperandInfo(const OperandInfo&) = default;
  C10_ALWAYS_INLINE OperandInfo& operator=(const OperandInfo&) = default;
  C10_ALWAYS_INLINE OperandInfo(OperandInfo&&) noexcept = default;
  C10_ALWAYS_INLINE OperandInfo& operator=(OperandInfo&&) noexcept = default;
  C10_ALWAYS_INLINE ~OperandInfo() = default;

  /// The data pointer. This may be different from tensor->data_ptr() if the
  /// iterator is split.
  void* data = nullptr;

  /// Stride after broadcasting. The stride is in bytes, not number of
  /// elements.
  StrideVector stride_bytes;

  /// The desired device and type for the operand. For inputs, this specifies
  /// that the input should be converted to this type if necessary. For
  /// outputs, this specifies which type to allocate. target_dtype and device
  /// are initialized with the dtype and device of the tensor but during type
  /// promotion target_dtype value can become different from tensor's dtype
  /// also, during type promotion target_dtype and device can be set for an
  /// undefined tensor so that tensor can be properly constructed later.
  c10::optional<Device> device = c10::nullopt;
  ScalarType target_dtype = ScalarType::Undefined;
  // Caches dtype of the tensor, because scalar_type is an expensive operation
  // If dtype of the tensor is changed (e.g. as a result of type promotion or
  // in allocate_outputs), this value should be changed too.
  ScalarType current_dtype = ScalarType::Undefined;

  bool is_device_defined() const {
    return device.has_value();
  }
  bool is_type_defined() const {
    return target_dtype != ScalarType::Undefined;
  }
  // Options (dtype + device) to use when the iterator allocates this operand.
  TensorOptions options() const {
    return TensorOptions(target_dtype).device(device);
  }

  // True if this operand is one of the iterator's outputs.
  bool is_output = false;

  // NOTE(review): presumably set when the output tensor will be resized
  // during build; confirm against mark_resize_outputs/build in the .cpp.
  bool will_resize = false;

  // True if the operand is both read from and written to.
  bool is_read_write = false;

  // TensorIterator only supports strided tensors; reject other layouts.
  void validate() {
    TORCH_CHECK(
        !tensor_base_->defined() || tensor_base_->layout() == kStrided,
        "unsupported tensor layout: ",
        tensor_base_->layout());
  }

  /// The tensor operand. Note that the strides, data pointer, and
  /// other attributes may differ due to dimension reordering and
  /// coalescing.
  const Tensor& tensor() const {
    return tensor_storage_.getTensor();
  }
  const TensorBase& tensor_base() const {
    return *tensor_base_;
  }
  void tensor(c10::MaybeOwned<TensorBase>&& tensor);

  // Save the original tensor operand in cases when an output is modified
  // (e.g. if dtype is changed)
  const Tensor& original_tensor() const {
    return original_tensor_storage_.getTensor();
  }
  const TensorBase& original_tensor_base() const {
    return *original_tensor_base_;
  }

  // Set tensor to a new value, and store the old tensor value in
  // original_tensor. Should only ever be called once for the lifetime of an
  // operand.
  void exchange_tensor(c10::MaybeOwned<TensorBase>&& new_tensor);

  // Move original_tensor back into tensor, exchange_tensor must have been
  // called before
  void restore_original_tensor();

 private:
  c10::MaybeOwned<TensorBase> tensor_base_;
  c10::MaybeOwned<TensorBase> original_tensor_base_ =
      c10::MaybeOwned<TensorBase>::owned(c10::in_place);

  // We store TensorBase visibly in the header to allow inline access.
  // However, we sometimes need a genuine `const Tensor &` for the
  // TensorIterator API. So, we also store a non-owning `Tensor`
  // object in these `_storage_` variables.
  internal::OpaqueOptionalTensorRef tensor_storage_;
  internal::OpaqueOptionalTensorRef original_tensor_storage_;
};
|
| 222 |
+
|
| 223 |
+
struct SplitUntil32Bit;
|
| 224 |
+
|
| 225 |
+
// Memory-layout category used by fast_set_up() to skip the full shape/stride
// computation when all operands share one of these common layouts.
enum class FastSetupType : uint8_t {
  NONE,
  CONTIGUOUS,
  CHANNELS_LAST,
  NON_OVERLAPPING_DENSE
};
|
| 231 |
+
|
| 232 |
+
class TensorIteratorConfig;
|
| 233 |
+
struct TensorIterator;
|
| 234 |
+
|
| 235 |
+
/// Core state and logic shared by TensorIterator. Inherits impl::MetaBase,
/// so it can also serve as the meta interface for structured kernels
/// (see set_output_raw_strided / maybe_get_output overrides).
struct TORCH_API TensorIteratorBase : public impl::MetaBase {
  using DimMask = std::bitset<64>;
  using PtrVector = SmallVector<char*, 4>;
  using StrideVector = SmallVector<int64_t, 6>;

  TensorIteratorBase();
  void build(TensorIteratorConfig&);

  // The inner-loop function operates on the fastest moving dimension. It
  // implements element-wise operations in terms of 1-d strided tensors.
  //
  // Arguments:
  //  data: data pointers for each operand (length `ntensors`)
  //  strides: stride for each operand (length `ntensors`)
  //  size: size of inner loop
  //
  // The `size` often matches shape[0], but may be smaller due to
  // parallelization of the inner loop.
  using loop2d_t = c10::function_ref<
      void(char** data, const int64_t* strides, int64_t size0, int64_t size1)>;

  using loop_subiter_t = c10::function_ref<void(TensorIteratorBase& subiter)>;

  void foreach_reduced_elt(loop_subiter_t loop, bool parallelize = true);

  // Number of (computation) dimensions currently iterated over.
  int ndim() const {
    return static_cast<int>(shape_.size());
  }
  IntArrayRef shape() const {
    return shape_;
  }
  int64_t numel() const;
  int ntensors() const {
    return static_cast<int>(operands_.size());
  }
  int noutputs() const {
    return num_outputs_;
  }
  int ninputs() const {
    return ntensors() - noutputs();
  }
  IntArrayRef view_offsets() const {
    return view_offsets_;
  }

  /// number of elements in the output operand. this is the same as numel()
  /// for operations that are not reductions.
  int64_t num_output_elements() const;

  /// number of reduced dimensions in a reduction operation
  int num_reduce_dims() const;

  /// 1-dimensional iteration and no buffering or type conversion
  bool is_trivial_1d() const;
  /// Reducible to 1-dimensional and all operands are contiguous
  bool is_contiguous() const;
  bool is_dim_reduced(int dim) const;

  /// Accessors for each operand
  IntArrayRef strides(int arg) const {
    return operands_[arg].stride_bytes;
  }
  void* data_ptr(int arg) const;
  ScalarType dtype(int arg = 0) const {
    return operands_[arg].current_dtype;
  }
  ScalarType common_dtype() const {
    TORCH_INTERNAL_ASSERT(
        common_dtype_ != ScalarType::Undefined,
        "Queried for invalid common dtype!");
    return common_dtype_;
  }
  ScalarType input_dtype(int arg = 0) const {
    return operands_[num_outputs_ + arg].current_dtype;
  }
  Device device(int arg = 0) const {
    return operands_[arg].device.value();
  }
  c10::DeviceType device_type(int arg = 0) const {
    return device(arg).type();
  }
  int64_t element_size(int arg) const {
    return static_cast<int64_t>(elementSize(dtype(arg)));
  }
  bool is_scalar(int arg) const;
  bool is_cpu_scalar(int arg) const;

  const TensorBase& tensor_base(int arg) const {
    return operands_[arg].tensor_base();
  }
  const Tensor& tensor(int arg) const {
    return operands_[arg].tensor();
  }

  // Outputs occupy the first num_outputs_ slots of operands_; inputs follow.
  const TensorBase& output_base(int arg = 0) const {
    AT_ASSERT(arg < num_outputs_);
    return tensor_base(arg);
  }

  const Tensor& output(int arg = 0) const {
    AT_ASSERT(arg < num_outputs_);
    return tensor(arg);
  }

  const TensorBase& input_base(int arg = 0) const {
    AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
    return tensor_base(num_outputs_ + arg);
  }
  const Tensor& input(int arg = 0) const {
    AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
    return tensor(num_outputs_ + arg);
  }

  // Copies from temporary outputs back to the original outputs
  // NOTE: only used on CPU
  void cast_outputs();

  /// Removes an operand from this iterator
  void remove_operand(int arg);
  /// Shrinks an iterated dimension
  void narrow(int dim, int64_t start, int64_t size);
  /// Narrows every dim after and including `start_dim` to size one.
  void select_all_keeping_dim(int start_dim, IntArrayRef starts);
  /// Replaces the data pointer for the operand at index `arg`.
  /// The new pointer should have the same sizes, strides and dtype as the
  /// original
  void unsafe_replace_operand(int arg, void* data);

  /// Splits this TensorIterator into two iterators. Together they iterate
  /// over the entire operation. Used by `with_32bit_indexing()`.
  std::unique_ptr<TensorIterator> split(int dim);

  /// Returns the dimension with the largest extent:
  /// (size[dim]-1) * stride[dim]
  int get_dim_to_split() const;

  /// Reads the (scalar) operand's value, cast from its stored dtype to T.
  template <typename T>
  T scalar_value(int arg) {
    auto& op = operands_[arg];
    return c10::fetch_and_cast<T>(op.tensor_base().scalar_type(), op.data);
  }

  /// Return scalar value from original_tensor_base if it is defined. When
  /// common_dtype is Half, casting scalar input to common_dtype might
  /// overflow. If the scalar is already given in the type of Half, then
  /// return scalar value from tensor_base.
  template <typename T>
  T original_scalar_value(int arg) {
    auto& original_tensor_base = operands_[arg].original_tensor_base();
    if (original_tensor_base.defined()) {
      TORCH_INTERNAL_ASSERT(
          original_tensor_base.scalar_type() != common_dtype());
      return c10::fetch_and_cast<T>(
          original_tensor_base.scalar_type(), original_tensor_base.data_ptr());
    } else {
      return scalar_value<T>(arg);
    }
  }

 private:
  // Adapts a 1-d loop into a 2-d loop by iterating the outer dimension
  // manually: the outer strides live immediately after the inner strides
  // in the `strides` array (one entry per operand).
  template <typename loop1d_t>
  auto loop_2d_from_1d(const loop1d_t& loop) {
    return
        [loop, ntensor = ntensors()](
            char** base, const int64_t* strides, int64_t size0, int64_t size1) {
          PtrVector data(base, base + ntensor);
          const int64_t* outer_strides = &strides[ntensor];
          for (const auto i : c10::irange(size1)) {
            if (i > 0) {
              for (const auto arg : c10::irange(ntensor)) {
                data[arg] += outer_strides[arg];
              }
            }
            loop(data.data(), strides, size0);
          }
        };
  }

 public:
  // Overload accepting a 1-d loop; wrapped via loop_2d_from_1d. The
  // enable_if restricts this overload to callables convertible to the 1-d
  // loop signature, so it does not shadow the loop2d_t overload below.
  template <
      typename loop1d_t,
      std::enable_if_t<
          std::is_convertible<
              loop1d_t,
              c10::function_ref<
                  void(char**, const int64_t* strides, int64_t size)>>::value,
          int> = 0>
  void for_each(loop1d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE) {
    for_each(loop_2d_from_1d(loop), grain_size);
  }

  void for_each(loop2d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE);

  void parallel_reduce(loop2d_t loop);

  // 1-d overload of serial_for_each; same enable_if trick as for_each.
  template <
      typename loop1d_t,
      std::enable_if_t<
          std::is_convertible<
              loop1d_t,
              c10::function_ref<
                  void(char**, const int64_t* strides, int64_t size)>>::value,
          int> = 0>
  void serial_for_each(loop1d_t loop, Range range) {
    serial_for_each(loop_2d_from_1d(loop), range);
  }

  void serial_for_each(loop2d_t loop, Range range) const;

  /// Create a strides array for a Tensor with shape of this iterator. The
  /// parameter `element_size` specifies the size of Tensor's data type in
  /// bytes (e.g. `4` for `float`)
  StrideVector compatible_stride(int element_size) const;

  /// Inverts the re-ordering done by reorder_dimensions. This can only be
  /// called *before* coalesce_dimensions() is called.
  DimVector invert_perm(IntArrayRef input) const;

  /// Reapply same re-ordering as it is done by reorder_dimensions. This can
  /// only be called *before* coalesce_dimensions() is called.
  DimVector apply_perm_and_mul(IntArrayRef input, int mul) const;

  /// Helper functions for CPU iteration
  StrideVector get_dim_strides(int dim) const;
  StrideVector get_strides() const;
  StrideVector get_inner_strides() const {
    return get_dim_strides(0);
  }
  PtrVector get_base_ptrs() const;

  // Helper functions for advanced stride manipulations (e.g. torch.flip)
  void _unsafe_set_arg_strides(const int arg, IntArrayRef strides) {
    operands_[arg].stride_bytes = strides;
  }
  void _unsafe_set_arg_data(const int arg, void* data) {
    operands_[arg].data = data;
  }

  /// true if the stride computation can use 32-bit arithmetic. Used by GPU
  /// kernels
  bool can_use_32bit_indexing() const;

  /// An "iterable" object that recursively splits this iterator into
  /// sub-iterators that can use 32-bit indexing.
  SplitUntil32Bit with_32bit_indexing() const;

  /// If the kernel should accumulate into the output. Only relevant for CUDA
  /// reductions.
  bool should_accumulate() const {
    return accumulate_;
  }

  /// Whether this iterator produces the actual output,
  /// as opposed to something that will be accumulated further. Only relevant
  /// for CUDA reductions.
  bool is_final_output() const {
    return final_output_;
  }

  // True if every operand's innermost stride equals its element size,
  // i.e. the first (fastest-moving) dimension is contiguous for all operands.
  bool has_contiguous_first_dim() const {
    if (ndim() == 0) {
      return true;
    }

    int num_tensors = ntensors();
    for (const auto i : c10::irange(num_tensors)) {
      if (strides(i)[0] != element_size(i)) {
        return false;
      }
    }
    return true;
  }

  void set_output_raw_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides,
      TensorOptions options,
      DimnameList names) override;

// Deletes every rvalue-reference combination of (out, a, b) for a builder
// method so that callers cannot borrow from temporaries.
#define TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, maybestatic)             \
  maybestatic void methodname(                                              \
      TensorBase&& out, const TensorBase& a, const TensorBase& b) = delete; \
  maybestatic void methodname(                                              \
      const TensorBase& out, TensorBase&& a, const TensorBase& b) = delete; \
  maybestatic void methodname(                                              \
      const TensorBase& out, const TensorBase& a, TensorBase&& b) = delete; \
  maybestatic void methodname(                                              \
      TensorBase&& out, TensorBase&& a, const TensorBase& b) = delete;      \
  maybestatic void methodname(                                              \
      TensorBase&& out, const TensorBase& a, TensorBase&& b) = delete;      \
  maybestatic void methodname(                                              \
      const TensorBase& out, TensorBase&& a, TensorBase&& b) = delete;      \
  maybestatic void methodname(                                              \
      TensorBase&& out, TensorBase&& a, TensorBase&& b) = delete;

#define TORCH_DISALLOW_TEMPORARIES(methodname) \
  TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, )

  void build_binary_float_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_borrowing_binary_float_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_float_op)
  void build_binary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_borrowing_binary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_op)
  void build_unary_float_op(const TensorBase& out, const TensorBase& a);
  void build_borrowing_unary_float_op(
      const TensorBase& out,
      const TensorBase& a);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_float_op)
  void build_unary_op(const TensorBase& out, const TensorBase& a);
  // Odd special case needed for pow. Has to borrow the output because
  // it's a structured kernel, but the argument is potentially a copy.
  void build_output_borrowing_argument_owning_unary_op(
      const TensorBase& out,
      const TensorBase& a);
  void build_borrowing_unary_op(const TensorBase& out, const TensorBase& a);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_op)
  void build_borrowing_unary_force_boolean_op(
      const TensorBase& out,
      const TensorBase& a);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_force_boolean_op)
  void build_comparison_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_borrowing_comparison_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_comparison_op)
  // Another special case: we need to own the second argument for comparison
  // ops.
  void build_borrowing_except_last_argument_comparison_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_ternary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b,
      const TensorBase& c);

// NOTE: TORCH_DISALLOW_TEMPORARIES_IMPL intentionally stays defined; it is
// reused (and #undef'd) by TensorIterator below.
#undef TORCH_DISALLOW_TEMPORARIES
 protected:
  // Mutable reference as it moves tensors out of TensorIteratorConfig
  void populate_operands(TensorIteratorConfig&);
  void mark_outputs();
  void mark_resize_outputs(const TensorIteratorConfig&);
  void compute_mem_overlaps(const TensorIteratorConfig&);
  void compute_shape(const TensorIteratorConfig&);
  void compute_strides(const TensorIteratorConfig&);
  void reorder_dimensions();
  void permute_dimensions(IntArrayRef perm);
  void compute_types(const TensorIteratorConfig&);
  ScalarType compute_common_dtype();
  void allocate_or_resize_outputs();
  bool fast_set_up(const TensorIteratorConfig&);
  FastSetupType compute_fast_setup_type(const TensorIteratorConfig&);
  void compute_names(const TensorIteratorConfig&);
  void propagate_names_to_outputs();
  void coalesce_dimensions();

 protected:
  /// Records the "computation" shape of the output tensor. The computation
  /// shape is different from the regular shape in a few ways:
  ///
  ///   - The shape may be permuted (via permute_dimensions) so that we
  ///     process the dimensions in the most computationally efficient order
  ///     (rather than the logical order given to us by the users.)
  ///   - The shape may have adjacent dimensions collapsed (via
  ///     coalesce_dimensions) so that we minimize the number of
  ///     dimensions we have to explicitly iterate over.  For example,
  ///     a pointwise operation on a contiguous tensor "computationally"
  ///     consists of only a single dimension.
  ///
  /// In other words, the computation shape is the output shape as it
  /// actually matters for implementing the kernel, but not necessarily the
  /// output shape that the user will see in the end.
  ///
  /// The lifecycle of mutations to shape_ in TensorIterator:
  ///   - declare_static_shape() sets an initial shape explicitly
  ///     provided by user, otherwise
  ///   - compute_shape() computes the true (non-computational) shape
  ///     specified by the user.
  ///   - reorder_dimensions() reorders dimensions to improve coalescing.
  ///   - coalesce_dimensions() then coalesces adjacent dimensions when
  ///     possible.
  ///
  /// The shape may also be further modified if we create sub-TensorIterators,
  /// e.g., via narrow or select_all_keeping_dim.
  DimVector shape_;

  /// Temporarily records the permutation computed by reorder_dimensions.
  /// This permutation maps the computation output dimension (dim) to
  /// the original true output dimension (perm_[dim]).  It is used by
  /// invert_perm to undo the permutation.  After coalesce_dimensions is
  /// called, the permutation is no longer valid (as, in general, there
  /// is no permutation that will make computation dimensions to
  /// output dimensions); methods that manipulate perm_ are obligated
  /// to test that !has_coalesced_dimensions
  DimVector perm_;

  /// Has coalesce_dimensions() (or any moral equivalent, e.g., fast_build())
  /// been called?  This is SOLELY used to check validity of perm_.
  bool has_coalesced_dimensions_ = false;

  /// Whether iteration must be fixed. This disables dimension permuting and
  /// also changes how for_each divides work among threads.
  bool enforce_linear_iteration_ = false;

  /// The index offsets into the original tensors for each dimension.
  /// This is only non-zero when you narrow() a TensorIterator (e.g.,
  /// when you make sub-TensorIterators).
  DimVector view_offsets_;

  /// The computed names of the output tensor.  Computed by compute_names()
  NameVector names_;

  /// The operands of the TensorIterator: both the inputs and outputs.  The
  /// outputs MUST come first in the operands_ list.  There is always an
  /// operand for each output of the TensorIterator, even if TensorIterator
  /// will ultimately be responsible for allocating the output; in those
  /// cases, tensor is simply undefined (and will be populated later
  /// during build()).
  ///
  /// This list is initially populated prior to build(), but build() mutates
  /// OperandInfo to populate more information.
  SmallVector<OperandInfo, 4> operands_;

  /// Number of outputs in operands_ (the length of the outputs prefix
  /// in operands_).
  int num_outputs_ = 0;

  /// Whether or not all operands have the same shape and are 1d+. Having all
  /// the same shape affects whether or not the iterator is eligible for fast
  /// setup.
  bool all_ops_same_shape_ = false;
  /// Whether or not all operands are 0d, this affects type promotion
  bool all_ops_are_scalars_ = false;

  /// The "computation" dtype of TensorIterator, specifying what the dtype
  /// we will do the internal computation in TensorIterator.  Typically,
  /// this matches the dtype of the output tensors, but not always!
  ScalarType common_dtype_ = ScalarType::Undefined;

  /// This is currently defined as kCPU, or the device of the first non-CPU
  /// tensor argument. See TensorIteratorBase::compute_types for details.
  Device common_device_ = kCPU;

  /// Set by split(), see should_accumulate() and is_final_output()
  bool accumulate_ = false;
  bool final_output_ = true;

  // From TensorIteratorConfig
  bool is_reduction_ = false;

  /// Set by populate_operands(), says if we're handling meta tensors
  bool is_meta_ = false;
};
|
| 706 |
+
|
| 707 |
+
/// Concrete TensorIterator. Adds static factory helpers for common op shapes
/// (binary, unary, comparison, reduction, nullary) on top of
/// TensorIteratorBase.
struct TORCH_API TensorIterator final : public TensorIteratorBase {
  TensorIterator() : TensorIteratorBase() {}
  // Slicing is OK, TensorIterator guaranteed NOT to have any fields
  TensorIterator(const TensorIteratorBase& iter) : TensorIteratorBase(iter) {}

// Reuses TORCH_DISALLOW_TEMPORARIES_IMPL (still defined from the base class
// section above) to delete temporary-taking overloads of static factories.
#define TORCH_DISALLOW_TEMPORARIES(methodname) \
  TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, static)

  static TensorIterator binary_float_op(
      TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  static TensorIterator binary_op(
      TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  static TensorIterator borrowing_binary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(borrowing_binary_op)
  static TensorIterator comparison_op(
      TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  static TensorIterator unary_op(TensorBase& out, const TensorBase& a);
  static TensorIterator unary_float_op(TensorBase& out, const TensorBase& a);
  static TensorIterator nullary_op(TensorBase& out);
  static TensorIterator borrowing_nullary_op(const TensorBase& out);
  static TensorIterator borrowing_nullary_op(TensorBase&& out) = delete;
  static TensorIterator reduce_op(TensorBase& out, const TensorBase& a);
  static TensorIterator reduce_op(
      TensorBase& out1,
      TensorBase& out2,
      const TensorBase& a);
#undef TORCH_DISALLOW_TEMPORARIES
#undef TORCH_DISALLOW_TEMPORARIES_IMPL

  // impl::MetaBase overrides (structured-kernels interface).
  const Tensor& maybe_get_output(int64_t output_idx) override;
  void set_output_raw_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides,
      TensorOptions options,
      DimnameList names) override;
};
|
| 753 |
+
|
| 754 |
+
class TORCH_API TensorIteratorConfig final {
|
| 755 |
+
public:
|
| 756 |
+
friend struct TensorIteratorBase;
|
| 757 |
+
friend struct TensorIterator;
|
| 758 |
+
|
| 759 |
+
TensorIteratorConfig() = default;
|
| 760 |
+
|
| 761 |
+
C10_DISABLE_COPY_AND_ASSIGN(TensorIteratorConfig);
|
| 762 |
+
|
| 763 |
+
/// Construction
|
| 764 |
+
// Stores input/output Tensors without incrementing the reference count.
|
| 765 |
+
// Important: the outputs have to be added before the inputs.
|
| 766 |
+
TensorIteratorConfig& add_output(const TensorBase& output) {
|
| 767 |
+
return add_borrowed_output(output);
|
| 768 |
+
}
|
| 769 |
+
TensorIteratorConfig& add_input(const TensorBase& input) {
|
| 770 |
+
return add_borrowed_input(input);
|
| 771 |
+
}
|
| 772 |
+
|
| 773 |
+
// Borrowing from temporaries is unlikely to go well.
|
| 774 |
+
TensorIteratorConfig& add_output(TensorBase&& output) = delete;
|
| 775 |
+
TensorIteratorConfig& add_input(TensorBase&& input) = delete;
|
| 776 |
+
|
| 777 |
+
// Stores input/output Tensors while incrementing the reference count.
|
| 778 |
+
// Note that add_{in,out}put are nearly always what you
|
| 779 |
+
// want, and the exception (adding an unnamed temporary) won't
|
| 780 |
+
// compile.
|
| 781 |
+
TensorIteratorConfig& add_owned_output(const TensorBase& output);
|
| 782 |
+
TensorIteratorConfig& add_owned_input(const TensorBase& input);
|
| 783 |
+
|
| 784 |
+
// Advanced API: stores input/output Tensors without incrementing
|
| 785 |
+
// the reference count. The caller must ensure that these Tensors
|
| 786 |
+
// live at least as long as this TensorIteratorConfig and any
|
| 787 |
+
// TensorIteratorBase built from this TensorIteratorConfig.
|
| 788 |
+
// Important: the outputs have to be added before the inputs.
|
| 789 |
+
TensorIteratorConfig& add_borrowed_output(const TensorBase& output);
|
| 790 |
+
TensorIteratorConfig& add_borrowed_input(const TensorBase& input);
|
| 791 |
+
|
| 792 |
+
// Borrowing from temporaries is unlikely to go well.
|
| 793 |
+
TensorIteratorConfig& add_borrowed_output(TensorBase&& output) = delete;
|
| 794 |
+
TensorIteratorConfig& add_borrowed_input(TensorBase&& input) = delete;
|
| 795 |
+
|
| 796 |
+
// Sets the check_mem_overlap_ flag, which is true by default.
|
| 797 |
+
// If true, inputs are checked for partial overlap with the outputs and
|
| 798 |
+
// outputs are checked for internal overlap (e.g. broadcasted views). An error
|
| 799 |
+
// is raised if unacceptable overlap is detected.
|
| 800 |
+
// If you're migrating an existing operator to using TensorIterator, please
|
| 801 |
+
// consider if the previous implementation checked memory overlap. If it did
|
| 802 |
+
// not, and if the operator is idempotent (for example, Tensor.fill_(0)), then
|
| 803 |
+
// checking memory overlap is BC-breaking. Please don't check memory overlap
|
| 804 |
+
// in that case.
|
| 805 |
+
TensorIteratorConfig& set_check_mem_overlap(bool check_mem_overlap) {
|
| 806 |
+
check_mem_overlap_ = check_mem_overlap;
|
| 807 |
+
return *this;
|
| 808 |
+
}
|
| 809 |
+
|
| 810 |
+
// Sets the check_all_same_dtype_ flag, which is true by default
|
| 811 |
+
// If true, checks that all inputs and defined outputs have the same dtype
|
| 812 |
+
// Setting either of promote_inputs_to_common_dtype_
|
| 813 |
+
// or cast_common_dtype_to_outputs_ to true will set
|
| 814 |
+
// check_all_same_dtype_ to false.
|
| 815 |
+
TensorIteratorConfig& check_all_same_dtype(const bool _check_all_same_dtype) {
|
| 816 |
+
check_all_same_dtype_ = _check_all_same_dtype;
|
| 817 |
+
return *this;
|
| 818 |
+
}
|
| 819 |
+
|
| 820 |
+
// Sets the check_all_same_device_ flag, which is true by default
|
| 821 |
+
// If true, all operands must be on the same device, with the possible
|
| 822 |
+
// exception of CPU scalars, which can be passed to some CUDA kernels
|
| 823 |
+
// as kernel arguments.
|
| 824 |
+
TensorIteratorConfig& check_all_same_device(
|
| 825 |
+
const bool _check_all_same_device) {
|
| 826 |
+
check_all_same_device_ = _check_all_same_device;
|
| 827 |
+
return *this;
|
| 828 |
+
}
|
| 829 |
+
|
| 830 |
+
// Sets the enforce_safe_casting_to_output_ flag, which is false by default
|
| 831 |
+
// If true, the iterator's "common dtype" must be computable
|
| 832 |
+
// (see the [Common Dtype Computation] note) and
|
| 833 |
+
// canCast(common dtype, output dtype) must be true for all outputs.
|
| 834 |
+
TensorIteratorConfig& enforce_safe_casting_to_output(
|
| 835 |
+
const bool _enforce_safe_casting_to_output) {
|
| 836 |
+
enforce_safe_casting_to_output_ = _enforce_safe_casting_to_output;
|
| 837 |
+
return *this;
|
| 838 |
+
}
|
| 839 |
+
|
| 840 |
+
// Sets the enforce_linear_iteration_ flag, which is false by default.
|
| 841 |
+
// If true, iteration goes in the same order as a C-contiguous tensor
|
| 842 |
+
// is layed out in memory. i.e. last dimension iterates fastest.
|
| 843 |
+
//
|
| 844 |
+
// This iteration order can be less efficient and may even prevent
|
| 845 |
+
// vectorization. So only use if the correctness of your kernel depends on it.
|
| 846 |
+
TensorIteratorConfig& enforce_linear_iteration(
|
| 847 |
+
const bool _enforce_linear_iteration = true) {
|
| 848 |
+
enforce_linear_iteration_ = _enforce_linear_iteration;
|
| 849 |
+
return *this;
|
| 850 |
+
}
|
| 851 |
+
|
| 852 |
+
// Sets the promote_inputs_to_common_dtype_ flag, which is false by default
|
| 853 |
+
// If true, the iterator's "common dtype" is always computed (see the
|
| 854 |
+
// [Common Dtype Computation] note) and, on the CPU, temporary copies of
|
| 855 |
+
// the inputs in the common dtype are passed as the actual inputs to
|
| 856 |
+
// the operation.
|
| 857 |
+
// Setting this flag to true sets check_all_same_dtype_ to false.
|
| 858 |
+
TensorIteratorConfig& promote_inputs_to_common_dtype(
|
| 859 |
+
const bool _promote_inputs_to_common_dtype) {
|
| 860 |
+
promote_inputs_to_common_dtype_ = _promote_inputs_to_common_dtype;
|
| 861 |
+
if (_promote_inputs_to_common_dtype) {
|
| 862 |
+
check_all_same_dtype_ = false;
|
| 863 |
+
}
|
| 864 |
+
return *this;
|
| 865 |
+
}
|
| 866 |
+
|
| 867 |
+
// Sets the promote_integer_inputs_to_float_ flag, which is false by default
|
| 868 |
+
// NOTE: If set to true, the promote_inputs_to_common_dtype_ must also be
|
| 869 |
+
// true. If true, if the iterator's "common dtype" is an integral type
|
| 870 |
+
// (including bool)
|
| 871 |
+
// then it is changed to the default float scalar type.
|
| 872 |
+
TensorIteratorConfig& promote_integer_inputs_to_float(
|
| 873 |
+
const bool _promote_integer_inputs_to_float) {
|
| 874 |
+
promote_integer_inputs_to_float_ = _promote_integer_inputs_to_float;
|
| 875 |
+
TORCH_INTERNAL_ASSERT(
|
| 876 |
+
!promote_integer_inputs_to_float_ || promote_inputs_to_common_dtype_);
|
| 877 |
+
return *this;
|
| 878 |
+
}
|
| 879 |
+
|
| 880 |
+
TensorIteratorConfig& is_reduction(const bool _is_reduction) {
|
| 881 |
+
is_reduction_ = _is_reduction;
|
| 882 |
+
return *this;
|
| 883 |
+
}
|
| 884 |
+
|
| 885 |
+
TensorIteratorConfig& allow_cpu_scalars(const bool _allow_cpu_scalars) {
|
| 886 |
+
allow_cpu_scalars_ = _allow_cpu_scalars;
|
| 887 |
+
return *this;
|
| 888 |
+
}
|
| 889 |
+
|
| 890 |
+
// Sets the cast_common_dtype_to_outputs_ flag, which is false by default
|
| 891 |
+
// If true, the iterator's "common dtype" must be computatable
|
| 892 |
+
// (see the [Common Dtype Computation] note) and, on the CPU, temporary
|
| 893 |
+
// copies of the outputs are passed as the actual output to the operation.
|
| 894 |
+
// These temporaries are then copied to the original outputs after
|
| 895 |
+
// the operation is performed (see cast_outputs()).
|
| 896 |
+
// Setting this flag to true sets check_all_same_dtype_ to false.
|
| 897 |
+
TensorIteratorConfig& cast_common_dtype_to_outputs(
|
| 898 |
+
const bool _cast_common_dtype_to_outputs) {
|
| 899 |
+
cast_common_dtype_to_outputs_ = _cast_common_dtype_to_outputs;
|
| 900 |
+
if (_cast_common_dtype_to_outputs) {
|
| 901 |
+
check_all_same_dtype_ = false;
|
| 902 |
+
}
|
| 903 |
+
return *this;
|
| 904 |
+
}
|
| 905 |
+
|
| 906 |
+
TensorIteratorConfig& resize_outputs(bool resize_outputs) {
|
| 907 |
+
resize_outputs_ = resize_outputs;
|
| 908 |
+
return *this;
|
| 909 |
+
}
|
| 910 |
+
|
| 911 |
+
// Bypass output dtype/device computation and fix the dtype/device as
|
| 912 |
+
// specified here.
|
| 913 |
+
TensorIteratorConfig& declare_static_dtype_and_device(
|
| 914 |
+
ScalarType dtype,
|
| 915 |
+
Device device);
|
| 916 |
+
TensorIteratorConfig& declare_static_dtype(ScalarType dtype);
|
| 917 |
+
TensorIteratorConfig& declare_static_device(Device device);
|
| 918 |
+
TensorIteratorConfig& declare_static_shape(IntArrayRef shape);
|
| 919 |
+
TensorIteratorConfig& declare_static_shape(
|
| 920 |
+
IntArrayRef shape,
|
| 921 |
+
IntArrayRef squash_dims);
|
| 922 |
+
|
| 923 |
+
// It would be better if this was && qualified, but this would be at the cost
|
| 924 |
+
// of a lot of boilerplate above
|
| 925 |
+
TensorIterator build() {
|
| 926 |
+
TensorIterator iter;
|
| 927 |
+
iter.build(*this);
|
| 928 |
+
return iter;
|
| 929 |
+
}
|
| 930 |
+
|
| 931 |
+
private:
|
| 932 |
+
SmallVector<c10::MaybeOwned<TensorBase>, 4> tensors_;
|
| 933 |
+
int num_outputs_ = 0;
|
| 934 |
+
int num_inputs_ = 0;
|
| 935 |
+
|
| 936 |
+
c10::optional<DimVector> static_shape_ = c10::nullopt;
|
| 937 |
+
c10::optional<ScalarType> static_dtype_ = c10::nullopt;
|
| 938 |
+
c10::optional<Device> static_device_ = c10::nullopt;
|
| 939 |
+
bool check_mem_overlap_ = true;
|
| 940 |
+
bool allow_cpu_scalars_ = false;
|
| 941 |
+
bool is_reduction_ = false;
|
| 942 |
+
bool resize_outputs_ = true;
|
| 943 |
+
bool check_all_same_dtype_ = true;
|
| 944 |
+
bool check_all_same_device_ = true;
|
| 945 |
+
bool enforce_safe_casting_to_output_ = false;
|
| 946 |
+
bool enforce_linear_iteration_ = false;
|
| 947 |
+
bool promote_inputs_to_common_dtype_ = false;
|
| 948 |
+
bool promote_integer_inputs_to_float_ = false;
|
| 949 |
+
bool cast_common_dtype_to_outputs_ = false;
|
| 950 |
+
};
|
| 951 |
+
|
| 952 |
+
/// A container-like struct that acts as if it contains splits of a
|
| 953 |
+
/// TensorIterator that can use 32-bit indexing. Taken together the splits cover
|
| 954 |
+
/// the original TensorIterator.
|
| 955 |
+
struct TORCH_API SplitUntil32Bit {
|
| 956 |
+
struct TORCH_API iterator {
|
| 957 |
+
iterator() = default;
|
| 958 |
+
iterator(const TensorIteratorBase& iter);
|
| 959 |
+
iterator(iterator&&) = default;
|
| 960 |
+
|
| 961 |
+
// Guaranteed to be a TensorIterator proper!
|
| 962 |
+
TensorIterator& operator*() const;
|
| 963 |
+
iterator& operator++();
|
| 964 |
+
bool operator==(const iterator& other) const {
|
| 965 |
+
// two iterators are equal if they are the same object or they're both
|
| 966 |
+
// empty
|
| 967 |
+
return this == &other || (vec.empty() && other.vec.empty());
|
| 968 |
+
}
|
| 969 |
+
// needed for C++11 range-based for loop
|
| 970 |
+
bool operator!=(const iterator& other) const {
|
| 971 |
+
return !(*this == other);
|
| 972 |
+
}
|
| 973 |
+
|
| 974 |
+
/// stack of TensorIterators to be split
|
| 975 |
+
std::vector<std::unique_ptr<TensorIterator>> vec;
|
| 976 |
+
};
|
| 977 |
+
|
| 978 |
+
SplitUntil32Bit(const TensorIteratorBase& iter) : iter(iter) {}
|
| 979 |
+
|
| 980 |
+
iterator begin() const;
|
| 981 |
+
iterator end() const;
|
| 982 |
+
|
| 983 |
+
private:
|
| 984 |
+
const TensorIteratorBase& iter;
|
| 985 |
+
};
|
| 986 |
+
|
| 987 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/core/TensorOptions.h>
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SafePyObject.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
#include <unordered_map>
|
| 6 |
+
|
| 7 |
+
namespace at::impl {
|
| 8 |
+
|
| 9 |
+
struct TORCH_API ThreadLocalPythonObjects {
|
| 10 |
+
static void set(const std::string& key, std::shared_ptr<SafePyObject> value);
|
| 11 |
+
static const std::shared_ptr<SafePyObject>& get(const std::string& key);
|
| 12 |
+
static bool contains(const std::string& key);
|
| 13 |
+
|
| 14 |
+
static const ThreadLocalPythonObjects& get_state();
|
| 15 |
+
static void set_state(ThreadLocalPythonObjects state);
|
| 16 |
+
|
| 17 |
+
private:
|
| 18 |
+
std::unordered_map<std::string, std::shared_ptr<c10::SafePyObject>> obj_dict_;
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
} // namespace at::impl
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/record_function.h
ADDED
|
@@ -0,0 +1,741 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <ATen/core/operator_name.h>
|
| 5 |
+
#include <c10/macros/Export.h>
|
| 6 |
+
#include <c10/util/Optional.h>
|
| 7 |
+
#include <c10/util/SmallVector.h>
|
| 8 |
+
|
| 9 |
+
#include <array>
|
| 10 |
+
#include <atomic>
|
| 11 |
+
#include <functional>
|
| 12 |
+
#include <memory>
|
| 13 |
+
#include <variant>
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
class TORCH_API OperatorHandle;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
namespace at {
|
| 20 |
+
|
| 21 |
+
// Function name to record NCCL metadata
|
| 22 |
+
extern TORCH_API const std::string kParamCommsCallName;
|
| 23 |
+
|
| 24 |
+
// Kind of record function scope;
|
| 25 |
+
enum class C10_API_ENUM RecordScope : uint8_t {
|
| 26 |
+
// c10/ATen ops, autograd nodes
|
| 27 |
+
FUNCTION = 0,
|
| 28 |
+
// Functions/nodes called from the autograd
|
| 29 |
+
BACKWARD_FUNCTION,
|
| 30 |
+
// TorchScript functions, methods
|
| 31 |
+
TORCHSCRIPT_FUNCTION,
|
| 32 |
+
// Kernel Function dtype Tag
|
| 33 |
+
KERNEL_FUNCTION_DTYPE,
|
| 34 |
+
// Torchbind custom class,
|
| 35 |
+
CUSTOM_CLASS,
|
| 36 |
+
// Generic Build Feature
|
| 37 |
+
BUILD_FEATURE,
|
| 38 |
+
// Kernel Function dtype Tag
|
| 39 |
+
LITE_INTERPRETER,
|
| 40 |
+
// User defined scope (e.g. with record_function())
|
| 41 |
+
USER_SCOPE,
|
| 42 |
+
// Scopes for static runtime, a specialized TorchScript interpreter
|
| 43 |
+
STATIC_RUNTIME_OP,
|
| 44 |
+
STATIC_RUNTIME_MODEL,
|
| 45 |
+
NUM_SCOPES, // must be the last in the list
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
} // namespace at
|
| 49 |
+
|
| 50 |
+
namespace std {
|
| 51 |
+
template <>
|
| 52 |
+
struct hash<at::RecordScope> {
|
| 53 |
+
size_t operator()(const at::RecordScope& sc) const {
|
| 54 |
+
return static_cast<std::size_t>(sc);
|
| 55 |
+
}
|
| 56 |
+
};
|
| 57 |
+
} // namespace std
|
| 58 |
+
|
| 59 |
+
namespace at {
|
| 60 |
+
|
| 61 |
+
struct TORCH_API StringView {
|
| 62 |
+
StringView() : StringView(nullptr) {}
|
| 63 |
+
explicit StringView(const char* str_ptr)
|
| 64 |
+
: owned_str_ptr_(nullptr), str_ptr_(str_ptr) {}
|
| 65 |
+
explicit StringView(std::string str)
|
| 66 |
+
: owned_str_ptr_(std::make_shared<std::string>(std::move(str))),
|
| 67 |
+
str_ptr_(owned_str_ptr_->c_str()) {}
|
| 68 |
+
|
| 69 |
+
const char* str() const {
|
| 70 |
+
return str_ptr_;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
friend std::ostream& operator<<(std::ostream& os, const StringView& dt) {
|
| 74 |
+
os << dt.str();
|
| 75 |
+
return os;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
friend bool operator==(const StringView& lhs, const StringView& rhs) {
|
| 79 |
+
return strcmp(lhs.str(), rhs.str()) == 0;
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
friend bool operator!=(const StringView& lhs, const StringView& rhs) {
|
| 83 |
+
return !(lhs == rhs);
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
private:
|
| 87 |
+
std::shared_ptr<std::string> owned_str_ptr_;
|
| 88 |
+
const char* str_ptr_;
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
// Soft limit on the number of callbacks to use;
|
| 92 |
+
constexpr std::size_t kSoftLimitCallbacks = 4;
|
| 93 |
+
|
| 94 |
+
// An abstract base class for various observer contexts that can be attached to
|
| 95 |
+
// the RecordFunction.
|
| 96 |
+
struct ObserverContext {
|
| 97 |
+
virtual ~ObserverContext() = default;
|
| 98 |
+
|
| 99 |
+
protected:
|
| 100 |
+
ObserverContext() = default;
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
typedef c10::SmallVector<uint64_t, kSoftLimitCallbacks> CallbackHandles;
|
| 104 |
+
typedef c10::SmallVector<std::unique_ptr<ObserverContext>, kSoftLimitCallbacks>
|
| 105 |
+
ObserverContextList;
|
| 106 |
+
typedef uint64_t RecordFunctionHandle;
|
| 107 |
+
struct RecordFunction;
|
| 108 |
+
|
| 109 |
+
//
|
| 110 |
+
// PyTorch callbacks/observers API:
|
| 111 |
+
//
|
| 112 |
+
|
| 113 |
+
/**
|
| 114 |
+
* RecordFunctionCallback represents a pair of callbacks to be used with
|
| 115 |
+
* RecordFunction, members:
|
| 116 |
+
* start, end - the callbacks to run when entering and exiting the scope;
|
| 117 |
+
* optionally, the start callback may return an ObserverContext which will
|
| 118 |
+
* be passed to the end callback, use appropriate constructor accordingly.
|
| 119 |
+
* needs_inputs - whether the callbacks need the inputs passed from the
|
| 120 |
+
* observed function/range; NOTE: passing the inputs incurs an additional
|
| 121 |
+
* overhead; sampling_probability - if not 1.0, then the callback is
|
| 122 |
+
* probabilistically sampled to run; NOTE: start and end callbacks always run as
|
| 123 |
+
* a pair and are sampled together; scopes - types of scopes to execute the
|
| 124 |
+
* callbacks on (see RecordScope); passing empty set means the callbacks will be
|
| 125 |
+
* executed for all possible scope types should_run - optional function that
|
| 126 |
+
* returns whether this callback should run; overwrites the effect of setting
|
| 127 |
+
* sampling_probability
|
| 128 |
+
*/
|
| 129 |
+
class TORCH_API RecordFunctionCallback {
|
| 130 |
+
public:
|
| 131 |
+
using StartCallback =
|
| 132 |
+
std::unique_ptr<ObserverContext> (*)(const RecordFunction&);
|
| 133 |
+
using EndCallback = void (*)(const RecordFunction&, ObserverContext*);
|
| 134 |
+
|
| 135 |
+
// This interface supports observers that require passing an ObserverContext
|
| 136 |
+
// between start and end callbacks.
|
| 137 |
+
explicit RecordFunctionCallback(
|
| 138 |
+
StartCallback start,
|
| 139 |
+
EndCallback end = nullptr)
|
| 140 |
+
: start_(start), end_(end) {
|
| 141 |
+
scopes_.fill(true);
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
RecordFunctionCallback& needsInputs(bool needs_inputs) {
|
| 145 |
+
needs_inputs_ = needs_inputs;
|
| 146 |
+
return *this;
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
RecordFunctionCallback& needsOutputs(bool needs_outputs) {
|
| 150 |
+
needs_outputs_ = needs_outputs;
|
| 151 |
+
return *this;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
RecordFunctionCallback& needsIds(bool needs_ids) {
|
| 155 |
+
needs_ids_ = needs_ids;
|
| 156 |
+
return *this;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
RecordFunctionCallback& samplingProb(double sampling_prob) {
|
| 160 |
+
TORCH_CHECK(
|
| 161 |
+
sampling_prob >= 0.0 && sampling_prob <= 1.0,
|
| 162 |
+
"Invalid sampling probability");
|
| 163 |
+
sampling_prob_ = sampling_prob;
|
| 164 |
+
return *this;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
RecordFunctionCallback& scopes(
|
| 168 |
+
const std::unordered_set<RecordScope, std::hash<RecordScope>>& scopes) {
|
| 169 |
+
if (!scopes.empty()) {
|
| 170 |
+
scopes_.fill(false);
|
| 171 |
+
for (auto sc : scopes) {
|
| 172 |
+
scopes_[static_cast<size_t>(sc)] = true;
|
| 173 |
+
}
|
| 174 |
+
} else {
|
| 175 |
+
scopes_.fill(true);
|
| 176 |
+
}
|
| 177 |
+
return *this;
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
bool needsInputs() const {
|
| 181 |
+
return needs_inputs_;
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
bool needsOutputs() const {
|
| 185 |
+
return needs_outputs_;
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
bool needsIds() const {
|
| 189 |
+
return needs_ids_;
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
double samplingProb() const {
|
| 193 |
+
return sampling_prob_;
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
bool checkScope(RecordScope sc) const {
|
| 197 |
+
return scopes_[(size_t)sc];
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
StartCallback start() const {
|
| 201 |
+
return start_;
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
EndCallback end() const {
|
| 205 |
+
return end_;
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
private:
|
| 209 |
+
StartCallback start_;
|
| 210 |
+
EndCallback end_;
|
| 211 |
+
double sampling_prob_ = 1.0;
|
| 212 |
+
std::array<bool, static_cast<size_t>(RecordScope::NUM_SCOPES)> scopes_ = {};
|
| 213 |
+
bool needs_inputs_ = false;
|
| 214 |
+
bool needs_outputs_ = false;
|
| 215 |
+
bool needs_ids_ = false;
|
| 216 |
+
};
|
| 217 |
+
|
| 218 |
+
// Notes:
|
| 219 |
+
// - two types of callbacks are provided: thread local and global
|
| 220 |
+
// - thread local callbacks are added/removed only for the given thread
|
| 221 |
+
// and are stored locally for each thread and separately from the list
|
| 222 |
+
// of the global callbacks
|
| 223 |
+
// - global callbacks are stored in a single per process list and are
|
| 224 |
+
// invoked by every RecordFunction, in addition to the thread local
|
| 225 |
+
// callbacks specific to the given thread
|
| 226 |
+
// - we allow the added callbacks to be sampled, by specifying a sampling
|
| 227 |
+
// probability for each callback pair, if the start callback is
|
| 228 |
+
// not picked to run, the corresponding end callback won't be called
|
| 229 |
+
// - a typical use case for the global callbacks is passive monitoring
|
| 230 |
+
// in the background (e.g. fleet-wide monitoring), without focusing on
|
| 231 |
+
// the specific piece of code
|
| 232 |
+
// - in contrast, thread local callbacks are enabled locally, on demand,
|
| 233 |
+
// for the specific piece of code (range) and are not sampled
|
| 234 |
+
// - a typical use case for thread local callbacks is profiler and code
|
| 235 |
+
// execution tracer
|
| 236 |
+
// - note, thread local callbacks are automatically propagated with
|
| 237 |
+
// ThreadLocalState across JIT continuations and async tasks (at::launch)
|
| 238 |
+
|
| 239 |
+
typedef uint64_t CallbackHandle;
|
| 240 |
+
|
| 241 |
+
constexpr CallbackHandle INVALID_CALLBACK_HANDLE{0};
|
| 242 |
+
|
| 243 |
+
// It is unnecessary to use atomic operations for enabling
|
| 244 |
+
// thread-local function callbacks. Moreover, it prevents saving to
|
| 245 |
+
// ThreadLocalState because std::atomic is non-copyable.
|
| 246 |
+
struct RecordFunctionCallbacksEntry {
|
| 247 |
+
RecordFunctionCallbacksEntry(RecordFunctionCallback&& cb, CallbackHandle h)
|
| 248 |
+
: callback_(cb), handle_(h) {}
|
| 249 |
+
|
| 250 |
+
RecordFunctionCallback callback_;
|
| 251 |
+
bool enabled_{true};
|
| 252 |
+
CallbackHandle handle_;
|
| 253 |
+
};
|
| 254 |
+
|
| 255 |
+
// Holds pairs (callbacks, unique_id)
|
| 256 |
+
using RecordFunctionCallbacks = std::vector<RecordFunctionCallbacksEntry>;
|
| 257 |
+
|
| 258 |
+
// Generated by the callback managers to determine which functions to run.
|
| 259 |
+
struct StepCallbacks {
|
| 260 |
+
StepCallbacks() = default;
|
| 261 |
+
StepCallbacks(uint64_t thread_id, RecordScope scope)
|
| 262 |
+
: thread_id_{thread_id}, scope_{scope} {}
|
| 263 |
+
|
| 264 |
+
bool empty() const {
|
| 265 |
+
return callbacks_.empty();
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
struct StartEndPair {
|
| 269 |
+
RecordFunctionCallback::StartCallback start_;
|
| 270 |
+
RecordFunctionCallback::EndCallback end_;
|
| 271 |
+
};
|
| 272 |
+
|
| 273 |
+
using StartEndPairs = c10::SmallVector<StartEndPair, kSoftLimitCallbacks>;
|
| 274 |
+
|
| 275 |
+
StartEndPairs callbacks_;
|
| 276 |
+
uint64_t thread_id_{0};
|
| 277 |
+
RecordScope scope_{RecordScope::FUNCTION};
|
| 278 |
+
bool needs_inputs_{false};
|
| 279 |
+
bool needs_outputs_{false};
|
| 280 |
+
bool needs_ids_{false};
|
| 281 |
+
};
|
| 282 |
+
|
| 283 |
+
struct TORCH_API RecordFunction {
|
| 284 |
+
// Default constructor is used with before function called afterwards:
|
| 285 |
+
// scope - record scope that this function tracks
|
| 286 |
+
// pre_sampled - whether this RecordFunction was already pre-sampled with
|
| 287 |
+
// kLowProb probability
|
| 288 |
+
explicit RecordFunction(RecordScope scope = RecordScope::FUNCTION);
|
| 289 |
+
explicit RecordFunction(StepCallbacks&& step_callbacks);
|
| 290 |
+
|
| 291 |
+
template <typename F>
|
| 292 |
+
void before(
|
| 293 |
+
F fn,
|
| 294 |
+
c10::ArrayRef<const c10::IValue> args,
|
| 295 |
+
int64_t current_sequence_nr = -1) {
|
| 296 |
+
if (!isActive()) {
|
| 297 |
+
return;
|
| 298 |
+
}
|
| 299 |
+
inputs_ = args;
|
| 300 |
+
before(fn, current_sequence_nr);
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
template <typename F>
|
| 304 |
+
void before(
|
| 305 |
+
F fn,
|
| 306 |
+
const std::vector<IValue>* args,
|
| 307 |
+
int64_t current_sequence_nr = -1) {
|
| 308 |
+
before(
|
| 309 |
+
std::move(fn),
|
| 310 |
+
c10::ArrayRef<const c10::IValue>(args->data(), args->size()),
|
| 311 |
+
current_sequence_nr);
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
// Destructor calls end callbacks
|
| 315 |
+
virtual ~RecordFunction();
|
| 316 |
+
|
| 317 |
+
RecordFunction(const RecordFunction&) = delete;
|
| 318 |
+
RecordFunction& operator=(const RecordFunction&) = delete;
|
| 319 |
+
|
| 320 |
+
const char* name() const;
|
| 321 |
+
|
| 322 |
+
int64_t seqNr() const {
|
| 323 |
+
return sequence_nr_;
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
c10::ArrayRef<const IValue> inputs() const {
|
| 327 |
+
#ifndef NDEBUG
|
| 328 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 329 |
+
inputs_valid_, "Called inputs() outside RecordFunction start callback");
|
| 330 |
+
#endif
|
| 331 |
+
return inputs_;
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
const std::vector<c10::IValue>& outputs() const {
|
| 335 |
+
return outputs_;
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
void setOutputs(std::vector<c10::IValue>&& outputs) {
|
| 339 |
+
outputs_ = std::move(outputs);
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
void setOutputs(c10::ArrayRef<c10::IValue> outputs) {
|
| 343 |
+
outputs_ = outputs.vec();
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
size_t num_inputs() const;
|
| 347 |
+
size_t num_outputs() const;
|
| 348 |
+
|
| 349 |
+
// Retrieves the thread_id that this RecordFunction ran start callbacks with.
|
| 350 |
+
// Useful for writing thread safe end callbacks that may be potentially
|
| 351 |
+
// executed in a different thread (async ops)
|
| 352 |
+
uint64_t threadId() const {
|
| 353 |
+
return step_callbacks_.thread_id_;
|
| 354 |
+
}
|
| 355 |
+
|
| 356 |
+
// For backward functions - thread id of the corresponding forward function,
|
| 357 |
+
// or zero otherwise;
|
| 358 |
+
// used alongside with sequence number to correlate backward functions with
|
| 359 |
+
// the forward ones
|
| 360 |
+
uint64_t forwardThreadId() const {
|
| 361 |
+
return fwd_thread_id_;
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
void setForwardThreadId(uint64_t thread_id) {
|
| 365 |
+
fwd_thread_id_ = thread_id;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
RecordScope scope() const {
|
| 369 |
+
return step_callbacks_.scope_;
|
| 370 |
+
}
|
| 371 |
+
|
| 372 |
+
// Returns logical thread_id for the current thread
|
| 373 |
+
static uint64_t currentThreadId();
|
| 374 |
+
|
| 375 |
+
// Internal functions, do not use directly;
|
| 376 |
+
// used in python's context manager
|
| 377 |
+
|
| 378 |
+
// before functions initialize RecordFunction members and call
|
| 379 |
+
// start callbacks
|
| 380 |
+
using schema_ref_t = std::reference_wrapper<const c10::FunctionSchema>;
|
| 381 |
+
void before(const char* name, int64_t sequence_nr = -1);
|
| 382 |
+
void before(std::string name, int64_t sequence_nr = -1);
|
| 383 |
+
void before(schema_ref_t schema, int64_t sequence_nr = -1);
|
| 384 |
+
|
| 385 |
+
// Sets node ID for distributed profiling
|
| 386 |
+
static void setDefaultNodeId(int64_t defaultNodeId);
|
| 387 |
+
// Gets node ID for distributed profiling
|
| 388 |
+
static int64_t getDefaultNodeId();
|
| 389 |
+
|
| 390 |
+
// Calls end callbacks. After end(), accessors will no longer provide useful
|
| 391 |
+
// results.
|
| 392 |
+
void end();
|
| 393 |
+
|
| 394 |
+
// Internal-only, used only force async event for distributed events
|
| 395 |
+
// profiling.
|
| 396 |
+
void _setAsync();
|
| 397 |
+
|
| 398 |
+
// Returns whether this RecordFunction corresponds to an async event or not.
|
| 399 |
+
bool isAsync() const;
|
| 400 |
+
|
| 401 |
+
// Returns whether this RecordFunction corresponds to NCCL metadata collection
|
| 402 |
+
// or not.
|
| 403 |
+
bool isNcclMeta() const {
|
| 404 |
+
return is_nccl_meta_;
|
| 405 |
+
}
|
| 406 |
+
|
| 407 |
+
// Internal-only, used to denote out variant used for Static Runtime execution
|
| 408 |
+
void _setStaticRuntimeOutVariant();
|
| 409 |
+
bool isStaticRuntimeOutVariant() const;
|
| 410 |
+
|
| 411 |
+
RecordFunctionHandle handle() const {
|
| 412 |
+
return handle_;
|
| 413 |
+
}
|
| 414 |
+
|
| 415 |
+
c10::optional<OperatorName> operator_name() const;
|
| 416 |
+
|
| 417 |
+
// This method returns a copy of the FunctionSchema and can be expensive.
|
| 418 |
+
c10::optional<FunctionSchema> operator_schema() const;
|
| 419 |
+
|
| 420 |
+
void setHandle(RecordFunctionHandle handle) {
|
| 421 |
+
handle_ = handle;
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
// Whether this RecordFunction runs any callbacks.
|
| 425 |
+
bool isActive() const {
|
| 426 |
+
return !step_callbacks_.empty();
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
bool needsInputs() const {
|
| 430 |
+
return step_callbacks_.needs_inputs_;
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
bool needsOutputs() const {
|
| 434 |
+
return step_callbacks_.needs_outputs_;
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
int64_t debugHandle() const {
|
| 438 |
+
return debug_handle_;
|
| 439 |
+
}
|
| 440 |
+
|
| 441 |
+
void setDebugHandle(int64_t debug_handle) {
|
| 442 |
+
debug_handle_ = debug_handle;
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
void invalidateInputs() {
|
| 446 |
+
#ifndef NDEBUG
|
| 447 |
+
inputs_valid_ = false;
|
| 448 |
+
#endif
|
| 449 |
+
}
|
| 450 |
+
|
| 451 |
+
private:
|
| 452 |
+
void runStartCallbacks();
|
| 453 |
+
|
| 454 |
+
StepCallbacks step_callbacks_;
|
| 455 |
+
|
| 456 |
+
// In cases when RecordFunction might be active but we chose not to
|
| 457 |
+
// use the observers (e.g. operator is not observed), this boolean
|
| 458 |
+
// flag is used to check whether the start callbacks were called
|
| 459 |
+
bool called_start_callbacks_ = false;
|
| 460 |
+
|
| 461 |
+
#ifndef NDEBUG
|
| 462 |
+
bool inputs_valid_ = false;
|
| 463 |
+
#endif
|
| 464 |
+
|
| 465 |
+
// Stores various ObserverContext objects with event metadata for callbacks.
|
| 466 |
+
ObserverContextList ctx_;
|
| 467 |
+
|
| 468 |
+
std::variant<std::string, schema_ref_t> fn_;
|
| 469 |
+
|
| 470 |
+
int64_t sequence_nr_ = -1;
|
| 471 |
+
c10::ArrayRef<const IValue> inputs_;
|
| 472 |
+
std::vector<c10::IValue> outputs_;
|
| 473 |
+
|
| 474 |
+
// For backward functions - thread id of the forward function
|
| 475 |
+
uint64_t fwd_thread_id_ = 0;
|
| 476 |
+
|
| 477 |
+
// Unique id for this RecordFunction, used in callbacks to track start
|
| 478 |
+
// and end of ranges
|
| 479 |
+
RecordFunctionHandle handle_{0};
|
| 480 |
+
|
| 481 |
+
// Whether this record_function corresponds to an async event or not. Async
|
| 482 |
+
// events can complete in different threads or follow a future-like pattern
|
| 483 |
+
// of use.
|
| 484 |
+
bool is_async_{false};
|
| 485 |
+
|
| 486 |
+
// Debug handles are used for lazy annotation of module hierarchy
|
| 487 |
+
// and callstack.
|
| 488 |
+
// This is specifically is useful for mobile runtime, where generated
|
| 489 |
+
// debug handles can be lazily symbolicated using debug information
|
| 490 |
+
int64_t debug_handle_{-1};
|
| 491 |
+
|
| 492 |
+
// Whether this RecordFunction is used for an out variant run with
|
| 493 |
+
// Static Runtime
|
| 494 |
+
bool is_static_runtime_out_variant_{false};
|
| 495 |
+
|
| 496 |
+
// Whether this RecordFunction is used for NCCL metadata collection
|
| 497 |
+
bool is_nccl_meta_{false};
|
| 498 |
+
};
|
| 499 |
+
|
| 500 |
+
TORCH_API StepCallbacks getStepCallbacks(RecordScope scope);
|
| 501 |
+
|
| 502 |
+
TORCH_API c10::optional<StepCallbacks> getStepCallbacksUnlessEmpty(
|
| 503 |
+
RecordScope scope);
|
| 504 |
+
|
| 505 |
+
namespace detail {
|
| 506 |
+
template <typename Inputs, typename F, typename... Args>
|
| 507 |
+
void record_function_with_scope(
|
| 508 |
+
RecordFunction& guard,
|
| 509 |
+
F fn,
|
| 510 |
+
const Inputs& inputs,
|
| 511 |
+
Args&&... args) {
|
| 512 |
+
if (guard.needsInputs()) {
|
| 513 |
+
guard.before(
|
| 514 |
+
fn,
|
| 515 |
+
c10::ArrayRef<const c10::IValue>(inputs.data(), inputs.size()),
|
| 516 |
+
std::forward<Args>(args)...);
|
| 517 |
+
} else {
|
| 518 |
+
guard.before(fn, std::forward<Args>(args)...);
|
| 519 |
+
}
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
template <typename Inputs, typename F, typename... Args>
|
| 523 |
+
void record_function_with_scope_and_debug_handle(
|
| 524 |
+
RecordFunction& guard,
|
| 525 |
+
F fn,
|
| 526 |
+
int64_t debug_handle,
|
| 527 |
+
const Inputs& inputs,
|
| 528 |
+
Args&&... args) {
|
| 529 |
+
guard.setDebugHandle(debug_handle);
|
| 530 |
+
if (guard.needsInputs()) {
|
| 531 |
+
guard.before(
|
| 532 |
+
fn,
|
| 533 |
+
c10::ArrayRef<const c10::IValue>(inputs.data(), inputs.size()),
|
| 534 |
+
std::forward<Args>(args)...);
|
| 535 |
+
} else {
|
| 536 |
+
guard.before(fn, std::forward<Args>(args)...);
|
| 537 |
+
}
|
| 538 |
+
}
|
| 539 |
+
|
| 540 |
+
template <typename F, typename... Args>
|
| 541 |
+
void record_function_with_scope(
|
| 542 |
+
RecordFunction& guard,
|
| 543 |
+
F fn,
|
| 544 |
+
c10::ArrayRef<const c10::IValue> inputs,
|
| 545 |
+
Args&&... args) {
|
| 546 |
+
return record_function_with_scope<
|
| 547 |
+
c10::ArrayRef<const c10::IValue>,
|
| 548 |
+
F,
|
| 549 |
+
Args...>(guard, std::move(fn), inputs, std::forward<Args>(args)...);
|
| 550 |
+
}
|
| 551 |
+
|
| 552 |
+
template <typename F, typename... Args>
|
| 553 |
+
void record_function_with_scope_and_debug_handle(
|
| 554 |
+
RecordFunction& guard,
|
| 555 |
+
F fn,
|
| 556 |
+
int64_t debug_handle,
|
| 557 |
+
c10::ArrayRef<const c10::IValue> inputs,
|
| 558 |
+
Args&&... args) {
|
| 559 |
+
return record_function_with_scope_and_debug_handle<
|
| 560 |
+
c10::ArrayRef<const c10::IValue>,
|
| 561 |
+
F,
|
| 562 |
+
Args...>(
|
| 563 |
+
guard, std::move(fn), debug_handle, inputs, std::forward<Args>(args)...);
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
} // namespace detail
|
| 567 |
+
|
| 568 |
+
// optional argument - function's seq_no
|
| 569 |
+
#define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) \
|
| 570 |
+
at::RecordFunction guard(scope); \
|
| 571 |
+
if (guard.isActive()) { \
|
| 572 |
+
::at::detail::record_function_with_scope( \
|
| 573 |
+
guard, fn, inputs, ##__VA_ARGS__); \
|
| 574 |
+
}
|
| 575 |
+
|
| 576 |
+
#define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( \
|
| 577 |
+
scope, fn, inputs, outputs, ...) \
|
| 578 |
+
at::RecordFunction guard(scope); \
|
| 579 |
+
if (guard.isActive()) { \
|
| 580 |
+
if (guard.needsInputs()) { \
|
| 581 |
+
guard.before(fn, inputs, ##__VA_ARGS__); \
|
| 582 |
+
} else { \
|
| 583 |
+
guard.before(fn, ##__VA_ARGS__); \
|
| 584 |
+
} \
|
| 585 |
+
if (guard.needsOutputs()) { \
|
| 586 |
+
guard.setOutputs(outputs); \
|
| 587 |
+
} \
|
| 588 |
+
}
|
| 589 |
+
|
| 590 |
+
#define RECORD_FUNCTION(fn, inputs, ...) \
|
| 591 |
+
RECORD_FUNCTION_WITH_SCOPE( \
|
| 592 |
+
at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__)
|
| 593 |
+
|
| 594 |
+
#define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) \
|
| 595 |
+
RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs)
|
| 596 |
+
|
| 597 |
+
#define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) \
|
| 598 |
+
RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( \
|
| 599 |
+
at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__)
|
| 600 |
+
|
| 601 |
+
// Custom user scopes in C++; similar to Python's 'with record_function("..."):'
|
| 602 |
+
#define RECORD_USER_SCOPE(fn) \
|
| 603 |
+
RECORD_FUNCTION_WITH_SCOPE( \
|
| 604 |
+
at::RecordScope::USER_SCOPE, fn, c10::ArrayRef<const c10::IValue>{})
|
| 605 |
+
|
| 606 |
+
// RECORD_USER_SCOPE with inputs
|
| 607 |
+
#define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) \
|
| 608 |
+
RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs)
|
| 609 |
+
|
| 610 |
+
// Helper macro to pass in debug handle that is used to
|
| 611 |
+
// post process events
|
| 612 |
+
#define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( \
|
| 613 |
+
scope, fn, debug_handle, inputs, ...) \
|
| 614 |
+
at::RecordFunction guard(scope); \
|
| 615 |
+
if (guard.isActive()) { \
|
| 616 |
+
::at::detail::record_function_with_scope_and_debug_handle( \
|
| 617 |
+
guard, fn, debug_handle, inputs, ##__VA_ARGS__); \
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
// Helper macros to record LITE INTERPETER scope events with debug handles
|
| 621 |
+
#define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( \
|
| 622 |
+
fn, debug_handle, inputs) \
|
| 623 |
+
RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( \
|
| 624 |
+
at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs)
|
| 625 |
+
|
| 626 |
+
// Bookend to the RECORD_FUNCTION macros. Use this after the kernel
|
| 627 |
+
// launch to let the profiler bind the outputs to the op that produced
|
| 628 |
+
// them. Note that guard is declared by RECORD_FUNCTION so this macro
|
| 629 |
+
// needs to be called from the same scope as RECORD_FUNCTION
|
| 630 |
+
#define RECORD_OUTPUTS(outputs) \
|
| 631 |
+
if (guard.needsOutputs()) { \
|
| 632 |
+
guard.setOutputs( \
|
| 633 |
+
std::vector<c10::IValue>(outputs.begin(), outputs.end())); \
|
| 634 |
+
}
|
| 635 |
+
|
| 636 |
+
/**
|
| 637 |
+
* addThreadLocalCallback adds a thread local callback to run with
|
| 638 |
+
* RecordFunction, returns handle to use with removeThreadLocalCallback
|
| 639 |
+
*/
|
| 640 |
+
TORCH_API CallbackHandle addThreadLocalCallback(RecordFunctionCallback cb);
|
| 641 |
+
|
| 642 |
+
/**
|
| 643 |
+
* hasThreadLocalCallbacks returns whether there're callbacks registered
|
| 644 |
+
* with addThreadLocalCallback
|
| 645 |
+
*/
|
| 646 |
+
TORCH_API bool hasThreadLocalCallbacks();
|
| 647 |
+
|
| 648 |
+
/**
|
| 649 |
+
* clearThreadLocalCallbacks removes all thread local callbacks
|
| 650 |
+
*/
|
| 651 |
+
TORCH_API void clearThreadLocalCallbacks();
|
| 652 |
+
|
| 653 |
+
/**
|
| 654 |
+
* addGlobalCallback adds a global callback to run with RecordFunction:
|
| 655 |
+
*
|
| 656 |
+
* only during the program initialization
|
| 657 |
+
*/
|
| 658 |
+
TORCH_API CallbackHandle addGlobalCallback(RecordFunctionCallback cb);
|
| 659 |
+
|
| 660 |
+
/**
|
| 661 |
+
* removeCallback removes a callback given the handle returned by
|
| 662 |
+
* addThreadLocalCallback or addGlobalCallback;
|
| 663 |
+
*
|
| 664 |
+
* no other code can run simultaneously
|
| 665 |
+
*/
|
| 666 |
+
TORCH_API void removeCallback(CallbackHandle handle);
|
| 667 |
+
|
| 668 |
+
/**
|
| 669 |
+
* Prevent the given callback from executing. If handle is invalid,
|
| 670 |
+
* does nothing.
|
| 671 |
+
*/
|
| 672 |
+
TORCH_API void disableCallback(CallbackHandle handle);
|
| 673 |
+
|
| 674 |
+
/**
|
| 675 |
+
* Allow the given callback, previously disabled with disableCallback, to
|
| 676 |
+
* execute again. If handle is invalid, does nothing.
|
| 677 |
+
*/
|
| 678 |
+
TORCH_API void reenableCallback(CallbackHandle handle);
|
| 679 |
+
|
| 680 |
+
/**
|
| 681 |
+
* hasGlobalCallbacks returns whether there're global callbacks
|
| 682 |
+
* registered with pushGlobalCallback
|
| 683 |
+
*/
|
| 684 |
+
TORCH_API bool hasGlobalCallbacks();
|
| 685 |
+
|
| 686 |
+
/**
|
| 687 |
+
* clearGlobalCallbacks removes all global callbacks
|
| 688 |
+
*/
|
| 689 |
+
TORCH_API void clearGlobalCallbacks();
|
| 690 |
+
|
| 691 |
+
// for both thread local and global callbacks
|
| 692 |
+
TORCH_API bool hasCallbacks();
|
| 693 |
+
TORCH_API void clearCallbacks();
|
| 694 |
+
|
| 695 |
+
/**
|
| 696 |
+
* enableRecordFunction enables RecordFunction thread locally
|
| 697 |
+
*/
|
| 698 |
+
TORCH_API void enableRecordFunction(bool enable = true);
|
| 699 |
+
|
| 700 |
+
/**
|
| 701 |
+
* isRecordFunctionEnabled returns whether RecordFunction
|
| 702 |
+
* is enabled thread locally
|
| 703 |
+
*/
|
| 704 |
+
TORCH_API bool isRecordFunctionEnabled();
|
| 705 |
+
|
| 706 |
+
class TORCH_API RecordFunctionGuard {
|
| 707 |
+
public:
|
| 708 |
+
explicit RecordFunctionGuard(bool is_enabled = true)
|
| 709 |
+
: prev_value_(isRecordFunctionEnabled()) {
|
| 710 |
+
enableRecordFunction(is_enabled);
|
| 711 |
+
}
|
| 712 |
+
|
| 713 |
+
virtual ~RecordFunctionGuard() {
|
| 714 |
+
enableRecordFunction(prev_value_);
|
| 715 |
+
}
|
| 716 |
+
|
| 717 |
+
private:
|
| 718 |
+
bool prev_value_ = false;
|
| 719 |
+
};
|
| 720 |
+
|
| 721 |
+
class TORCH_API DisableRecordFunctionGuard : public RecordFunctionGuard {
|
| 722 |
+
public:
|
| 723 |
+
DisableRecordFunctionGuard() : RecordFunctionGuard(false) {}
|
| 724 |
+
~DisableRecordFunctionGuard() override = default;
|
| 725 |
+
};
|
| 726 |
+
|
| 727 |
+
struct TORCH_API RecordFunctionTLS {
|
| 728 |
+
// Thread local vector of callbacks, holds pairs (callbacks, unique_id);
|
| 729 |
+
// must be sorted in increasing handles order
|
| 730 |
+
RecordFunctionCallbacks sorted_tls_callbacks_;
|
| 731 |
+
|
| 732 |
+
bool tls_record_function_enabled_ = true;
|
| 733 |
+
};
|
| 734 |
+
|
| 735 |
+
TORCH_API const RecordFunctionTLS& get_record_function_tls_();
|
| 736 |
+
|
| 737 |
+
TORCH_API void set_record_function_tls_(const RecordFunctionTLS& tls);
|
| 738 |
+
|
| 739 |
+
TORCH_API void set_record_function_seed_for_testing(uint32_t seed);
|
| 740 |
+
|
| 741 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
|
| 2 |
+
#include <thrust/binary_search.h>
|
| 3 |
+
#include <thrust/device_vector.h>
|
| 4 |
+
#include <thrust/execution_policy.h>
|
| 5 |
+
#include <thrust/functional.h>
|
| 6 |
+
#endif
|
| 7 |
+
namespace c10 {
|
| 8 |
+
namespace cuda {
|
| 9 |
+
#ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
|
| 10 |
+
template <typename Iter, typename Scalar>
|
| 11 |
+
__forceinline__ __device__ Iter
|
| 12 |
+
lower_bound(Iter start, Iter end, Scalar value) {
|
| 13 |
+
return thrust::lower_bound(thrust::device, start, end, value);
|
| 14 |
+
}
|
| 15 |
+
#else
|
| 16 |
+
// thrust::lower_bound is broken on device, see
|
| 17 |
+
// https://github.com/NVIDIA/thrust/issues/1734 Implementation inspired by
|
| 18 |
+
// https://github.com/pytorch/pytorch/blob/805120ab572efef66425c9f595d9c6c464383336/aten/src/ATen/native/cuda/Bucketization.cu#L28
|
| 19 |
+
template <typename Iter, typename Scalar>
|
| 20 |
+
__device__ Iter lower_bound(Iter start, Iter end, Scalar value) {
|
| 21 |
+
while (start < end) {
|
| 22 |
+
auto mid = start + ((end - start) >> 1);
|
| 23 |
+
if (*mid < value) {
|
| 24 |
+
start = mid + 1;
|
| 25 |
+
} else {
|
| 26 |
+
end = mid;
|
| 27 |
+
}
|
| 28 |
+
}
|
| 29 |
+
return end;
|
| 30 |
+
}
|
| 31 |
+
#endif // THRUST_DEVICE_LOWER_BOUND_WORKS
|
| 32 |
+
} // namespace cuda
|
| 33 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAllocatorConfig.h
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 4 |
+
#include <c10/cuda/CUDAException.h>
|
| 5 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
#include <c10/util/llvmMathExtras.h>
|
| 8 |
+
#include <cuda_runtime_api.h>
|
| 9 |
+
|
| 10 |
+
#include <atomic>
|
| 11 |
+
#include <vector>
|
| 12 |
+
|
| 13 |
+
namespace c10 {
|
| 14 |
+
namespace cuda {
|
| 15 |
+
namespace CUDACachingAllocator {
|
| 16 |
+
|
| 17 |
+
// Environment config parser
|
| 18 |
+
class C10_CUDA_API CUDAAllocatorConfig {
|
| 19 |
+
public:
|
| 20 |
+
static size_t max_split_size() {
|
| 21 |
+
return instance().m_max_split_size;
|
| 22 |
+
}
|
| 23 |
+
static double garbage_collection_threshold() {
|
| 24 |
+
return instance().m_garbage_collection_threshold;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
static bool expandable_segments() {
|
| 28 |
+
#ifndef PYTORCH_C10_DRIVER_API_SUPPORTED
|
| 29 |
+
if (instance().m_expandable_segments) {
|
| 30 |
+
TORCH_WARN_ONCE("expandable_segments not supported on this platform")
|
| 31 |
+
}
|
| 32 |
+
return false;
|
| 33 |
+
#else
|
| 34 |
+
return instance().m_expandable_segments;
|
| 35 |
+
#endif
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
static bool release_lock_on_cudamalloc() {
|
| 39 |
+
return instance().m_release_lock_on_cudamalloc;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
/** Pinned memory allocator settings */
|
| 43 |
+
static bool pinned_use_cuda_host_register() {
|
| 44 |
+
return instance().m_pinned_use_cuda_host_register;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
static size_t pinned_num_register_threads() {
|
| 48 |
+
return instance().m_pinned_num_register_threads;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
static size_t pinned_max_register_threads() {
|
| 52 |
+
// Based on the benchmark results, we see better allocation performance
|
| 53 |
+
// with 8 threads. However on future systems, we may need more threads
|
| 54 |
+
// and limiting this to 128 threads.
|
| 55 |
+
return 128;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// This is used to round-up allocation size to nearest power of 2 divisions.
|
| 59 |
+
// More description below in function roundup_power2_next_division
|
| 60 |
+
// As ane example, if we want 4 divisions between 2's power, this can be done
|
| 61 |
+
// using env variable: PYTORCH_CUDA_ALLOC_CONF=roundup_power2_divisions:4
|
| 62 |
+
static size_t roundup_power2_divisions(size_t size);
|
| 63 |
+
|
| 64 |
+
static CUDAAllocatorConfig& instance() {
|
| 65 |
+
static CUDAAllocatorConfig* s_instance = ([]() {
|
| 66 |
+
auto inst = new CUDAAllocatorConfig();
|
| 67 |
+
const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF");
|
| 68 |
+
inst->parseArgs(env);
|
| 69 |
+
return inst;
|
| 70 |
+
})();
|
| 71 |
+
return *s_instance;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
void parseArgs(const char* env);
|
| 75 |
+
|
| 76 |
+
private:
|
| 77 |
+
CUDAAllocatorConfig();
|
| 78 |
+
|
| 79 |
+
void lexArgs(const char* env, std::vector<std::string>& config);
|
| 80 |
+
void consumeToken(
|
| 81 |
+
const std::vector<std::string>& config,
|
| 82 |
+
size_t i,
|
| 83 |
+
const char c);
|
| 84 |
+
size_t parseMaxSplitSize(const std::vector<std::string>& config, size_t i);
|
| 85 |
+
size_t parseGarbageCollectionThreshold(
|
| 86 |
+
const std::vector<std::string>& config,
|
| 87 |
+
size_t i);
|
| 88 |
+
size_t parseRoundUpPower2Divisions(
|
| 89 |
+
const std::vector<std::string>& config,
|
| 90 |
+
size_t i);
|
| 91 |
+
size_t parseAllocatorConfig(
|
| 92 |
+
const std::vector<std::string>& config,
|
| 93 |
+
size_t i,
|
| 94 |
+
bool& used_cudaMallocAsync);
|
| 95 |
+
size_t parsePinnedUseCudaHostRegister(
|
| 96 |
+
const std::vector<std::string>& config,
|
| 97 |
+
size_t i);
|
| 98 |
+
size_t parsePinnedNumRegisterThreads(
|
| 99 |
+
const std::vector<std::string>& config,
|
| 100 |
+
size_t i);
|
| 101 |
+
|
| 102 |
+
std::atomic<size_t> m_max_split_size;
|
| 103 |
+
std::vector<size_t> m_roundup_power2_divisions;
|
| 104 |
+
std::atomic<double> m_garbage_collection_threshold;
|
| 105 |
+
std::atomic<size_t> m_pinned_num_register_threads;
|
| 106 |
+
std::atomic<bool> m_expandable_segments;
|
| 107 |
+
std::atomic<bool> m_release_lock_on_cudamalloc;
|
| 108 |
+
std::atomic<bool> m_pinned_use_cuda_host_register;
|
| 109 |
+
};
|
| 110 |
+
|
| 111 |
+
// General caching allocator utilities
|
| 112 |
+
C10_CUDA_API void setAllocatorSettings(const std::string& env);
|
| 113 |
+
|
| 114 |
+
} // namespace CUDACachingAllocator
|
| 115 |
+
} // namespace cuda
|
| 116 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h
ADDED
|
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Allocator.h>
|
| 4 |
+
#include <c10/core/StorageImpl.h>
|
| 5 |
+
#include <c10/cuda/CUDAGraphsC10Utils.h>
|
| 6 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 7 |
+
#include <c10/cuda/CUDAStream.h>
|
| 8 |
+
#include <c10/util/ApproximateClock.h>
|
| 9 |
+
#include <c10/util/Registry.h>
|
| 10 |
+
|
| 11 |
+
#include <array>
|
| 12 |
+
#include <mutex>
|
| 13 |
+
#include <set>
|
| 14 |
+
#include <unordered_set>
|
| 15 |
+
|
| 16 |
+
namespace c10 {
|
| 17 |
+
|
| 18 |
+
// Caching allocator will execute every registered callback if it unable to find
|
| 19 |
+
// block inside of already allocated area.
|
| 20 |
+
class C10_CUDA_API FreeMemoryCallback {
|
| 21 |
+
public:
|
| 22 |
+
virtual ~FreeMemoryCallback() = default;
|
| 23 |
+
virtual bool Execute() = 0;
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
C10_DECLARE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback);
|
| 27 |
+
#define REGISTER_FREE_MEMORY_CALLBACK(name, ...) \
|
| 28 |
+
C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__);
|
| 29 |
+
|
| 30 |
+
namespace cuda {
|
| 31 |
+
|
| 32 |
+
// TODO: Turn this into an honest to goodness class. I briefly attempted to do
|
| 33 |
+
// this, but it was a bit irritating to figure out how to also correctly
|
| 34 |
+
// apply pimpl pattern so I didn't have to leak any internal implementation
|
| 35 |
+
// details in the header (CUDACachingAllocator could be made a pimpl, but
|
| 36 |
+
// you also need to appropriately define a class which is a subclass
|
| 37 |
+
// of Allocator. Not impossible, but required a bit more surgery than
|
| 38 |
+
// I wanted to do at the time.)
|
| 39 |
+
//
|
| 40 |
+
// Why is this using a namespace rather than old-style THCCachingAllocator_
|
| 41 |
+
// prefix? Mostly because it made the HIPify rules easier to write; _ is
|
| 42 |
+
// not counted as a word boundary, so you would otherwise have to list each
|
| 43 |
+
// of these functions.
|
| 44 |
+
|
| 45 |
+
namespace CUDACachingAllocator {
|
| 46 |
+
|
| 47 |
+
extern const size_t kLargeBuffer;
|
| 48 |
+
|
| 49 |
+
struct Stat {
|
| 50 |
+
int64_t current = 0;
|
| 51 |
+
int64_t peak = 0;
|
| 52 |
+
int64_t allocated = 0;
|
| 53 |
+
int64_t freed = 0;
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
enum struct StatType : uint64_t {
|
| 57 |
+
AGGREGATE = 0,
|
| 58 |
+
SMALL_POOL = 1,
|
| 59 |
+
LARGE_POOL = 2,
|
| 60 |
+
NUM_TYPES = 3 // remember to update this whenever a new stat type is added
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
typedef std::array<Stat, static_cast<size_t>(StatType::NUM_TYPES)> StatArray;
|
| 64 |
+
|
| 65 |
+
// Struct containing memory allocator summary statistics for a device.
|
| 66 |
+
struct DeviceStats {
|
| 67 |
+
// COUNT: allocations requested by client code
|
| 68 |
+
StatArray allocation;
|
| 69 |
+
// COUNT: number of allocated segments from cudaMalloc().
|
| 70 |
+
StatArray segment;
|
| 71 |
+
// COUNT: number of active memory blocks (allocated or used by stream)
|
| 72 |
+
StatArray active;
|
| 73 |
+
// COUNT: number of inactive, split memory blocks (unallocated but can't be
|
| 74 |
+
// released via cudaFree)
|
| 75 |
+
StatArray inactive_split;
|
| 76 |
+
|
| 77 |
+
// SUM: bytes allocated by this memory alocator
|
| 78 |
+
StatArray allocated_bytes;
|
| 79 |
+
// SUM: bytes reserved by this memory allocator (both free and used)
|
| 80 |
+
StatArray reserved_bytes;
|
| 81 |
+
// SUM: bytes within active memory blocks
|
| 82 |
+
StatArray active_bytes;
|
| 83 |
+
// SUM: bytes within inactive, split memory blocks
|
| 84 |
+
StatArray inactive_split_bytes;
|
| 85 |
+
// SUM: bytes requested by client code
|
| 86 |
+
StatArray requested_bytes;
|
| 87 |
+
|
| 88 |
+
// COUNT: total number of failed calls to CUDA malloc necessitating cache
|
| 89 |
+
// flushes.
|
| 90 |
+
int64_t num_alloc_retries = 0;
|
| 91 |
+
|
| 92 |
+
// COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush)
|
| 93 |
+
int64_t num_ooms = 0;
|
| 94 |
+
|
| 95 |
+
// COUNT: total number of oversize blocks allocated from pool
|
| 96 |
+
Stat oversize_allocations;
|
| 97 |
+
|
| 98 |
+
// COUNT: total number of oversize blocks requiring malloc
|
| 99 |
+
Stat oversize_segments;
|
| 100 |
+
|
| 101 |
+
// SIZE: maximum block size that is allowed to be split.
|
| 102 |
+
int64_t max_split_size = 0;
|
| 103 |
+
};
|
| 104 |
+
|
| 105 |
+
typedef std::shared_ptr<GatheredContext> (*CreateContextFn)(void);
|
| 106 |
+
|
| 107 |
+
// Struct containing info of an allocation block (i.e. a fractional part of a
|
| 108 |
+
// cudaMalloc)..
|
| 109 |
+
struct BlockInfo {
|
| 110 |
+
int64_t size = 0;
|
| 111 |
+
int64_t requested_size = 0;
|
| 112 |
+
int32_t gc_counter = 0;
|
| 113 |
+
bool allocated = false;
|
| 114 |
+
bool active = false;
|
| 115 |
+
std::shared_ptr<GatheredContext>
|
| 116 |
+
context_when_allocated; // per-watcher context
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
// Struct containing info of a memory segment (i.e. one contiguous cudaMalloc).
|
| 120 |
+
struct SegmentInfo {
|
| 121 |
+
int64_t device = 0;
|
| 122 |
+
int64_t address = 0;
|
| 123 |
+
int64_t total_size = 0;
|
| 124 |
+
int64_t requested_size = 0; // unrounded, actually requested size
|
| 125 |
+
int64_t allocated_size = 0;
|
| 126 |
+
int64_t active_size = 0;
|
| 127 |
+
cudaStream_t stream = 0;
|
| 128 |
+
bool is_large = false;
|
| 129 |
+
bool is_expandable = false;
|
| 130 |
+
MempoolId_t owner_private_pool_id = {0, 0};
|
| 131 |
+
std::vector<BlockInfo> blocks;
|
| 132 |
+
std::shared_ptr<GatheredContext> context_when_allocated;
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
struct AllocatorState {
|
| 136 |
+
virtual ~AllocatorState() = default;
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
union trace_time_ {
|
| 140 |
+
time_t t_;
|
| 141 |
+
approx_time_t approx_t_;
|
| 142 |
+
};
|
| 143 |
+
|
| 144 |
+
struct TraceEntry {
|
| 145 |
+
enum Action {
|
| 146 |
+
ALLOC, // API made to the caching allocator for new memory
|
| 147 |
+
FREE_REQUESTED, // API call made to the caching allocator to free memory
|
| 148 |
+
FREE_COMPLETED, // The allocator might have to delay a free because
|
| 149 |
+
// it is still in use on another stream via record_stream
|
| 150 |
+
// This event is generated when a free actually completes.
|
| 151 |
+
SEGMENT_ALLOC, // a call to cudaMalloc to get more memory from the OS
|
| 152 |
+
SEGMENT_FREE, // a call to cudaFree to return memory to the OS (e.g. to
|
| 153 |
+
// defragment or empty_caches)
|
| 154 |
+
SEGMENT_MAP, // a call to cuMemMap (used with expandable_segments)
|
| 155 |
+
SEGMENT_UNMAP, // unmap part of a segment (used with expandable segments)
|
| 156 |
+
SNAPSHOT, // a call to snapshot, used to correlate memory snapshots to trace
|
| 157 |
+
// events
|
| 158 |
+
OOM // the allocator threw an OutOfMemoryError (addr_ is the amount of free
|
| 159 |
+
// bytes reported by cuda)
|
| 160 |
+
};
|
| 161 |
+
TraceEntry(
|
| 162 |
+
Action action,
|
| 163 |
+
int device,
|
| 164 |
+
int64_t addr,
|
| 165 |
+
size_t size,
|
| 166 |
+
cudaStream_t stream,
|
| 167 |
+
approx_time_t time,
|
| 168 |
+
std::shared_ptr<GatheredContext> context = nullptr)
|
| 169 |
+
: action_(action),
|
| 170 |
+
device_(device),
|
| 171 |
+
addr_(addr),
|
| 172 |
+
context_(std::move(context)),
|
| 173 |
+
stream_(stream),
|
| 174 |
+
size_(size) {
|
| 175 |
+
time_.approx_t_ = time;
|
| 176 |
+
}
|
| 177 |
+
Action action_;
|
| 178 |
+
int device_;
|
| 179 |
+
int64_t addr_; // for OOM, this is the amount of free bytes reported by cuda
|
| 180 |
+
std::shared_ptr<GatheredContext> context_;
|
| 181 |
+
cudaStream_t stream_;
|
| 182 |
+
int64_t size_;
|
| 183 |
+
trace_time_ time_;
|
| 184 |
+
};
|
| 185 |
+
|
| 186 |
+
struct SnapshotInfo {
|
| 187 |
+
std::vector<SegmentInfo> segments;
|
| 188 |
+
std::vector<std::vector<TraceEntry>> device_traces;
|
| 189 |
+
};
|
| 190 |
+
|
| 191 |
+
// returns the pointers freed in the pool
|
| 192 |
+
// and the pointers allocated. Note: a pointer
|
| 193 |
+
// may appear in both freed and allocated
|
| 194 |
+
struct CheckpointDelta {
|
| 195 |
+
std::vector<void*> ptrs_freed;
|
| 196 |
+
std::vector<at::DataPtr> dataptrs_allocd;
|
| 197 |
+
};
|
| 198 |
+
|
| 199 |
+
enum struct RecordContext {
|
| 200 |
+
NEVER = 0,
|
| 201 |
+
STATE = 1, // only keep stacks for active allocations
|
| 202 |
+
ALLOC = 2, // additionally keep stacks for allocations in the trace history
|
| 203 |
+
ALL = 3, // additionally record stacks for when something is freed
|
| 204 |
+
};
|
| 205 |
+
|
| 206 |
+
// Size pretty-printer
|
| 207 |
+
std::string format_size(uint64_t size);
|
| 208 |
+
|
| 209 |
+
using OutOfMemoryObserver = std::function<void(
|
| 210 |
+
int64_t device,
|
| 211 |
+
int64_t allocated,
|
| 212 |
+
int64_t device_total,
|
| 213 |
+
int64_t device_free)>;
|
| 214 |
+
|
| 215 |
+
using AllocatorTraceTracker = std::function<void(const TraceEntry&)>;
|
| 216 |
+
|
| 217 |
+
class CUDAAllocator : public Allocator {
|
| 218 |
+
public:
|
| 219 |
+
virtual void* raw_alloc(size_t nbytes) = 0;
|
| 220 |
+
virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) = 0;
|
| 221 |
+
virtual void raw_delete(void* ptr) = 0;
|
| 222 |
+
virtual void init(int device_count) = 0;
|
| 223 |
+
virtual bool initialized() = 0;
|
| 224 |
+
virtual void setMemoryFraction(double fraction, int device) = 0;
|
| 225 |
+
virtual void emptyCache() = 0;
|
| 226 |
+
virtual void cacheInfo(int dev_id, size_t* largestBlock) = 0;
|
| 227 |
+
virtual void* getBaseAllocation(void* ptr, size_t* size) = 0;
|
| 228 |
+
virtual void recordStream(const DataPtr&, CUDAStream stream) = 0;
|
| 229 |
+
virtual DeviceStats getDeviceStats(int device) = 0;
|
| 230 |
+
virtual void resetAccumulatedStats(int device) = 0;
|
| 231 |
+
virtual void resetPeakStats(int device) = 0;
|
| 232 |
+
virtual SnapshotInfo snapshot() = 0;
|
| 233 |
+
virtual void beginAllocateStreamToPool(
|
| 234 |
+
int device,
|
| 235 |
+
cudaStream_t stream,
|
| 236 |
+
MempoolId_t mempool_id) = 0;
|
| 237 |
+
virtual void endAllocateStreamToPool(int device, cudaStream_t stream) = 0;
|
| 238 |
+
virtual void releasePool(int device, MempoolId_t mempool_id) = 0;
|
| 239 |
+
// returns true if the allocated blocks are equal to expected live allocations
|
| 240 |
+
virtual bool checkPoolLiveAllocations(
|
| 241 |
+
int device,
|
| 242 |
+
MempoolId_t mempool_id,
|
| 243 |
+
const std::unordered_set<void*>& expected_live_allocations) {
|
| 244 |
+
TORCH_CHECK(
|
| 245 |
+
false,
|
| 246 |
+
name(),
|
| 247 |
+
" does not yet support checkPoolLiveAllocations. "
|
| 248 |
+
"If you need it, please file an issue describing your use case.");
|
| 249 |
+
}
|
| 250 |
+
virtual std::shared_ptr<void> getIpcDevPtr(std::string handle) = 0;
|
| 251 |
+
virtual bool isHistoryEnabled() {
|
| 252 |
+
TORCH_CHECK(
|
| 253 |
+
false,
|
| 254 |
+
name(),
|
| 255 |
+
" does not yet support recordHistory. "
|
| 256 |
+
"If you need it, please file an issue describing your use case.");
|
| 257 |
+
}
|
| 258 |
+
virtual void recordHistory(
|
| 259 |
+
bool enabled,
|
| 260 |
+
CreateContextFn context_recorder,
|
| 261 |
+
size_t alloc_trace_max_entries,
|
| 262 |
+
RecordContext when) = 0;
|
| 263 |
+
virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
|
| 264 |
+
|
| 265 |
+
// Attached AllocatorTraceTracker callbacks will be called while the
|
| 266 |
+
// per-device allocator lock is held. Any additional locks taken from within
|
| 267 |
+
// the callback must be proven to always have the lock order that never
|
| 268 |
+
// triggers a deadlock. In particular, Python's GIL may be held when
|
| 269 |
+
// calling the allocator so it is unsafe to try to acquire the GIL in this
|
| 270 |
+
// callback.
|
| 271 |
+
virtual void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) = 0;
|
| 272 |
+
|
| 273 |
+
virtual void enablePeerAccess(int dev, int dev_to_access) = 0;
|
| 274 |
+
|
| 275 |
+
// memory not allocated from cudaMalloc cannot be copied
|
| 276 |
+
// across devices using cudaMemcpyAsync if peer to peer access is disabled.
|
| 277 |
+
// instead it requires cudaMemcpyAsyncPeer
|
| 278 |
+
// with P2P Enabled, all combinations work
|
| 279 |
+
// with P2P Disabled:
|
| 280 |
+
// cudaMalloc cudaMallocAsync/cuMemMap
|
| 281 |
+
// cudaMemcpyAsyncPeer works works
|
| 282 |
+
// cudaMemcpyAsync works error
|
| 283 |
+
|
| 284 |
+
// This function performs chooses to use the Peer version of
|
| 285 |
+
// memcpy if required based on where the allocated put dst/src.
|
| 286 |
+
virtual cudaError_t memcpyAsync(
|
| 287 |
+
void* dst,
|
| 288 |
+
int dstDevice,
|
| 289 |
+
const void* src,
|
| 290 |
+
int srcDevice,
|
| 291 |
+
size_t count,
|
| 292 |
+
cudaStream_t stream,
|
| 293 |
+
bool p2p_enabled) = 0;
|
| 294 |
+
virtual std::shared_ptr<AllocatorState> getCheckpointState(
|
| 295 |
+
int device,
|
| 296 |
+
MempoolId_t id) = 0;
|
| 297 |
+
virtual CheckpointDelta setCheckpointPoolState(
|
| 298 |
+
int device,
|
| 299 |
+
std::shared_ptr<AllocatorState> pps) = 0;
|
| 300 |
+
virtual std::string name() = 0;
|
| 301 |
+
};
|
| 302 |
+
|
| 303 |
+
// Allocator object, statically initialized
|
| 304 |
+
// See BackendInitializer in CUDACachingAllocator.cpp.
|
| 305 |
+
// Atomic loads on x86 are just normal loads,
|
| 306 |
+
// (atomic stores are different), so reading this value
|
| 307 |
+
// is no different than loading a pointer.
|
| 308 |
+
C10_CUDA_API extern std::atomic<CUDAAllocator*> allocator;
|
| 309 |
+
|
| 310 |
+
inline CUDAAllocator* get() {
|
| 311 |
+
return allocator.load();
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
// Called directly by clients.
|
| 315 |
+
inline void* raw_alloc(size_t nbytes) {
|
| 316 |
+
return get()->raw_alloc(nbytes);
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
inline void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) {
|
| 320 |
+
return get()->raw_alloc_with_stream(nbytes, stream);
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
inline void raw_delete(void* ptr) {
|
| 324 |
+
return get()->raw_delete(ptr);
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
inline void init(int device_count) {
|
| 328 |
+
return get()->init(device_count);
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
inline void setMemoryFraction(double fraction, int device) {
|
| 332 |
+
return get()->setMemoryFraction(fraction, device);
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
inline void emptyCache() {
|
| 336 |
+
return get()->emptyCache();
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
inline void cacheInfo(int dev_id, size_t* largestBlock) {
|
| 340 |
+
return get()->cacheInfo(dev_id, largestBlock);
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
inline void* getBaseAllocation(void* ptr, size_t* size) {
|
| 344 |
+
return get()->getBaseAllocation(ptr, size);
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
inline void recordStream(const DataPtr& dataPtr, CUDAStream stream) {
|
| 348 |
+
return get()->recordStream(dataPtr, stream);
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
inline DeviceStats getDeviceStats(int device) {
|
| 352 |
+
return get()->getDeviceStats(device);
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
inline void resetAccumulatedStats(int device) {
|
| 356 |
+
return get()->resetAccumulatedStats(device);
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
inline void resetPeakStats(int device) {
|
| 360 |
+
return get()->resetPeakStats(device);
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
inline SnapshotInfo snapshot() {
|
| 364 |
+
return get()->snapshot();
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
inline std::shared_ptr<AllocatorState> getCheckpointState(
|
| 368 |
+
int device,
|
| 369 |
+
MempoolId_t id) {
|
| 370 |
+
return get()->getCheckpointState(device, id);
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
inline CheckpointDelta setCheckpointPoolState(
|
| 374 |
+
int device,
|
| 375 |
+
std::shared_ptr<AllocatorState> pps) {
|
| 376 |
+
return get()->setCheckpointPoolState(device, pps);
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
// CUDAGraph interactions
|
| 380 |
+
inline void beginAllocateStreamToPool(
|
| 381 |
+
int device,
|
| 382 |
+
cudaStream_t stream,
|
| 383 |
+
MempoolId_t mempool_id) {
|
| 384 |
+
return get()->beginAllocateStreamToPool(device, stream, mempool_id);
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
inline void endAllocateStreamToPool(int device, cudaStream_t stream) {
|
| 388 |
+
return get()->endAllocateStreamToPool(device, stream);
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
inline void recordHistory(
|
| 392 |
+
bool enabled,
|
| 393 |
+
CreateContextFn context_recorder,
|
| 394 |
+
size_t alloc_trace_max_entries,
|
| 395 |
+
RecordContext when) {
|
| 396 |
+
return get()->recordHistory(
|
| 397 |
+
enabled, context_recorder, alloc_trace_max_entries, when);
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
inline bool isHistoryEnabled() {
|
| 401 |
+
return get()->isHistoryEnabled();
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
inline bool checkPoolLiveAllocations(
|
| 405 |
+
int device,
|
| 406 |
+
MempoolId_t mempool_id,
|
| 407 |
+
const std::unordered_set<void*>& expected_live_allocations) {
|
| 408 |
+
return get()->checkPoolLiveAllocations(
|
| 409 |
+
device, mempool_id, expected_live_allocations);
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
inline void attachOutOfMemoryObserver(OutOfMemoryObserver observer) {
|
| 413 |
+
return get()->attachOutOfMemoryObserver(observer);
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
inline void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) {
|
| 417 |
+
return get()->attachAllocatorTraceTracker(tracker);
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
inline void releasePool(int device, MempoolId_t mempool_id) {
|
| 421 |
+
return get()->releasePool(device, mempool_id);
|
| 422 |
+
}
|
| 423 |
+
// Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE
|
| 424 |
+
inline std::shared_ptr<void> getIpcDevPtr(std::string handle) {
|
| 425 |
+
return get()->getIpcDevPtr(handle);
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
inline std::string name() {
|
| 429 |
+
return get()->name();
|
| 430 |
+
}
|
| 431 |
+
|
| 432 |
+
inline cudaError_t memcpyAsync(
|
| 433 |
+
void* dst,
|
| 434 |
+
int dstDevice,
|
| 435 |
+
const void* src,
|
| 436 |
+
int srcDevice,
|
| 437 |
+
size_t count,
|
| 438 |
+
cudaStream_t stream,
|
| 439 |
+
bool p2p_enabled) {
|
| 440 |
+
return get()->memcpyAsync(
|
| 441 |
+
dst, dstDevice, src, srcDevice, count, stream, p2p_enabled);
|
| 442 |
+
}
|
| 443 |
+
|
| 444 |
+
inline void enablePeerAccess(int dev, int dev_to_access) {
|
| 445 |
+
return get()->enablePeerAccess(dev, dev_to_access);
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
} // namespace CUDACachingAllocator
|
| 449 |
+
} // namespace cuda
|
| 450 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/cuda/CUDAException.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
namespace cuda {
|
| 8 |
+
|
| 9 |
+
#ifdef TORCH_USE_CUDA_DSA
|
| 10 |
+
// Copy string from `src` to `dst`
|
| 11 |
+
static __device__ void dstrcpy(char* dst, const char* src) {
|
| 12 |
+
int i = 0;
|
| 13 |
+
// Copy string from source to destination, ensuring that it
|
| 14 |
+
// isn't longer than `C10_CUDA_DSA_MAX_STR_LEN-1`
|
| 15 |
+
while (*src != '\0' && i++ < C10_CUDA_DSA_MAX_STR_LEN - 1) {
|
| 16 |
+
*dst++ = *src++;
|
| 17 |
+
}
|
| 18 |
+
*dst = '\0';
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
static __device__ void dsa_add_new_assertion_failure(
|
| 22 |
+
DeviceAssertionsData* assertions_data,
|
| 23 |
+
const char* assertion_msg,
|
| 24 |
+
const char* filename,
|
| 25 |
+
const char* function_name,
|
| 26 |
+
const int line_number,
|
| 27 |
+
const uint32_t caller,
|
| 28 |
+
const dim3 block_id,
|
| 29 |
+
const dim3 thread_id) {
|
| 30 |
+
// `assertions_data` may be nullptr if device-side assertion checking
|
| 31 |
+
// is disabled at run-time. If it is disabled at compile time this
|
| 32 |
+
// function will never be called
|
| 33 |
+
if (!assertions_data) {
|
| 34 |
+
return;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
// Atomically increment so other threads can fail at the same time
|
| 38 |
+
// Note that incrementing this means that the CPU can observe that
|
| 39 |
+
// a failure has happened and can begin to respond before we've
|
| 40 |
+
// written information about that failure out to the buffer.
|
| 41 |
+
const auto nid = atomicAdd(&(assertions_data->assertion_count), 1);
|
| 42 |
+
|
| 43 |
+
if (nid >= C10_CUDA_DSA_ASSERTION_COUNT) {
|
| 44 |
+
// At this point we're ran out of assertion buffer space.
|
| 45 |
+
// We could print a message about this, but that'd get
|
| 46 |
+
// spammy if a lot of threads did it, so we just silently
|
| 47 |
+
// ignore any other assertion failures. In most cases the
|
| 48 |
+
// failures will all probably be analogous anyway.
|
| 49 |
+
return;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
// Write information about the assertion failure to memory.
|
| 53 |
+
// Note that this occurs only after the `assertion_count`
|
| 54 |
+
// increment broadcasts that there's been a problem.
|
| 55 |
+
auto& self = assertions_data->assertions[nid];
|
| 56 |
+
dstrcpy(self.assertion_msg, assertion_msg);
|
| 57 |
+
dstrcpy(self.filename, filename);
|
| 58 |
+
dstrcpy(self.function_name, function_name);
|
| 59 |
+
self.line_number = line_number;
|
| 60 |
+
self.caller = caller;
|
| 61 |
+
self.block_id[0] = block_id.x;
|
| 62 |
+
self.block_id[1] = block_id.y;
|
| 63 |
+
self.block_id[2] = block_id.z;
|
| 64 |
+
self.thread_id[0] = thread_id.x;
|
| 65 |
+
self.thread_id[1] = thread_id.y;
|
| 66 |
+
self.thread_id[2] = thread_id.z;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// Emulates a kernel assertion. The assertion won't stop the kernel's progress,
|
| 70 |
+
// so you should assume everything the kernel produces is garbage if there's an
|
| 71 |
+
// assertion failure.
|
| 72 |
+
// NOTE: This assumes that `assertions_data` and `assertion_caller_id` are
|
| 73 |
+
// arguments of the kernel and therefore accessible.
|
| 74 |
+
#define CUDA_KERNEL_ASSERT2(condition) \
|
| 75 |
+
do { \
|
| 76 |
+
if (C10_UNLIKELY(!(condition))) { \
|
| 77 |
+
/* Has an atomic element so threads can fail at the same time */ \
|
| 78 |
+
c10::cuda::dsa_add_new_assertion_failure( \
|
| 79 |
+
assertions_data, \
|
| 80 |
+
C10_STRINGIZE(condition), \
|
| 81 |
+
__FILE__, \
|
| 82 |
+
__FUNCTION__, \
|
| 83 |
+
__LINE__, \
|
| 84 |
+
assertion_caller_id, \
|
| 85 |
+
blockIdx, \
|
| 86 |
+
threadIdx); \
|
| 87 |
+
/* Now that the kernel has failed we early exit the kernel, but */ \
|
| 88 |
+
/* otherwise keep going and rely on the host to check UVM and */ \
|
| 89 |
+
/* determine we've had a problem */ \
|
| 90 |
+
return; \
|
| 91 |
+
} \
|
| 92 |
+
} while (false)
|
| 93 |
+
#else
|
| 94 |
+
#define CUDA_KERNEL_ASSERT2(condition) assert(condition)
|
| 95 |
+
#endif
|
| 96 |
+
|
| 97 |
+
} // namespace cuda
|
| 98 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 4 |
+
|
| 5 |
+
#include <memory>
|
| 6 |
+
#include <mutex>
|
| 7 |
+
#include <string>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
#ifdef USE_CUDA
|
| 11 |
+
#define TORCH_USE_CUDA_DSA
|
| 12 |
+
#endif
|
| 13 |
+
|
| 14 |
+
/// Number of assertion failure messages we can store. If this is too small
|
| 15 |
+
/// threads will fail silently.
|
| 16 |
+
constexpr int C10_CUDA_DSA_ASSERTION_COUNT = 10;
|
| 17 |
+
constexpr int C10_CUDA_DSA_MAX_STR_LEN = 512;
|
| 18 |
+
|
| 19 |
+
namespace c10 {
|
| 20 |
+
namespace cuda {
|
| 21 |
+
|
| 22 |
+
/// Holds information about any device-side assertions that fail.
|
| 23 |
+
/// Held in managed memory and access by both the CPU and the GPU.
|
| 24 |
+
struct DeviceAssertionData {
|
| 25 |
+
/// Stringification of the assertion
|
| 26 |
+
char assertion_msg[C10_CUDA_DSA_MAX_STR_LEN];
|
| 27 |
+
/// File the assertion was in
|
| 28 |
+
char filename[C10_CUDA_DSA_MAX_STR_LEN];
|
| 29 |
+
/// Name of the function the assertion was in
|
| 30 |
+
char function_name[C10_CUDA_DSA_MAX_STR_LEN];
|
| 31 |
+
/// Line number the assertion was at
|
| 32 |
+
int line_number;
|
| 33 |
+
/// Number uniquely identifying the kernel launch that triggered the assertion
|
| 34 |
+
uint32_t caller;
|
| 35 |
+
/// block_id of the thread that failed the assertion
|
| 36 |
+
int32_t block_id[3];
|
| 37 |
+
/// third_id of the thread that failed the assertion
|
| 38 |
+
int32_t thread_id[3];
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
/// Used to hold assertions generated by the device
|
| 42 |
+
/// Held in managed memory and access by both the CPU and the GPU.
|
| 43 |
+
struct DeviceAssertionsData {
|
| 44 |
+
/// Total number of assertions found; a subset of thse will be recorded
|
| 45 |
+
/// in `assertions`
|
| 46 |
+
int32_t assertion_count;
|
| 47 |
+
/// An array of assertions that will be written to in a race-free manner
|
| 48 |
+
DeviceAssertionData assertions[C10_CUDA_DSA_ASSERTION_COUNT];
|
| 49 |
+
};
|
| 50 |
+
|
| 51 |
+
/// Use to hold info about kernel launches so that we can run kernels
|
| 52 |
+
/// asynchronously and still associate launches with device-side
|
| 53 |
+
/// assertion failures
|
| 54 |
+
struct CUDAKernelLaunchInfo {
|
| 55 |
+
/// Filename of the code where the kernel was launched from
|
| 56 |
+
const char* launch_filename;
|
| 57 |
+
/// Function from which the kernel was launched
|
| 58 |
+
const char* launch_function;
|
| 59 |
+
/// Line number of where the code was launched from
|
| 60 |
+
uint32_t launch_linenum;
|
| 61 |
+
/// Backtrace of where the kernel was launched from, only populated if
|
| 62 |
+
/// CUDAKernelLaunchRegistry::gather_launch_stacktrace is True
|
| 63 |
+
std::string launch_stacktrace;
|
| 64 |
+
/// Kernel that was launched
|
| 65 |
+
const char* kernel_name;
|
| 66 |
+
/// Device the kernel was launched on
|
| 67 |
+
int device;
|
| 68 |
+
/// Stream the kernel was launched on
|
| 69 |
+
int32_t stream;
|
| 70 |
+
/// A number that uniquely identifies the kernel launch
|
| 71 |
+
uint64_t generation_number;
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
/// Circular buffer used to hold information about kernel launches
|
| 75 |
+
/// this is later used to reconstruct how a device-side kernel assertion failure
|
| 76 |
+
/// occurred CUDAKernelLaunchRegistry is used as a singleton
|
| 77 |
+
class C10_CUDA_API CUDAKernelLaunchRegistry {
|
| 78 |
+
private:
|
| 79 |
+
/// Assume that this is the max number of kernel launches that might ever be
|
| 80 |
+
/// enqueued across all streams on a single device
|
| 81 |
+
static constexpr int max_kernel_launches = 1024;
|
| 82 |
+
/// How many kernel launch infos we've inserted. Used to ensure that circular
|
| 83 |
+
/// queue doesn't provide false information by always increasing, but also to
|
| 84 |
+
/// mark where we are inserting into the queue
|
| 85 |
+
#ifdef TORCH_USE_CUDA_DSA
|
| 86 |
+
uint64_t generation_number = 0;
|
| 87 |
+
#endif
|
| 88 |
+
/// Shared mutex between writer and accessor to ensure multi-threaded safety.
|
| 89 |
+
mutable std::mutex read_write_mutex;
|
| 90 |
+
/// Used to ensure prevent race conditions in GPU memory allocation
|
| 91 |
+
mutable std::mutex gpu_alloc_mutex;
|
| 92 |
+
/// Pointer to managed memory keeping track of device-side assertions. There
|
| 93 |
+
/// is one entry for each possible device the process might work with. Unused
|
| 94 |
+
/// entries are nullptrs. We could also use an unordered_set here, but this
|
| 95 |
+
/// vector design will be faster and the wasted memory is small since we
|
| 96 |
+
/// expect the number of GPUs per node will always be small
|
| 97 |
+
std::vector<
|
| 98 |
+
std::unique_ptr<DeviceAssertionsData, void (*)(DeviceAssertionsData*)>>
|
| 99 |
+
uvm_assertions;
|
| 100 |
+
/// A single circular buffer holds information about every kernel launch the
|
| 101 |
+
/// process makes across all devices.
|
| 102 |
+
std::vector<CUDAKernelLaunchInfo> kernel_launches;
|
| 103 |
+
bool check_env_for_enable_launch_stacktracing() const;
|
| 104 |
+
bool check_env_for_dsa_enabled() const;
|
| 105 |
+
|
| 106 |
+
public:
|
| 107 |
+
CUDAKernelLaunchRegistry();
|
| 108 |
+
/// Register a new kernel launch and obtain a generation number back to be
|
| 109 |
+
/// passed to the kernel
|
| 110 |
+
uint32_t insert(
|
| 111 |
+
const char* launch_filename,
|
| 112 |
+
const char* launch_function,
|
| 113 |
+
const uint32_t launch_linenum,
|
| 114 |
+
const char* kernel_name,
|
| 115 |
+
const int32_t stream_id);
|
| 116 |
+
/// Get copies of the kernel launch registry and each device's assertion
|
| 117 |
+
/// failure buffer so they can be inspected without raising race conditions
|
| 118 |
+
std::
|
| 119 |
+
pair<std::vector<DeviceAssertionsData>, std::vector<CUDAKernelLaunchInfo>>
|
| 120 |
+
snapshot() const;
|
| 121 |
+
/// Get a pointer to the current device's assertion failure buffer. If no such
|
| 122 |
+
/// buffer exists then one is created. This means that the first kernel launch
|
| 123 |
+
/// made on each device will be slightly slower because memory allocations are
|
| 124 |
+
/// required
|
| 125 |
+
DeviceAssertionsData* get_uvm_assertions_ptr_for_current_device();
|
| 126 |
+
/// Gets the global singleton of the registry
|
| 127 |
+
static CUDAKernelLaunchRegistry& get_singleton_ref();
|
| 128 |
+
/// If not all devices support DSA, we disable it
|
| 129 |
+
const bool do_all_devices_support_managed_memory = false;
|
| 130 |
+
/// Whether or not to gather stack traces when launching kernels
|
| 131 |
+
bool gather_launch_stacktrace = false;
|
| 132 |
+
/// Whether or not host-side DSA is enabled or disabled at run-time
|
| 133 |
+
/// Note: Device-side code cannot be enabled/disabled at run-time
|
| 134 |
+
bool enabled_at_runtime = false;
|
| 135 |
+
/// Whether or not a device has indicated a failure
|
| 136 |
+
bool has_failed() const;
|
| 137 |
+
#ifdef TORCH_USE_CUDA_DSA
|
| 138 |
+
const bool enabled_at_compile_time = true;
|
| 139 |
+
#else
|
| 140 |
+
const bool enabled_at_compile_time = false;
|
| 141 |
+
#endif
|
| 142 |
+
};
|
| 143 |
+
|
| 144 |
+
std::string c10_retrieve_device_side_assertion_info();
|
| 145 |
+
|
| 146 |
+
} // namespace cuda
|
| 147 |
+
} // namespace c10
|
| 148 |
+
|
| 149 |
+
// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH
|
| 150 |
+
// requires the same input arguments. We introduce the following macro to
|
| 151 |
+
// standardize these.
|
| 152 |
+
#define TORCH_DSA_KERNEL_ARGS \
|
| 153 |
+
[[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, \
|
| 154 |
+
[[maybe_unused]] uint32_t assertion_caller_id
|
| 155 |
+
|
| 156 |
+
// This macro can be used to pass the DSA arguments onward to another
|
| 157 |
+
// function
|
| 158 |
+
#define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAException.h
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/cuda/CUDADeviceAssertionHost.h>
|
| 4 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 5 |
+
#include <c10/cuda/CUDAMiscFunctions.h>
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/Exception.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#include <cuda.h>
|
| 10 |
+
|
| 11 |
+
// Note [CHECK macro]
|
| 12 |
+
// ~~~~~~~~~~~~~~~~~~
|
| 13 |
+
// This is a macro so that AT_ERROR can get accurate __LINE__
|
| 14 |
+
// and __FILE__ information. We could split this into a short
|
| 15 |
+
// macro and a function implementation if we pass along __LINE__
|
| 16 |
+
// and __FILE__, but no one has found this worth doing.
|
| 17 |
+
|
| 18 |
+
// Used to denote errors from CUDA framework.
|
| 19 |
+
// This needs to be declared here instead util/Exception.h for proper conversion
|
| 20 |
+
// during hipify.
|
| 21 |
+
namespace c10 {
|
| 22 |
+
class C10_CUDA_API CUDAError : public c10::Error {
|
| 23 |
+
using Error::Error;
|
| 24 |
+
};
|
| 25 |
+
} // namespace c10
|
| 26 |
+
|
| 27 |
+
#define C10_CUDA_CHECK(EXPR) \
|
| 28 |
+
do { \
|
| 29 |
+
const cudaError_t __err = EXPR; \
|
| 30 |
+
c10::cuda::c10_cuda_check_implementation( \
|
| 31 |
+
static_cast<int32_t>(__err), \
|
| 32 |
+
__FILE__, \
|
| 33 |
+
__func__, /* Line number data type not well-defined between \
|
| 34 |
+
compilers, so we perform an explicit cast */ \
|
| 35 |
+
static_cast<uint32_t>(__LINE__), \
|
| 36 |
+
true); \
|
| 37 |
+
} while (0)
|
| 38 |
+
|
| 39 |
+
#define C10_CUDA_CHECK_WARN(EXPR) \
|
| 40 |
+
do { \
|
| 41 |
+
const cudaError_t __err = EXPR; \
|
| 42 |
+
if (C10_UNLIKELY(__err != cudaSuccess)) { \
|
| 43 |
+
auto error_unused C10_UNUSED = cudaGetLastError(); \
|
| 44 |
+
(void)error_unused; \
|
| 45 |
+
TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); \
|
| 46 |
+
} \
|
| 47 |
+
} while (0)
|
| 48 |
+
|
| 49 |
+
// Indicates that a CUDA error is handled in a non-standard way
|
| 50 |
+
#define C10_CUDA_ERROR_HANDLED(EXPR) EXPR
|
| 51 |
+
|
| 52 |
+
// Intentionally ignore a CUDA error
|
| 53 |
+
#define C10_CUDA_IGNORE_ERROR(EXPR) \
|
| 54 |
+
do { \
|
| 55 |
+
const cudaError_t __err = EXPR; \
|
| 56 |
+
if (C10_UNLIKELY(__err != cudaSuccess)) { \
|
| 57 |
+
cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
|
| 58 |
+
(void)error_unused; \
|
| 59 |
+
} \
|
| 60 |
+
} while (0)
|
| 61 |
+
|
| 62 |
+
// Clear the last CUDA error
|
| 63 |
+
#define C10_CUDA_CLEAR_ERROR() \
|
| 64 |
+
do { \
|
| 65 |
+
cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
|
| 66 |
+
(void)error_unused; \
|
| 67 |
+
} while (0)
|
| 68 |
+
|
| 69 |
+
// This should be used directly after every kernel launch to ensure
|
| 70 |
+
// the launch happened correctly and provide an early, close-to-source
|
| 71 |
+
// diagnostic if it didn't.
|
| 72 |
+
#define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError())
|
| 73 |
+
|
| 74 |
+
/// Launches a CUDA kernel appending to it all the information need to handle
|
| 75 |
+
/// device-side assertion failures. Checks that the launch was successful.
|
| 76 |
+
#define TORCH_DSA_KERNEL_LAUNCH( \
|
| 77 |
+
kernel, blocks, threads, shared_mem, stream, ...) \
|
| 78 |
+
do { \
|
| 79 |
+
auto& launch_registry = \
|
| 80 |
+
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref(); \
|
| 81 |
+
kernel<<<blocks, threads, shared_mem, stream>>>( \
|
| 82 |
+
__VA_ARGS__, \
|
| 83 |
+
launch_registry.get_uvm_assertions_ptr_for_current_device(), \
|
| 84 |
+
launch_registry.insert( \
|
| 85 |
+
__FILE__, __FUNCTION__, __LINE__, #kernel, stream.id())); \
|
| 86 |
+
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
|
| 87 |
+
} while (0)
|
| 88 |
+
|
| 89 |
+
namespace c10 {
|
| 90 |
+
namespace cuda {
|
| 91 |
+
|
| 92 |
+
/// In the event of a CUDA failure, formats a nice error message about that
|
| 93 |
+
/// failure and also checks for device-side assertion failures
|
| 94 |
+
C10_CUDA_API void c10_cuda_check_implementation(
|
| 95 |
+
const int32_t err,
|
| 96 |
+
const char* filename,
|
| 97 |
+
const char* function_name,
|
| 98 |
+
const int line_number,
|
| 99 |
+
const bool include_device_assertions);
|
| 100 |
+
|
| 101 |
+
} // namespace cuda
|
| 102 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAFunctions.h
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// This header provides C++ wrappers around commonly used CUDA API functions.
// The benefit of using C++ here is that we can raise an exception in the
// event of an error, rather than explicitly pass around error codes. This
// leads to more natural APIs.
//
// The naming convention used here matches the naming convention of torch.cuda

#include <c10/core/Device.h>
#include <c10/core/impl/GPUTrace.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAMacros.h>
#include <cuda_runtime_api.h>
namespace c10 {
namespace cuda {

// NB: In the past, we were inconsistent about whether or not this reported
// an error if there were driver problems or not. Based on experience
// interacting with users, it seems that people basically ~never want this
// function to fail; it should just return zero if things are not working.
// Oblige them.
// It still might log a warning the first time it's invoked.
C10_CUDA_API DeviceIndex device_count() noexcept;

// Version of device_count that throws if no devices are detected
C10_CUDA_API DeviceIndex device_count_ensure_non_zero();

// Index of the currently active CUDA device.
C10_CUDA_API DeviceIndex current_device();

// Makes `device` the active CUDA device for this thread.
C10_CUDA_API void set_device(DeviceIndex device);

// Synchronizes the current device (cf. cudaDeviceSynchronize).
C10_CUDA_API void device_synchronize();

// Invoked by the inline helpers below whenever a synchronizing call is made
// while warning_state() is not L_DISABLED; warns or errors accordingly.
C10_CUDA_API void warn_or_error_on_sync();

// Raw CUDA device management functions; unlike the wrappers above these
// return cudaError_t codes instead of throwing.
C10_CUDA_API cudaError_t GetDeviceCount(int* dev_count);

C10_CUDA_API cudaError_t GetDevice(int* device);

C10_CUDA_API cudaError_t SetDevice(int device);

C10_CUDA_API cudaError_t MaybeSetDevice(int device);

// NOTE(review): exact semantics of the Exchange/Maybe variants are defined in
// CUDAFunctions.cpp -- presumably ExchangeDevice returns the previous device
// index; confirm at the implementation before relying on it.
C10_CUDA_API int ExchangeDevice(int device);

C10_CUDA_API int MaybeExchangeDevice(int device);

C10_CUDA_API void SetTargetDevice();

// Controls how CUDA synchronization points are reported for debugging
// (consumed by the inline helpers below): ignored, warned about, or errors.
enum class SyncDebugMode { L_DISABLED = 0, L_WARN, L_ERROR };

// this is a holder for c10 global state (similar to at GlobalContext)
// currently it's used to store cuda synchronization warning state,
// but can be expanded to hold other related global state, e.g. to
// record stream usage
class WarningState {
 public:
  void set_sync_debug_mode(SyncDebugMode l) {
    sync_debug_mode = l;
  }

  SyncDebugMode get_sync_debug_mode() {
    return sync_debug_mode;
  }

 private:
  SyncDebugMode sync_debug_mode = SyncDebugMode::L_DISABLED;
};

// Accessor for the process-wide WarningState singleton.
C10_CUDA_API __inline__ WarningState& warning_state() {
  static WarningState warning_state_;
  return warning_state_;
}
// the subsequent functions are defined in the header because for performance
// reasons we want them to be inline

// Copies `nbytes` from `src` to `dst` on `stream` and then synchronizes that
// stream (fused into hipMemcpyWithStream on HIP >= 3.1). Before doing the
// work it reports the synchronization to the sync-debug machinery and to any
// registered GPU trace interpreter.
C10_CUDA_API void __inline__ memcpy_and_sync(
    void* dst,
    const void* src,
    int64_t nbytes,
    cudaMemcpyKind kind,
    cudaStream_t stream) {
  if (C10_UNLIKELY(
          warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) {
    warn_or_error_on_sync();
  }
  const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
  if (C10_UNLIKELY(interp)) {
    (*interp)->trace_gpu_stream_synchronization(
        reinterpret_cast<uintptr_t>(stream));
  }
#if defined(TORCH_HIP_VERSION) && (TORCH_HIP_VERSION >= 301)
  C10_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
#else
  C10_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
#endif
}

// Synchronizes `stream`, routing through the same sync-debug warning and
// GPU-trace hooks as memcpy_and_sync.
C10_CUDA_API void __inline__ stream_synchronize(cudaStream_t stream) {
  if (C10_UNLIKELY(
          warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) {
    warn_or_error_on_sync();
  }
  const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
  if (C10_UNLIKELY(interp)) {
    (*interp)->trace_gpu_stream_synchronization(
        reinterpret_cast<uintptr_t>(stream));
  }
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
}

// Primary-context queries; implemented out of line.
C10_CUDA_API bool hasPrimaryContext(DeviceIndex device_index);
C10_CUDA_API c10::optional<DeviceIndex> getDeviceIndexWithPrimaryContext();

} // namespace cuda
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGraphsC10Utils.h
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <c10/cuda/CUDAStream.h>
#include <utility>

// CUDA Graphs utils used by c10 and aten.
// aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only.

namespace c10 {
namespace cuda {

// Identifier for a stream capture (see MempoolId_t below for how it's used).
using CaptureId_t = unsigned long long;

// first is set if the instance is created by CUDAGraph::capture_begin.
// second is set if the instance is created by at::cuda::graph_pool_handle.
using MempoolId_t = std::pair<CaptureId_t, CaptureId_t>;

// RAII guard for "cudaStreamCaptureMode", a thread-local value
// that controls the error-checking strictness of a capture.
// The constructor swaps in `desired`; the destructor swaps the previous
// mode back (warning rather than throwing, since destructors must not throw).
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
struct C10_CUDA_API CUDAStreamCaptureModeGuard {
  CUDAStreamCaptureModeGuard(cudaStreamCaptureMode desired) {
    strictness_ = desired;
    C10_CUDA_CHECK(cudaThreadExchangeStreamCaptureMode(&strictness_));
  }
  ~CUDAStreamCaptureModeGuard() {
    C10_CUDA_CHECK_WARN(cudaThreadExchangeStreamCaptureMode(&strictness_));
  }

 private:
  cudaStreamCaptureMode strictness_;
};
#endif

#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
// Protects against enum cudaStreamCaptureStatus implementation changes.
// Some compilers seem not to like static_assert without the messages.
static_assert(
    int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone) == 0,
    "unexpected int(cudaStreamCaptureStatusNone) value");
static_assert(
    int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive) == 1,
    "unexpected int(cudaStreamCaptureStatusActive) value");
static_assert(
    int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated) == 2,
    "unexpected int(cudaStreamCaptureStatusInvalidated) value");
#endif

// Mirror of cudaStreamCaptureStatus that also compiles on ROCm builds
// without graph support, where only None is available.
enum class CaptureStatus : int {
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
  None = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone),
  Active = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive),
  Invalidated = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated)
#else
  None = 0
#endif
};

// Pretty-printer for CaptureStatus; asserts on values outside the enum.
inline std::ostream& operator<<(std::ostream& os, CaptureStatus status) {
  switch (status) {
    case CaptureStatus::None:
      os << "cudaStreamCaptureStatusNone";
      break;
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
    case CaptureStatus::Active:
      os << "cudaStreamCaptureStatusActive";
      break;
    case CaptureStatus::Invalidated:
      os << "cudaStreamCaptureStatusInvalidated";
      break;
#endif
    default:
      TORCH_INTERNAL_ASSERT(
          false, "Unknown CUDA graph CaptureStatus", int(status));
  }
  return os;
}

// Use this version where you're sure a CUDA context exists already.
// NOTE(review): the "MayInitCtx" suffix suggests the underlying query can
// create a context as a side effect -- confirm against callers before
// using in context-sensitive code.
inline CaptureStatus currentStreamCaptureStatusMayInitCtx() {
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
  cudaStreamCaptureStatus is_capturing;
  C10_CUDA_CHECK(
      cudaStreamIsCapturing(c10::cuda::getCurrentCUDAStream(), &is_capturing));
  return CaptureStatus(is_capturing);
#else
  return CaptureStatus::None;
#endif
}

} // namespace cuda
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAGuard.h
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/DeviceType.h>
|
| 4 |
+
#include <c10/core/impl/InlineDeviceGuard.h>
|
| 5 |
+
#include <c10/core/impl/InlineStreamGuard.h>
|
| 6 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 7 |
+
#include <c10/cuda/impl/CUDAGuardImpl.h>
|
| 8 |
+
|
| 9 |
+
#include <cstddef>
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
namespace cuda {
|
| 13 |
+
|
| 14 |
+
// This code is kind of boilerplatey. See Note [Whither the DeviceGuard
|
| 15 |
+
// boilerplate]
|
| 16 |
+
|
| 17 |
+
/// A variant of DeviceGuard that is specialized for CUDA. It accepts
|
| 18 |
+
/// integer indices (interpreting them as CUDA devices) and is a little
|
| 19 |
+
/// more efficient than DeviceGuard (it compiles to straight line
|
| 20 |
+
/// cudaSetDevice/cudaGetDevice calls); however, it can only be used
|
| 21 |
+
/// from code that links against CUDA directly.
|
| 22 |
+
struct CUDAGuard {
  /// No default constructor; see Note [Omitted default constructor from RAII]
  explicit CUDAGuard() = delete;

  /// Set the current CUDA device to the passed device index.
  explicit CUDAGuard(DeviceIndex device_index) : guard_(device_index) {}

  /// Sets the current CUDA device to the passed device. Errors if the passed
  /// device is not a CUDA device.
  explicit CUDAGuard(Device device) : guard_(device) {}

  // Copy is not allowed
  CUDAGuard(const CUDAGuard&) = delete;
  CUDAGuard& operator=(const CUDAGuard&) = delete;

  // Move is not allowed (there is no uninitialized state)
  CUDAGuard(CUDAGuard&& other) = delete;
  CUDAGuard& operator=(CUDAGuard&& other) = delete;

  /// Sets the CUDA device to the given device. Errors if the given device
  /// is not a CUDA device.
  void set_device(Device device) {
    guard_.set_device(device);
  }

  /// Sets the CUDA device to the given device. Errors if the given device
  /// is not a CUDA device. (This method is provided for uniformity with
  /// DeviceGuard).
  void reset_device(Device device) {
    guard_.reset_device(device);
  }

  /// Sets the CUDA device to the given device index.
  void set_index(DeviceIndex device_index) {
    guard_.set_index(device_index);
  }

  /// Returns the device that was set upon construction of the guard
  Device original_device() const {
    return guard_.original_device();
  }

  /// Returns the last device that was set via `set_device`, if any, otherwise
  /// the device passed during construction.
  Device current_device() const {
    return guard_.current_device();
  }

  // Destruction behavior (restoring the prior device) is delegated to the
  // implicitly generated destructor of the inline guard below.
 private:
  /// The guard for the current device.
  c10::impl::InlineDeviceGuard<impl::CUDAGuardImpl> guard_;
};
|
| 74 |
+
|
| 75 |
+
/// A variant of OptionalDeviceGuard that is specialized for CUDA. See
|
| 76 |
+
/// CUDAGuard for when you can use this.
|
| 77 |
+
struct OptionalCUDAGuard {
  /// Create an uninitialized OptionalCUDAGuard.
  explicit OptionalCUDAGuard() : guard_() {}

  /// Set the current CUDA device to the passed Device, if it is not nullopt.
  explicit OptionalCUDAGuard(optional<Device> device_opt)
      : guard_(device_opt) {}

  /// Set the current CUDA device to the passed device index, if it is not
  /// nullopt
  explicit OptionalCUDAGuard(optional<DeviceIndex> device_index_opt)
      : guard_(device_index_opt) {}

  // Copy is not allowed
  OptionalCUDAGuard(const OptionalCUDAGuard&) = delete;
  OptionalCUDAGuard& operator=(const OptionalCUDAGuard&) = delete;

  // See Note [Move construction for RAII guards is tricky]
  OptionalCUDAGuard(OptionalCUDAGuard&& other) = delete;

  // See Note [Move assignment for RAII guards is tricky]
  OptionalCUDAGuard& operator=(OptionalCUDAGuard&& other) = delete;

  /// Sets the CUDA device to the given device, initializing the guard if it
  /// is not already initialized. Errors if the given device is not a CUDA
  /// device.
  void set_device(Device device) {
    guard_.set_device(device);
  }

  /// Sets the CUDA device to the given device, initializing the guard if it is
  /// not already initialized. Errors if the given device is not a CUDA device.
  /// (This method is provided for uniformity with OptionalDeviceGuard).
  void reset_device(Device device) {
    guard_.reset_device(device);
  }

  /// Sets the CUDA device to the given device index, initializing the guard if
  /// it is not already initialized.
  void set_index(DeviceIndex device_index) {
    guard_.set_index(device_index);
  }

  /// Returns the device that was set immediately prior to initialization of the
  /// guard, or nullopt if the guard is uninitialized.
  optional<Device> original_device() const {
    return guard_.original_device();
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device, if the guard is initialized,
  /// or nullopt if the guard is uninitialized.
  optional<Device> current_device() const {
    return guard_.current_device();
  }

  /// Restore the original CUDA device, resetting this guard to uninitialized
  /// state.
  void reset() {
    guard_.reset();
  }

 private:
  /// Underlying generic optional device guard, instantiated with the CUDA impl.
  c10::impl::InlineOptionalDeviceGuard<impl::CUDAGuardImpl> guard_;
};
|
| 142 |
+
|
| 143 |
+
/// A variant of StreamGuard that is specialized for CUDA. See CUDAGuard
|
| 144 |
+
/// for when you can use this.
|
| 145 |
+
struct CUDAStreamGuard {
  /// No default constructor, see Note [Omitted default constructor from RAII]
  explicit CUDAStreamGuard() = delete;

  /// Set the current CUDA device to the device associated with the passed
  /// stream, and set the current CUDA stream on that device to the passed
  /// stream. Errors if the Stream is not a CUDA stream.
  explicit CUDAStreamGuard(Stream stream) : guard_(stream) {}

  /// Copy is disallowed
  CUDAStreamGuard(const CUDAStreamGuard&) = delete;
  CUDAStreamGuard& operator=(const CUDAStreamGuard&) = delete;

  /// Move is disallowed, as CUDAStreamGuard does not have an uninitialized
  /// state, which is required for moves on types with nontrivial destructors.
  CUDAStreamGuard(CUDAStreamGuard&& other) = delete;
  CUDAStreamGuard& operator=(CUDAStreamGuard&& other) = delete;

  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device. Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  /// Errors if the stream passed is not a CUDA stream.
  ///
  /// NOTE: this implementation may skip some stream/device setting if
  /// it can prove that it is unnecessary.
  ///
  /// WARNING: reset_stream does NOT preserve previously set streams on
  /// different devices. If you need to set streams on multiple devices
  /// on CUDA, use CUDAMultiStreamGuard instead.
  void reset_stream(Stream stream) {
    guard_.reset_stream(stream);
  }

  /// Returns the CUDA stream that was set at the time the guard was
  /// constructed.
  // UNCHECKED wrap: the inline guard only ever stores CUDA streams, so the
  // device-type check can be skipped.
  CUDAStream original_stream() const {
    return CUDAStream(CUDAStream::UNCHECKED, guard_.original_stream());
  }

  /// Returns the most recent CUDA stream that was set using this device guard,
  /// either from construction, or via set_stream.
  CUDAStream current_stream() const {
    return CUDAStream(CUDAStream::UNCHECKED, guard_.current_stream());
  }

  /// Returns the most recent CUDA device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return guard_.current_device();
  }

  /// Returns the CUDA device that was set at the most recent reset_stream(),
  /// or otherwise the device at construction time.
  Device original_device() const {
    return guard_.original_device();
  }

 private:
  /// Underlying generic stream guard, instantiated with the CUDA impl.
  c10::impl::InlineStreamGuard<impl::CUDAGuardImpl> guard_;
};
|
| 206 |
+
|
| 207 |
+
/// A variant of OptionalStreamGuard that is specialized for CUDA. See
|
| 208 |
+
/// CUDAGuard for when you can use this.
|
| 209 |
+
struct OptionalCUDAStreamGuard {
  /// Create an uninitialized guard.
  explicit OptionalCUDAStreamGuard() : guard_() {}

  /// Set the current CUDA device to the device associated with the passed
  /// stream, and set the current CUDA stream on that device to the passed
  /// stream. Errors if the Stream is not a CUDA stream.
  explicit OptionalCUDAStreamGuard(Stream stream) : guard_(stream) {}

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream,
  /// if the passed stream is not nullopt.
  explicit OptionalCUDAStreamGuard(optional<Stream> stream_opt)
      : guard_(stream_opt) {}

  /// Copy is disallowed
  OptionalCUDAStreamGuard(const OptionalCUDAStreamGuard&) = delete;
  OptionalCUDAStreamGuard& operator=(const OptionalCUDAStreamGuard&) = delete;

  // See Note [Move construction for RAII guards is tricky]
  OptionalCUDAStreamGuard(OptionalCUDAStreamGuard&& other) = delete;

  // See Note [Move assignment for RAII guards is tricky]
  OptionalCUDAStreamGuard& operator=(OptionalCUDAStreamGuard&& other) = delete;

  /// Resets the currently set CUDA stream to the original stream and
  /// the currently set device to the original device. Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  /// Initializes the guard if it was not previously initialized.
  void reset_stream(Stream stream) {
    guard_.reset_stream(stream);
  }

  /// Returns the CUDA stream that was set at the time the guard was most
  /// recently initialized, or nullopt if the guard is uninitialized.
  // UNCHECKED wrap: the inline guard only ever stores CUDA streams, so the
  // device-type check can be skipped.
  optional<CUDAStream> original_stream() const {
    auto r = guard_.original_stream();
    if (r.has_value()) {
      return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
    } else {
      return nullopt;
    }
  }

  /// Returns the most recent CUDA stream that was set using this stream guard,
  /// either from construction, or via reset_stream, if the guard is
  /// initialized, or nullopt if the guard is uninitialized.
  optional<CUDAStream> current_stream() const {
    auto r = guard_.current_stream();
    if (r.has_value()) {
      return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
    } else {
      return nullopt;
    }
  }

  /// Restore the original CUDA device and stream, resetting this guard to
  /// uninitialized state.
  void reset() {
    guard_.reset();
  }

 private:
  /// Underlying generic optional stream guard, instantiated with the CUDA impl.
  c10::impl::InlineOptionalStreamGuard<impl::CUDAGuardImpl> guard_;
};
|
| 275 |
+
|
| 276 |
+
/// A variant of MultiStreamGuard that is specialized for CUDA.
|
| 277 |
+
struct CUDAMultiStreamGuard {
  /// Sets the given streams as current on their respective devices
  /// (delegated to InlineMultiStreamGuard).
  explicit CUDAMultiStreamGuard(ArrayRef<CUDAStream> streams)
      : guard_(unwrapStreams(streams)) {}

  /// Copy is disallowed
  CUDAMultiStreamGuard(const CUDAMultiStreamGuard&) = delete;
  CUDAMultiStreamGuard& operator=(const CUDAMultiStreamGuard&) = delete;

  // See Note [Move construction for RAII guards is tricky]
  CUDAMultiStreamGuard(CUDAMultiStreamGuard&& other) = delete;

  // See Note [Move assignment for RAII guards is tricky]
  CUDAMultiStreamGuard& operator=(CUDAMultiStreamGuard&& other) = delete;

 private:
  c10::impl::InlineMultiStreamGuard<impl::CUDAGuardImpl> guard_;

  // Widens CUDAStreams into generic c10::Streams for the inline guard
  // (CUDAStream converts implicitly to Stream).
  static std::vector<Stream> unwrapStreams(ArrayRef<CUDAStream> cudaStreams) {
    std::vector<Stream> streams;
    streams.reserve(cudaStreams.size());
    for (const CUDAStream& cudaStream : cudaStreams) {
      streams.push_back(cudaStream);
    }
    return streams;
  }
};
|
| 303 |
+
|
| 304 |
+
} // namespace cuda
|
| 305 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMacros.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#ifndef C10_USING_CUSTOM_GENERATED_MACROS

// We have not yet modified the AMD HIP build to generate this file so
// we add an extra option to specifically ignore it.
#ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE
#include <c10/cuda/impl/cuda_cmake_macros.h>
#endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE

#endif

// See c10/macros/Export.h for a detailed explanation of what the function
// of these macros are. We need one set of macros for every separate library
// we build.

// Windows: dllexport/dllimport only when building shared libs; otherwise the
// macros expand to nothing (static linking).
#ifdef _WIN32
#if defined(C10_CUDA_BUILD_SHARED_LIBS)
#define C10_CUDA_EXPORT __declspec(dllexport)
#define C10_CUDA_IMPORT __declspec(dllimport)
#else
#define C10_CUDA_EXPORT
#define C10_CUDA_IMPORT
#endif
#else // _WIN32
// ELF/Mach-O: default visibility on GCC-compatible compilers; import and
// export are the same annotation.
#if defined(__GNUC__)
#define C10_CUDA_EXPORT __attribute__((__visibility__("default")))
#else // defined(__GNUC__)
#define C10_CUDA_EXPORT
#endif // defined(__GNUC__)
#define C10_CUDA_IMPORT C10_CUDA_EXPORT
#endif // _WIN32

// This one is being used by libc10_cuda.so
#ifdef C10_CUDA_BUILD_MAIN_LIB
#define C10_CUDA_API C10_CUDA_EXPORT
#else
#define C10_CUDA_API C10_CUDA_IMPORT
#endif

/**
 * The maximum number of GPUs that we recognize.
 */
#define C10_COMPILE_TIME_MAX_GPUS 16
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMathCompat.h
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

/* This file defines math functions compatible across different gpu
 * platforms (currently CUDA and HIP).
 */
#if defined(__CUDACC__) || defined(__HIPCC__)

#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>

// Pick the declaration specifier per toolchain:
// HIP compiler: device-only inline; NVRTC: host+device; NVCC: static inline
// host+device.
#ifdef __HIPCC__
#define __MATH_FUNCTIONS_DECL__ inline C10_DEVICE
#else /* __HIPCC__ */
#ifdef __CUDACC_RTC__
#define __MATH_FUNCTIONS_DECL__ C10_HOST_DEVICE
#else /* __CUDACC_RTC__ */
#define __MATH_FUNCTIONS_DECL__ static inline C10_HOST_DEVICE
#endif /* __CUDACC_RTC__ */
#endif /* __HIPCC__ */

namespace c10 {
namespace cuda {
namespace compat {

// Each pair of overloads below forwards to the float ("f"-suffixed) or
// double variant of the CUDA/HIP math library, so generic code can call
// compat::foo(x) without spelling out the scalar type.
__MATH_FUNCTIONS_DECL__ float abs(float x) {
  return ::fabsf(x);
}
__MATH_FUNCTIONS_DECL__ double abs(double x) {
  return ::fabs(x);
}

__MATH_FUNCTIONS_DECL__ float exp(float x) {
  return ::expf(x);
}
__MATH_FUNCTIONS_DECL__ double exp(double x) {
  return ::exp(x);
}

__MATH_FUNCTIONS_DECL__ float ceil(float x) {
  return ::ceilf(x);
}
__MATH_FUNCTIONS_DECL__ double ceil(double x) {
  return ::ceil(x);
}

__MATH_FUNCTIONS_DECL__ float copysign(float x, float y) {
#if defined(__CUDA_ARCH__) || defined(__HIPCC__)
  return ::copysignf(x, y);
#else
  // std::copysign gets ICE/Segfaults with gcc 7.5/8 on arm64
  // (e.g. Jetson), see PyTorch PR #51834
  // This host function needs to be here for the compiler but is never used
  TORCH_INTERNAL_ASSERT(
      false, "CUDAMathCompat copysign should not run on the CPU");
#endif
}
__MATH_FUNCTIONS_DECL__ double copysign(double x, double y) {
#if defined(__CUDA_ARCH__) || defined(__HIPCC__)
  return ::copysign(x, y);
#else
  // see above
  TORCH_INTERNAL_ASSERT(
      false, "CUDAMathCompat copysign should not run on the CPU");
#endif
}

__MATH_FUNCTIONS_DECL__ float floor(float x) {
  return ::floorf(x);
}
__MATH_FUNCTIONS_DECL__ double floor(double x) {
  return ::floor(x);
}

__MATH_FUNCTIONS_DECL__ float log(float x) {
  return ::logf(x);
}
__MATH_FUNCTIONS_DECL__ double log(double x) {
  return ::log(x);
}

__MATH_FUNCTIONS_DECL__ float log1p(float x) {
  return ::log1pf(x);
}

__MATH_FUNCTIONS_DECL__ double log1p(double x) {
  return ::log1p(x);
}

__MATH_FUNCTIONS_DECL__ float max(float x, float y) {
  return ::fmaxf(x, y);
}
__MATH_FUNCTIONS_DECL__ double max(double x, double y) {
  return ::fmax(x, y);
}

__MATH_FUNCTIONS_DECL__ float min(float x, float y) {
  return ::fminf(x, y);
}
__MATH_FUNCTIONS_DECL__ double min(double x, double y) {
  return ::fmin(x, y);
}

__MATH_FUNCTIONS_DECL__ float pow(float x, float y) {
  return ::powf(x, y);
}
__MATH_FUNCTIONS_DECL__ double pow(double x, double y) {
  return ::pow(x, y);
}

// Computes sin(x) and cos(x) simultaneously into *sptr and *cptr.
__MATH_FUNCTIONS_DECL__ void sincos(float x, float* sptr, float* cptr) {
  return ::sincosf(x, sptr, cptr);
}
__MATH_FUNCTIONS_DECL__ void sincos(double x, double* sptr, double* cptr) {
  return ::sincos(x, sptr, cptr);
}

__MATH_FUNCTIONS_DECL__ float sqrt(float x) {
  return ::sqrtf(x);
}
__MATH_FUNCTIONS_DECL__ double sqrt(double x) {
  return ::sqrt(x);
}

// Reciprocal square root (1/sqrt(x)); these intrinsics exist only in the
// CUDA/HIP math library, not in the C++ standard library.
__MATH_FUNCTIONS_DECL__ float rsqrt(float x) {
  return ::rsqrtf(x);
}
__MATH_FUNCTIONS_DECL__ double rsqrt(double x) {
  return ::rsqrt(x);
}

__MATH_FUNCTIONS_DECL__ float tan(float x) {
  return ::tanf(x);
}
__MATH_FUNCTIONS_DECL__ double tan(double x) {
  return ::tan(x);
}

__MATH_FUNCTIONS_DECL__ float tanh(float x) {
  return ::tanhf(x);
}
__MATH_FUNCTIONS_DECL__ double tanh(double x) {
  return ::tanh(x);
}

// Standard normal CDF; CUDA/HIP math library intrinsic.
__MATH_FUNCTIONS_DECL__ float normcdf(float x) {
  return ::normcdff(x);
}
__MATH_FUNCTIONS_DECL__ double normcdf(double x) {
  return ::normcdf(x);
}

} // namespace compat
} // namespace cuda
} // namespace c10

#endif
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMiscFunctions.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// this file is to avoid circular dependency between CUDAFunctions.h and
|
| 3 |
+
// CUDAExceptions.h
|
| 4 |
+
|
| 5 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 6 |
+
|
| 7 |
+
#include <mutex>
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
namespace cuda {
|
| 11 |
+
C10_CUDA_API const char* get_cuda_check_suffix() noexcept;
|
| 12 |
+
C10_CUDA_API std::mutex* getFreeMutex();
|
| 13 |
+
} // namespace cuda
|
| 14 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAStream.h
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <utility>
|
| 5 |
+
|
| 6 |
+
#include <cuda_runtime_api.h>
|
| 7 |
+
|
| 8 |
+
#include <c10/core/DeviceGuard.h>
|
| 9 |
+
#include <c10/core/Stream.h>
|
| 10 |
+
#include <c10/cuda/CUDAFunctions.h>
|
| 11 |
+
#include <c10/util/Exception.h>
|
| 12 |
+
|
| 13 |
+
/*
|
| 14 |
+
* Stream pool note.
|
| 15 |
+
*
|
| 16 |
+
* A CUDAStream is an abstraction of an actual cuStream on the GPU. CUDAStreams
|
| 17 |
+
* are backed by cuStreams, but they use several pools to minimize the costs
|
| 18 |
+
* associated with creating, retaining, and destroying cuStreams.
|
| 19 |
+
*
|
| 20 |
+
* There are three pools per device, and a device's pools are lazily created.
|
| 21 |
+
*
|
| 22 |
+
* The first pool contains only the default stream. When the default stream
|
| 23 |
+
* is requested it's returned.
|
| 24 |
+
*
|
| 25 |
+
* The second pool is the "low priority" or "default priority" streams. In
|
| 26 |
+
* HIP builds there is no distinction between streams in this pool and streams
|
| 27 |
+
* in the third pool (below). There are 32 of these streams per device, and
|
| 28 |
+
* when a stream is requested one of these streams is returned round-robin.
|
| 29 |
+
* That is, the first stream requested is at index 0, the second at index 1...
|
| 30 |
+
* to index 31, then index 0 again.
|
| 31 |
+
*
|
| 32 |
+
* This means that if 33 low priority streams are requested, the first and
|
| 33 |
+
* last streams requested are actually the same stream (under the covers)
|
| 34 |
+
* and kernels enqueued on them cannot run concurrently.
|
| 35 |
+
*
|
| 36 |
+
* The third pool is the "high priority" streams. The third pool acts like
|
| 37 |
+
* the second pool except the streams are created with a higher priority.
|
| 38 |
+
*
|
| 39 |
+
* These pools suggest that stream users should prefer many short-lived streams,
|
| 40 |
+
* as the cost of acquiring and releasing streams is effectively zero. If
|
| 41 |
+
* many longer-lived streams are required in performance critical scenarios
|
| 42 |
+
* then the functionality here may need to be extended to allow, for example,
|
| 43 |
+
* "reserving" a subset of the pool so that other streams do not accidentally
|
| 44 |
+
* overlap the performance critical streams.
|
| 45 |
+
*
|
| 46 |
+
* Note: although the notion of "current stream for device" is thread local
|
| 47 |
+
* (every OS thread has a separate current stream, as one might expect),
|
| 48 |
+
* the stream pool is global across all threads; stream 0 is always stream 0
|
| 49 |
+
* no matter which thread you use it on. Multiple threads can synchronize
|
| 50 |
+
* on the same stream. Although the CUDA documentation is not very clear
|
| 51 |
+
* on the matter, streams are thread safe; e.g., it is safe to enqueue
|
| 52 |
+
* a kernel on the same stream from two different threads.
|
| 53 |
+
*/
|
| 54 |
+
|
| 55 |
+
namespace c10 {
|
| 56 |
+
namespace cuda {
|
| 57 |
+
|
| 58 |
+
static constexpr int max_compile_time_stream_priorities = 4;
|
| 59 |
+
|
| 60 |
+
// Value object representing a CUDA stream. This is just a wrapper
|
| 61 |
+
// around c10::Stream, but it comes with a little extra CUDA-specific
|
| 62 |
+
// functionality (conversion to cudaStream_t), and a guarantee that
|
| 63 |
+
// the wrapped c10::Stream really is a CUDA stream.
|
| 64 |
+
class C10_CUDA_API CUDAStream {
|
| 65 |
+
public:
|
| 66 |
+
enum Unchecked { UNCHECKED };
|
| 67 |
+
|
| 68 |
+
/// Construct a CUDAStream from a Stream. This construction is checked,
|
| 69 |
+
/// and will raise an error if the Stream is not, in fact, a CUDA stream.
|
| 70 |
+
explicit CUDAStream(Stream stream) : stream_(stream) {
|
| 71 |
+
TORCH_CHECK(stream_.device_type() == DeviceType::CUDA);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
/// Construct a CUDAStream from a Stream with no error checking.
|
| 75 |
+
/// This constructor uses the "named" constructor idiom, and can
|
| 76 |
+
/// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
|
| 77 |
+
explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {}
|
| 78 |
+
|
| 79 |
+
bool operator==(const CUDAStream& other) const noexcept {
|
| 80 |
+
return unwrap() == other.unwrap();
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
bool operator!=(const CUDAStream& other) const noexcept {
|
| 84 |
+
return unwrap() != other.unwrap();
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/// Implicit conversion to cudaStream_t.
|
| 88 |
+
operator cudaStream_t() const {
|
| 89 |
+
return stream();
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
/// Implicit conversion to Stream (a.k.a., forget that the stream is a
|
| 93 |
+
/// CUDA stream).
|
| 94 |
+
operator Stream() const {
|
| 95 |
+
return unwrap();
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
/// Used to avoid baking in device type explicitly to Python-side API.
|
| 99 |
+
DeviceType device_type() const {
|
| 100 |
+
return DeviceType::CUDA;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
/// Get the CUDA device index that this stream is associated with.
|
| 104 |
+
DeviceIndex device_index() const {
|
| 105 |
+
return stream_.device_index();
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
/// Get the full Device that this stream is associated with. The Device
|
| 109 |
+
/// is guaranteed to be a CUDA device.
|
| 110 |
+
Device device() const {
|
| 111 |
+
return Device(DeviceType::CUDA, device_index());
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
/// Return the stream ID corresponding to this particular stream.
|
| 115 |
+
StreamId id() const {
|
| 116 |
+
return stream_.id();
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
bool query() const {
|
| 120 |
+
DeviceGuard guard{stream_.device()};
|
| 121 |
+
cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaStreamQuery(stream()));
|
| 122 |
+
|
| 123 |
+
if (err == cudaSuccess) {
|
| 124 |
+
return true;
|
| 125 |
+
} else if (err != cudaErrorNotReady) {
|
| 126 |
+
C10_CUDA_CHECK(err);
|
| 127 |
+
} else {
|
| 128 |
+
// ignore and clear the error if not ready
|
| 129 |
+
(void)cudaGetLastError();
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
return false;
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
void synchronize() const {
|
| 136 |
+
DeviceGuard guard{stream_.device()};
|
| 137 |
+
c10::cuda::stream_synchronize(stream());
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
int priority() const {
|
| 141 |
+
DeviceGuard guard{stream_.device()};
|
| 142 |
+
int priority = 0;
|
| 143 |
+
C10_CUDA_CHECK(cudaStreamGetPriority(stream(), &priority));
|
| 144 |
+
return priority;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
/// Explicit conversion to cudaStream_t.
|
| 148 |
+
cudaStream_t stream() const;
|
| 149 |
+
|
| 150 |
+
/// Explicit conversion to Stream.
|
| 151 |
+
Stream unwrap() const {
|
| 152 |
+
return stream_;
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
/// Reversibly pack a CUDAStream into a struct representation.
|
| 156 |
+
/// Previously the stream's data was packed into a single int64_t,
|
| 157 |
+
/// as it was assumed the fields would not require more than
|
| 158 |
+
/// 64 bits of storage in total.
|
| 159 |
+
/// See https://github.com/pytorch/pytorch/issues/75854
|
| 160 |
+
/// for more information regarding newer platforms that may violate
|
| 161 |
+
/// this assumption.
|
| 162 |
+
///
|
| 163 |
+
/// The CUDAStream can be unpacked using unpack().
|
| 164 |
+
struct c10::StreamData3 pack3() const {
|
| 165 |
+
return stream_.pack3();
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
// Unpack a CUDAStream from the 3 fields generated by pack().
|
| 169 |
+
static CUDAStream unpack3(
|
| 170 |
+
StreamId stream_id,
|
| 171 |
+
DeviceIndex device_index,
|
| 172 |
+
DeviceType device_type) {
|
| 173 |
+
return CUDAStream(Stream::unpack3(stream_id, device_index, device_type));
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
static std::tuple<int, int> priority_range() {
|
| 177 |
+
// Note: this returns the range of priority **supported by PyTorch**, not
|
| 178 |
+
// the range of priority **supported by CUDA**. The former is a subset of
|
| 179 |
+
// the latter.
|
| 180 |
+
int least_priority = 0, greatest_priority = 0;
|
| 181 |
+
C10_CUDA_CHECK(
|
| 182 |
+
cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
|
| 183 |
+
#ifdef USE_ROCM
|
| 184 |
+
// See Note [HIP stream priorities]
|
| 185 |
+
TORCH_INTERNAL_ASSERT(
|
| 186 |
+
least_priority == 1, "Unexpected HIP stream priority range");
|
| 187 |
+
least_priority = 0;
|
| 188 |
+
#else
|
| 189 |
+
TORCH_INTERNAL_ASSERT(
|
| 190 |
+
least_priority == 0, "Unexpected CUDA stream priority range");
|
| 191 |
+
#endif
|
| 192 |
+
TORCH_INTERNAL_ASSERT(
|
| 193 |
+
greatest_priority <= -1, "Unexpected CUDA stream priority range");
|
| 194 |
+
greatest_priority = std::max(
|
| 195 |
+
-c10::cuda::max_compile_time_stream_priorities + 1, greatest_priority);
|
| 196 |
+
return std::make_tuple(least_priority, greatest_priority);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
// Deleted for now; use CUDAEvent::block instead
|
| 200 |
+
// void synchronize_with(const CUDAEvent& event) const;
|
| 201 |
+
|
| 202 |
+
private:
|
| 203 |
+
Stream stream_;
|
| 204 |
+
};
|
| 205 |
+
|
| 206 |
+
/**
|
| 207 |
+
* Get a new stream from the CUDA stream pool. You can think of this
|
| 208 |
+
* as "creating" a new stream, but no such creation actually happens;
|
| 209 |
+
* instead, streams are preallocated from the pool and returned in a
|
| 210 |
+
* round-robin fashion.
|
| 211 |
+
*
|
| 212 |
+
* You can request a stream from the high priority pool by setting
|
| 213 |
+
* isHighPriority to true, or a stream for a specific device by setting device
|
| 214 |
+
* (defaulting to the current CUDA stream.)
|
| 215 |
+
*/
|
| 216 |
+
C10_API CUDAStream
|
| 217 |
+
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
|
| 218 |
+
// no default priority to disambiguate overloads
|
| 219 |
+
C10_API CUDAStream
|
| 220 |
+
getStreamFromPool(const int priority, DeviceIndex device = -1);
|
| 221 |
+
|
| 222 |
+
/**
|
| 223 |
+
* Get a CUDAStream from a externally allocated one.
|
| 224 |
+
*
|
| 225 |
+
* This is mainly for interoperability with different libraries where we
|
| 226 |
+
* want to operate on a non-torch allocated stream for data exchange or similar
|
| 227 |
+
* purposes
|
| 228 |
+
*/
|
| 229 |
+
C10_API CUDAStream
|
| 230 |
+
getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
|
| 231 |
+
|
| 232 |
+
/**
|
| 233 |
+
* Get the default CUDA stream, for the passed CUDA device, or for the
|
| 234 |
+
* current device if no device index is passed. The default stream is
|
| 235 |
+
* where most computation occurs when you aren't explicitly using
|
| 236 |
+
* streams.
|
| 237 |
+
*/
|
| 238 |
+
C10_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
|
| 239 |
+
|
| 240 |
+
/**
|
| 241 |
+
* Get the current CUDA stream, for the passed CUDA device, or for the
|
| 242 |
+
* current device if no device index is passed. The current CUDA stream
|
| 243 |
+
* will usually be the default CUDA stream for the device, but it may
|
| 244 |
+
* be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
|
| 245 |
+
* or 'CUDAStreamGuard'.
|
| 246 |
+
*/
|
| 247 |
+
C10_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
|
| 248 |
+
|
| 249 |
+
/**
|
| 250 |
+
* Set the current stream on the device of the passed in stream to be
|
| 251 |
+
* the passed in stream. Yes, you read that right: this function
|
| 252 |
+
* has *nothing* to do with the current device: it toggles the current
|
| 253 |
+
* stream of the device of the passed stream.
|
| 254 |
+
*
|
| 255 |
+
* Confused? Avoid using this function; prefer using 'CUDAStreamGuard' instead
|
| 256 |
+
* (which will switch both your current device and current stream in the way you
|
| 257 |
+
* expect, and reset it back to its original state afterwards).
|
| 258 |
+
*/
|
| 259 |
+
C10_API void setCurrentCUDAStream(CUDAStream stream);
|
| 260 |
+
|
| 261 |
+
C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);
|
| 262 |
+
|
| 263 |
+
} // namespace cuda
|
| 264 |
+
} // namespace c10
|
| 265 |
+
|
| 266 |
+
namespace std {
|
| 267 |
+
template <>
|
| 268 |
+
struct hash<c10::cuda::CUDAStream> {
|
| 269 |
+
size_t operator()(c10::cuda::CUDAStream s) const noexcept {
|
| 270 |
+
return std::hash<c10::Stream>{}(s.unwrap());
|
| 271 |
+
}
|
| 272 |
+
};
|
| 273 |
+
} // namespace std
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/driver_api.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <cuda.h>
|
| 3 |
+
#define NVML_NO_UNVERSIONED_FUNC_DEFS
|
| 4 |
+
#include <nvml.h>
|
| 5 |
+
|
| 6 |
+
#define C10_CUDA_DRIVER_CHECK(EXPR) \
|
| 7 |
+
do { \
|
| 8 |
+
CUresult __err = EXPR; \
|
| 9 |
+
if (__err != CUDA_SUCCESS) { \
|
| 10 |
+
const char* err_str; \
|
| 11 |
+
CUresult get_error_str_err C10_UNUSED = \
|
| 12 |
+
c10::cuda::DriverAPI::get()->cuGetErrorString_(__err, &err_str); \
|
| 13 |
+
if (get_error_str_err != CUDA_SUCCESS) { \
|
| 14 |
+
AT_ERROR("CUDA driver error: unknown error"); \
|
| 15 |
+
} else { \
|
| 16 |
+
AT_ERROR("CUDA driver error: ", err_str); \
|
| 17 |
+
} \
|
| 18 |
+
} \
|
| 19 |
+
} while (0)
|
| 20 |
+
|
| 21 |
+
#define C10_LIBCUDA_DRIVER_API(_) \
|
| 22 |
+
_(cuMemAddressReserve) \
|
| 23 |
+
_(cuMemRelease) \
|
| 24 |
+
_(cuMemMap) \
|
| 25 |
+
_(cuMemAddressFree) \
|
| 26 |
+
_(cuMemSetAccess) \
|
| 27 |
+
_(cuMemUnmap) \
|
| 28 |
+
_(cuMemCreate) \
|
| 29 |
+
_(cuGetErrorString)
|
| 30 |
+
|
| 31 |
+
#define C10_NVML_DRIVER_API(_) \
|
| 32 |
+
_(nvmlInit_v2) \
|
| 33 |
+
_(nvmlDeviceGetHandleByPciBusId_v2) \
|
| 34 |
+
_(nvmlDeviceGetComputeRunningProcesses)
|
| 35 |
+
|
| 36 |
+
namespace c10 {
|
| 37 |
+
namespace cuda {
|
| 38 |
+
|
| 39 |
+
struct DriverAPI {
|
| 40 |
+
#define CREATE_MEMBER(name) decltype(&name) name##_;
|
| 41 |
+
C10_LIBCUDA_DRIVER_API(CREATE_MEMBER)
|
| 42 |
+
C10_NVML_DRIVER_API(CREATE_MEMBER)
|
| 43 |
+
#undef CREATE_MEMBER
|
| 44 |
+
static DriverAPI* get();
|
| 45 |
+
static void* get_nvml_handle();
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
} // namespace cuda
|
| 49 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/DeviceGuard.h>
|
| 4 |
+
#include <c10/core/impl/DeviceGuardImplInterface.h>
|
| 5 |
+
#include <c10/core/impl/GPUTrace.h>
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/Exception.h>
|
| 8 |
+
|
| 9 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 10 |
+
#include <c10/cuda/CUDAException.h>
|
| 11 |
+
#include <c10/cuda/CUDAFunctions.h>
|
| 12 |
+
#include <c10/cuda/CUDAStream.h>
|
| 13 |
+
|
| 14 |
+
#include <cuda_runtime_api.h>
|
| 15 |
+
|
| 16 |
+
namespace c10 {
|
| 17 |
+
namespace cuda {
|
| 18 |
+
namespace impl {
|
| 19 |
+
|
| 20 |
+
struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
|
| 21 |
+
static constexpr DeviceType static_type = DeviceType::CUDA;
|
| 22 |
+
|
| 23 |
+
CUDAGuardImpl() = default;
|
| 24 |
+
explicit CUDAGuardImpl(DeviceType t) {
|
| 25 |
+
TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA);
|
| 26 |
+
}
|
| 27 |
+
DeviceType type() const override {
|
| 28 |
+
return DeviceType::CUDA;
|
| 29 |
+
}
|
| 30 |
+
Device exchangeDevice(Device d) const override {
|
| 31 |
+
TORCH_INTERNAL_ASSERT(d.is_cuda());
|
| 32 |
+
int old_device_index = c10::cuda::ExchangeDevice(d.index());
|
| 33 |
+
return Device(DeviceType::CUDA, old_device_index);
|
| 34 |
+
}
|
| 35 |
+
Device getDevice() const override {
|
| 36 |
+
int device;
|
| 37 |
+
C10_CUDA_CHECK(c10::cuda::GetDevice(&device));
|
| 38 |
+
return Device(DeviceType::CUDA, device);
|
| 39 |
+
}
|
| 40 |
+
c10::optional<Device> uncheckedGetDevice() const noexcept {
|
| 41 |
+
int device;
|
| 42 |
+
const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device));
|
| 43 |
+
C10_CUDA_CHECK_WARN(err);
|
| 44 |
+
if (err != cudaSuccess) {
|
| 45 |
+
return c10::nullopt;
|
| 46 |
+
}
|
| 47 |
+
return Device(DeviceType::CUDA, device);
|
| 48 |
+
}
|
| 49 |
+
void setDevice(Device d) const override {
|
| 50 |
+
TORCH_INTERNAL_ASSERT(d.is_cuda());
|
| 51 |
+
C10_CUDA_CHECK(c10::cuda::SetDevice(d.index()));
|
| 52 |
+
}
|
| 53 |
+
void uncheckedSetDevice(Device d) const noexcept override {
|
| 54 |
+
C10_CUDA_CHECK_WARN(c10::cuda::MaybeSetDevice(d.index()));
|
| 55 |
+
}
|
| 56 |
+
Stream getStream(Device d) const noexcept override {
|
| 57 |
+
return getCurrentCUDAStream(d.index()).unwrap();
|
| 58 |
+
}
|
| 59 |
+
Stream getDefaultStream(Device d) const override {
|
| 60 |
+
return getDefaultCUDAStream(d.index());
|
| 61 |
+
}
|
| 62 |
+
Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
|
| 63 |
+
const override {
|
| 64 |
+
return getStreamFromPool(isHighPriority, d.index());
|
| 65 |
+
}
|
| 66 |
+
// NB: These do NOT set the current device
|
| 67 |
+
Stream exchangeStream(Stream s) const noexcept override {
|
| 68 |
+
CUDAStream cs(s);
|
| 69 |
+
auto old_stream = getCurrentCUDAStream(s.device().index());
|
| 70 |
+
setCurrentCUDAStream(cs);
|
| 71 |
+
return old_stream.unwrap();
|
| 72 |
+
}
|
| 73 |
+
DeviceIndex deviceCount() const noexcept override {
|
| 74 |
+
return device_count();
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
// Event-related functions
|
| 78 |
+
void createEvent(cudaEvent_t* cuda_event, const EventFlag flag) const {
|
| 79 |
+
// Maps PyTorch's Event::Flag to CUDA flag
|
| 80 |
+
auto cuda_flag = cudaEventDefault;
|
| 81 |
+
switch (flag) {
|
| 82 |
+
case EventFlag::PYTORCH_DEFAULT:
|
| 83 |
+
case EventFlag::CUDA_EVENT_DISABLE_TIMING:
|
| 84 |
+
cuda_flag = cudaEventDisableTiming;
|
| 85 |
+
break;
|
| 86 |
+
case EventFlag::BACKEND_DEFAULT:
|
| 87 |
+
case EventFlag::CUDA_EVENT_DEFAULT:
|
| 88 |
+
cuda_flag = cudaEventDefault;
|
| 89 |
+
break;
|
| 90 |
+
default:
|
| 91 |
+
TORCH_CHECK(false, "CUDA event received unknown flag");
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
C10_CUDA_CHECK(cudaEventCreateWithFlags(cuda_event, cuda_flag));
|
| 95 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 96 |
+
if (C10_UNLIKELY(interp)) {
|
| 97 |
+
(*interp)->trace_gpu_event_creation(
|
| 98 |
+
reinterpret_cast<uintptr_t>(cuda_event));
|
| 99 |
+
}
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
void destroyEvent(void* event, const DeviceIndex device_index)
|
| 103 |
+
const noexcept override {
|
| 104 |
+
if (!event)
|
| 105 |
+
return;
|
| 106 |
+
auto cuda_event = static_cast<cudaEvent_t>(event);
|
| 107 |
+
int orig_device;
|
| 108 |
+
C10_CUDA_CHECK_WARN(c10::cuda::GetDevice(&orig_device));
|
| 109 |
+
C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(device_index));
|
| 110 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 111 |
+
if (C10_UNLIKELY(interp)) {
|
| 112 |
+
(*interp)->trace_gpu_event_deletion(
|
| 113 |
+
reinterpret_cast<uintptr_t>(cuda_event));
|
| 114 |
+
}
|
| 115 |
+
C10_CUDA_CHECK_WARN(cudaEventDestroy(cuda_event));
|
| 116 |
+
C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(orig_device));
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
void record(
|
| 120 |
+
void** event,
|
| 121 |
+
const Stream& stream,
|
| 122 |
+
const DeviceIndex device_index,
|
| 123 |
+
const EventFlag flag) const override {
|
| 124 |
+
TORCH_CHECK(
|
| 125 |
+
device_index == -1 || device_index == stream.device_index(),
|
| 126 |
+
"Event device index ",
|
| 127 |
+
device_index,
|
| 128 |
+
" does not match recording stream's device index ",
|
| 129 |
+
stream.device_index(),
|
| 130 |
+
".");
|
| 131 |
+
|
| 132 |
+
cudaEvent_t cuda_event = static_cast<cudaEvent_t>(*event);
|
| 133 |
+
CUDAStream cuda_stream{stream};
|
| 134 |
+
|
| 135 |
+
// Moves to stream's device to record
|
| 136 |
+
const auto orig_device = getDevice();
|
| 137 |
+
setDevice(stream.device());
|
| 138 |
+
|
| 139 |
+
// Creates the event (lazily)
|
| 140 |
+
if (!cuda_event)
|
| 141 |
+
createEvent(&cuda_event, flag);
|
| 142 |
+
C10_CUDA_CHECK(cudaEventRecord(cuda_event, cuda_stream));
|
| 143 |
+
// Makes the void* point to the (possibly just allocated) CUDA event
|
| 144 |
+
*event = cuda_event;
|
| 145 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 146 |
+
if (C10_UNLIKELY(interp)) {
|
| 147 |
+
(*interp)->trace_gpu_event_record(
|
| 148 |
+
reinterpret_cast<uintptr_t>(cuda_event),
|
| 149 |
+
reinterpret_cast<uintptr_t>(cuda_stream.stream()));
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
// Resets device
|
| 153 |
+
setDevice(orig_device);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
void block(void* event, const Stream& stream) const override {
|
| 157 |
+
if (!event)
|
| 158 |
+
return;
|
| 159 |
+
cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
|
| 160 |
+
CUDAStream cuda_stream{stream};
|
| 161 |
+
const auto orig_device = getDevice();
|
| 162 |
+
setDevice(stream.device());
|
| 163 |
+
C10_CUDA_CHECK(cudaStreamWaitEvent(
|
| 164 |
+
cuda_stream,
|
| 165 |
+
cuda_event,
|
| 166 |
+
/*flags (must be zero)=*/0));
|
| 167 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 168 |
+
if (C10_UNLIKELY(interp)) {
|
| 169 |
+
(*interp)->trace_gpu_event_wait(
|
| 170 |
+
reinterpret_cast<uintptr_t>(cuda_event),
|
| 171 |
+
reinterpret_cast<uintptr_t>(cuda_stream.stream()));
|
| 172 |
+
}
|
| 173 |
+
setDevice(orig_device);
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
// May be called from any device
|
| 177 |
+
bool queryEvent(void* event) const override {
|
| 178 |
+
if (!event)
|
| 179 |
+
return true;
|
| 180 |
+
cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
|
| 181 |
+
const cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaEventQuery(cuda_event));
|
| 182 |
+
if (err != cudaErrorNotReady) {
|
| 183 |
+
C10_CUDA_CHECK(err);
|
| 184 |
+
} else {
|
| 185 |
+
// ignore and clear the error if not ready
|
| 186 |
+
(void)cudaGetLastError();
|
| 187 |
+
}
|
| 188 |
+
return (err == cudaSuccess);
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
// Stream-related functions
|
| 192 |
+
bool queryStream(const Stream& stream) const override {
|
| 193 |
+
CUDAStream cuda_stream{stream};
|
| 194 |
+
return cuda_stream.query();
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
void synchronizeStream(const Stream& stream) const override {
|
| 198 |
+
CUDAStream cuda_stream{stream};
|
| 199 |
+
cuda_stream.synchronize();
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
|
| 203 |
+
const override {
|
| 204 |
+
CUDAStream cuda_stream{stream};
|
| 205 |
+
CUDACachingAllocator::recordStream(data_ptr, cuda_stream);
|
| 206 |
+
}
|
| 207 |
+
};
|
| 208 |
+
|
| 209 |
+
} // namespace impl
|
| 210 |
+
} // namespace cuda
|
| 211 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/cuda/CUDAMacros.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
namespace cuda {
|
| 7 |
+
namespace impl {
|
| 8 |
+
|
| 9 |
+
C10_CUDA_API int c10_cuda_test();
|
| 10 |
+
|
| 11 |
+
}
|
| 12 |
+
} // namespace cuda
|
| 13 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/cuda_cmake_macros.h
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// Automatically generated header file for the C10 CUDA library. Do not
|
| 4 |
+
// include this file directly. Instead, include c10/cuda/CUDAMacros.h
|
| 5 |
+
|
| 6 |
+
#define C10_CUDA_BUILD_SHARED_LIBS
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
|
| 2 |
+
//
|
| 3 |
+
// The LLVM Compiler Infrastructure
|
| 4 |
+
//
|
| 5 |
+
// This file is distributed under the University of Illinois Open Source
|
| 6 |
+
// License. See LICENSE.TXT for details.
|
| 7 |
+
//
|
| 8 |
+
//===----------------------------------------------------------------------===//
|
| 9 |
+
//
|
| 10 |
+
// This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
|
| 11 |
+
//
|
| 12 |
+
//===----------------------------------------------------------------------===//
|
| 13 |
+
|
| 14 |
+
// ATen: modified from llvm::AlignOf
|
| 15 |
+
// replaced LLVM_ALIGNAS with alignas
|
| 16 |
+
|
| 17 |
+
#pragma once
|
| 18 |
+
|
| 19 |
+
#include <cstddef>
|
| 20 |
+
|
| 21 |
+
namespace c10 {
|
| 22 |
+
|
| 23 |
+
/// \struct AlignedCharArray
|
| 24 |
+
/// \brief Helper for building an aligned character array type.
|
| 25 |
+
///
|
| 26 |
+
/// This template is used to explicitly build up a collection of aligned
|
| 27 |
+
/// character array types. We have to build these up using a macro and explicit
|
| 28 |
+
/// specialization to cope with MSVC (at least till 2015) where only an
|
| 29 |
+
/// integer literal can be used to specify an alignment constraint. Once built
|
| 30 |
+
/// up here, we can then begin to indirect between these using normal C++
|
| 31 |
+
/// template parameters.
|
| 32 |
+
|
| 33 |
+
// MSVC requires special handling here.
#ifndef _MSC_VER

// On conforming compilers alignas() is all that is needed: a raw character
// buffer of the requested size, aligned to the requested boundary.
template <size_t Alignment, size_t Size>
struct AlignedCharArray {
  alignas(Alignment) char buffer[Size];
};

#else // _MSC_VER

/// \brief Create a type with an aligned char buffer.
template <size_t Alignment, size_t Size>
struct AlignedCharArray;

// We provide special variations of this template for the most common
// alignments because __declspec(align(...)) doesn't actually work when it is
// a member of a by-value function argument in MSVC, even if the alignment
// request is something reasonably like 8-byte or 16-byte. Note that we can't
// even include the declspec with the union that forces the alignment because
// MSVC warns on the existence of the declspec despite the union member forcing
// proper alignment.
//
// Instead, for alignments 1/2/4/8 a union member of a suitably aligned
// fundamental type forces the alignment of the char buffer.

template <size_t Size>
struct AlignedCharArray<1, Size> {
  union {
    char aligned;
    char buffer[Size];
  };
};

template <size_t Size>
struct AlignedCharArray<2, Size> {
  union {
    short aligned;
    char buffer[Size];
  };
};

template <size_t Size>
struct AlignedCharArray<4, Size> {
  union {
    int aligned;
    char buffer[Size];
  };
};

template <size_t Size>
struct AlignedCharArray<8, Size> {
  union {
    double aligned;
    char buffer[Size];
  };
};

// No fundamental type has a larger alignment, so the remaining
// specializations fall back to __declspec(align(...)); objects of these
// types cannot be passed by value as function arguments on MSVC.

#define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
  template <size_t Size>                          \
  struct AlignedCharArray<x, Size> {              \
    __declspec(align(x)) char buffer[Size];       \
  };

AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)

#undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT

#endif // _MSC_VER
|
| 104 |
+
|
| 105 |
+
namespace detail {
// Helper whose alignment is the strictest alignment among T1..T10.
// Only ever used inside alignof(); never instantiated (default
// construction is deleted to make that explicit).
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
class AlignerImpl {
  T1 t1;
  T2 t2;
  T3 t3;
  T4 t4;
  T5 t5;
  T6 t6;
  T7 t7;
  T8 t8;
  T9 t9;
  T10 t10;

 public:
  AlignerImpl() = delete;
};

// Helper whose size is the largest size among T1..T10: a union of one
// char array per type. Only ever used inside sizeof().
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
union SizerImpl {
  char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
      arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
      arr9[sizeof(T9)], arr10[sizeof(T10)];
};
} // end namespace detail
|
| 150 |
+
|
| 151 |
+
/// \brief This union template exposes a suitably aligned and sized character
|
| 152 |
+
/// array member which can hold elements of any of up to ten types.
|
| 153 |
+
///
|
| 154 |
+
/// These types may be arrays, structs, or any other types. The goal is to
|
| 155 |
+
/// expose a char array buffer member which can be used as suitable storage for
|
| 156 |
+
/// a placement new of any of these types. Support for more than ten types can
|
| 157 |
+
/// be added at the cost of more boilerplate.
|
| 158 |
+
template <
|
| 159 |
+
typename T1,
|
| 160 |
+
typename T2 = char,
|
| 161 |
+
typename T3 = char,
|
| 162 |
+
typename T4 = char,
|
| 163 |
+
typename T5 = char,
|
| 164 |
+
typename T6 = char,
|
| 165 |
+
typename T7 = char,
|
| 166 |
+
typename T8 = char,
|
| 167 |
+
typename T9 = char,
|
| 168 |
+
typename T10 = char>
|
| 169 |
+
struct AlignedCharArrayUnion
|
| 170 |
+
: AlignedCharArray<
|
| 171 |
+
alignof(detail::AlignerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>),
|
| 172 |
+
sizeof(::c10::detail::
|
| 173 |
+
SizerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>)> {};
|
| 174 |
+
} // end namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/ArrayRef.h
ADDED
|
@@ -0,0 +1,371 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
|
| 2 |
+
//
|
| 3 |
+
// The LLVM Compiler Infrastructure
|
| 4 |
+
//
|
| 5 |
+
// This file is distributed under the University of Illinois Open Source
|
| 6 |
+
// License. See LICENSE.TXT for details.
|
| 7 |
+
//
|
| 8 |
+
//===----------------------------------------------------------------------===//
|
| 9 |
+
|
| 10 |
+
// ATen: modified from llvm::ArrayRef.
|
| 11 |
+
// removed llvm-specific functionality
|
| 12 |
+
// removed some implicit const -> non-const conversions that rely on
|
| 13 |
+
// complicated std::enable_if meta-programming
|
| 14 |
+
// removed a bunch of slice variants for simplicity...
|
| 15 |
+
|
| 16 |
+
#pragma once
|
| 17 |
+
|
| 18 |
+
#include <c10/util/Deprecated.h>
|
| 19 |
+
#include <c10/util/Exception.h>
|
| 20 |
+
#include <c10/util/SmallVector.h>
|
| 21 |
+
|
| 22 |
+
#include <array>
|
| 23 |
+
#include <iterator>
|
| 24 |
+
#include <vector>
|
| 25 |
+
|
| 26 |
+
namespace c10 {
|
| 27 |
+
/// ArrayRef - Represent a constant reference to an array (0 or more elements
|
| 28 |
+
/// consecutively in memory), i.e. a start pointer and a length. It allows
|
| 29 |
+
/// various APIs to take consecutive elements easily and conveniently.
|
| 30 |
+
///
|
| 31 |
+
/// This class does not own the underlying data, it is expected to be used in
|
| 32 |
+
/// situations where the data resides in some other buffer, whose lifetime
|
| 33 |
+
/// extends past that of the ArrayRef. For this reason, it is not in general
|
| 34 |
+
/// safe to store an ArrayRef.
|
| 35 |
+
///
|
| 36 |
+
/// This is intended to be trivially copyable, so it should be passed by
|
| 37 |
+
/// value.
|
| 38 |
+
template <typename T>
|
| 39 |
+
class ArrayRef final {
|
| 40 |
+
public:
|
| 41 |
+
using iterator = const T*;
|
| 42 |
+
using const_iterator = const T*;
|
| 43 |
+
using size_type = size_t;
|
| 44 |
+
using value_type = T;
|
| 45 |
+
|
| 46 |
+
using reverse_iterator = std::reverse_iterator<iterator>;
|
| 47 |
+
|
| 48 |
+
private:
|
| 49 |
+
/// The start of the array, in an external buffer.
|
| 50 |
+
const T* Data;
|
| 51 |
+
|
| 52 |
+
/// The number of elements.
|
| 53 |
+
size_type Length;
|
| 54 |
+
|
| 55 |
+
void debugCheckNullptrInvariant() {
|
| 56 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 57 |
+
Data != nullptr || Length == 0,
|
| 58 |
+
"created ArrayRef with nullptr and non-zero length! c10::optional relies on this being illegal");
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
public:
|
| 62 |
+
/// @name Constructors
|
| 63 |
+
/// @{
|
| 64 |
+
|
| 65 |
+
/// Construct an empty ArrayRef.
|
| 66 |
+
/* implicit */ constexpr ArrayRef() : Data(nullptr), Length(0) {}
|
| 67 |
+
|
| 68 |
+
/// Construct an ArrayRef from a single element.
|
| 69 |
+
// TODO Make this explicit
|
| 70 |
+
constexpr ArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {}
|
| 71 |
+
|
| 72 |
+
/// Construct an ArrayRef from a pointer and length.
|
| 73 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* data, size_t length)
|
| 74 |
+
: Data(data), Length(length) {
|
| 75 |
+
debugCheckNullptrInvariant();
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
/// Construct an ArrayRef from a range.
|
| 79 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* begin, const T* end)
|
| 80 |
+
: Data(begin), Length(end - begin) {
|
| 81 |
+
debugCheckNullptrInvariant();
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
/// Construct an ArrayRef from a SmallVector. This is templated in order to
|
| 85 |
+
/// avoid instantiating SmallVectorTemplateCommon<T> whenever we
|
| 86 |
+
/// copy-construct an ArrayRef.
|
| 87 |
+
template <typename U>
|
| 88 |
+
/* implicit */ ArrayRef(const SmallVectorTemplateCommon<T, U>& Vec)
|
| 89 |
+
: Data(Vec.data()), Length(Vec.size()) {
|
| 90 |
+
debugCheckNullptrInvariant();
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template <
|
| 94 |
+
typename Container,
|
| 95 |
+
typename = std::enable_if_t<std::is_same<
|
| 96 |
+
std::remove_const_t<decltype(std::declval<Container>().data())>,
|
| 97 |
+
T*>::value>>
|
| 98 |
+
/* implicit */ ArrayRef(const Container& container)
|
| 99 |
+
: Data(container.data()), Length(container.size()) {
|
| 100 |
+
debugCheckNullptrInvariant();
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
/// Construct an ArrayRef from a std::vector.
|
| 104 |
+
// The enable_if stuff here makes sure that this isn't used for
|
| 105 |
+
// std::vector<bool>, because ArrayRef can't work on a std::vector<bool>
|
| 106 |
+
// bitfield.
|
| 107 |
+
template <typename A>
|
| 108 |
+
/* implicit */ ArrayRef(const std::vector<T, A>& Vec)
|
| 109 |
+
: Data(Vec.data()), Length(Vec.size()) {
|
| 110 |
+
static_assert(
|
| 111 |
+
!std::is_same<T, bool>::value,
|
| 112 |
+
"ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.");
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
/// Construct an ArrayRef from a std::array
|
| 116 |
+
template <size_t N>
|
| 117 |
+
/* implicit */ constexpr ArrayRef(const std::array<T, N>& Arr)
|
| 118 |
+
: Data(Arr.data()), Length(N) {}
|
| 119 |
+
|
| 120 |
+
/// Construct an ArrayRef from a C array.
|
| 121 |
+
template <size_t N>
|
| 122 |
+
/* implicit */ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
|
| 123 |
+
|
| 124 |
+
/// Construct an ArrayRef from a std::initializer_list.
|
| 125 |
+
/* implicit */ constexpr ArrayRef(const std::initializer_list<T>& Vec)
|
| 126 |
+
: Data(
|
| 127 |
+
std::begin(Vec) == std::end(Vec) ? static_cast<T*>(nullptr)
|
| 128 |
+
: std::begin(Vec)),
|
| 129 |
+
Length(Vec.size()) {}
|
| 130 |
+
|
| 131 |
+
/// @}
|
| 132 |
+
/// @name Simple Operations
|
| 133 |
+
/// @{
|
| 134 |
+
|
| 135 |
+
constexpr iterator begin() const {
|
| 136 |
+
return Data;
|
| 137 |
+
}
|
| 138 |
+
constexpr iterator end() const {
|
| 139 |
+
return Data + Length;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
// These are actually the same as iterator, since ArrayRef only
|
| 143 |
+
// gives you const iterators.
|
| 144 |
+
constexpr const_iterator cbegin() const {
|
| 145 |
+
return Data;
|
| 146 |
+
}
|
| 147 |
+
constexpr const_iterator cend() const {
|
| 148 |
+
return Data + Length;
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
constexpr reverse_iterator rbegin() const {
|
| 152 |
+
return reverse_iterator(end());
|
| 153 |
+
}
|
| 154 |
+
constexpr reverse_iterator rend() const {
|
| 155 |
+
return reverse_iterator(begin());
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
/// empty - Check if the array is empty.
|
| 159 |
+
constexpr bool empty() const {
|
| 160 |
+
return Length == 0;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
constexpr const T* data() const {
|
| 164 |
+
return Data;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
/// size - Get the array size.
|
| 168 |
+
constexpr size_t size() const {
|
| 169 |
+
return Length;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
/// front - Get the first element.
|
| 173 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& front() const {
|
| 174 |
+
TORCH_CHECK(
|
| 175 |
+
!empty(), "ArrayRef: attempted to access front() of empty list");
|
| 176 |
+
return Data[0];
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
/// back - Get the last element.
|
| 180 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& back() const {
|
| 181 |
+
TORCH_CHECK(!empty(), "ArrayRef: attempted to access back() of empty list");
|
| 182 |
+
return Data[Length - 1];
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/// equals - Check for element-wise equality.
|
| 186 |
+
constexpr bool equals(ArrayRef RHS) const {
|
| 187 |
+
return Length == RHS.Length && std::equal(begin(), end(), RHS.begin());
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
/// slice(n, m) - Take M elements of the array starting at element N
|
| 191 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N, size_t M)
|
| 192 |
+
const {
|
| 193 |
+
TORCH_CHECK(
|
| 194 |
+
N + M <= size(),
|
| 195 |
+
"ArrayRef: invalid slice, N = ",
|
| 196 |
+
N,
|
| 197 |
+
"; M = ",
|
| 198 |
+
M,
|
| 199 |
+
"; size = ",
|
| 200 |
+
size());
|
| 201 |
+
return ArrayRef<T>(data() + N, M);
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
/// slice(n) - Chop off the first N elements of the array.
|
| 205 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N) const {
|
| 206 |
+
TORCH_CHECK(
|
| 207 |
+
N <= size(), "ArrayRef: invalid slice, N = ", N, "; size = ", size());
|
| 208 |
+
return slice(N, size() - N);
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
/// @}
|
| 212 |
+
/// @name Operator Overloads
|
| 213 |
+
/// @{
|
| 214 |
+
constexpr const T& operator[](size_t Index) const {
|
| 215 |
+
return Data[Index];
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
/// Vector compatibility
|
| 219 |
+
C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& at(size_t Index) const {
|
| 220 |
+
TORCH_CHECK(
|
| 221 |
+
Index < Length,
|
| 222 |
+
"ArrayRef: invalid index Index = ",
|
| 223 |
+
Index,
|
| 224 |
+
"; Length = ",
|
| 225 |
+
Length);
|
| 226 |
+
return Data[Index];
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
/// Disallow accidental assignment from a temporary.
|
| 230 |
+
///
|
| 231 |
+
/// The declaration here is extra complicated so that "arrayRef = {}"
|
| 232 |
+
/// continues to select the move assignment operator.
|
| 233 |
+
template <typename U>
|
| 234 |
+
typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type&
|
| 235 |
+
operator=(U&& Temporary) = delete;
|
| 236 |
+
|
| 237 |
+
/// Disallow accidental assignment from a temporary.
|
| 238 |
+
///
|
| 239 |
+
/// The declaration here is extra complicated so that "arrayRef = {}"
|
| 240 |
+
/// continues to select the move assignment operator.
|
| 241 |
+
template <typename U>
|
| 242 |
+
typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type&
|
| 243 |
+
operator=(std::initializer_list<U>) = delete;
|
| 244 |
+
|
| 245 |
+
/// @}
|
| 246 |
+
/// @name Expensive Operations
|
| 247 |
+
/// @{
|
| 248 |
+
std::vector<T> vec() const {
|
| 249 |
+
return std::vector<T>(Data, Data + Length);
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
/// @}
|
| 253 |
+
};
|
| 254 |
+
|
| 255 |
+
template <typename T>
|
| 256 |
+
std::ostream& operator<<(std::ostream& out, ArrayRef<T> list) {
|
| 257 |
+
int i = 0;
|
| 258 |
+
out << "[";
|
| 259 |
+
for (const auto& e : list) {
|
| 260 |
+
if (i++ > 0)
|
| 261 |
+
out << ", ";
|
| 262 |
+
out << e;
|
| 263 |
+
}
|
| 264 |
+
out << "]";
|
| 265 |
+
return out;
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
/// @name ArrayRef Convenience constructors
|
| 269 |
+
/// @{
|
| 270 |
+
|
| 271 |
+
/// Construct an ArrayRef from a single element.
|
| 272 |
+
template <typename T>
|
| 273 |
+
ArrayRef<T> makeArrayRef(const T& OneElt) {
|
| 274 |
+
return OneElt;
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
/// Construct an ArrayRef from a pointer and length.
|
| 278 |
+
template <typename T>
|
| 279 |
+
ArrayRef<T> makeArrayRef(const T* data, size_t length) {
|
| 280 |
+
return ArrayRef<T>(data, length);
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
/// Construct an ArrayRef from a range.
|
| 284 |
+
template <typename T>
|
| 285 |
+
ArrayRef<T> makeArrayRef(const T* begin, const T* end) {
|
| 286 |
+
return ArrayRef<T>(begin, end);
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
/// Construct an ArrayRef from a SmallVector.
|
| 290 |
+
template <typename T>
|
| 291 |
+
ArrayRef<T> makeArrayRef(const SmallVectorImpl<T>& Vec) {
|
| 292 |
+
return Vec;
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
/// Construct an ArrayRef from a SmallVector.
|
| 296 |
+
template <typename T, unsigned N>
|
| 297 |
+
ArrayRef<T> makeArrayRef(const SmallVector<T, N>& Vec) {
|
| 298 |
+
return Vec;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
/// Construct an ArrayRef from a std::vector.
|
| 302 |
+
template <typename T>
|
| 303 |
+
ArrayRef<T> makeArrayRef(const std::vector<T>& Vec) {
|
| 304 |
+
return Vec;
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
/// Construct an ArrayRef from a std::array.
|
| 308 |
+
template <typename T, std::size_t N>
|
| 309 |
+
ArrayRef<T> makeArrayRef(const std::array<T, N>& Arr) {
|
| 310 |
+
return Arr;
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
/// Construct an ArrayRef from an ArrayRef (no-op) (const)
|
| 314 |
+
template <typename T>
|
| 315 |
+
ArrayRef<T> makeArrayRef(const ArrayRef<T>& Vec) {
|
| 316 |
+
return Vec;
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
/// Construct an ArrayRef from an ArrayRef (no-op)
|
| 320 |
+
template <typename T>
|
| 321 |
+
ArrayRef<T>& makeArrayRef(ArrayRef<T>& Vec) {
|
| 322 |
+
return Vec;
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
/// Construct an ArrayRef from a C array.
|
| 326 |
+
template <typename T, size_t N>
|
| 327 |
+
ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
|
| 328 |
+
return ArrayRef<T>(Arr);
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
// WARNING: Template instantiation will NOT be willing to do an implicit
|
| 332 |
+
// conversions to get you to an c10::ArrayRef, which is why we need so
|
| 333 |
+
// many overloads.
|
| 334 |
+
|
| 335 |
+
template <typename T>
|
| 336 |
+
bool operator==(c10::ArrayRef<T> a1, c10::ArrayRef<T> a2) {
|
| 337 |
+
return a1.equals(a2);
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
template <typename T>
|
| 341 |
+
bool operator!=(c10::ArrayRef<T> a1, c10::ArrayRef<T> a2) {
|
| 342 |
+
return !a1.equals(a2);
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
template <typename T>
|
| 346 |
+
bool operator==(const std::vector<T>& a1, c10::ArrayRef<T> a2) {
|
| 347 |
+
return c10::ArrayRef<T>(a1).equals(a2);
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
template <typename T>
|
| 351 |
+
bool operator!=(const std::vector<T>& a1, c10::ArrayRef<T> a2) {
|
| 352 |
+
return !c10::ArrayRef<T>(a1).equals(a2);
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
template <typename T>
|
| 356 |
+
bool operator==(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
|
| 357 |
+
return a1.equals(c10::ArrayRef<T>(a2));
|
| 358 |
+
}
|
| 359 |
+
|
| 360 |
+
template <typename T>
|
| 361 |
+
bool operator!=(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
|
| 362 |
+
return !a1.equals(c10::ArrayRef<T>(a2));
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
using IntArrayRef = ArrayRef<int64_t>;
|
| 366 |
+
|
| 367 |
+
// This alias is deprecated because it doesn't make ownership
|
| 368 |
+
// semantics obvious. Use IntArrayRef instead!
|
| 369 |
+
C10_DEFINE_DEPRECATED_USING(IntList, ArrayRef<int64_t>)
|
| 370 |
+
|
| 371 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/util/bit_cast.h>
|
| 5 |
+
|
| 6 |
+
#include <limits>
|
| 7 |
+
|
| 8 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 9 |
+
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
|
| 10 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
|
| 14 |
+
#if defined(CL_SYCL_LANGUAGE_VERSION)
|
| 15 |
+
#include <CL/sycl.hpp> // for SYCL 1.2.1
|
| 16 |
+
#else
|
| 17 |
+
#include <sycl/sycl.hpp> // for SYCL 2020
|
| 18 |
+
#endif
|
| 19 |
+
#include <ext/oneapi/bfloat16.hpp>
|
| 20 |
+
#endif
|
| 21 |
+
|
| 22 |
+
namespace c10 {
|
| 23 |
+
|
| 24 |
+
/// Constructors
|
| 25 |
+
inline C10_HOST_DEVICE BFloat16::BFloat16(float value)
|
| 26 |
+
:
|
| 27 |
+
#if defined(__CUDACC__) && !defined(USE_ROCM) && defined(__CUDA_ARCH__) && \
|
| 28 |
+
__CUDA_ARCH__ >= 800
|
| 29 |
+
x(__bfloat16_as_ushort(__float2bfloat16(value)))
|
| 30 |
+
#elif defined(__SYCL_DEVICE_ONLY__) && \
|
| 31 |
+
defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
|
| 32 |
+
x(c10::bit_cast<uint16_t>(sycl::ext::oneapi::bfloat16(value)))
|
| 33 |
+
#else
|
| 34 |
+
// RNE by default
|
| 35 |
+
x(detail::round_to_nearest_even(value))
|
| 36 |
+
#endif
|
| 37 |
+
{
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
/// Implicit conversions
|
| 41 |
+
inline C10_HOST_DEVICE BFloat16::operator float() const {
|
| 42 |
+
#if defined(__CUDACC__) && !defined(USE_ROCM)
|
| 43 |
+
return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
|
| 44 |
+
#elif defined(__SYCL_DEVICE_ONLY__) && \
|
| 45 |
+
defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
|
| 46 |
+
return float(*reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x));
|
| 47 |
+
#else
|
| 48 |
+
return detail::f32_from_bits(x);
|
| 49 |
+
#endif
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
#if defined(__CUDACC__) && !defined(USE_ROCM)
|
| 53 |
+
inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) {
|
| 54 |
+
x = *reinterpret_cast<const unsigned short*>(&value);
|
| 55 |
+
}
|
| 56 |
+
inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const {
|
| 57 |
+
return *reinterpret_cast<const __nv_bfloat16*>(&x);
|
| 58 |
+
}
|
| 59 |
+
#endif
|
| 60 |
+
|
| 61 |
+
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
|
| 62 |
+
inline C10_HOST_DEVICE BFloat16::BFloat16(
|
| 63 |
+
const sycl::ext::oneapi::bfloat16& value) {
|
| 64 |
+
x = *reinterpret_cast<const unsigned short*>(&value);
|
| 65 |
+
}
|
| 66 |
+
inline C10_HOST_DEVICE BFloat16::operator sycl::ext::oneapi::bfloat16() const {
|
| 67 |
+
return *reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x);
|
| 68 |
+
}
|
| 69 |
+
#endif
|
| 70 |
+
|
| 71 |
+
// CUDA intrinsics

// Read via the __ldg intrinsic where a native bfloat16 overload exists
// (CUDA, sm_80+); otherwise fall back to a plain dereference.
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) {
#if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
  return __ldg(reinterpret_cast<const __nv_bfloat16*>(ptr));
#else
  return *ptr;
#endif
}
#endif
|
| 82 |
+
|
| 83 |
+
/// Arithmetic
|
| 84 |
+
|
| 85 |
+
inline C10_HOST_DEVICE BFloat16
|
| 86 |
+
operator+(const BFloat16& a, const BFloat16& b) {
|
| 87 |
+
return static_cast<float>(a) + static_cast<float>(b);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
inline C10_HOST_DEVICE BFloat16
|
| 91 |
+
operator-(const BFloat16& a, const BFloat16& b) {
|
| 92 |
+
return static_cast<float>(a) - static_cast<float>(b);
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
inline C10_HOST_DEVICE BFloat16
|
| 96 |
+
operator*(const BFloat16& a, const BFloat16& b) {
|
| 97 |
+
return static_cast<float>(a) * static_cast<float>(b);
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b)
|
| 101 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 102 |
+
return static_cast<float>(a) / static_cast<float>(b);
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) {
|
| 106 |
+
return -static_cast<float>(a);
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
inline C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) {
|
| 110 |
+
a = a + b;
|
| 111 |
+
return a;
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) {
|
| 115 |
+
a = a - b;
|
| 116 |
+
return a;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) {
|
| 120 |
+
a = a * b;
|
| 121 |
+
return a;
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) {
|
| 125 |
+
a = a / b;
|
| 126 |
+
return a;
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) {
|
| 130 |
+
a.x = a.x | b.x;
|
| 131 |
+
return a;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) {
|
| 135 |
+
a.x = a.x ^ b.x;
|
| 136 |
+
return a;
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) {
|
| 140 |
+
a.x = a.x & b.x;
|
| 141 |
+
return a;
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
/// Arithmetic with floats
|
| 145 |
+
|
| 146 |
+
inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) {
|
| 147 |
+
return static_cast<float>(a) + b;
|
| 148 |
+
}
|
| 149 |
+
inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) {
|
| 150 |
+
return static_cast<float>(a) - b;
|
| 151 |
+
}
|
| 152 |
+
inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) {
|
| 153 |
+
return static_cast<float>(a) * b;
|
| 154 |
+
}
|
| 155 |
+
inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) {
|
| 156 |
+
return static_cast<float>(a) / b;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) {
|
| 160 |
+
return a + static_cast<float>(b);
|
| 161 |
+
}
|
| 162 |
+
inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) {
|
| 163 |
+
return a - static_cast<float>(b);
|
| 164 |
+
}
|
| 165 |
+
inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) {
|
| 166 |
+
return a * static_cast<float>(b);
|
| 167 |
+
}
|
| 168 |
+
inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) {
|
| 169 |
+
return a / static_cast<float>(b);
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) {
|
| 173 |
+
return a += static_cast<float>(b);
|
| 174 |
+
}
|
| 175 |
+
inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) {
|
| 176 |
+
return a -= static_cast<float>(b);
|
| 177 |
+
}
|
| 178 |
+
inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) {
|
| 179 |
+
return a *= static_cast<float>(b);
|
| 180 |
+
}
|
| 181 |
+
inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) {
|
| 182 |
+
return a /= static_cast<float>(b);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/// Arithmetic with doubles
|
| 186 |
+
|
| 187 |
+
inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) {
|
| 188 |
+
return static_cast<double>(a) + b;
|
| 189 |
+
}
|
| 190 |
+
inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) {
|
| 191 |
+
return static_cast<double>(a) - b;
|
| 192 |
+
}
|
| 193 |
+
inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) {
|
| 194 |
+
return static_cast<double>(a) * b;
|
| 195 |
+
}
|
| 196 |
+
inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) {
|
| 197 |
+
return static_cast<double>(a) / b;
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) {
|
| 201 |
+
return a + static_cast<double>(b);
|
| 202 |
+
}
|
| 203 |
+
inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) {
|
| 204 |
+
return a - static_cast<double>(b);
|
| 205 |
+
}
|
| 206 |
+
inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) {
|
| 207 |
+
return a * static_cast<double>(b);
|
| 208 |
+
}
|
| 209 |
+
inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) {
|
| 210 |
+
return a / static_cast<double>(b);
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
/// Arithmetic with ints
|
| 214 |
+
|
| 215 |
+
inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) {
|
| 216 |
+
return a + static_cast<BFloat16>(b);
|
| 217 |
+
}
|
| 218 |
+
inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) {
|
| 219 |
+
return a - static_cast<BFloat16>(b);
|
| 220 |
+
}
|
| 221 |
+
inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) {
|
| 222 |
+
return a * static_cast<BFloat16>(b);
|
| 223 |
+
}
|
| 224 |
+
inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) {
|
| 225 |
+
return a / static_cast<BFloat16>(b);
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) {
|
| 229 |
+
return static_cast<BFloat16>(a) + b;
|
| 230 |
+
}
|
| 231 |
+
inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) {
|
| 232 |
+
return static_cast<BFloat16>(a) - b;
|
| 233 |
+
}
|
| 234 |
+
inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) {
|
| 235 |
+
return static_cast<BFloat16>(a) * b;
|
| 236 |
+
}
|
| 237 |
+
inline C10_HOST_DEVICE BFloat16 operator/(int a, BFloat16 b) {
|
| 238 |
+
return static_cast<BFloat16>(a) / b;
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
//// Arithmetic with int64_t
|
| 242 |
+
|
| 243 |
+
inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) {
|
| 244 |
+
return a + static_cast<BFloat16>(b);
|
| 245 |
+
}
|
| 246 |
+
inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) {
|
| 247 |
+
return a - static_cast<BFloat16>(b);
|
| 248 |
+
}
|
| 249 |
+
inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) {
|
| 250 |
+
return a * static_cast<BFloat16>(b);
|
| 251 |
+
}
|
| 252 |
+
inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) {
|
| 253 |
+
return a / static_cast<BFloat16>(b);
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) {
|
| 257 |
+
return static_cast<BFloat16>(a) + b;
|
| 258 |
+
}
|
| 259 |
+
inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) {
|
| 260 |
+
return static_cast<BFloat16>(a) - b;
|
| 261 |
+
}
|
| 262 |
+
inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) {
|
| 263 |
+
return static_cast<BFloat16>(a) * b;
|
| 264 |
+
}
|
| 265 |
+
inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) {
|
| 266 |
+
return static_cast<BFloat16>(a) / b;
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
// Overloading < and > operators, because std::max and std::min use them.
|
| 270 |
+
|
| 271 |
+
inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) {
|
| 272 |
+
return float(lhs) > float(rhs);
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) {
|
| 276 |
+
return float(lhs) < float(rhs);
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
} // namespace c10
|
| 280 |
+
|
| 281 |
+
namespace std {
|
| 282 |
+
|
| 283 |
+
template <>
|
| 284 |
+
class numeric_limits<c10::BFloat16> {
|
| 285 |
+
public:
|
| 286 |
+
static constexpr bool is_signed = true;
|
| 287 |
+
static constexpr bool is_specialized = true;
|
| 288 |
+
static constexpr bool is_integer = false;
|
| 289 |
+
static constexpr bool is_exact = false;
|
| 290 |
+
static constexpr bool has_infinity = true;
|
| 291 |
+
static constexpr bool has_quiet_NaN = true;
|
| 292 |
+
static constexpr bool has_signaling_NaN = true;
|
| 293 |
+
static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
|
| 294 |
+
static constexpr auto has_denorm_loss =
|
| 295 |
+
numeric_limits<float>::has_denorm_loss;
|
| 296 |
+
static constexpr auto round_style = numeric_limits<float>::round_style;
|
| 297 |
+
static constexpr bool is_iec559 = false;
|
| 298 |
+
static constexpr bool is_bounded = true;
|
| 299 |
+
static constexpr bool is_modulo = false;
|
| 300 |
+
static constexpr int digits = 8;
|
| 301 |
+
static constexpr int digits10 = 2;
|
| 302 |
+
static constexpr int max_digits10 = 4;
|
| 303 |
+
static constexpr int radix = 2;
|
| 304 |
+
static constexpr int min_exponent = -125;
|
| 305 |
+
static constexpr int min_exponent10 = -37;
|
| 306 |
+
static constexpr int max_exponent = 128;
|
| 307 |
+
static constexpr int max_exponent10 = 38;
|
| 308 |
+
static constexpr auto traps = numeric_limits<float>::traps;
|
| 309 |
+
static constexpr auto tinyness_before =
|
| 310 |
+
numeric_limits<float>::tinyness_before;
|
| 311 |
+
|
| 312 |
+
static constexpr c10::BFloat16 min() {
|
| 313 |
+
return c10::BFloat16(0x0080, c10::BFloat16::from_bits());
|
| 314 |
+
}
|
| 315 |
+
static constexpr c10::BFloat16 lowest() {
|
| 316 |
+
return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits());
|
| 317 |
+
}
|
| 318 |
+
static constexpr c10::BFloat16 max() {
|
| 319 |
+
return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits());
|
| 320 |
+
}
|
| 321 |
+
static constexpr c10::BFloat16 epsilon() {
|
| 322 |
+
return c10::BFloat16(0x3C00, c10::BFloat16::from_bits());
|
| 323 |
+
}
|
| 324 |
+
static constexpr c10::BFloat16 round_error() {
|
| 325 |
+
return c10::BFloat16(0x3F00, c10::BFloat16::from_bits());
|
| 326 |
+
}
|
| 327 |
+
static constexpr c10::BFloat16 infinity() {
|
| 328 |
+
return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
|
| 329 |
+
}
|
| 330 |
+
static constexpr c10::BFloat16 quiet_NaN() {
|
| 331 |
+
return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits());
|
| 332 |
+
}
|
| 333 |
+
static constexpr c10::BFloat16 signaling_NaN() {
|
| 334 |
+
return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
|
| 335 |
+
}
|
| 336 |
+
static constexpr c10::BFloat16 denorm_min() {
|
| 337 |
+
return c10::BFloat16(0x0001, c10::BFloat16::from_bits());
|
| 338 |
+
}
|
| 339 |
+
};
|
| 340 |
+
|
| 341 |
+
} // namespace std
|
| 342 |
+
|
| 343 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_BACKTRACE_H_
|
| 2 |
+
#define C10_UTIL_BACKTRACE_H_
|
| 3 |
+
|
| 4 |
+
#include <cstddef>
|
| 5 |
+
#include <string>
|
| 6 |
+
#include <typeinfo>
|
| 7 |
+
|
| 8 |
+
#include <c10/macros/Macros.h>
|
| 9 |
+
|
| 10 |
+
namespace c10 {
|
| 11 |
+
C10_API std::string get_backtrace(
|
| 12 |
+
size_t frames_to_skip = 0,
|
| 13 |
+
size_t maximum_number_of_frames = 64,
|
| 14 |
+
bool skip_python_frames = true);
|
| 15 |
+
} // namespace c10
|
| 16 |
+
|
| 17 |
+
#endif // C10_UTIL_BACKTRACE_H_
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <atomic>
|
| 4 |
+
#include <mutex>
|
| 5 |
+
#include <utility>
|
| 6 |
+
|
| 7 |
+
#include <c10/macros/Macros.h>
|
| 8 |
+
#include <c10/util/C++17.h>
|
| 9 |
+
|
| 10 |
+
namespace c10 {
|
| 11 |
+
|
| 12 |
+
// custom c10 call_once implementation to avoid the deadlock in std::call_once.
|
| 13 |
+
// The implementation here is a simplified version from folly and likely much
|
| 14 |
+
// much higher memory footprint.
|
| 15 |
+
template <typename Flag, typename F, typename... Args>
|
| 16 |
+
inline void call_once(Flag& flag, F&& f, Args&&... args) {
|
| 17 |
+
if (C10_LIKELY(flag.test_once())) {
|
| 18 |
+
return;
|
| 19 |
+
}
|
| 20 |
+
flag.call_once_slow(std::forward<F>(f), std::forward<Args>(args)...);
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
class once_flag {
|
| 24 |
+
public:
|
| 25 |
+
#ifndef _WIN32
|
| 26 |
+
// running into build error on MSVC. Can't seem to get a repro locally so I'm
|
| 27 |
+
// just avoiding constexpr
|
| 28 |
+
//
|
| 29 |
+
// C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error:
|
| 30 |
+
// defaulted default constructor cannot be constexpr because the
|
| 31 |
+
// corresponding implicitly declared default constructor would not be
|
| 32 |
+
// constexpr 1 error detected in the compilation of
|
| 33 |
+
// "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu".
|
| 34 |
+
constexpr
|
| 35 |
+
#endif
|
| 36 |
+
once_flag() noexcept = default;
|
| 37 |
+
once_flag(const once_flag&) = delete;
|
| 38 |
+
once_flag& operator=(const once_flag&) = delete;
|
| 39 |
+
|
| 40 |
+
private:
|
| 41 |
+
template <typename Flag, typename F, typename... Args>
|
| 42 |
+
friend void call_once(Flag& flag, F&& f, Args&&... args);
|
| 43 |
+
|
| 44 |
+
template <typename F, typename... Args>
|
| 45 |
+
void call_once_slow(F&& f, Args&&... args) {
|
| 46 |
+
std::lock_guard<std::mutex> guard(mutex_);
|
| 47 |
+
if (init_.load(std::memory_order_relaxed)) {
|
| 48 |
+
return;
|
| 49 |
+
}
|
| 50 |
+
c10::guts::invoke(f, std::forward<Args>(args)...);
|
| 51 |
+
init_.store(true, std::memory_order_release);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
bool test_once() {
|
| 55 |
+
return init_.load(std::memory_order_acquire);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
void reset_once() {
|
| 59 |
+
init_.store(false, std::memory_order_release);
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
private:
|
| 63 |
+
std::mutex mutex_;
|
| 64 |
+
std::atomic<bool> init_{false};
|
| 65 |
+
};
|
| 66 |
+
|
| 67 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/DimVector.h
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SymInt.h>
|
| 4 |
+
#include <c10/core/impl/SizesAndStrides.h>
|
| 5 |
+
#include <c10/util/SmallVector.h>
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
|
| 8 |
+
namespace c10 {
|
| 9 |
+
|
| 10 |
+
constexpr size_t kDimVectorStaticSize = C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;
|
| 11 |
+
|
| 12 |
+
/// A container for sizes or strides
|
| 13 |
+
using DimVector = SmallVector<int64_t, kDimVectorStaticSize>;
|
| 14 |
+
using SymDimVector = SmallVector<c10::SymInt, kDimVectorStaticSize>;
|
| 15 |
+
|
| 16 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_FBCODEMAPS_H_
|
| 2 |
+
#define C10_UTIL_FBCODEMAPS_H_
|
| 3 |
+
|
| 4 |
+
// Map typedefs so that we can use folly's F14 maps in fbcode without
|
| 5 |
+
// taking a folly dependency.
|
| 6 |
+
|
| 7 |
+
#ifdef FBCODE_CAFFE2
|
| 8 |
+
#include <folly/container/F14Map.h>
|
| 9 |
+
#include <folly/container/F14Set.h>
|
| 10 |
+
#else
|
| 11 |
+
#include <unordered_map>
|
| 12 |
+
#include <unordered_set>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
#ifdef FBCODE_CAFFE2
|
| 17 |
+
template <typename Key, typename Value>
|
| 18 |
+
using FastMap = folly::F14FastMap<Key, Value>;
|
| 19 |
+
template <typename Key>
|
| 20 |
+
using FastSet = folly::F14FastSet<Key>;
|
| 21 |
+
#else
|
| 22 |
+
template <typename Key, typename Value>
|
| 23 |
+
using FastMap = std::unordered_map<Key, Value>;
|
| 24 |
+
template <typename Key>
|
| 25 |
+
using FastSet = std::unordered_set<Key>;
|
| 26 |
+
#endif
|
| 27 |
+
} // namespace c10
|
| 28 |
+
|
| 29 |
+
#endif // C10_UTIL_FBCODEMAPS_H_
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Flags.h
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_FLAGS_H_
|
| 2 |
+
#define C10_UTIL_FLAGS_H_
|
| 3 |
+
|
| 4 |
+
/* Commandline flags support for C10.
|
| 5 |
+
*
|
| 6 |
+
* This is a portable commandline flags tool for c10, so we can optionally
|
| 7 |
+
* choose to use gflags or a lightweight custom implementation if gflags is
|
| 8 |
+
* not possible on a certain platform. If you have gflags installed, set the
|
| 9 |
+
* macro C10_USE_GFLAGS will seamlessly route everything to gflags.
|
| 10 |
+
*
|
| 11 |
+
* To define a flag foo of type bool default to true, do the following in the
|
| 12 |
+
* *global* namespace:
|
| 13 |
+
* C10_DEFINE_bool(foo, true, "An example.");
|
| 14 |
+
*
|
| 15 |
+
* To use it in another .cc file, you can use C10_DECLARE_* as follows:
|
| 16 |
+
* C10_DECLARE_bool(foo);
|
| 17 |
+
*
|
| 18 |
+
* In both cases, you can then access the flag via FLAGS_foo.
|
| 19 |
+
*
|
| 20 |
+
* It is recommended that you build with gflags. To learn more about the flags
|
| 21 |
+
* usage, refer to the gflags page here:
|
| 22 |
+
*
|
| 23 |
+
* https://gflags.github.io/gflags/
|
| 24 |
+
*
|
| 25 |
+
* Note about Python users / devs: gflags is initiated from a C++ function
|
| 26 |
+
* ParseCommandLineFlags, and is usually done in native binaries in the main
|
| 27 |
+
* function. As Python does not have a modifiable main function, it is usually
|
| 28 |
+
* difficult to change the flags after Python starts. Hence, it is recommended
|
| 29 |
+
* that one sets the default value of the flags to one that's acceptable in
|
| 30 |
+
* general - that will allow Python to run without wrong flags.
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
#include <string>
|
| 34 |
+
|
| 35 |
+
#include <c10/macros/Macros.h>
|
| 36 |
+
#include <c10/util/Registry.h>
|
| 37 |
+
|
| 38 |
+
namespace c10 {
|
| 39 |
+
/**
|
| 40 |
+
* Sets the usage message when a commandline tool is called with "--help".
|
| 41 |
+
*/
|
| 42 |
+
C10_API void SetUsageMessage(const std::string& str);
|
| 43 |
+
|
| 44 |
+
/**
|
| 45 |
+
* Returns the usage message for the commandline tool set by SetUsageMessage.
|
| 46 |
+
*/
|
| 47 |
+
C10_API const char* UsageMessage();
|
| 48 |
+
|
| 49 |
+
/**
|
| 50 |
+
* Parses the commandline flags.
|
| 51 |
+
*
|
| 52 |
+
* This command parses all the commandline arguments passed in via pargc
|
| 53 |
+
* and argv. Once it is finished, partc and argv will contain the remaining
|
| 54 |
+
* commandline args that c10 does not deal with. Note that following
|
| 55 |
+
* convention, argv[0] contains the binary name and is not parsed.
|
| 56 |
+
*/
|
| 57 |
+
C10_API bool ParseCommandLineFlags(int* pargc, char*** pargv);
|
| 58 |
+
|
| 59 |
+
/**
|
| 60 |
+
* Checks if the commandline flags has already been passed.
|
| 61 |
+
*/
|
| 62 |
+
C10_API bool CommandLineFlagsHasBeenParsed();
|
| 63 |
+
|
| 64 |
+
} // namespace c10
|
| 65 |
+
|
| 66 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 67 |
+
// Below are gflags and non-gflags specific implementations.
|
| 68 |
+
// In general, they define the following macros for one to declare (use
|
| 69 |
+
// C10_DECLARE) or define (use C10_DEFINE) flags:
|
| 70 |
+
// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string}
|
| 71 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 72 |
+
|
| 73 |
+
#ifdef C10_USE_GFLAGS
|
| 74 |
+
|
| 75 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 76 |
+
// Begin gflags section: most functions are basically rerouted to gflags.
|
| 77 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 78 |
+
#include <gflags/gflags.h>
|
| 79 |
+
|
| 80 |
+
// C10 uses hidden visibility by default. However, in gflags, it only uses
|
| 81 |
+
// export on Windows platform (with dllexport) but not on linux/mac (with
|
| 82 |
+
// default visibility). As a result, to ensure that we are always exporting
|
| 83 |
+
// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we
|
| 84 |
+
// are building C10 as a shared library.
|
| 85 |
+
// This has to be done after the inclusion of gflags, because some early
|
| 86 |
+
// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the
|
| 87 |
+
// macros, so we need to do definition after gflags is done.
|
| 88 |
+
#ifdef GFLAGS_DLL_DEFINE_FLAG
|
| 89 |
+
#undef GFLAGS_DLL_DEFINE_FLAG
|
| 90 |
+
#endif // GFLAGS_DLL_DEFINE_FLAG
|
| 91 |
+
#ifdef GFLAGS_DLL_DECLARE_FLAG
|
| 92 |
+
#undef GFLAGS_DLL_DECLARE_FLAG
|
| 93 |
+
#endif // GFLAGS_DLL_DECLARE_FLAG
|
| 94 |
+
#define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT
|
| 95 |
+
#define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT
|
| 96 |
+
|
| 97 |
+
// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags.
|
| 98 |
+
// Using GFLAGS_GFLAGS_H_ to capture this change.
|
| 99 |
+
#ifndef GFLAGS_GFLAGS_H_
|
| 100 |
+
namespace gflags = google;
|
| 101 |
+
#endif // GFLAGS_GFLAGS_H_
|
| 102 |
+
|
| 103 |
+
// Motivation about the gflags wrapper:
|
| 104 |
+
// (1) We would need to make sure that the gflags version and the non-gflags
|
| 105 |
+
// version of C10 are going to expose the same flags abstraction. One should
|
| 106 |
+
// explicitly use FLAGS_flag_name to access the flags.
|
| 107 |
+
// (2) For flag names, it is recommended to start with c10_ to distinguish it
|
| 108 |
+
// from regular gflags flags. For example, do
|
| 109 |
+
// C10_DEFINE_BOOL(c10_my_flag, true, "An example");
|
| 110 |
+
// to allow one to use FLAGS_c10_my_flag.
|
| 111 |
+
// (3) Gflags has a design issue that does not properly expose the global flags,
|
| 112 |
+
// if one builds the library with -fvisibility=hidden. The current gflags (as of
|
| 113 |
+
// Aug 2018) only deals with the Windows case using dllexport, and not the Linux
|
| 114 |
+
// counterparts. As a result, we will explicitly use C10_EXPORT to export the
|
| 115 |
+
// flags defined in C10. This is done via a global reference, so the flag
|
| 116 |
+
// itself is not duplicated - under the hood it is the same global gflags flag.
|
| 117 |
+
#define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) \
|
| 118 |
+
DEFINE_##type(name, default_value, help_str);
|
| 119 |
+
|
| 120 |
+
#define C10_DEFINE_int(name, default_value, help_str) \
|
| 121 |
+
C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str)
|
| 122 |
+
#define C10_DEFINE_int32(name, default_value, help_str) \
|
| 123 |
+
C10_DEFINE_int(name, default_value, help_str)
|
| 124 |
+
#define C10_DEFINE_int64(name, default_value, help_str) \
|
| 125 |
+
C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str)
|
| 126 |
+
#define C10_DEFINE_double(name, default_value, help_str) \
|
| 127 |
+
C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str)
|
| 128 |
+
#define C10_DEFINE_bool(name, default_value, help_str) \
|
| 129 |
+
C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str)
|
| 130 |
+
#define C10_DEFINE_string(name, default_value, help_str) \
|
| 131 |
+
C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str)
|
| 132 |
+
|
| 133 |
+
// DECLARE_typed_var should be used in header files and in the global namespace.
|
| 134 |
+
#define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name);
|
| 135 |
+
|
| 136 |
+
#define C10_DECLARE_int(name) \
|
| 137 |
+
C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name)
|
| 138 |
+
#define C10_DECLARE_int32(name) C10_DECLARE_int(name)
|
| 139 |
+
#define C10_DECLARE_int64(name) \
|
| 140 |
+
C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name)
|
| 141 |
+
#define C10_DECLARE_double(name) \
|
| 142 |
+
C10_GFLAGS_DECLARE_WRAPPER(double, double, name)
|
| 143 |
+
#define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name)
|
| 144 |
+
#define C10_DECLARE_string(name) \
|
| 145 |
+
C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name)
|
| 146 |
+
|
| 147 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 148 |
+
// End gflags section.
|
| 149 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 150 |
+
|
| 151 |
+
#else // C10_USE_GFLAGS
|
| 152 |
+
|
| 153 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 154 |
+
// Begin non-gflags section: providing equivalent functionality.
|
| 155 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 156 |
+
|
| 157 |
+
namespace c10 {
|
| 158 |
+
|
| 159 |
+
class C10_API C10FlagParser {
|
| 160 |
+
public:
|
| 161 |
+
bool success() {
|
| 162 |
+
return success_;
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
protected:
|
| 166 |
+
template <typename T>
|
| 167 |
+
bool Parse(const std::string& content, T* value);
|
| 168 |
+
bool success_{false};
|
| 169 |
+
};
|
| 170 |
+
|
| 171 |
+
C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&);
|
| 172 |
+
|
| 173 |
+
} // namespace c10
|
| 174 |
+
|
| 175 |
+
// The macros are defined outside the c10 namespace. In your code, you should
|
| 176 |
+
// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace
|
| 177 |
+
// as well.
|
| 178 |
+
|
| 179 |
+
#define C10_DEFINE_typed_var(type, name, default_value, help_str) \
|
| 180 |
+
C10_EXPORT type FLAGS_##name = default_value; \
|
| 181 |
+
namespace c10 { \
|
| 182 |
+
namespace { \
|
| 183 |
+
class C10FlagParser_##name : public C10FlagParser { \
|
| 184 |
+
public: \
|
| 185 |
+
explicit C10FlagParser_##name(const std::string& content) { \
|
| 186 |
+
success_ = C10FlagParser::Parse<type>(content, &FLAGS_##name); \
|
| 187 |
+
} \
|
| 188 |
+
}; \
|
| 189 |
+
} \
|
| 190 |
+
RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( \
|
| 191 |
+
#name, \
|
| 192 |
+
C10FlagsRegistry(), \
|
| 193 |
+
RegistererC10FlagsRegistry::DefaultCreator<C10FlagParser_##name>, \
|
| 194 |
+
"(" #type ", default " #default_value ") " help_str); \
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
#define C10_DEFINE_int(name, default_value, help_str) \
|
| 198 |
+
C10_DEFINE_typed_var(int, name, default_value, help_str)
|
| 199 |
+
#define C10_DEFINE_int32(name, default_value, help_str) \
|
| 200 |
+
C10_DEFINE_int(name, default_value, help_str)
|
| 201 |
+
#define C10_DEFINE_int64(name, default_value, help_str) \
|
| 202 |
+
C10_DEFINE_typed_var(int64_t, name, default_value, help_str)
|
| 203 |
+
#define C10_DEFINE_double(name, default_value, help_str) \
|
| 204 |
+
C10_DEFINE_typed_var(double, name, default_value, help_str)
|
| 205 |
+
#define C10_DEFINE_bool(name, default_value, help_str) \
|
| 206 |
+
C10_DEFINE_typed_var(bool, name, default_value, help_str)
|
| 207 |
+
#define C10_DEFINE_string(name, default_value, help_str) \
|
| 208 |
+
C10_DEFINE_typed_var(std::string, name, default_value, help_str)
|
| 209 |
+
|
| 210 |
+
// DECLARE_typed_var should be used in header files and in the global namespace.
|
| 211 |
+
#define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name
|
| 212 |
+
|
| 213 |
+
#define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name)
|
| 214 |
+
#define C10_DECLARE_int32(name) C10_DECLARE_int(name)
|
| 215 |
+
#define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name)
|
| 216 |
+
#define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name)
|
| 217 |
+
#define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name)
|
| 218 |
+
#define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name)
|
| 219 |
+
|
| 220 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 221 |
+
// End non-gflags section.
|
| 222 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 223 |
+
|
| 224 |
+
#endif // C10_USE_GFLAGS
|
| 225 |
+
|
| 226 |
+
#endif // C10_UTIL_FLAGS_H_
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/// Defines the Float8_e4m3fn type (8-bit floating-point) including conversions
|
| 4 |
+
/// to standard C types and basic arithmetic operations. Note that arithmetic
|
| 5 |
+
/// operations are implemented by converting to floating point and
|
| 6 |
+
/// performing the operation in float32.
|
| 7 |
+
/// Binary configuration:
|
| 8 |
+
/// s eeee mmm
|
| 9 |
+
/// 1 sign bit
|
| 10 |
+
/// 4 exponent bits
|
| 11 |
+
/// 3 mantissa bits
|
| 12 |
+
/// bias = 7
|
| 13 |
+
///
|
| 14 |
+
/// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
|
| 15 |
+
/// and inspired by Half implementation from pytorch/c10/util/Half.h
|
| 16 |
+
|
| 17 |
+
#include <c10/macros/Macros.h>
|
| 18 |
+
#include <c10/util/C++17.h>
|
| 19 |
+
#include <c10/util/TypeSafeSignMath.h>
|
| 20 |
+
#include <c10/util/floating_point_utils.h>
|
| 21 |
+
#include <type_traits>
|
| 22 |
+
|
| 23 |
+
#if defined(__cplusplus) && (__cplusplus >= 201103L)
|
| 24 |
+
#include <cmath>
|
| 25 |
+
#include <cstdint>
|
| 26 |
+
#elif !defined(__OPENCL_VERSION__)
|
| 27 |
+
#include <math.h>
|
| 28 |
+
#include <stdint.h>
|
| 29 |
+
#endif
|
| 30 |
+
|
| 31 |
+
#ifdef _MSC_VER
|
| 32 |
+
#include <intrin.h>
|
| 33 |
+
#endif
|
| 34 |
+
|
| 35 |
+
#include <climits>
|
| 36 |
+
#include <cstdint>
|
| 37 |
+
#include <cstring>
|
| 38 |
+
#include <iosfwd>
|
| 39 |
+
#include <limits>
|
| 40 |
+
#include <sstream>
|
| 41 |
+
#include <stdexcept>
|
| 42 |
+
#include <string>
|
| 43 |
+
#include <utility>
|
| 44 |
+
|
| 45 |
+
#include <typeinfo> // operator typeid
|
| 46 |
+
|
| 47 |
+
namespace c10 {
|
| 48 |
+
|
| 49 |
+
namespace detail {
|
| 50 |
+
|
| 51 |
+
/*
|
| 52 |
+
* Convert a 8-bit floating-point number in fp8 E4M3FN format, in bit
|
| 53 |
+
* representation, to a 32-bit floating-point number in IEEE single-precision
|
| 54 |
+
* format, in bit representation.
|
| 55 |
+
*
|
| 56 |
+
* @note The implementation doesn't use any floating-point operations.
|
| 57 |
+
*/
|
| 58 |
+
inline C10_HOST_DEVICE float fp8e4m3fn_to_fp32_value(uint8_t input) {
|
| 59 |
+
/*
|
| 60 |
+
* Extend the fp8 E4M3FN number to 32 bits and shift to the
|
| 61 |
+
* upper part of the 32-bit word:
|
| 62 |
+
* +---+----+---+-----------------------------+
|
| 63 |
+
* | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
|
| 64 |
+
* +---+----+---+-----------------------------+
|
| 65 |
+
* Bits 31 27-30 24-26 0-23
|
| 66 |
+
*
|
| 67 |
+
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
|
| 68 |
+
* - zero bits.
|
| 69 |
+
*/
|
| 70 |
+
const uint32_t w = (uint32_t)input << 24;
|
| 71 |
+
/*
|
| 72 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
| 73 |
+
*
|
| 74 |
+
* +---+----------------------------------+
|
| 75 |
+
* | S |0000000 00000000 00000000 00000000|
|
| 76 |
+
* +---+----------------------------------+
|
| 77 |
+
* Bits 31 0-31
|
| 78 |
+
*/
|
| 79 |
+
const uint32_t sign = w & UINT32_C(0x80000000);
|
| 80 |
+
/*
|
| 81 |
+
* Extract mantissa and biased exponent of the input number into the bits 0-30
|
| 82 |
+
* of the 32-bit word:
|
| 83 |
+
*
|
| 84 |
+
* +---+----+---+-----------------------------+
|
| 85 |
+
* | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
|
| 86 |
+
* +---+----+---+-----------------------------+
|
| 87 |
+
* Bits 31 27-30 24-26 0-23
|
| 88 |
+
*/
|
| 89 |
+
const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
|
| 90 |
+
/*
|
| 91 |
+
* Renorm shift is the number of bits to shift mantissa left to make the
|
| 92 |
+
* half-precision number normalized. If the initial number is normalized, some
|
| 93 |
+
* of its high 5 bits (sign == 0 and 4-bit exponent) equals one. In this case
|
| 94 |
+
* renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note
|
| 95 |
+
* that if we shift denormalized nonsign by renorm_shift, the unit bit of
|
| 96 |
+
* mantissa will shift into exponent, turning the biased exponent into 1, and
|
| 97 |
+
* making mantissa normalized (i.e. without leading 1).
|
| 98 |
+
*/
|
| 99 |
+
#if defined(__CUDA_ARCH__)
|
| 100 |
+
uint32_t renorm_shift = __clz(nonsign);
|
| 101 |
+
#elif defined(__SYCL_DEVICE_ONLY__)
|
| 102 |
+
// Note: zero is not a supported input into `__builtin_clz`
|
| 103 |
+
uint32_t renorm_shift =
|
| 104 |
+
nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
|
| 105 |
+
#elif defined(_MSC_VER)
|
| 106 |
+
unsigned long nonsign_bsr;
|
| 107 |
+
_BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
|
| 108 |
+
uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
|
| 109 |
+
#else
|
| 110 |
+
// Note: zero is not a supported input into `__builtin_clz`
|
| 111 |
+
uint32_t renorm_shift =
|
| 112 |
+
nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
|
| 113 |
+
#endif
|
| 114 |
+
renorm_shift = renorm_shift > 4 ? renorm_shift - 4 : 0;
|
| 115 |
+
/*
|
| 116 |
+
* Iff fp8e4m3fn number has all exponent and mantissa bits set to 1,
|
| 117 |
+
* the addition overflows it into bit 31, and the subsequent shift turns the
|
| 118 |
+
* high 9 bits into 1. Thus inf_nan_mask == 0x7F800000 if the fp8e4m3fn number
|
| 119 |
+
* is Nan, 0x00000000 otherwise
|
| 120 |
+
*/
|
| 121 |
+
const int32_t inf_nan_mask =
|
| 122 |
+
((int32_t)(nonsign + 0x01000000) >> 8) & INT32_C(0x7F800000);
|
| 123 |
+
/*
|
| 124 |
+
* Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
|
| 125 |
+
* into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
|
| 126 |
+
* broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
|
| 127 |
+
* 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h)
|
| 128 |
+
* 0x00000000 otherwise
|
| 129 |
+
*/
|
| 130 |
+
const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
|
| 131 |
+
/*
|
| 132 |
+
* 1. Shift nonsign left by renorm_shift to normalize it (if the input
|
| 133 |
+
* was denormal)
|
| 134 |
+
* 2. Shift nonsign right by 4 so the exponent (4 bits originally)
|
| 135 |
+
* becomes an 8-bit field and 3-bit mantissa shifts into the 3 high
|
| 136 |
+
* bits of the 23-bit mantissa of IEEE single-precision number.
|
| 137 |
+
* 3. Add 0x78 to the exponent (starting at bit 23) to compensate the
|
| 138 |
+
* different in exponent bias (0x7F for single-precision number less 0x07
|
| 139 |
+
* for fp8e4m3fn number).
|
| 140 |
+
* 4. Subtract renorm_shift from the exponent (starting at bit 23) to
|
| 141 |
+
* account for renormalization. As renorm_shift is less than 0x78, this
|
| 142 |
+
* can be combined with step 3.
|
| 143 |
+
* 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
|
| 144 |
+
* input was NaN or infinity.
|
| 145 |
+
* 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
|
| 146 |
+
* into zero if the input was zero.
|
| 147 |
+
* 7. Combine with the sign of the input number.
|
| 148 |
+
*/
|
| 149 |
+
uint32_t result = sign |
|
| 150 |
+
((((nonsign << renorm_shift >> 4) + ((0x78 - renorm_shift) << 23)) |
|
| 151 |
+
inf_nan_mask) &
|
| 152 |
+
~zero_mask);
|
| 153 |
+
return fp32_from_bits(result);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
/*
|
| 157 |
+
* Convert a 32-bit floating-point number in IEEE single-precision format to a
|
| 158 |
+
* 8-bit floating-point number in fp8 E4M3FN format, in bit representation.
|
| 159 |
+
*/
|
| 160 |
+
inline C10_HOST_DEVICE uint8_t fp8e4m3fn_from_fp32_value(float f) {
|
| 161 |
+
/*
|
| 162 |
+
* Binary representation of 480.0f, which is the first value
|
| 163 |
+
* not representable in fp8e4m3fn range:
|
| 164 |
+
* 0 1111 111 - fp8e4m3fn
|
| 165 |
+
* 0 10000111 11100000000000000000000 - fp32
|
| 166 |
+
*/
|
| 167 |
+
constexpr uint32_t fp8_max = UINT32_C(1087) << 20;
|
| 168 |
+
|
| 169 |
+
/*
|
| 170 |
+
* A mask for converting fp32 numbers lower than fp8e4m3fn normal range
|
| 171 |
+
* into denorm representation
|
| 172 |
+
* magic number: ((127 - 7) + (23 - 3) + 1)
|
| 173 |
+
*/
|
| 174 |
+
constexpr uint32_t denorm_mask = UINT32_C(141) << 23;
|
| 175 |
+
|
| 176 |
+
uint32_t f_bits = fp32_to_bits(f);
|
| 177 |
+
|
| 178 |
+
uint8_t result = 0u;
|
| 179 |
+
|
| 180 |
+
/*
|
| 181 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
| 182 |
+
*
|
| 183 |
+
* +---+----------------------------------+
|
| 184 |
+
* | S |0000000 00000000 00000000 00000000|
|
| 185 |
+
* +---+----------------------------------+
|
| 186 |
+
* Bits 31 0-31
|
| 187 |
+
*/
|
| 188 |
+
const uint32_t sign = f_bits & UINT32_C(0x80000000);
|
| 189 |
+
|
| 190 |
+
/*
|
| 191 |
+
* Set sign bit to 0
|
| 192 |
+
*/
|
| 193 |
+
f_bits ^= sign;
|
| 194 |
+
|
| 195 |
+
if (f_bits >= fp8_max) {
|
| 196 |
+
// NaN - all exponent and mantissa bits set to 1
|
| 197 |
+
result = 0x7f;
|
| 198 |
+
} else {
|
| 199 |
+
if (f_bits < (UINT32_C(121) << 23)) {
|
| 200 |
+
// Input number is smaller than 2^(-6), which is the smallest
|
| 201 |
+
// fp8e4m3fn normal number
|
| 202 |
+
f_bits =
|
| 203 |
+
fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
|
| 204 |
+
result = static_cast<uint8_t>(f_bits - denorm_mask);
|
| 205 |
+
} else {
|
| 206 |
+
// resulting mantissa is odd
|
| 207 |
+
uint8_t mant_odd = (f_bits >> 20) & 1;
|
| 208 |
+
|
| 209 |
+
// update exponent, rounding bias part 1
|
| 210 |
+
f_bits += ((uint32_t)(7 - 127) << 23) + 0x7FFFF;
|
| 211 |
+
|
| 212 |
+
// rounding bias part 2
|
| 213 |
+
f_bits += mant_odd;
|
| 214 |
+
|
| 215 |
+
// take the bits!
|
| 216 |
+
result = static_cast<uint8_t>(f_bits >> 20);
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
result |= static_cast<uint8_t>(sign >> 24);
|
| 221 |
+
return result;
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
} // namespace detail
|
| 225 |
+
|
| 226 |
+
struct alignas(1) Float8_e4m3fn {
|
| 227 |
+
uint8_t x;
|
| 228 |
+
|
| 229 |
+
struct from_bits_t {};
|
| 230 |
+
C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
|
| 231 |
+
return from_bits_t();
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
Float8_e4m3fn() = default;
|
| 235 |
+
|
| 236 |
+
constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t)
|
| 237 |
+
: x(bits){};
|
| 238 |
+
inline C10_HOST_DEVICE Float8_e4m3fn(float value);
|
| 239 |
+
inline C10_HOST_DEVICE operator float() const;
|
| 240 |
+
inline C10_HOST_DEVICE bool isnan() const;
|
| 241 |
+
};
|
| 242 |
+
|
| 243 |
+
C10_API std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value);
|
| 244 |
+
|
| 245 |
+
} // namespace c10
|
| 246 |
+
|
| 247 |
+
#include <c10/util/Float8_e4m3fn-inl.h> // IWYU pragma: keep
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <cstring>
|
| 5 |
+
#include <limits>
|
| 6 |
+
|
| 7 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 8 |
+
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
|
| 9 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
|
| 10 |
+
#endif
|
| 11 |
+
|
| 12 |
+
#define EXP_WIDTH_FP8 5
|
| 13 |
+
#define MAN_WIDTH_FP8 2
|
| 14 |
+
#define EXP_BIAS_FP8 15
|
| 15 |
+
|
| 16 |
+
namespace c10 {
|
| 17 |
+
|
| 18 |
+
/// Constructors
|
| 19 |
+
|
| 20 |
+
inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value)
|
| 21 |
+
: x(detail::fp8e5m2_from_fp32_value(value)) {}
|
| 22 |
+
|
| 23 |
+
/// Implicit conversions
|
| 24 |
+
|
| 25 |
+
inline C10_HOST_DEVICE Float8_e5m2::operator float() const {
|
| 26 |
+
return detail::fp8e5m2_to_fp32_value(x);
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
/// Special values helpers
|
| 30 |
+
|
| 31 |
+
inline C10_HOST_DEVICE bool Float8_e5m2::isnan() const {
|
| 32 |
+
return (x & 0b01111111) > 0b01111100;
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
inline C10_HOST_DEVICE bool Float8_e5m2::isinf() const {
|
| 36 |
+
return (x & 0b01111111) == 0b01111100;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
/// Arithmetic
|
| 40 |
+
|
| 41 |
+
inline C10_HOST_DEVICE Float8_e5m2
|
| 42 |
+
operator+(const Float8_e5m2& a, const Float8_e5m2& b) {
|
| 43 |
+
return static_cast<float>(a) + static_cast<float>(b);
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
inline C10_HOST_DEVICE Float8_e5m2
|
| 47 |
+
operator-(const Float8_e5m2& a, const Float8_e5m2& b) {
|
| 48 |
+
return static_cast<float>(a) - static_cast<float>(b);
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
inline C10_HOST_DEVICE Float8_e5m2
|
| 52 |
+
operator*(const Float8_e5m2& a, const Float8_e5m2& b) {
|
| 53 |
+
return static_cast<float>(a) * static_cast<float>(b);
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator/(
|
| 57 |
+
const Float8_e5m2& a,
|
| 58 |
+
const Float8_e5m2& b) __ubsan_ignore_float_divide_by_zero__ {
|
| 59 |
+
return static_cast<float>(a) / static_cast<float>(b);
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator-(const Float8_e5m2& a) {
|
| 63 |
+
return -static_cast<float>(a);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
inline C10_HOST_DEVICE Float8_e5m2& operator+=(
|
| 67 |
+
Float8_e5m2& a,
|
| 68 |
+
const Float8_e5m2& b) {
|
| 69 |
+
a = a + b;
|
| 70 |
+
return a;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
inline C10_HOST_DEVICE Float8_e5m2& operator-=(
|
| 74 |
+
Float8_e5m2& a,
|
| 75 |
+
const Float8_e5m2& b) {
|
| 76 |
+
a = a - b;
|
| 77 |
+
return a;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
inline C10_HOST_DEVICE Float8_e5m2& operator*=(
|
| 81 |
+
Float8_e5m2& a,
|
| 82 |
+
const Float8_e5m2& b) {
|
| 83 |
+
a = a * b;
|
| 84 |
+
return a;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
inline C10_HOST_DEVICE Float8_e5m2& operator/=(
|
| 88 |
+
Float8_e5m2& a,
|
| 89 |
+
const Float8_e5m2& b) {
|
| 90 |
+
a = a / b;
|
| 91 |
+
return a;
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
/// Arithmetic with floats
|
| 95 |
+
|
| 96 |
+
inline C10_HOST_DEVICE float operator+(Float8_e5m2 a, float b) {
|
| 97 |
+
return static_cast<float>(a) + b;
|
| 98 |
+
}
|
| 99 |
+
inline C10_HOST_DEVICE float operator-(Float8_e5m2 a, float b) {
|
| 100 |
+
return static_cast<float>(a) - b;
|
| 101 |
+
}
|
| 102 |
+
inline C10_HOST_DEVICE float operator*(Float8_e5m2 a, float b) {
|
| 103 |
+
return static_cast<float>(a) * b;
|
| 104 |
+
}
|
| 105 |
+
inline C10_HOST_DEVICE float operator/(Float8_e5m2 a, float b)
|
| 106 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 107 |
+
return static_cast<float>(a) / b;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
inline C10_HOST_DEVICE float operator+(float a, Float8_e5m2 b) {
|
| 111 |
+
return a + static_cast<float>(b);
|
| 112 |
+
}
|
| 113 |
+
inline C10_HOST_DEVICE float operator-(float a, Float8_e5m2 b) {
|
| 114 |
+
return a - static_cast<float>(b);
|
| 115 |
+
}
|
| 116 |
+
inline C10_HOST_DEVICE float operator*(float a, Float8_e5m2 b) {
|
| 117 |
+
return a * static_cast<float>(b);
|
| 118 |
+
}
|
| 119 |
+
inline C10_HOST_DEVICE float operator/(float a, Float8_e5m2 b)
|
| 120 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 121 |
+
return a / static_cast<float>(b);
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e5m2& b) {
|
| 125 |
+
return a += static_cast<float>(b);
|
| 126 |
+
}
|
| 127 |
+
inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e5m2& b) {
|
| 128 |
+
return a -= static_cast<float>(b);
|
| 129 |
+
}
|
| 130 |
+
inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e5m2& b) {
|
| 131 |
+
return a *= static_cast<float>(b);
|
| 132 |
+
}
|
| 133 |
+
inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e5m2& b) {
|
| 134 |
+
return a /= static_cast<float>(b);
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
/// Arithmetic with doubles
|
| 138 |
+
|
| 139 |
+
inline C10_HOST_DEVICE double operator+(Float8_e5m2 a, double b) {
|
| 140 |
+
return static_cast<double>(a) + b;
|
| 141 |
+
}
|
| 142 |
+
inline C10_HOST_DEVICE double operator-(Float8_e5m2 a, double b) {
|
| 143 |
+
return static_cast<double>(a) - b;
|
| 144 |
+
}
|
| 145 |
+
inline C10_HOST_DEVICE double operator*(Float8_e5m2 a, double b) {
|
| 146 |
+
return static_cast<double>(a) * b;
|
| 147 |
+
}
|
| 148 |
+
inline C10_HOST_DEVICE double operator/(Float8_e5m2 a, double b)
|
| 149 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 150 |
+
return static_cast<double>(a) / b;
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
inline C10_HOST_DEVICE double operator+(double a, Float8_e5m2 b) {
|
| 154 |
+
return a + static_cast<double>(b);
|
| 155 |
+
}
|
| 156 |
+
inline C10_HOST_DEVICE double operator-(double a, Float8_e5m2 b) {
|
| 157 |
+
return a - static_cast<double>(b);
|
| 158 |
+
}
|
| 159 |
+
inline C10_HOST_DEVICE double operator*(double a, Float8_e5m2 b) {
|
| 160 |
+
return a * static_cast<double>(b);
|
| 161 |
+
}
|
| 162 |
+
inline C10_HOST_DEVICE double operator/(double a, Float8_e5m2 b)
|
| 163 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 164 |
+
return a / static_cast<double>(b);
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
/// Arithmetic with ints
|
| 168 |
+
|
| 169 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int b) {
|
| 170 |
+
return a + static_cast<Float8_e5m2>(b);
|
| 171 |
+
}
|
| 172 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int b) {
|
| 173 |
+
return a - static_cast<Float8_e5m2>(b);
|
| 174 |
+
}
|
| 175 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int b) {
|
| 176 |
+
return a * static_cast<Float8_e5m2>(b);
|
| 177 |
+
}
|
| 178 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int b) {
|
| 179 |
+
return a / static_cast<Float8_e5m2>(b);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator+(int a, Float8_e5m2 b) {
|
| 183 |
+
return static_cast<Float8_e5m2>(a) + b;
|
| 184 |
+
}
|
| 185 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator-(int a, Float8_e5m2 b) {
|
| 186 |
+
return static_cast<Float8_e5m2>(a) - b;
|
| 187 |
+
}
|
| 188 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator*(int a, Float8_e5m2 b) {
|
| 189 |
+
return static_cast<Float8_e5m2>(a) * b;
|
| 190 |
+
}
|
| 191 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator/(int a, Float8_e5m2 b) {
|
| 192 |
+
return static_cast<Float8_e5m2>(a) / b;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
//// Arithmetic with int64_t
|
| 196 |
+
|
| 197 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int64_t b) {
|
| 198 |
+
return a + static_cast<Float8_e5m2>(b);
|
| 199 |
+
}
|
| 200 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int64_t b) {
|
| 201 |
+
return a - static_cast<Float8_e5m2>(b);
|
| 202 |
+
}
|
| 203 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int64_t b) {
|
| 204 |
+
return a * static_cast<Float8_e5m2>(b);
|
| 205 |
+
}
|
| 206 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int64_t b) {
|
| 207 |
+
return a / static_cast<Float8_e5m2>(b);
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator+(int64_t a, Float8_e5m2 b) {
|
| 211 |
+
return static_cast<Float8_e5m2>(a) + b;
|
| 212 |
+
}
|
| 213 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator-(int64_t a, Float8_e5m2 b) {
|
| 214 |
+
return static_cast<Float8_e5m2>(a) - b;
|
| 215 |
+
}
|
| 216 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator*(int64_t a, Float8_e5m2 b) {
|
| 217 |
+
return static_cast<Float8_e5m2>(a) * b;
|
| 218 |
+
}
|
| 219 |
+
inline C10_HOST_DEVICE Float8_e5m2 operator/(int64_t a, Float8_e5m2 b) {
|
| 220 |
+
return static_cast<Float8_e5m2>(a) / b;
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
/// NOTE: we do not define comparisons directly and instead rely on the implicit
|
| 224 |
+
/// conversion from c10::Float8_e5m2 to float.
|
| 225 |
+
|
| 226 |
+
} // namespace c10
|
| 227 |
+
|
| 228 |
+
namespace std {
|
| 229 |
+
|
| 230 |
+
template <>
|
| 231 |
+
class numeric_limits<c10::Float8_e5m2> {
|
| 232 |
+
public:
|
| 233 |
+
static constexpr bool is_signed = true;
|
| 234 |
+
static constexpr bool is_integer = false;
|
| 235 |
+
static constexpr bool is_specialized = true;
|
| 236 |
+
static constexpr bool is_exact = false;
|
| 237 |
+
static constexpr bool has_infinity = true;
|
| 238 |
+
static constexpr bool has_quiet_NaN = false;
|
| 239 |
+
static constexpr bool has_signaling_NaN = false;
|
| 240 |
+
static constexpr auto has_denorm = true;
|
| 241 |
+
static constexpr auto has_denorm_loss = true;
|
| 242 |
+
static constexpr auto round_style = numeric_limits<float>::round_style;
|
| 243 |
+
static constexpr bool is_iec559 = false;
|
| 244 |
+
static constexpr bool is_bounded = true;
|
| 245 |
+
static constexpr bool is_modulo = false;
|
| 246 |
+
static constexpr int digits = 3;
|
| 247 |
+
static constexpr int digits10 = 0;
|
| 248 |
+
static constexpr int max_digits10 = 2;
|
| 249 |
+
static constexpr int radix = 2;
|
| 250 |
+
static constexpr int min_exponent = -13;
|
| 251 |
+
static constexpr int min_exponent10 = -4;
|
| 252 |
+
static constexpr int max_exponent = 16;
|
| 253 |
+
static constexpr int max_exponent10 = 4;
|
| 254 |
+
static constexpr auto traps = numeric_limits<float>::traps;
|
| 255 |
+
static constexpr auto tinyness_before =
|
| 256 |
+
numeric_limits<float>::tinyness_before;
|
| 257 |
+
|
| 258 |
+
static constexpr c10::Float8_e5m2 min() {
|
| 259 |
+
return c10::Float8_e5m2(0x4, c10::Float8_e5m2::from_bits());
|
| 260 |
+
}
|
| 261 |
+
static constexpr c10::Float8_e5m2 max() {
|
| 262 |
+
return c10::Float8_e5m2(0x7B, c10::Float8_e5m2::from_bits());
|
| 263 |
+
}
|
| 264 |
+
static constexpr c10::Float8_e5m2 lowest() {
|
| 265 |
+
return c10::Float8_e5m2(0xFB, c10::Float8_e5m2::from_bits());
|
| 266 |
+
}
|
| 267 |
+
static constexpr c10::Float8_e5m2 epsilon() {
|
| 268 |
+
return c10::Float8_e5m2(0x34, c10::Float8_e5m2::from_bits());
|
| 269 |
+
}
|
| 270 |
+
static constexpr c10::Float8_e5m2 round_error() {
|
| 271 |
+
return c10::Float8_e5m2(0x38, c10::Float8_e5m2::from_bits());
|
| 272 |
+
}
|
| 273 |
+
static constexpr c10::Float8_e5m2 infinity() {
|
| 274 |
+
return c10::Float8_e5m2(0x7C, c10::Float8_e5m2::from_bits());
|
| 275 |
+
}
|
| 276 |
+
static constexpr c10::Float8_e5m2 denorm_min() {
|
| 277 |
+
return c10::Float8_e5m2(0x01, c10::Float8_e5m2::from_bits());
|
| 278 |
+
}
|
| 279 |
+
};
|
| 280 |
+
|
| 281 |
+
} // namespace std
|
| 282 |
+
|
| 283 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz-inl.h
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <limits>
|
| 5 |
+
|
| 6 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 7 |
+
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
|
| 8 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
|
| 9 |
+
#endif
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
|
| 13 |
+
/// Constructors
|
| 14 |
+
|
| 15 |
+
C10_HOST_DEVICE inline Float8_e5m2fnuz::Float8_e5m2fnuz(float value)
|
| 16 |
+
: x(detail::fp8e5m2fnuz_from_fp32_value(value)) {}
|
| 17 |
+
|
| 18 |
+
/// Implicit conversions
|
| 19 |
+
|
| 20 |
+
C10_HOST_DEVICE inline Float8_e5m2fnuz::operator float() const {
|
| 21 |
+
return detail::fp8e5m2fnuz_to_fp32_value(x);
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
/// Special values helpers
|
| 25 |
+
|
| 26 |
+
C10_HOST_DEVICE inline bool Float8_e5m2fnuz::isnan() const {
|
| 27 |
+
return x == 0b10000000;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
} // namespace c10
|
| 31 |
+
|
| 32 |
+
namespace std {
|
| 33 |
+
|
| 34 |
+
template <>
|
| 35 |
+
class numeric_limits<c10::Float8_e5m2fnuz> {
|
| 36 |
+
public:
|
| 37 |
+
static constexpr bool is_signed = true;
|
| 38 |
+
static constexpr bool is_integer = false;
|
| 39 |
+
static constexpr bool is_specialized = true;
|
| 40 |
+
static constexpr bool is_exact = false;
|
| 41 |
+
static constexpr bool has_infinity = false;
|
| 42 |
+
static constexpr bool has_quiet_NaN = true;
|
| 43 |
+
static constexpr bool has_signaling_NaN = false;
|
| 44 |
+
static constexpr auto has_denorm = true;
|
| 45 |
+
static constexpr auto has_denorm_loss = true;
|
| 46 |
+
static constexpr auto round_style = numeric_limits<float>::round_style;
|
| 47 |
+
static constexpr bool is_iec559 = false;
|
| 48 |
+
static constexpr bool is_bounded = true;
|
| 49 |
+
static constexpr bool is_modulo = false;
|
| 50 |
+
static constexpr int digits = 3;
|
| 51 |
+
static constexpr int digits10 = 0;
|
| 52 |
+
static constexpr int max_digits10 = 2;
|
| 53 |
+
static constexpr int radix = 2;
|
| 54 |
+
static constexpr int min_exponent = -14;
|
| 55 |
+
static constexpr int min_exponent10 = -4;
|
| 56 |
+
static constexpr int max_exponent = 16;
|
| 57 |
+
static constexpr int max_exponent10 = 4;
|
| 58 |
+
static constexpr auto traps = numeric_limits<float>::traps;
|
| 59 |
+
static constexpr auto tinyness_before =
|
| 60 |
+
numeric_limits<float>::tinyness_before;
|
| 61 |
+
|
| 62 |
+
static constexpr c10::Float8_e5m2fnuz min() {
|
| 63 |
+
return c10::Float8_e5m2fnuz(0x04, c10::Float8_e5m2fnuz::from_bits());
|
| 64 |
+
}
|
| 65 |
+
static constexpr c10::Float8_e5m2fnuz max() {
|
| 66 |
+
return c10::Float8_e5m2fnuz(0x7F, c10::Float8_e5m2fnuz::from_bits());
|
| 67 |
+
}
|
| 68 |
+
static constexpr c10::Float8_e5m2fnuz lowest() {
|
| 69 |
+
return c10::Float8_e5m2fnuz(0xFF, c10::Float8_e5m2fnuz::from_bits());
|
| 70 |
+
}
|
| 71 |
+
static constexpr c10::Float8_e5m2fnuz epsilon() {
|
| 72 |
+
return c10::Float8_e5m2fnuz(0x34, c10::Float8_e5m2fnuz::from_bits());
|
| 73 |
+
}
|
| 74 |
+
static constexpr c10::Float8_e5m2fnuz round_error() {
|
| 75 |
+
return c10::Float8_e5m2fnuz(0x38, c10::Float8_e5m2fnuz::from_bits());
|
| 76 |
+
}
|
| 77 |
+
static constexpr c10::Float8_e5m2fnuz infinity() {
|
| 78 |
+
return c10::Float8_e5m2fnuz(0x80, c10::Float8_e5m2fnuz::from_bits());
|
| 79 |
+
}
|
| 80 |
+
static constexpr c10::Float8_e5m2fnuz denorm_min() {
|
| 81 |
+
return c10::Float8_e5m2fnuz(0x01, c10::Float8_e5m2fnuz::from_bits());
|
| 82 |
+
}
|
| 83 |
+
};
|
| 84 |
+
|
| 85 |
+
} // namespace std
|
| 86 |
+
|
| 87 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Load.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/macros/Macros.h>
|
| 3 |
+
#include <cstring>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
namespace detail {
|
| 7 |
+
|
| 8 |
+
template <typename T>
|
| 9 |
+
struct LoadImpl {
|
| 10 |
+
C10_HOST_DEVICE static T apply(const void* src) {
|
| 11 |
+
return *reinterpret_cast<const T*>(src);
|
| 12 |
+
}
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
template <>
|
| 16 |
+
struct LoadImpl<bool> {
|
| 17 |
+
C10_HOST_DEVICE static bool apply(const void* src) {
|
| 18 |
+
static_assert(sizeof(bool) == sizeof(char));
|
| 19 |
+
// NOTE: [Loading boolean values]
|
| 20 |
+
// Protect against invalid boolean values by loading as a byte
|
| 21 |
+
// first, then converting to bool (see gh-54789).
|
| 22 |
+
return *reinterpret_cast<const unsigned char*>(src);
|
| 23 |
+
}
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace detail
|
| 27 |
+
|
| 28 |
+
template <typename T>
|
| 29 |
+
C10_HOST_DEVICE T load(const void* src) {
|
| 30 |
+
return c10::detail::LoadImpl<T>::apply(src);
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
template <typename scalar_t>
|
| 34 |
+
C10_HOST_DEVICE scalar_t load(const scalar_t* src) {
|
| 35 |
+
return c10::detail::LoadImpl<scalar_t>::apply(src);
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/util/Exception.h>
|
| 5 |
+
#include <c10/util/in_place.h>
|
| 6 |
+
|
| 7 |
+
#include <memory>
|
| 8 |
+
#include <type_traits>
|
| 9 |
+
|
| 10 |
+
namespace c10 {
|
| 11 |
+
|
| 12 |
+
/// MaybeOwnedTraits<T> describes how to borrow from T. Here is how we
|
| 13 |
+
/// can implement borrowing from an arbitrary type T using a raw
|
| 14 |
+
/// pointer to const:
|
| 15 |
+
template <typename T>
|
| 16 |
+
struct MaybeOwnedTraitsGenericImpl {
|
| 17 |
+
using owned_type = T;
|
| 18 |
+
using borrow_type = const T*;
|
| 19 |
+
|
| 20 |
+
static borrow_type createBorrow(const owned_type& from) {
|
| 21 |
+
return &from;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
static void assignBorrow(borrow_type& lhs, borrow_type rhs) {
|
| 25 |
+
lhs = rhs;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
static void destroyBorrow(borrow_type& /*toDestroy*/) {}
|
| 29 |
+
|
| 30 |
+
static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
|
| 31 |
+
return *borrow;
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
|
| 35 |
+
return borrow;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
static bool debugBorrowIsValid(const borrow_type& borrow) {
|
| 39 |
+
return borrow != nullptr;
|
| 40 |
+
}
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
/// It is possible to eliminate the extra layer of indirection for
|
| 44 |
+
/// borrows for some types that we control. For examples, see
|
| 45 |
+
/// intrusive_ptr.h and TensorBody.h.
|
| 46 |
+
|
| 47 |
+
template <typename T>
|
| 48 |
+
struct MaybeOwnedTraits;
|
| 49 |
+
|
| 50 |
+
// Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing
|
| 51 |
+
// MaybeOwned to be used for any type right away.
|
| 52 |
+
template <typename T>
|
| 53 |
+
struct MaybeOwnedTraits<std::shared_ptr<T>>
|
| 54 |
+
: public MaybeOwnedTraitsGenericImpl<std::shared_ptr<T>> {};
|
| 55 |
+
|
| 56 |
+
/// A smart pointer around either a borrowed or owned T. When
|
| 57 |
+
/// constructed with borrowed(), the caller MUST ensure that the
|
| 58 |
+
/// borrowed-from argument outlives this MaybeOwned<T>. Compare to
|
| 59 |
+
/// Rust's std::borrow::Cow
|
| 60 |
+
/// (https://doc.rust-lang.org/std/borrow/enum.Cow.html), but note
|
| 61 |
+
/// that it is probably not suitable for general use because C++ has
|
| 62 |
+
/// no borrow checking. Included here to support
|
| 63 |
+
/// Tensor::expect_contiguous.
|
| 64 |
+
template <typename T>
|
| 65 |
+
class MaybeOwned final {
|
| 66 |
+
using borrow_type = typename MaybeOwnedTraits<T>::borrow_type;
|
| 67 |
+
using owned_type = typename MaybeOwnedTraits<T>::owned_type;
|
| 68 |
+
|
| 69 |
+
bool isBorrowed_;
|
| 70 |
+
union {
|
| 71 |
+
borrow_type borrow_;
|
| 72 |
+
owned_type own_;
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
/// Don't use this; use borrowed() instead.
|
| 76 |
+
explicit MaybeOwned(const owned_type& t)
|
| 77 |
+
: isBorrowed_(true), borrow_(MaybeOwnedTraits<T>::createBorrow(t)) {}
|
| 78 |
+
|
| 79 |
+
/// Don't use this; use owned() instead.
|
| 80 |
+
explicit MaybeOwned(T&& t) noexcept(
|
| 81 |
+
std::is_nothrow_move_constructible<T>::value)
|
| 82 |
+
: isBorrowed_(false), own_(std::move(t)) {}
|
| 83 |
+
|
| 84 |
+
/// Don't use this; use owned() instead.
|
| 85 |
+
template <class... Args>
|
| 86 |
+
explicit MaybeOwned(in_place_t, Args&&... args)
|
| 87 |
+
: isBorrowed_(false), own_(std::forward<Args>(args)...) {}
|
| 88 |
+
|
| 89 |
+
public:
|
| 90 |
+
explicit MaybeOwned() : isBorrowed_(true), borrow_() {}
|
| 91 |
+
|
| 92 |
+
// Copying a borrow yields another borrow of the original, as with a
|
| 93 |
+
// T*. Copying an owned T yields another owned T for safety: no
|
| 94 |
+
// chains of borrowing by default! (Note you could get that behavior
|
| 95 |
+
// with MaybeOwned<T>::borrowed(*rhs) if you wanted it.)
|
| 96 |
+
MaybeOwned(const MaybeOwned& rhs) : isBorrowed_(rhs.isBorrowed_) {
|
| 97 |
+
if (C10_LIKELY(rhs.isBorrowed_)) {
|
| 98 |
+
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
|
| 99 |
+
} else {
|
| 100 |
+
new (&own_) T(rhs.own_);
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
MaybeOwned& operator=(const MaybeOwned& rhs) {
|
| 105 |
+
if (this == &rhs) {
|
| 106 |
+
return *this;
|
| 107 |
+
}
|
| 108 |
+
if (C10_UNLIKELY(!isBorrowed_)) {
|
| 109 |
+
if (rhs.isBorrowed_) {
|
| 110 |
+
own_.~T();
|
| 111 |
+
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
|
| 112 |
+
isBorrowed_ = true;
|
| 113 |
+
} else {
|
| 114 |
+
own_ = rhs.own_;
|
| 115 |
+
}
|
| 116 |
+
} else {
|
| 117 |
+
if (C10_LIKELY(rhs.isBorrowed_)) {
|
| 118 |
+
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
|
| 119 |
+
} else {
|
| 120 |
+
MaybeOwnedTraits<T>::destroyBorrow(borrow_);
|
| 121 |
+
new (&own_) T(rhs.own_);
|
| 122 |
+
isBorrowed_ = false;
|
| 123 |
+
}
|
| 124 |
+
}
|
| 125 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_);
|
| 126 |
+
return *this;
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
MaybeOwned(MaybeOwned&& rhs) noexcept(
|
| 130 |
+
std::is_nothrow_move_constructible_v<T>&&
|
| 131 |
+
std::is_nothrow_move_assignable_v<borrow_type>)
|
| 132 |
+
: isBorrowed_(rhs.isBorrowed_) {
|
| 133 |
+
if (C10_LIKELY(rhs.isBorrowed_)) {
|
| 134 |
+
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
|
| 135 |
+
} else {
|
| 136 |
+
new (&own_) T(std::move(rhs.own_));
|
| 137 |
+
}
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
MaybeOwned& operator=(MaybeOwned&& rhs) noexcept(
|
| 141 |
+
std::is_nothrow_move_assignable_v<T>&& std::is_nothrow_move_assignable_v<
|
| 142 |
+
borrow_type>&& std::is_nothrow_move_constructible_v<T>&&
|
| 143 |
+
std::is_nothrow_destructible_v<T>&&
|
| 144 |
+
std::is_nothrow_destructible_v<borrow_type>) {
|
| 145 |
+
if (this == &rhs) {
|
| 146 |
+
return *this;
|
| 147 |
+
}
|
| 148 |
+
if (C10_UNLIKELY(!isBorrowed_)) {
|
| 149 |
+
if (rhs.isBorrowed_) {
|
| 150 |
+
own_.~T();
|
| 151 |
+
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
|
| 152 |
+
isBorrowed_ = true;
|
| 153 |
+
} else {
|
| 154 |
+
own_ = std::move(rhs.own_);
|
| 155 |
+
}
|
| 156 |
+
} else {
|
| 157 |
+
if (C10_LIKELY(rhs.isBorrowed_)) {
|
| 158 |
+
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
|
| 159 |
+
} else {
|
| 160 |
+
MaybeOwnedTraits<T>::destroyBorrow(borrow_);
|
| 161 |
+
new (&own_) T(std::move(rhs.own_));
|
| 162 |
+
isBorrowed_ = false;
|
| 163 |
+
}
|
| 164 |
+
}
|
| 165 |
+
return *this;
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
/// Create a MaybeOwned that merely references `t`; the caller must keep
/// `t` alive for the lifetime of the returned object.
static MaybeOwned borrowed(const T& t) {
  return MaybeOwned(t);
}
|
| 171 |
+
|
| 172 |
+
/// Create an owning MaybeOwned by moving `t` into it.
static MaybeOwned owned(T&& t) noexcept(
    std::is_nothrow_move_constructible<T>::value) {
  return MaybeOwned(std::move(t));
}
|
| 176 |
+
|
| 177 |
+
template <class... Args>
|
| 178 |
+
static MaybeOwned owned(in_place_t, Args&&... args) {
|
| 179 |
+
return MaybeOwned(in_place, std::forward<Args>(args)...);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
/// Destroy whichever union member is currently alive: the owned T, or the
/// borrow handle (via the traits hook).
~MaybeOwned() noexcept(std::is_nothrow_destructible_v<T>&&
                           std::is_nothrow_destructible_v<borrow_type>) {
  if (C10_UNLIKELY(!isBorrowed_)) {
    own_.~T();
  } else {
    MaybeOwnedTraits<T>::destroyBorrow(borrow_);
  }
}
|
| 190 |
+
|
| 191 |
+
// This is an implementation detail! You should know what you're doing
|
| 192 |
+
// if you are testing this. If you just want to guarantee ownership move
|
| 193 |
+
// this into a T
|
| 194 |
+
bool unsafeIsBorrowed() const {
|
| 195 |
+
return isBorrowed_;
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
/// Const access to the underlying T. In debug builds a live borrow is
/// validated via the traits hook before being dereferenced.
const T& operator*() const& {
  if (isBorrowed_) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
  }
  return C10_LIKELY(isBorrowed_)
      ? MaybeOwnedTraits<T>::referenceFromBorrow(borrow_)
      : own_;
}
|
| 207 |
+
|
| 208 |
+
/// Const pointer access to the underlying T (debug builds validate the
/// borrow first).
const T* operator->() const {
  if (isBorrowed_) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
  }
  return C10_LIKELY(isBorrowed_)
      ? MaybeOwnedTraits<T>::pointerFromBorrow(borrow_)
      : &own_;
}
|
| 217 |
+
|
| 218 |
+
// If borrowed, copy the underlying T. If owned, move from
|
| 219 |
+
// it. borrowed/owned state remains the same, and either we
|
| 220 |
+
// reference the same borrow as before or we are an owned moved-from
|
| 221 |
+
// T.
|
| 222 |
+
T operator*() && {
|
| 223 |
+
if (isBorrowed_) {
|
| 224 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 225 |
+
MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
|
| 226 |
+
return MaybeOwnedTraits<T>::referenceFromBorrow(borrow_);
|
| 227 |
+
} else {
|
| 228 |
+
return std::move(own_);
|
| 229 |
+
}
|
| 230 |
+
}
|
| 231 |
+
};
|
| 232 |
+
|
| 233 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/Metaprogramming.h
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/TypeList.h>
|
| 4 |
+
#include <functional>
|
| 5 |
+
#include <type_traits>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
namespace guts {
|
| 9 |
+
|
| 10 |
+
/**
|
| 11 |
+
* Access information about result type or arguments from a function type.
|
| 12 |
+
* Example:
|
| 13 |
+
* using A = function_traits<int (float, double)>::return_type // A == int
|
| 14 |
+
* using A = function_traits<int (float, double)>::parameter_types::tuple_type
|
| 15 |
+
* // A == tuple<float, double>
|
| 16 |
+
*/
|
| 17 |
+
template <class Func>
|
| 18 |
+
struct function_traits {
|
| 19 |
+
static_assert(
|
| 20 |
+
!std::is_same<Func, Func>::value,
|
| 21 |
+
"In function_traits<Func>, Func must be a plain function type.");
|
| 22 |
+
};
|
| 23 |
+
template <class Result, class... Args>
|
| 24 |
+
struct function_traits<Result(Args...)> {
|
| 25 |
+
using func_type = Result(Args...);
|
| 26 |
+
using return_type = Result;
|
| 27 |
+
using parameter_types = typelist::typelist<Args...>;
|
| 28 |
+
static constexpr auto number_of_parameters = sizeof...(Args);
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
/**
|
| 32 |
+
* infer_function_traits: creates a `function_traits` type for a simple
|
| 33 |
+
* function (pointer) or functor (lambda/struct). Currently does not support
|
| 34 |
+
* class methods.
|
| 35 |
+
*/
|
| 36 |
+
|
| 37 |
+
template <typename Functor>
|
| 38 |
+
struct infer_function_traits {
|
| 39 |
+
using type = function_traits<
|
| 40 |
+
c10::guts::detail::strip_class_t<decltype(&Functor::operator())>>;
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
template <typename Result, typename... Args>
|
| 44 |
+
struct infer_function_traits<Result (*)(Args...)> {
|
| 45 |
+
using type = function_traits<Result(Args...)>;
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
template <typename Result, typename... Args>
|
| 49 |
+
struct infer_function_traits<Result(Args...)> {
|
| 50 |
+
using type = function_traits<Result(Args...)>;
|
| 51 |
+
};
|
| 52 |
+
|
| 53 |
+
template <typename T>
|
| 54 |
+
using infer_function_traits_t = typename infer_function_traits<T>::type;
|
| 55 |
+
|
| 56 |
+
/**
|
| 57 |
+
* make_function_traits: creates a `function_traits` type given a Return type
|
| 58 |
+
* and a typelist of Argument types
|
| 59 |
+
*
|
| 60 |
+
* Example:
|
| 61 |
+
* bool f(int, int);
|
| 62 |
+
*
|
| 63 |
+
* infer_function_traits_t<f> == make_function_traits_t<bool,
|
| 64 |
+
* typelist::typelist<int, int>>
|
| 65 |
+
*/
|
| 66 |
+
template <typename Result, typename ArgList>
|
| 67 |
+
struct make_function_traits {
|
| 68 |
+
static_assert(
|
| 69 |
+
false_t<ArgList>::value,
|
| 70 |
+
"In guts::make_function_traits<Result, TypeList>, the ArgList argument must be typelist<...>.");
|
| 71 |
+
};
|
| 72 |
+
|
| 73 |
+
template <typename Result, typename... Args>
|
| 74 |
+
struct make_function_traits<Result, typelist::typelist<Args...>> {
|
| 75 |
+
using type = function_traits<Result(Args...)>;
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
template <typename Result, typename ArgList>
|
| 79 |
+
using make_function_traits_t =
|
| 80 |
+
typename make_function_traits<Result, ArgList>::type;
|
| 81 |
+
|
| 82 |
+
/**
 * make_offset_index_sequence<Start, N>
 * Like make_index_sequence<N>, but starting from Start instead of 0.
 *
 * Example:
 *  make_offset_index_sequence<10, 3> == std::index_sequence<10, 11, 12>
 */
template <size_t Start, size_t N, size_t... Rest>
struct make_offset_index_sequence_impl
    // Recursion prepends the largest remaining index (Start + N - 1)
    // until N hits the base case below.
    : make_offset_index_sequence_impl<Start, N - 1, Start + N - 1, Rest...> {
  static_assert(
      static_cast<int>(Start) >= 0,
      "make_offset_index_sequence: Start < 0");
  static_assert(static_cast<int>(N) >= 0, "make_offset_index_sequence: N < 0");
};

template <size_t Start, size_t... Rest>
struct make_offset_index_sequence_impl<Start, 0, Rest...> {
  typedef std::index_sequence<Rest...> type;
};

template <size_t Start, size_t N>
using make_offset_index_sequence =
    typename make_offset_index_sequence_impl<Start, N>::type;
|
| 106 |
+
|
| 107 |
+
/**
 * Use tuple_elements to extract a position-indexed subset of elements
 * from the argument tuple into a result tuple.
 *
 * Example:
 *  std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
 *  std::tuple<int, double> result =
 *      tuple_elements(t, std::index_sequence<0, 2>());
 */
template <class Tuple, size_t... Indices>
constexpr auto tuple_elements(Tuple t, std::index_sequence<Indices...>) {
  return std::tuple<std::tuple_element_t<Indices, Tuple>...>(
      std::get<Indices>(t)...);
}
|
| 120 |
+
|
| 121 |
+
/**
|
| 122 |
+
* Use tuple_take to extract the first or last n elements from the argument
|
| 123 |
+
* tuple into a result tuple.
|
| 124 |
+
*
|
| 125 |
+
* Example:
|
| 126 |
+
* std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
|
| 127 |
+
* std::tuple<int, const char*> first_two = tuple_take<decltype(t), 2>(t);
|
| 128 |
+
* std::tuple<const char*, double> last_two = tuple_take<decltype(t), -2>(t);
|
| 129 |
+
*/
|
| 130 |
+
template <class Tuple, int N, class Enable = void>
|
| 131 |
+
struct TupleTake {};
|
| 132 |
+
|
| 133 |
+
template <class Tuple, int N>
|
| 134 |
+
struct TupleTake<Tuple, N, std::enable_if_t<N >= 0, void>> {
|
| 135 |
+
static auto call(Tuple t) {
|
| 136 |
+
constexpr size_t size = std::tuple_size<Tuple>();
|
| 137 |
+
static_assert(N <= size, "tuple_take: N > size");
|
| 138 |
+
return tuple_elements(t, std::make_index_sequence<N>{});
|
| 139 |
+
}
|
| 140 |
+
};
|
| 141 |
+
|
| 142 |
+
template <class Tuple, int N>
|
| 143 |
+
struct TupleTake < Tuple,
|
| 144 |
+
N, std::enable_if_t<N<0, void>> {
|
| 145 |
+
static auto call(Tuple t) {
|
| 146 |
+
constexpr size_t size = std::tuple_size<Tuple>();
|
| 147 |
+
static_assert(-N <= size, "tuple_take: -N > size");
|
| 148 |
+
return tuple_elements(t, make_offset_index_sequence<size + N, -N>{});
|
| 149 |
+
}
|
| 150 |
+
};
|
| 151 |
+
|
| 152 |
+
template <class Tuple, int N>
|
| 153 |
+
auto tuple_take(Tuple t) {
|
| 154 |
+
return TupleTake<Tuple, N>::call(t);
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
/**
|
| 158 |
+
* Use tuple_slice to extract a contiguous subtuple from the argument.
|
| 159 |
+
*
|
| 160 |
+
* Example:
|
| 161 |
+
* std::tuple<int, const char*, double, bool> t = std::make_tuple(0,
|
| 162 |
+
* "HEY", 2.0, false); std::tuple<int, const char*> middle_two =
|
| 163 |
+
* tuple_slice<decltype(t), 1, 2>(t);
|
| 164 |
+
*/
|
| 165 |
+
template <class Tuple, size_t Start, size_t N>
|
| 166 |
+
constexpr auto tuple_slice(Tuple t) {
|
| 167 |
+
constexpr size_t size = std::tuple_size<Tuple>();
|
| 168 |
+
static_assert(Start + N <= size, "tuple_slice: Start + N > size");
|
| 169 |
+
return tuple_elements(t, make_offset_index_sequence<Start, N>{});
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
/**
 * Use tuple_map to run a mapping function over a tuple to get a new tuple.
 *
 * Example 1:
 *   auto result = tuple_map(std::tuple<int32_t, int32_t, int32_t>(3, 4, 5),
 *       [](int32_t a) -> int16_t { return a + 1; });
 *   // result == std::tuple<int16_t, int16_t, int16_t>(4, 5, 6)
 *
 * Example 2 (overloaded functor, heterogeneous tuple):
 *   struct Mapper {
 *     std::string operator()(int32_t a) const {
 *       return std::to_string(a);
 *     }
 *     int64_t operator()(const std::string& a) const {
 *       return atoi(a.c_str());
 *     }
 *   };
 *   auto result = tuple_map(std::tuple<int32_t, std::string>(3, "4"),
 *       Mapper());
 *   // result == std::tuple<std::string, int64_t>("3", 4)
 */
namespace detail {
// Expands the tuple with an index pack; each result element's type is
// exactly what `mapper` returns for the corresponding forwarded element.
template <class Mapper, class... Args, size_t... Indices>
auto tuple_map(
    std::tuple<Args...>&& tuple,
    const Mapper& mapper,
    std::index_sequence<Indices...>) {
  return std::tuple<decltype(mapper(std::forward<Args>(std::get<Indices>(
      tuple))))...>(mapper(std::forward<Args>(std::get<Indices>(tuple)))...);
}
} // namespace detail

template <class Mapper, class... Args>
auto tuple_map(std::tuple<Args...>&& tuple, const Mapper& mapper) {
  return detail::tuple_map(
      std::move(tuple), mapper, std::index_sequence_for<Args...>());
}
|
| 224 |
+
|
| 225 |
+
} // namespace guts
|
| 226 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file defines OptionalArrayRef<T>, a class that has almost the same
|
| 2 |
+
// exact functionality as c10::optional<ArrayRef<T>>, except that its
|
| 3 |
+
// converting constructor fixes a dangling pointer issue.
|
| 4 |
+
//
|
| 5 |
+
// The implicit converting constructor of both c10::optional<ArrayRef<T>> and
|
| 6 |
+
// std::optional<ArrayRef<T>> can cause the underlying ArrayRef<T> to store
|
| 7 |
+
// a dangling pointer. OptionalArrayRef<T> prevents this by wrapping
|
| 8 |
+
// a c10::optional<ArrayRef<T>> and fixing the constructor implementation.
|
| 9 |
+
//
|
| 10 |
+
// See https://github.com/pytorch/pytorch/issues/63645 for more on this.
|
| 11 |
+
|
| 12 |
+
#pragma once
|
| 13 |
+
|
| 14 |
+
#include <c10/util/ArrayRef.h>
|
| 15 |
+
#include <c10/util/Optional.h>
|
| 16 |
+
|
| 17 |
+
namespace c10 {
|
| 18 |
+
|
| 19 |
+
template <typename T>
|
| 20 |
+
class OptionalArrayRef final {
|
| 21 |
+
public:
|
| 22 |
+
// Constructors
|
| 23 |
+
|
| 24 |
+
constexpr OptionalArrayRef() noexcept = default;
|
| 25 |
+
|
| 26 |
+
constexpr OptionalArrayRef(nullopt_t) noexcept {}
|
| 27 |
+
|
| 28 |
+
OptionalArrayRef(const OptionalArrayRef& other) = default;
|
| 29 |
+
|
| 30 |
+
OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;
|
| 31 |
+
|
| 32 |
+
constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
|
| 33 |
+
: wrapped_opt_array_ref(other) {}
|
| 34 |
+
|
| 35 |
+
constexpr OptionalArrayRef(optional<ArrayRef<T>>&& other) noexcept
|
| 36 |
+
: wrapped_opt_array_ref(other) {}
|
| 37 |
+
|
| 38 |
+
constexpr OptionalArrayRef(const T& value) noexcept
|
| 39 |
+
: wrapped_opt_array_ref(value) {}
|
| 40 |
+
|
| 41 |
+
template <
|
| 42 |
+
typename U = ArrayRef<T>,
|
| 43 |
+
std::enable_if_t<
|
| 44 |
+
!std::is_same<std::decay_t<U>, OptionalArrayRef>::value &&
|
| 45 |
+
!std::is_same<std::decay_t<U>, in_place_t>::value &&
|
| 46 |
+
std::is_constructible<ArrayRef<T>, U&&>::value &&
|
| 47 |
+
std::is_convertible<U&&, ArrayRef<T>>::value &&
|
| 48 |
+
!std::is_convertible<U&&, T>::value,
|
| 49 |
+
bool> = false>
|
| 50 |
+
constexpr OptionalArrayRef(U&& value) noexcept(
|
| 51 |
+
std::is_nothrow_constructible<ArrayRef<T>, U&&>::value)
|
| 52 |
+
: wrapped_opt_array_ref(value) {}
|
| 53 |
+
|
| 54 |
+
template <
|
| 55 |
+
typename U = ArrayRef<T>,
|
| 56 |
+
std::enable_if_t<
|
| 57 |
+
!std::is_same<std::decay_t<U>, OptionalArrayRef>::value &&
|
| 58 |
+
!std::is_same<std::decay_t<U>, in_place_t>::value &&
|
| 59 |
+
std::is_constructible<ArrayRef<T>, U&&>::value &&
|
| 60 |
+
!std::is_convertible<U&&, ArrayRef<T>>::value,
|
| 61 |
+
bool> = false>
|
| 62 |
+
constexpr explicit OptionalArrayRef(U&& value) noexcept(
|
| 63 |
+
std::is_nothrow_constructible<ArrayRef<T>, U&&>::value)
|
| 64 |
+
: wrapped_opt_array_ref(value) {}
|
| 65 |
+
|
| 66 |
+
template <typename... Args>
|
| 67 |
+
constexpr explicit OptionalArrayRef(in_place_t ip, Args&&... args) noexcept
|
| 68 |
+
: wrapped_opt_array_ref(ip, args...) {}
|
| 69 |
+
|
| 70 |
+
template <typename U, typename... Args>
|
| 71 |
+
constexpr explicit OptionalArrayRef(
|
| 72 |
+
in_place_t ip,
|
| 73 |
+
std::initializer_list<U> il,
|
| 74 |
+
Args&&... args)
|
| 75 |
+
: wrapped_opt_array_ref(ip, il, args...) {}
|
| 76 |
+
|
| 77 |
+
constexpr OptionalArrayRef(const std::initializer_list<T>& Vec)
|
| 78 |
+
: wrapped_opt_array_ref(ArrayRef<T>(Vec)) {}
|
| 79 |
+
|
| 80 |
+
// Destructor
|
| 81 |
+
|
| 82 |
+
~OptionalArrayRef() = default;
|
| 83 |
+
|
| 84 |
+
// Assignment
|
| 85 |
+
|
| 86 |
+
constexpr OptionalArrayRef& operator=(nullopt_t) noexcept {
|
| 87 |
+
wrapped_opt_array_ref = c10::nullopt;
|
| 88 |
+
return *this;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
|
| 92 |
+
|
| 93 |
+
OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
|
| 94 |
+
|
| 95 |
+
constexpr OptionalArrayRef& operator=(
|
| 96 |
+
const optional<ArrayRef<T>>& other) noexcept {
|
| 97 |
+
wrapped_opt_array_ref = other;
|
| 98 |
+
return *this;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
constexpr OptionalArrayRef& operator=(
|
| 102 |
+
optional<ArrayRef<T>>&& other) noexcept {
|
| 103 |
+
wrapped_opt_array_ref = other;
|
| 104 |
+
return *this;
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
template <typename U = ArrayRef<T>>
|
| 108 |
+
constexpr std::enable_if_t<
|
| 109 |
+
!std::is_same<std::decay_t<U>, OptionalArrayRef>::value &&
|
| 110 |
+
std::is_constructible<ArrayRef<T>, U&&>::value &&
|
| 111 |
+
std::is_assignable<ArrayRef<T>&, U&&>::value,
|
| 112 |
+
OptionalArrayRef&>
|
| 113 |
+
operator=(U&& value) noexcept(
|
| 114 |
+
std::is_nothrow_constructible<ArrayRef<T>, U&&>::value&&
|
| 115 |
+
std::is_nothrow_assignable<ArrayRef<T>&, U&&>::value) {
|
| 116 |
+
wrapped_opt_array_ref = value;
|
| 117 |
+
return *this;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
// Observers
|
| 121 |
+
|
| 122 |
+
constexpr ArrayRef<T>* operator->() noexcept {
|
| 123 |
+
return &wrapped_opt_array_ref.value();
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
constexpr const ArrayRef<T>* operator->() const noexcept {
|
| 127 |
+
return &wrapped_opt_array_ref.value();
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
constexpr ArrayRef<T>& operator*() & noexcept {
|
| 131 |
+
return wrapped_opt_array_ref.value();
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
constexpr const ArrayRef<T>& operator*() const& noexcept {
|
| 135 |
+
return wrapped_opt_array_ref.value();
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
constexpr ArrayRef<T>&& operator*() && noexcept {
|
| 139 |
+
return std::move(wrapped_opt_array_ref.value());
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
constexpr const ArrayRef<T>&& operator*() const&& noexcept {
|
| 143 |
+
return std::move(wrapped_opt_array_ref.value());
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
constexpr explicit operator bool() const noexcept {
|
| 147 |
+
return wrapped_opt_array_ref.has_value();
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
constexpr bool has_value() const noexcept {
|
| 151 |
+
return wrapped_opt_array_ref.has_value();
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
constexpr ArrayRef<T>& value() & {
|
| 155 |
+
return wrapped_opt_array_ref.value();
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
constexpr const ArrayRef<T>& value() const& {
|
| 159 |
+
return wrapped_opt_array_ref.value();
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
constexpr ArrayRef<T>&& value() && {
|
| 163 |
+
return std::move(wrapped_opt_array_ref.value());
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
constexpr const ArrayRef<T>&& value() const&& {
|
| 167 |
+
return std::move(wrapped_opt_array_ref.value());
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
template <typename U>
|
| 171 |
+
constexpr std::
|
| 172 |
+
enable_if_t<std::is_convertible<U&&, ArrayRef<T>>::value, ArrayRef<T>>
|
| 173 |
+
value_or(U&& default_value) const& {
|
| 174 |
+
return wrapped_opt_array_ref.value_or(default_value);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
template <typename U>
|
| 178 |
+
constexpr std::
|
| 179 |
+
enable_if_t<std::is_convertible<U&&, ArrayRef<T>>::value, ArrayRef<T>>
|
| 180 |
+
value_or(U&& default_value) && {
|
| 181 |
+
return wrapped_opt_array_ref.value_or(default_value);
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
// Modifiers
|
| 185 |
+
|
| 186 |
+
constexpr void swap(OptionalArrayRef& other) noexcept {
|
| 187 |
+
std::swap(wrapped_opt_array_ref, other.wrapped_opt_array_ref);
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
constexpr void reset() noexcept {
|
| 191 |
+
wrapped_opt_array_ref.reset();
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
template <typename... Args>
|
| 195 |
+
constexpr std::enable_if_t<
|
| 196 |
+
std::is_constructible<ArrayRef<T>, Args&&...>::value,
|
| 197 |
+
ArrayRef<T>&>
|
| 198 |
+
emplace(Args&&... args) noexcept(
|
| 199 |
+
std::is_nothrow_constructible<ArrayRef<T>, Args&&...>::value) {
|
| 200 |
+
return wrapped_opt_array_ref.emplace(args...);
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
template <typename U, typename... Args>
|
| 204 |
+
constexpr ArrayRef<T>& emplace(
|
| 205 |
+
std::initializer_list<U> il,
|
| 206 |
+
Args&&... args) noexcept {
|
| 207 |
+
return wrapped_opt_array_ref.emplace(il, args...);
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
private:
|
| 211 |
+
optional<ArrayRef<T>> wrapped_opt_array_ref;
|
| 212 |
+
};
|
| 213 |
+
|
| 214 |
+
using OptionalIntArrayRef = OptionalArrayRef<int64_t>;
|
| 215 |
+
|
| 216 |
+
inline bool operator==(
|
| 217 |
+
const OptionalIntArrayRef& a1,
|
| 218 |
+
const IntArrayRef& other) {
|
| 219 |
+
if (!a1.has_value()) {
|
| 220 |
+
return false;
|
| 221 |
+
}
|
| 222 |
+
return a1.value() == other;
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
inline bool operator==(
|
| 226 |
+
const c10::IntArrayRef& a1,
|
| 227 |
+
const c10::OptionalIntArrayRef& a2) {
|
| 228 |
+
return a2 == a1;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
} // namespace c10
|
videollama2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
|
| 5 |
+
/**
|
| 6 |
+
* Android versions with libgnustl incorrectly handle thread_local C++
|
| 7 |
+
* qualifier with composite types. NDK up to r17 version is affected.
|
| 8 |
+
*
|
| 9 |
+
* (A fix landed on Jun 4 2018:
|
| 10 |
+
* https://android-review.googlesource.com/c/toolchain/gcc/+/683601)
|
| 11 |
+
*
|
| 12 |
+
* In such cases, use c10::ThreadLocal<T> wrapper
|
| 13 |
+
* which is `pthread_*` based with smart pointer semantics.
|
| 14 |
+
*
|
| 15 |
+
* In addition, convenient macro C10_DEFINE_TLS_static is available.
|
| 16 |
+
* To define static TLS variable of type std::string, do the following
|
| 17 |
+
* ```
|
| 18 |
+
* C10_DEFINE_TLS_static(std::string, str_tls_);
|
| 19 |
+
* ///////
|
| 20 |
+
* {
|
| 21 |
+
* *str_tls_ = "abc";
|
| 22 |
+
* assert(str_tls_->length(), 3);
|
| 23 |
+
* }
|
| 24 |
+
* ```
|
| 25 |
+
*
|
| 26 |
+
* (see c10/test/util/ThreadLocal_test.cpp for more examples)
|
| 27 |
+
*/
|
| 28 |
+
#if !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
|
| 29 |
+
|
| 30 |
+
#if defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
|
| 31 |
+
#define C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE
|
| 32 |
+
#endif // defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
|
| 33 |
+
|
| 34 |
+
#endif // !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
|
| 35 |
+
|
| 36 |
+
#if defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
|
| 37 |
+
#include <c10/util/Exception.h>
|
| 38 |
+
#include <errno.h>
|
| 39 |
+
#include <pthread.h>
|
| 40 |
+
#include <memory>
|
| 41 |
+
namespace c10 {
|
| 42 |
+
|
| 43 |
+
/**
|
| 44 |
+
* @brief Temporary thread_local C++ qualifier replacement for Android
|
| 45 |
+
* based on `pthread_*`.
|
| 46 |
+
* To be used with composite types that provide default ctor.
|
| 47 |
+
*/
|
| 48 |
+
template <typename Type>
|
| 49 |
+
class ThreadLocal {
|
| 50 |
+
public:
|
| 51 |
+
ThreadLocal() {
|
| 52 |
+
pthread_key_create(
|
| 53 |
+
&key_, [](void* buf) { delete static_cast<Type*>(buf); });
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
~ThreadLocal() {
|
| 57 |
+
if (void* current = pthread_getspecific(key_)) {
|
| 58 |
+
delete static_cast<Type*>(current);
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
pthread_key_delete(key_);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
ThreadLocal(const ThreadLocal&) = delete;
|
| 65 |
+
ThreadLocal& operator=(const ThreadLocal&) = delete;
|
| 66 |
+
|
| 67 |
+
Type& get() {
|
| 68 |
+
if (void* current = pthread_getspecific(key_)) {
|
| 69 |
+
return *static_cast<Type*>(current);
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
std::unique_ptr<Type> ptr = std::make_unique<Type>();
|
| 73 |
+
if (0 == pthread_setspecific(key_, ptr.get())) {
|
| 74 |
+
return *ptr.release();
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
int err = errno;
|
| 78 |
+
TORCH_INTERNAL_ASSERT(false, "pthread_setspecific() failed, errno = ", err);
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
Type& operator*() {
|
| 82 |
+
return get();
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
Type* operator->() {
|
| 86 |
+
return &get();
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
private:
|
| 90 |
+
pthread_key_t key_;
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
} // namespace c10
|
| 94 |
+
|
| 95 |
+
#define C10_DEFINE_TLS_static(Type, Name) static ::c10::ThreadLocal<Type> Name
|
| 96 |
+
|
| 97 |
+
#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
|
| 98 |
+
static ::c10::ThreadLocal<Type> Name
|
| 99 |
+
|
| 100 |
+
#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
|
| 101 |
+
::c10::ThreadLocal<Type> Class::Name
|
| 102 |
+
|
| 103 |
+
#else // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
|
| 104 |
+
|
| 105 |
+
namespace c10 {
|
| 106 |
+
|
| 107 |
+
/**
 * @brief Default thread_local implementation for non-Android cases.
 * To be used with composite types that provide default ctor.
 */
template <typename Type>
class ThreadLocal {
 public:
  // A function returning the calling thread's instance (in practice a
  // lambda wrapping a `static thread_local` variable — see the macros
  // below).
  using Accessor = Type* (*)();
  explicit ThreadLocal(Accessor accessor) : accessor_fn_(accessor) {}

  ThreadLocal(const ThreadLocal&) = delete;
  ThreadLocal& operator=(const ThreadLocal&) = delete;

  Type& get() {
    return *accessor_fn_();
  }

  Type& operator*() {
    return get();
  }

  Type* operator->() {
    return &get();
  }

 private:
  Accessor accessor_fn_;
};
|
| 135 |
+
|
| 136 |
+
} // namespace c10
|
| 137 |
+
|
| 138 |
+
#define C10_DEFINE_TLS_static(Type, Name) \
|
| 139 |
+
static ::c10::ThreadLocal<Type> Name([]() { \
|
| 140 |
+
static thread_local Type var; \
|
| 141 |
+
return &var; \
|
| 142 |
+
})
|
| 143 |
+
|
| 144 |
+
#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
|
| 145 |
+
static ::c10::ThreadLocal<Type> Name
|
| 146 |
+
|
| 147 |
+
#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
|
| 148 |
+
::c10::ThreadLocal<Type> Class::Name([]() { \
|
| 149 |
+
static thread_local Type var; \
|
| 150 |
+
return &var; \
|
| 151 |
+
})
|
| 152 |
+
|
| 153 |
+
#endif // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
|