Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h +3 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h +13 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h +46 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h +165 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Array.h +42 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h +2 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/CachingHostAllocator.h +383 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h +25 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h +139 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h +33 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h +397 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h +209 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h +13 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h +48 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h +337 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h +25 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h +191 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h +201 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h +111 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h +194 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h +139 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h +187 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h +27 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h +22 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h +84 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Range.h +25 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h +14 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h +1 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h +1 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h +94 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h +277 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h +0 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h +17 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h +175 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h +1 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h +83 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h +92 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h +91 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h +2264 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/blob.h +204 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h +99 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h +27 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h +239 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/function.h +114 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h +687 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h +75 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/functional.h +54 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h +10 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings_class.h +32 -0
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Export.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
struct OperatorName;
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
namespace at {
|
| 10 |
+
|
| 11 |
+
// check if an op is a custom op (i.e. did not come from native_functions.yaml)
|
| 12 |
+
TORCH_API bool is_custom_op(const c10::OperatorName& opName);
|
| 13 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/core/QScheme.h>
|
| 3 |
+
|
| 4 |
+
// Forward declarations of core ATen types used in dispatch functions
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
template<typename T>
|
| 8 |
+
class List;
|
| 9 |
+
template<typename T>
|
| 10 |
+
class IListRef;
|
| 11 |
+
class Stream;
|
| 12 |
+
class Scalar;
|
| 13 |
+
class SymInt;
|
| 14 |
+
class SymIntList;
|
| 15 |
+
struct Storage;
|
| 16 |
+
struct TensorOptions;
|
| 17 |
+
template <typename T>
|
| 18 |
+
class ArrayRef;
|
| 19 |
+
template <typename T>
|
| 20 |
+
class OptionalArrayRef;
|
| 21 |
+
|
| 22 |
+
} // namespace c10
|
| 23 |
+
|
| 24 |
+
namespace at {
|
| 25 |
+
|
| 26 |
+
class Tensor;
|
| 27 |
+
class OptionalTensorRef;
|
| 28 |
+
struct Dimname;
|
| 29 |
+
struct Generator;
|
| 30 |
+
using TensorList = c10::ArrayRef<Tensor>;
|
| 31 |
+
using ITensorListRef = c10::IListRef<Tensor>;
|
| 32 |
+
using IOptTensorListRef = c10::IListRef<OptionalTensorRef>;
|
| 33 |
+
using DimnameList = c10::ArrayRef<Dimname>;
|
| 34 |
+
using IntArrayRef = c10::ArrayRef<int64_t>;
|
| 35 |
+
using OptionalIntArrayRef = c10::OptionalArrayRef<int64_t>;
|
| 36 |
+
using OptionalSymIntArrayRef = c10::OptionalArrayRef<c10::SymInt>;
|
| 37 |
+
|
| 38 |
+
using c10::Stream;
|
| 39 |
+
using c10::Storage;
|
| 40 |
+
using c10::QScheme;
|
| 41 |
+
using c10::Scalar;
|
| 42 |
+
using c10::SymInt;
|
| 43 |
+
using c10::SymIntList;
|
| 44 |
+
using c10::TensorOptions;
|
| 45 |
+
|
| 46 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This global header must not depend on native_functions.yaml or
|
| 2 |
+
// incremental builds will be next to useless
|
| 3 |
+
#pragma push_macro("TORCH_ASSERT_NO_OPERATORS")
|
| 4 |
+
#define TORCH_ASSERT_NO_OPERATORS
|
| 5 |
+
|
| 6 |
+
// This macro doesn't work if defined after the first time inttypes.h
|
| 7 |
+
// is included, so won't work anywhere if not defined here.
|
| 8 |
+
#ifndef __STDC_FORMAT_MACROS
|
| 9 |
+
#define __STDC_FORMAT_MACROS
|
| 10 |
+
#endif
|
| 11 |
+
#include <cinttypes>
|
| 12 |
+
|
| 13 |
+
// This list of headers was generated using a script that finds
|
| 14 |
+
// high-impact headers and then manually tweaked to remove OS specific
|
| 15 |
+
// or duplicate headers (e.g. <cassert> and <assert.h>) and to remove
|
| 16 |
+
// "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10).
|
| 17 |
+
|
| 18 |
+
// To generate the initial list:
|
| 19 |
+
// 1. Build pytorch from scratch with all build caching disabled
|
| 20 |
+
// 2. Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing)
|
| 21 |
+
// $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json
|
| 22 |
+
// 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/
|
| 23 |
+
// $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json
|
| 24 |
+
// Where the threshold can be tweaked until c10 and some of ATen
|
| 25 |
+
// core are included but TORCH_ASSERT_NO_OPERATORS still passes.
|
| 26 |
+
|
| 27 |
+
#include <cerrno>
|
| 28 |
+
#include <cmath>
|
| 29 |
+
#include <cstddef>
|
| 30 |
+
#include <cstdint>
|
| 31 |
+
#include <cstdlib>
|
| 32 |
+
#include <cstring>
|
| 33 |
+
|
| 34 |
+
#include <algorithm>
|
| 35 |
+
#include <array>
|
| 36 |
+
#include <atomic>
|
| 37 |
+
#include <chrono>
|
| 38 |
+
#include <complex>
|
| 39 |
+
#include <deque>
|
| 40 |
+
#include <exception>
|
| 41 |
+
#include <functional>
|
| 42 |
+
#include <initializer_list>
|
| 43 |
+
#include <iomanip>
|
| 44 |
+
#include <iosfwd>
|
| 45 |
+
#include <iterator>
|
| 46 |
+
#include <limits>
|
| 47 |
+
#include <list>
|
| 48 |
+
#include <map>
|
| 49 |
+
#include <memory>
|
| 50 |
+
#include <mutex>
|
| 51 |
+
#include <new>
|
| 52 |
+
#include <numeric>
|
| 53 |
+
#include <ostream>
|
| 54 |
+
#include <sstream>
|
| 55 |
+
#include <stdexcept>
|
| 56 |
+
#include <string>
|
| 57 |
+
#include <tuple>
|
| 58 |
+
#include <type_traits>
|
| 59 |
+
#include <typeindex>
|
| 60 |
+
#include <typeinfo>
|
| 61 |
+
#include <unordered_map>
|
| 62 |
+
#include <unordered_set>
|
| 63 |
+
#include <utility>
|
| 64 |
+
#include <vector>
|
| 65 |
+
|
| 66 |
+
#include <c10/core/Allocator.h>
|
| 67 |
+
#include <c10/core/AutogradState.h>
|
| 68 |
+
#include <c10/core/Backend.h>
|
| 69 |
+
#include <c10/core/DefaultDtype.h>
|
| 70 |
+
#include <c10/core/Device.h>
|
| 71 |
+
#include <c10/core/DeviceType.h>
|
| 72 |
+
#include <c10/core/DispatchKey.h>
|
| 73 |
+
#include <c10/core/DispatchKeySet.h>
|
| 74 |
+
#include <c10/core/GeneratorImpl.h>
|
| 75 |
+
#include <c10/core/InferenceMode.h>
|
| 76 |
+
#include <c10/core/Layout.h>
|
| 77 |
+
#include <c10/core/MemoryFormat.h>
|
| 78 |
+
#include <c10/core/OptionalRef.h>
|
| 79 |
+
#include <c10/core/QScheme.h>
|
| 80 |
+
#include <c10/core/Scalar.h>
|
| 81 |
+
#include <c10/core/ScalarType.h>
|
| 82 |
+
#include <c10/core/ScalarTypeToTypeMeta.h>
|
| 83 |
+
#include <c10/core/Storage.h>
|
| 84 |
+
#include <c10/core/StorageImpl.h>
|
| 85 |
+
#include <c10/core/SymBool.h>
|
| 86 |
+
#include <c10/core/SymFloat.h>
|
| 87 |
+
#include <c10/core/SymInt.h>
|
| 88 |
+
#include <c10/core/SymIntArrayRef.h>
|
| 89 |
+
#include <c10/core/SymNodeImpl.h>
|
| 90 |
+
#include <c10/core/TensorImpl.h>
|
| 91 |
+
#include <c10/core/TensorOptions.h>
|
| 92 |
+
#include <c10/core/UndefinedTensorImpl.h>
|
| 93 |
+
#include <c10/core/WrapDimMinimal.h>
|
| 94 |
+
#include <c10/core/impl/LocalDispatchKeySet.h>
|
| 95 |
+
#include <c10/core/impl/PyInterpreter.h>
|
| 96 |
+
#include <c10/core/impl/SizesAndStrides.h>
|
| 97 |
+
|
| 98 |
+
#include <c10/macros/Export.h>
|
| 99 |
+
#include <c10/macros/Macros.h>
|
| 100 |
+
|
| 101 |
+
#include <c10/util/AlignOf.h>
|
| 102 |
+
#include <c10/util/ArrayRef.h>
|
| 103 |
+
#include <c10/util/BFloat16.h>
|
| 104 |
+
#include <c10/util/C++17.h>
|
| 105 |
+
#include <c10/util/ConstexprCrc.h>
|
| 106 |
+
#include <c10/util/Deprecated.h>
|
| 107 |
+
#include <c10/util/DimVector.h>
|
| 108 |
+
#include <c10/util/Exception.h>
|
| 109 |
+
#include <c10/util/ExclusivelyOwned.h>
|
| 110 |
+
#include <c10/util/Flags.h>
|
| 111 |
+
#include <c10/util/Float8_e4m3fn.h>
|
| 112 |
+
#include <c10/util/Float8_e5m2.h>
|
| 113 |
+
#include <c10/util/Float8_e4m3fnuz.h>
|
| 114 |
+
#include <c10/util/Float8_e5m2fnuz.h>
|
| 115 |
+
#include <c10/util/FunctionRef.h>
|
| 116 |
+
#include <c10/util/Half.h>
|
| 117 |
+
#include <c10/util/IdWrapper.h>
|
| 118 |
+
#include <c10/util/Logging.h>
|
| 119 |
+
#include <c10/util/MaybeOwned.h>
|
| 120 |
+
#include <c10/util/Metaprogramming.h>
|
| 121 |
+
#include <c10/util/Optional.h>
|
| 122 |
+
#include <c10/util/Registry.h>
|
| 123 |
+
#include <c10/util/SmallVector.h>
|
| 124 |
+
#include <c10/util/StringUtil.h>
|
| 125 |
+
#include <c10/util/ThreadLocalDebugInfo.h>
|
| 126 |
+
#include <c10/util/Type.h>
|
| 127 |
+
#include <c10/util/TypeCast.h>
|
| 128 |
+
#include <c10/util/TypeIndex.h>
|
| 129 |
+
#include <c10/util/TypeList.h>
|
| 130 |
+
#include <c10/util/TypeSafeSignMath.h>
|
| 131 |
+
#include <c10/util/TypeTraits.h>
|
| 132 |
+
#include <c10/util/UniqueVoidPtr.h>
|
| 133 |
+
#include <c10/util/accumulate.h>
|
| 134 |
+
#include <c10/util/bit_cast.h>
|
| 135 |
+
#include <c10/util/bits.h>
|
| 136 |
+
#include <c10/util/complex.h>
|
| 137 |
+
#include <c10/util/floating_point_utils.h>
|
| 138 |
+
#include <c10/util/intrusive_ptr.h>
|
| 139 |
+
#include <c10/util/irange.h>
|
| 140 |
+
#include <c10/util/llvmMathExtras.h>
|
| 141 |
+
#include <c10/util/python_stub.h>
|
| 142 |
+
#include <c10/util/qint32.h>
|
| 143 |
+
#include <c10/util/qint8.h>
|
| 144 |
+
#include <c10/util/quint2x4.h>
|
| 145 |
+
#include <c10/util/quint4x2.h>
|
| 146 |
+
#include <c10/util/quint8.h>
|
| 147 |
+
#include <c10/util/safe_numerics.h>
|
| 148 |
+
#include <c10/util/string_utils.h>
|
| 149 |
+
#include <c10/util/string_view.h>
|
| 150 |
+
#include <c10/util/typeid.h>
|
| 151 |
+
|
| 152 |
+
#include <ATen/StorageUtils.h>
|
| 153 |
+
#include <ATen/core/ATen_fwd.h>
|
| 154 |
+
#include <ATen/core/DeprecatedTypeProperties.h>
|
| 155 |
+
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
|
| 156 |
+
#include <ATen/core/DimVector.h>
|
| 157 |
+
#include <ATen/core/Dimname.h>
|
| 158 |
+
#include <ATen/core/Generator.h>
|
| 159 |
+
#include <ATen/core/NamedTensor.h>
|
| 160 |
+
#include <ATen/core/QuantizerBase.h>
|
| 161 |
+
#include <ATen/core/TensorAccessor.h>
|
| 162 |
+
#include <ATen/core/TensorBase.h>
|
| 163 |
+
#include <ATen/core/symbol.h>
|
| 164 |
+
|
| 165 |
+
#pragma pop_macro("TORCH_ASSERT_NO_OPERATORS")
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Array.h
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// A fixed-size array type usable from both host and
|
| 4 |
+
// device code.
|
| 5 |
+
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
|
| 9 |
+
namespace at::detail {
|
| 10 |
+
|
| 11 |
+
template <typename T, int size_>
|
| 12 |
+
struct Array {
|
| 13 |
+
// NOLINTNEXTLINE(*c-array*)
|
| 14 |
+
T data[size_];
|
| 15 |
+
|
| 16 |
+
C10_HOST_DEVICE T operator[](int i) const {
|
| 17 |
+
return data[i];
|
| 18 |
+
}
|
| 19 |
+
C10_HOST_DEVICE T& operator[](int i) {
|
| 20 |
+
return data[i];
|
| 21 |
+
}
|
| 22 |
+
#if defined(USE_ROCM)
|
| 23 |
+
C10_HOST_DEVICE Array() = default;
|
| 24 |
+
C10_HOST_DEVICE Array(const Array&) = default;
|
| 25 |
+
C10_HOST_DEVICE Array& operator=(const Array&) = default;
|
| 26 |
+
#else
|
| 27 |
+
Array() = default;
|
| 28 |
+
Array(const Array&) = default;
|
| 29 |
+
Array& operator=(const Array&) = default;
|
| 30 |
+
#endif
|
| 31 |
+
static constexpr int size() {
|
| 32 |
+
return size_;
|
| 33 |
+
}
|
| 34 |
+
// Fill the array with x.
|
| 35 |
+
C10_HOST_DEVICE Array(T x) {
|
| 36 |
+
for (int i = 0; i < size_; i++) {
|
| 37 |
+
data[i] = x;
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
};
|
| 41 |
+
|
| 42 |
+
} // namespace at::detail
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/util/Backtrace.h>
|
| 2 |
+
#include <c10/util/Type.h>
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/CachingHostAllocator.h
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/Allocator.h>
|
| 2 |
+
#include <c10/util/flat_hash_map.h>
|
| 3 |
+
#include <c10/util/llvmMathExtras.h>
|
| 4 |
+
#include <optional>
|
| 5 |
+
|
| 6 |
+
#include <deque>
|
| 7 |
+
#include <mutex>
|
| 8 |
+
#include <set>
|
| 9 |
+
|
| 10 |
+
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
|
| 11 |
+
namespace at {
|
| 12 |
+
|
| 13 |
+
/**
|
| 14 |
+
* HostBlock is typically a fundamental memory block used in pinned memory. It
|
| 15 |
+
* is likely related to Event and Stream of device runtime. It is probably a
|
| 16 |
+
* base struct or interface that can be inherited and extended by each backend.
|
| 17 |
+
*/
|
| 18 |
+
template <typename S>
|
| 19 |
+
struct HostBlock {
|
| 20 |
+
// constructor for search key
|
| 21 |
+
HostBlock(size_t size) : size_(size) {}
|
| 22 |
+
|
| 23 |
+
HostBlock(size_t size, void* ptr) : size_(size), ptr_(ptr) {}
|
| 24 |
+
|
| 25 |
+
std::mutex mutex_;
|
| 26 |
+
size_t size_{0}; // block size in bytes
|
| 27 |
+
void* ptr_{nullptr}; // memory address
|
| 28 |
+
bool allocated_{false}; // in-use flag
|
| 29 |
+
size_t event_count_{0}; // number of related events
|
| 30 |
+
ska::flat_hash_set<S> streams_; // streams on which the block was used
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
template <typename B>
|
| 34 |
+
struct alignas(64) FreeBlockList {
|
| 35 |
+
std::mutex mutex_;
|
| 36 |
+
std::deque<B*> list_;
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
namespace {
|
| 40 |
+
// Max cached block sizes: (1 << MAX_SIZE_INDEX) bytes
|
| 41 |
+
constexpr size_t MAX_SIZE_INDEX = 64;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
/**
|
| 45 |
+
* Note [HostAllocator design]
|
| 46 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 47 |
+
* We have three key data structures - the free list which stores blocks that
|
| 48 |
+
* are not currently used, the block list which stores all blocks that have been
|
| 49 |
+
* allocated, and the event queue which stores runtime events and their
|
| 50 |
+
* corresponding blocks.
|
| 51 |
+
*
|
| 52 |
+
* Each of these are protected by a separate mutex. The key design principles
|
| 53 |
+
* are to 1) only hold each mutex for the minimal amount of time possible, 2)
|
| 54 |
+
* never do any possible expensive operations (such as CUDA runtime API calls)
|
| 55 |
+
* while holding the lock.
|
| 56 |
+
*
|
| 57 |
+
* There are four public methods: allocate, free, record_event and empty_cache.
|
| 58 |
+
* 1) In the allocate path, we first check to see if we can service our
|
| 59 |
+
* request from this free list, and otherwise we create a new block with
|
| 60 |
+
* allocate_host_memory.
|
| 61 |
+
* 2) In the free path, we insert events (if required) into the event queue,
|
| 62 |
+
* and if possible insert our block back into the free list. In allocate, we
|
| 63 |
+
* first eagerly query events until we find one that is not ready, and insert
|
| 64 |
+
* the corresponding block onto the free list if all the events recorded for a
|
| 65 |
+
* block are ready.
|
| 66 |
+
* 3) In the record_event path, we simply insert the given stream into the set
|
| 67 |
+
* of streams tracked by the specified block. This set of streams is then
|
| 68 |
+
* consumed in the free path.
|
| 69 |
+
* 4) In the empty_cache path, we flush any available blocks into the free
|
| 70 |
+
* list. Remove all element of free list, then remove them from block list and
|
| 71 |
+
* release the associated pinned memory allocation via free_block.
|
| 72 |
+
*
|
| 73 |
+
* We generalize the caching host allocator into two parts: interface and
|
| 74 |
+
* implementation. For any new backend looking to integrate with host allocator
|
| 75 |
+
* and reuse caching mechanism, these two parts are necessary to be specialized.
|
| 76 |
+
*
|
| 77 |
+
* For the implementation, we provide a CachingHostAllocatorImpl struct
|
| 78 |
+
* to abstract the caching mechanism. Any backend needs to provide a customized
|
| 79 |
+
* implementation by specializing its own public functions and the related
|
| 80 |
+
* runtime functions. Its template parameter S represents runtime Stream, E
|
| 81 |
+
* denotes runtime Event, B indicates the fundamental memory block.
|
| 82 |
+
*
|
| 83 |
+
* For the interface, we provide a CachingHostAllocatorInterface struct as an
|
| 84 |
+
* interface. Any backend needs to derive its own host allocator from this
|
| 85 |
+
* interface. Its template parameter T refers to an implementation that
|
| 86 |
+
* inherited from CachingHostAllocatorImpl.
|
| 87 |
+
*
|
| 88 |
+
* So this design can share the caching mechanism across each backend, and
|
| 89 |
+
* provide flexibility to each backend. A backend can choose to follow this
|
| 90 |
+
* implementation or reuse them by extending and overriding them as necessary.
|
| 91 |
+
* Taking CUDA as an example, it specializes runtime related functions to reuse
|
| 92 |
+
* the caching mechanism. Additionally, it extends the allocator's functionality
|
| 93 |
+
* by adding the allocWithCudaHostRegister function to support page-locking the
|
| 94 |
+
* memory range used by CUDA. Of course, you can also refer to
|
| 95 |
+
* XPUCachingHostAllocator, which is a host caching allocator supported on XPU
|
| 96 |
+
* backend, to implement a basic host caching allocator.
|
| 97 |
+
*
|
| 98 |
+
* Some of the invariants here are less strict than they could be - for example,
|
| 99 |
+
* we do not enforce that free(Block* block) => block->event_count == 0. This is
|
| 100 |
+
* for compatibility reasons, and we can explore enforcing these in subsequent
|
| 101 |
+
* versions.
|
| 102 |
+
*
|
| 103 |
+
* Note that this caching host allocator does not split larger allocations into
|
| 104 |
+
* smaller blocks, unlike the caching device allocator.
|
| 105 |
+
*/
|
| 106 |
+
|
| 107 |
+
template <
|
| 108 |
+
typename S,
|
| 109 |
+
typename E,
|
| 110 |
+
typename B = HostBlock<S>>
|
| 111 |
+
struct CachingHostAllocatorImpl {
|
| 112 |
+
virtual ~CachingHostAllocatorImpl() = default;
|
| 113 |
+
|
| 114 |
+
public:
|
| 115 |
+
// return data_ptr and block pair.
|
| 116 |
+
virtual std::pair<void*, void*> allocate(size_t size) {
|
| 117 |
+
if (size == 0) {
|
| 118 |
+
return {nullptr, nullptr};
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
process_events();
|
| 122 |
+
|
| 123 |
+
// First, try to allocate from the free list
|
| 124 |
+
auto* block = get_free_block(size);
|
| 125 |
+
if (block) {
|
| 126 |
+
return {block->ptr_, reinterpret_cast<void*>(block)};
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
// Round up the allocation to the nearest power of two to improve reuse.
|
| 130 |
+
// These power of two sizes are also used to index into the free list.
|
| 131 |
+
size_t roundSize = c10::llvm::PowerOf2Ceil(size);
|
| 132 |
+
void* ptr = nullptr;
|
| 133 |
+
allocate_host_memory(roundSize, &ptr);
|
| 134 |
+
|
| 135 |
+
// Then, create a new block.
|
| 136 |
+
block = new B(roundSize, ptr);
|
| 137 |
+
block->allocated_ = true;
|
| 138 |
+
|
| 139 |
+
add_allocated_block(block);
|
| 140 |
+
return {block->ptr_, reinterpret_cast<void*>(block)};
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
virtual void free(void* ctx) {
|
| 144 |
+
if (!ctx) {
|
| 145 |
+
return;
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
// Note: we can assume that free is correctly paired with alloc, and thus we
|
| 149 |
+
// do not need to look up the ctx in blocks_.
|
| 150 |
+
auto* block = reinterpret_cast<B*>(ctx);
|
| 151 |
+
|
| 152 |
+
std::optional<std::vector<E>> events;
|
| 153 |
+
{
|
| 154 |
+
std::lock_guard<std::mutex> g(block->mutex_);
|
| 155 |
+
block->allocated_ = false;
|
| 156 |
+
if (block->streams_.empty()) {
|
| 157 |
+
TORCH_INTERNAL_ASSERT(block->event_count_ == 0);
|
| 158 |
+
} else {
|
| 159 |
+
events = std::vector<E>();
|
| 160 |
+
events->reserve(block->streams_.size());
|
| 161 |
+
for (auto stream : block->streams_) {
|
| 162 |
+
record_stream(events, stream);
|
| 163 |
+
}
|
| 164 |
+
block->event_count_ += events->size();
|
| 165 |
+
block->streams_.clear();
|
| 166 |
+
}
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
if (!events) {
|
| 170 |
+
auto index = size_index(block->size_);
|
| 171 |
+
std::lock_guard<std::mutex> g(free_list_[index].mutex_);
|
| 172 |
+
free_list_[index].list_.push_back(block);
|
| 173 |
+
} else {
|
| 174 |
+
// restore these events that record by used streams.
|
| 175 |
+
std::lock_guard<std::mutex> g(events_mutex_);
|
| 176 |
+
for (auto&& event : *events) {
|
| 177 |
+
events_.emplace_front(std::move(event), block);
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
virtual bool record_event(void* ptr, void* ctx, S stream) {
|
| 183 |
+
auto* block = reinterpret_cast<B*>(ctx);
|
| 184 |
+
|
| 185 |
+
// Note: we need to check if the passed-in `ctx` is valid. This is because
|
| 186 |
+
// `record_event` (via `CachingHostAllocator_recordEvent`) can be invoked on
|
| 187 |
+
// an arbitrary tensor, and is not guaranteed to correspond to a pinned
|
| 188 |
+
// memory allocation. Therefore, we need to check that `ctx` is valid before
|
| 189 |
+
// proceeding.
|
| 190 |
+
{
|
| 191 |
+
std::lock_guard<std::mutex> g(blocks_mutex_);
|
| 192 |
+
if (blocks_.find(block) != blocks_.end()) {
|
| 193 |
+
// Now we know this object is safe to access.
|
| 194 |
+
std::lock_guard<std::mutex> gb(block->mutex_);
|
| 195 |
+
TORCH_INTERNAL_ASSERT(block->allocated_);
|
| 196 |
+
block->streams_.insert(stream);
|
| 197 |
+
return true;
|
| 198 |
+
}
|
| 199 |
+
auto it = ptr_to_block_.find(ptr);
|
| 200 |
+
if (it != ptr_to_block_.end()) {
|
| 201 |
+
block = it->second;
|
| 202 |
+
std::lock_guard<std::mutex> g(block->mutex_);
|
| 203 |
+
TORCH_INTERNAL_ASSERT(block->allocated_);
|
| 204 |
+
block->streams_.insert(stream);
|
| 205 |
+
return true;
|
| 206 |
+
}
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
return false;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
virtual void empty_cache() {
|
| 213 |
+
// Flush any available blocks into the free_list.
|
| 214 |
+
process_events();
|
| 215 |
+
|
| 216 |
+
// Remove all elements from the free list, remove them from the blocks
|
| 217 |
+
// list, and free the associated pinned memory allocation. This requires
|
| 218 |
+
// concurrently holding both the free list mutexes and the blocks mutex, and
|
| 219 |
+
// is the only function that concurrently holds multiple mutexes.
|
| 220 |
+
for (size_t i = 0; i < free_list_.size(); ++i) {
|
| 221 |
+
std::lock(free_list_[i].mutex_, blocks_mutex_);
|
| 222 |
+
std::lock_guard<std::mutex> gf(free_list_[i].mutex_, std::adopt_lock);
|
| 223 |
+
std::lock_guard<std::mutex> gb(blocks_mutex_, std::adopt_lock);
|
| 224 |
+
|
| 225 |
+
std::vector<B*> blocks_to_remove(free_list_[i].list_.begin(), free_list_[i].list_.end());
|
| 226 |
+
free_list_[i].list_.clear();
|
| 227 |
+
for (auto* block : blocks_to_remove) {
|
| 228 |
+
blocks_.erase(block);
|
| 229 |
+
ptr_to_block_.erase(block->ptr_);
|
| 230 |
+
free_block(block);
|
| 231 |
+
delete block;
|
| 232 |
+
}
|
| 233 |
+
}
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
inline size_t size_index(size_t size) {
|
| 237 |
+
return c10::llvm::Log2_64_Ceil(size);
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
virtual void copy_data(void* dest [[maybe_unused]], const void* src [[maybe_unused]], std::size_t count [[maybe_unused]]) const {
|
| 241 |
+
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for copy_data");
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
private:
|
| 245 |
+
virtual void add_allocated_block(B* block) {
|
| 246 |
+
std::lock_guard<std::mutex> g(blocks_mutex_);
|
| 247 |
+
blocks_.insert(block);
|
| 248 |
+
ptr_to_block_.insert({block->ptr_, block});
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
virtual B* get_free_block(size_t size) {
|
| 252 |
+
auto index = size_index(size);
|
| 253 |
+
std::lock_guard<std::mutex> g(free_list_[index].mutex_);
|
| 254 |
+
if (free_list_[index].list_.size() > 0) {
|
| 255 |
+
B* block = free_list_[index].list_.back();
|
| 256 |
+
free_list_[index].list_.pop_back();
|
| 257 |
+
block->allocated_ = true;
|
| 258 |
+
return block;
|
| 259 |
+
}
|
| 260 |
+
return nullptr;
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
virtual void process_events() {
|
| 264 |
+
while (true) {
|
| 265 |
+
// Avoid calling cudaEventDestroy while holding a mutex, so move
|
| 266 |
+
// intermediate events out of the lock into this object.
|
| 267 |
+
// process the last event
|
| 268 |
+
std::optional<std::pair<E, B*>> processed;
|
| 269 |
+
{
|
| 270 |
+
std::lock_guard<std::mutex> g(events_mutex_);
|
| 271 |
+
if (!events_.empty()) {
|
| 272 |
+
processed = std::move(events_.back());
|
| 273 |
+
events_.pop_back();
|
| 274 |
+
}
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
if (!processed) {
|
| 278 |
+
return;
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
// otherwise, query the event
|
| 282 |
+
{
|
| 283 |
+
// now, see if we can handle this element
|
| 284 |
+
auto& event = processed->first;
|
| 285 |
+
if (!query_event(event)) {
|
| 286 |
+
// push the event onto the back if it's not ready.
|
| 287 |
+
{
|
| 288 |
+
std::lock_guard<std::mutex> g(events_mutex_);
|
| 289 |
+
events_.push_back(std::move(*processed));
|
| 290 |
+
}
|
| 291 |
+
return;
|
| 292 |
+
}
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
// Process the events.
|
| 296 |
+
TORCH_INTERNAL_ASSERT(processed);
|
| 297 |
+
auto* block = processed->second;
|
| 298 |
+
bool available = false;
|
| 299 |
+
{
|
| 300 |
+
std::lock_guard<std::mutex> g(block->mutex_);
|
| 301 |
+
TORCH_INTERNAL_ASSERT(!block->allocated_)
|
| 302 |
+
block->event_count_--;
|
| 303 |
+
if (block->event_count_ == 0) {
|
| 304 |
+
available = true;
|
| 305 |
+
}
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
if (available) {
|
| 309 |
+
auto index = size_index(block->size_);
|
| 310 |
+
std::lock_guard<std::mutex> g(free_list_[index].mutex_);
|
| 311 |
+
free_list_[index].list_.push_back(block);
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
/* These following functions are runtime-related. */
|
| 317 |
+
|
| 318 |
+
// Allocate page-locked memory on the host.
|
| 319 |
+
virtual void allocate_host_memory(size_t size, void** ptr) {
|
| 320 |
+
TORCH_CHECK_NOT_IMPLEMENTED(
|
| 321 |
+
false, "Not implemented for allocate_host_memory");
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
// Free block and release the pointer contained in block.
|
| 325 |
+
virtual void free_block(B* block) {
|
| 326 |
+
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for free_block");
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
// Record an event on stream and store event into events.
|
| 330 |
+
virtual void record_stream(std::optional<std::vector<E>>& events, S stream) {
|
| 331 |
+
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for record_stream");
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
// Query event if it is completed.
|
| 335 |
+
virtual bool query_event(E& event) {
|
| 336 |
+
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for query_event");
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
alignas(64) std::mutex blocks_mutex_;
|
| 340 |
+
ska::flat_hash_set<B*> blocks_; // block list
|
| 341 |
+
ska::flat_hash_map<void*, B*> ptr_to_block_;
|
| 342 |
+
|
| 343 |
+
// We keep free list as a vector of free lists, one for each power of two
|
| 344 |
+
// size. This allows us to quickly find a free block of the right size.
|
| 345 |
+
// We use deque to store per size free list and guard the list with its own
|
| 346 |
+
// mutex.
|
| 347 |
+
alignas(64) std::vector<FreeBlockList<B>> free_list_ = std::vector<FreeBlockList<B>>(MAX_SIZE_INDEX);
|
| 348 |
+
|
| 349 |
+
alignas(64) std::mutex events_mutex_;
|
| 350 |
+
std::deque<std::pair<E, B*>> events_; // event queue paired with block
|
| 351 |
+
};
|
| 352 |
+
|
| 353 |
+
template <typename T>
|
| 354 |
+
struct CachingHostAllocatorInterface : public at::Allocator {
|
| 355 |
+
CachingHostAllocatorInterface() : impl_(std::make_unique<T>()) {}
|
| 356 |
+
|
| 357 |
+
at::DataPtr allocate(size_t size) override {
|
| 358 |
+
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for allocate");
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
void free(void* ctx) {
|
| 362 |
+
impl_->free(ctx);
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
template <typename S>
|
| 366 |
+
bool record_event(void* ptr, void* ctx, S stream) {
|
| 367 |
+
return impl_->record_event(ptr, ctx, stream);
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
void empty_cache() {
|
| 371 |
+
impl_->empty_cache();
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
void copy_data(void* dest, const void* src, std::size_t count)
|
| 375 |
+
const override {
|
| 376 |
+
impl_->copy_data(dest, src, count);
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
std::unique_ptr<T> impl_;
|
| 380 |
+
};
|
| 381 |
+
|
| 382 |
+
} // namespace at
|
| 383 |
+
C10_DIAGNOSTIC_POP()
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/TensorOptions.h>
|
| 2 |
+
|
| 3 |
+
namespace c10::impl {
|
| 4 |
+
|
| 5 |
+
inline std::optional<MemoryFormat>
|
| 6 |
+
check_tensor_options_and_extract_memory_format(
|
| 7 |
+
const TensorOptions& options,
|
| 8 |
+
std::optional<MemoryFormat> memory_format) {
|
| 9 |
+
TORCH_CHECK(
|
| 10 |
+
options.requires_grad_opt() == std::nullopt ||
|
| 11 |
+
options.requires_grad_opt().value() == false,
|
| 12 |
+
"Operators taking TensorOptions cannot take a TensorOptions with "
|
| 13 |
+
"options.requires_grad set as true. This isn't implemented yet.");
|
| 14 |
+
TORCH_CHECK(
|
| 15 |
+
!(options.has_memory_format() && memory_format.has_value()),
|
| 16 |
+
"Cannot set memory_format both in TensorOptions and explicit argument; please delete "
|
| 17 |
+
"the redundant setter.");
|
| 18 |
+
if (memory_format.has_value()) {
|
| 19 |
+
return memory_format;
|
| 20 |
+
} else {
|
| 21 |
+
return options.memory_format_opt();
|
| 22 |
+
}
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
} // namespace impl namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Backend.h>
|
| 4 |
+
#include <c10/core/ScalarType.h>
|
| 5 |
+
#include <c10/core/Layout.h>
|
| 6 |
+
#include <c10/core/TensorOptions.h>
|
| 7 |
+
#include <c10/core/Storage.h>
|
| 8 |
+
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
namespace at {
|
| 13 |
+
|
| 14 |
+
class Tensor;
|
| 15 |
+
|
| 16 |
+
// This class specifies a Backend and a ScalarType. Currently, it primarily
|
| 17 |
+
// serves as a replacement return value for Tensor::type(). Previously,
|
| 18 |
+
// Tensor::type() returned Type&, but we are changing Type to not be
|
| 19 |
+
// dtype-specific.
|
| 20 |
+
class TORCH_API DeprecatedTypeProperties {
|
| 21 |
+
public:
|
| 22 |
+
DeprecatedTypeProperties(Backend backend, ScalarType scalar_type)
|
| 23 |
+
: backend_(backend), scalar_type_(scalar_type) {}
|
| 24 |
+
|
| 25 |
+
Backend backend() const {
|
| 26 |
+
return backend_;
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
Layout layout() const {
|
| 30 |
+
return layout_from_backend(backend_);
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
bool is_sparse() const {
|
| 34 |
+
return layout_from_backend(backend()) == kSparse;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
bool is_sparse_csr() const {
|
| 38 |
+
return layout_from_backend(backend()) == kSparseCsr;
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
c10::DeviceType device_type() const {
|
| 42 |
+
return backendToDeviceType(backend_);
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
bool is_cuda() const {
|
| 46 |
+
return backendToDeviceType(backend_) == kCUDA;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
ScalarType scalarType() const {
|
| 50 |
+
return scalar_type_;
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
caffe2::TypeMeta typeMeta() const {
|
| 54 |
+
return scalarTypeToTypeMeta(scalar_type_);
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
bool operator==(const DeprecatedTypeProperties& other) const {
|
| 58 |
+
return backend_ == other.backend() && scalar_type_ == other.scalarType();
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
bool operator!=(const DeprecatedTypeProperties& other) const {
|
| 62 |
+
return !(*this == other);
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
std::string toString() const {
|
| 66 |
+
std::string base_str;
|
| 67 |
+
if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) {
|
| 68 |
+
base_str = "UndefinedType";
|
| 69 |
+
} else {
|
| 70 |
+
base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type";
|
| 71 |
+
}
|
| 72 |
+
return base_str;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
DeprecatedTypeProperties & toBackend(Backend b) const {
|
| 76 |
+
return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
|
| 77 |
+
b, scalar_type_);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
DeprecatedTypeProperties & toScalarType(ScalarType s) const {
|
| 81 |
+
return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
|
| 82 |
+
backend_, s);
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
DeprecatedTypeProperties & cpu() const {
|
| 86 |
+
return toBackend(Backend::CPU);
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
DeprecatedTypeProperties & cuda() const {
|
| 90 |
+
return toBackend(Backend::CUDA);
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
DeprecatedTypeProperties & hip() const {
|
| 94 |
+
return toBackend(Backend::HIP);
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
DeprecatedTypeProperties & privateUser1() const {
|
| 98 |
+
return toBackend(Backend::PrivateUse1);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
/// Constructs the `TensorOptions` from a type and a `device_index`.
|
| 102 |
+
TensorOptions options(int16_t device_index = -1) const {
|
| 103 |
+
return TensorOptions().dtype(typeMeta())
|
| 104 |
+
.device(device_type(), static_cast<c10::DeviceIndex>(device_index))
|
| 105 |
+
.layout(layout());
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
/// Constructs the `TensorOptions` from a type and a Device. Asserts that
|
| 109 |
+
/// the device type matches the device type of the type.
|
| 110 |
+
TensorOptions options(std::optional<Device> device_opt) const {
|
| 111 |
+
if (!device_opt.has_value()) {
|
| 112 |
+
return options(-1);
|
| 113 |
+
} else {
|
| 114 |
+
Device device = device_opt.value();
|
| 115 |
+
AT_ASSERT(device.type() == device_type());
|
| 116 |
+
return options(device.index());
|
| 117 |
+
}
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
operator TensorOptions() const {
|
| 121 |
+
return options();
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
int64_t id() const {
|
| 125 |
+
return static_cast<int64_t>(backend()) *
|
| 126 |
+
static_cast<int64_t>(ScalarType::NumOptions) +
|
| 127 |
+
static_cast<int64_t>(scalarType());
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const;
|
| 131 |
+
Storage unsafeStorageFromTH(void * th_pointer, bool retain) const;
|
| 132 |
+
Tensor copy(const Tensor & src, bool non_blocking=false, std::optional<Device> to_device={}) const;
|
| 133 |
+
|
| 134 |
+
private:
|
| 135 |
+
Backend backend_;
|
| 136 |
+
ScalarType scalar_type_;
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// In order to preserve bc, we make DeprecatedTypeProperties instances unique
|
| 4 |
+
// just like they are for Type.
|
| 5 |
+
|
| 6 |
+
#include <c10/core/Backend.h>
|
| 7 |
+
#include <c10/core/ScalarType.h>
|
| 8 |
+
#include <memory>
|
| 9 |
+
|
| 10 |
+
namespace at {
|
| 11 |
+
|
| 12 |
+
class DeprecatedTypeProperties;
|
| 13 |
+
|
| 14 |
+
struct TORCH_API DeprecatedTypePropertiesDeleter {
|
| 15 |
+
void operator()(DeprecatedTypeProperties * ptr);
|
| 16 |
+
};
|
| 17 |
+
|
| 18 |
+
class TORCH_API DeprecatedTypePropertiesRegistry {
|
| 19 |
+
public:
|
| 20 |
+
DeprecatedTypePropertiesRegistry();
|
| 21 |
+
|
| 22 |
+
DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const;
|
| 23 |
+
|
| 24 |
+
private:
|
| 25 |
+
// NOLINTNEXTLINE(*c-array*)
|
| 26 |
+
std::unique_ptr<DeprecatedTypeProperties> registry
|
| 27 |
+
[static_cast<int>(Backend::NumOptions)]
|
| 28 |
+
[static_cast<int>(ScalarType::NumOptions)];
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();
|
| 32 |
+
|
| 33 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h
ADDED
|
@@ -0,0 +1,397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/macros/Export.h>
|
| 5 |
+
#include <c10/util/TypeTraits.h>
|
| 6 |
+
#include <c10/util/TypeList.h>
|
| 7 |
+
#include <c10/util/intrusive_ptr.h>
|
| 8 |
+
#include <c10/util/order_preserving_flat_hash_map.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <ATen/core/TensorBody.h>
|
| 11 |
+
#include <ATen/core/jit_type_base.h>
|
| 12 |
+
|
| 13 |
+
namespace c10 {
|
| 14 |
+
struct IValue;
|
| 15 |
+
template<class Key, class Value> class Dict;
|
| 16 |
+
struct Type;
|
| 17 |
+
|
| 18 |
+
namespace impl {
|
| 19 |
+
|
| 20 |
+
using valid_dict_key_types = guts::typelist::typelist<
|
| 21 |
+
int64_t,
|
| 22 |
+
std::string,
|
| 23 |
+
double,
|
| 24 |
+
c10::complex<double>,
|
| 25 |
+
bool,
|
| 26 |
+
at::Tensor
|
| 27 |
+
>;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
namespace detail {
|
| 31 |
+
|
| 32 |
+
struct DictKeyHash {
|
| 33 |
+
size_t operator()(const IValue& ivalue) const;
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
struct DictKeyEqualTo {
|
| 37 |
+
bool operator()(const IValue& lhs, const IValue& rhs) const;
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
struct DictImpl final : public c10::intrusive_ptr_target {
|
| 41 |
+
using dict_map_type = ska_ordered::order_preserving_flat_hash_map<IValue, IValue, DictKeyHash, DictKeyEqualTo>;
|
| 42 |
+
struct DictElementTypes final {
|
| 43 |
+
TypePtr keyType;
|
| 44 |
+
TypePtr valueType;
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
explicit DictImpl(dict_map_type dict_, DictElementTypes elementTypes_)
|
| 48 |
+
: dict(std::move(dict_))
|
| 49 |
+
, elementTypes(std::move(elementTypes_)) {}
|
| 50 |
+
dict_map_type dict;
|
| 51 |
+
|
| 52 |
+
DictElementTypes elementTypes;
|
| 53 |
+
|
| 54 |
+
intrusive_ptr<DictImpl> copy() const;
|
| 55 |
+
friend TORCH_API bool operator==(const DictImpl& lhs, const DictImpl& rhs);
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
namespace impl {
|
| 61 |
+
template<class Key, class Value, class Iterator> class DictIterator;
|
| 62 |
+
|
| 63 |
+
/**
|
| 64 |
+
* A reference to an entry in the Dict.
|
| 65 |
+
* Use the `key()` and `value()` methods to read the element.
|
| 66 |
+
*/
|
| 67 |
+
template<class Key, class Value, class Iterator>
|
| 68 |
+
class DictEntryRef final {
|
| 69 |
+
public:
|
| 70 |
+
explicit DictEntryRef(Iterator iterator)
|
| 71 |
+
: iterator_(std::move(iterator)) {}
|
| 72 |
+
|
| 73 |
+
decltype(auto) key() const {
|
| 74 |
+
return iterator_->first.template to<Key>();
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
decltype(auto) value() const {
|
| 78 |
+
return iterator_->second.template to<Value>();
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template<class Value_>
|
| 82 |
+
void setValue(Value_&& value) const {
|
| 83 |
+
static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of setValue()");
|
| 84 |
+
iterator_->second = Value(std::forward<Value_>(value));
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
private:
|
| 88 |
+
// allow copying and moving, but only our friends (i.e. the Dict class) can do
|
| 89 |
+
// it. Copying/moving this reference wrapper would be too ambiguous to allow it
|
| 90 |
+
// in the public API.
|
| 91 |
+
DictEntryRef(const DictEntryRef&) = default;
|
| 92 |
+
DictEntryRef& operator=(const DictEntryRef&) = default;
|
| 93 |
+
DictEntryRef(DictEntryRef&&) noexcept = default;
|
| 94 |
+
DictEntryRef& operator=(DictEntryRef&& rhs) & noexcept = default;
|
| 95 |
+
|
| 96 |
+
Iterator iterator_;
|
| 97 |
+
friend class DictIterator<Key, Value, Iterator>;
|
| 98 |
+
friend class Dict<Key, Value>;
|
| 99 |
+
};
|
| 100 |
+
|
| 101 |
+
// this wraps map_type::iterator to make sure user code can't rely
|
| 102 |
+
// on it being the type of the underlying map.
|
| 103 |
+
template<class Key, class Value, class Iterator>
|
| 104 |
+
class DictIterator final {
|
| 105 |
+
public:
|
| 106 |
+
// C++17 friendly std::iterator implementation
|
| 107 |
+
using iterator_category = std::forward_iterator_tag;
|
| 108 |
+
using value_type = DictEntryRef<Key, Value, Iterator>;
|
| 109 |
+
using difference_type = std::ptrdiff_t;
|
| 110 |
+
using pointer = value_type*;
|
| 111 |
+
using reference = value_type&;
|
| 112 |
+
|
| 113 |
+
explicit DictIterator() = default;
|
| 114 |
+
~DictIterator() = default;
|
| 115 |
+
|
| 116 |
+
DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {}
|
| 117 |
+
DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {}
|
| 118 |
+
DictIterator& operator=(const DictIterator& rhs) {
|
| 119 |
+
entryRef_ = rhs.entryRef_;
|
| 120 |
+
return *this;
|
| 121 |
+
}
|
| 122 |
+
DictIterator& operator=(DictIterator&& rhs) noexcept {
|
| 123 |
+
entryRef_ = std::move(rhs.entryRef_);
|
| 124 |
+
return *this;
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
DictIterator& operator++() {
|
| 128 |
+
++entryRef_.iterator_;
|
| 129 |
+
return *this;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
DictIterator operator++(int) {
|
| 133 |
+
DictIterator copy(*this);
|
| 134 |
+
++*this;
|
| 135 |
+
return copy;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
const DictEntryRef<Key, Value, Iterator>& operator*() const {
|
| 139 |
+
return entryRef_;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
const DictEntryRef<Key, Value, Iterator>* operator->() const {
|
| 143 |
+
return &entryRef_;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
friend difference_type operator-(const DictIterator& lhs, const DictIterator& rhs) {
|
| 147 |
+
return lhs.entryRef_.iterator_ - rhs.entryRef_.iterator_;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
private:
|
| 151 |
+
explicit DictIterator(Iterator iterator): entryRef_(std::move(iterator)) {}
|
| 152 |
+
|
| 153 |
+
const Iterator& get_iterator_() const {
|
| 154 |
+
return entryRef_.iterator_;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
friend bool operator==(const DictIterator& lhs, const DictIterator& rhs) {
|
| 158 |
+
return lhs.get_iterator_() == rhs.get_iterator_();
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
friend bool operator!=(const DictIterator& lhs, const DictIterator& rhs) {
|
| 162 |
+
return lhs.get_iterator_() != rhs.get_iterator_();
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
friend bool operator<(const DictIterator& lhs, const DictIterator& rhs) {
|
| 166 |
+
return lhs.get_iterator_() < rhs.get_iterator_();
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
friend bool operator<=(const DictIterator& lhs, const DictIterator& rhs) {
|
| 170 |
+
return lhs.get_iterator_() <= rhs.get_iterator_();
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
friend bool operator>(const DictIterator& lhs, const DictIterator& rhs) {
|
| 174 |
+
return lhs.get_iterator_() > rhs.get_iterator_();
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
friend bool operator>=(const DictIterator& lhs, const DictIterator& rhs) {
|
| 178 |
+
return lhs.get_iterator_() >= rhs.get_iterator_();
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
DictEntryRef<Key, Value, Iterator> entryRef_;
|
| 182 |
+
|
| 183 |
+
friend class DictIterator<Key, Value, typename c10::detail::DictImpl::dict_map_type::iterator>;
|
| 184 |
+
friend class Dict<Key, Value>;
|
| 185 |
+
};
|
| 186 |
+
|
| 187 |
+
template<class Key, class Value> Dict<Key, Value> toTypedDict(Dict<IValue, IValue> dict);
|
| 188 |
+
template<class Key, class Value> Dict<IValue, IValue> toGenericDict(Dict<Key, Value> dict);
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
/**
|
| 192 |
+
* An object of this class stores a map from Key to Value.
|
| 193 |
+
*
|
| 194 |
+
* This is a pointer type. After a copy, both Dicts
|
| 195 |
+
* will share the same storage:
|
| 196 |
+
*
|
| 197 |
+
* > Dict<int, string> a;
|
| 198 |
+
* > Dict<int, string> b = a;
|
| 199 |
+
* > b.insert(3, "three");
|
| 200 |
+
* > ASSERT("three" == a.at(3));
|
| 201 |
+
*
|
| 202 |
+
* We use this class in the PyTorch kernel API because that
|
| 203 |
+
* allows us to do optimizations and switch out the underlying
|
| 204 |
+
* map implementation without breaking backwards compatibility
|
| 205 |
+
* for the kernel API.
|
| 206 |
+
*/
|
| 207 |
+
template<class Key, class Value>
|
| 208 |
+
class Dict final {
|
| 209 |
+
private:
|
| 210 |
+
static_assert((std::is_same_v<IValue, Key> && std::is_same_v<IValue, Value>) || guts::typelist::contains<impl::valid_dict_key_types, Key>::value, "Invalid Key type for Dict. We only support int64_t, double, bool, and string.");
|
| 211 |
+
|
| 212 |
+
// impl_ stores the underlying map as a ska_ordered::order_preserving_flat_hash_map.
|
| 213 |
+
// We intentionally don't offer conversion from/to
|
| 214 |
+
// order_preserving_flat_hash_map, return references to it or something like that,
|
| 215 |
+
// because such operations would get expensive if we switch out
|
| 216 |
+
// the actual map implementation.
|
| 217 |
+
// This is an intrusive_ptr because Dict is a pointer type.
|
| 218 |
+
// Invariant: This will never be a nullptr, there will always be a valid
|
| 219 |
+
// DictImpl.
|
| 220 |
+
c10::intrusive_ptr<detail::DictImpl> impl_;
|
| 221 |
+
|
| 222 |
+
explicit Dict(c10::intrusive_ptr<detail::DictImpl>&& impl);
|
| 223 |
+
friend struct IValue;
|
| 224 |
+
template<class K, class V> friend Dict<K, V> impl::toTypedDict(Dict<IValue, IValue>);
|
| 225 |
+
template<class K, class V> friend Dict<IValue, IValue> impl::toGenericDict(Dict<K, V>);
|
| 226 |
+
|
| 227 |
+
public:
|
| 228 |
+
using key_type = Key;
|
| 229 |
+
using mapped_type = Value;
|
| 230 |
+
using size_type = typename detail::DictImpl::dict_map_type::size_type;
|
| 231 |
+
using iterator = impl::DictIterator<Key, Value, typename detail::DictImpl::dict_map_type::iterator>;
|
| 232 |
+
|
| 233 |
+
/**
|
| 234 |
+
* Creates an empty dict.
|
| 235 |
+
*/
|
| 236 |
+
explicit Dict();
|
| 237 |
+
|
| 238 |
+
/**
|
| 239 |
+
* Create a generic dict with runtime type information.
|
| 240 |
+
* This only works for c10::impl::GenericDict and is not part of the public API
|
| 241 |
+
* but only supposed to be used internally by PyTorch.
|
| 242 |
+
*/
|
| 243 |
+
explicit Dict(TypePtr keyType, TypePtr valueType);
|
| 244 |
+
|
| 245 |
+
~Dict() = default;
|
| 246 |
+
|
| 247 |
+
Dict(const Dict&) = default;
|
| 248 |
+
Dict& operator=(const Dict&) = default;
|
| 249 |
+
|
| 250 |
+
/**
|
| 251 |
+
* Create a new Dict pointing to a deep copy of the same data.
|
| 252 |
+
* The Dict returned is a new dict with separate storage.
|
| 253 |
+
* Changes in it are not reflected in the original dict or vice versa.
|
| 254 |
+
*/
|
| 255 |
+
Dict copy() const;
|
| 256 |
+
|
| 257 |
+
/**
|
| 258 |
+
* Returns an iterator to the first element of the container.
|
| 259 |
+
* If the container is empty, the returned iterator will be equal to end().
|
| 260 |
+
*/
|
| 261 |
+
iterator begin() const;
|
| 262 |
+
|
| 263 |
+
/**
|
| 264 |
+
* Returns an iterator to the element following the last element of the container.
|
| 265 |
+
* This element acts as a placeholder; attempting to access it results in undefined behavior.
|
| 266 |
+
*/
|
| 267 |
+
iterator end() const;
|
| 268 |
+
|
| 269 |
+
/**
|
| 270 |
+
* Checks if the container has no elements.
|
| 271 |
+
*/
|
| 272 |
+
bool empty() const;
|
| 273 |
+
|
| 274 |
+
/**
|
| 275 |
+
* Returns the number of elements in the container.
|
| 276 |
+
*/
|
| 277 |
+
size_type size() const;
|
| 278 |
+
|
| 279 |
+
/**
|
| 280 |
+
* Erases all elements from the container. After this call, size() returns zero.
|
| 281 |
+
* Invalidates any references, pointers, or iterators referring to contained elements. May also invalidate past-the-end iterators.
|
| 282 |
+
*/
|
| 283 |
+
void clear() const;
|
| 284 |
+
|
| 285 |
+
/**
|
| 286 |
+
* Inserts element(s) into the container, if the container doesn't already contain an element with an equivalent key.
|
| 287 |
+
* May invalidate any references, pointers, or iterators referring to contained elements.
|
| 288 |
+
*
|
| 289 |
+
* @return A pair consisting of an iterator to the inserted element (or to the element that prevented the insertion) and a bool denoting whether the insertion took place.
|
| 290 |
+
*/
|
| 291 |
+
template<class Key_, class Value_>
|
| 292 |
+
std::pair<iterator, bool> insert(Key_&& key, Value_&& value) const;
|
| 293 |
+
|
| 294 |
+
/**
|
| 295 |
+
* If an element with the given key already exists, it is overwritten with the given value.
|
| 296 |
+
* Otherwise, a new element with the given key and value are inserted.
|
| 297 |
+
* May invalidate any references, pointers, or iterators referring to contained elements.
|
| 298 |
+
*
|
| 299 |
+
* @return The bool component is true if the insertion took place and false if the assignment took place. The iterator component is pointing at the element that was inserted or updated.
|
| 300 |
+
*/
|
| 301 |
+
template<class Key_, class Value_>
|
| 302 |
+
std::pair<iterator, bool> insert_or_assign(Key_&& key, Value_&& value) const;
|
| 303 |
+
|
| 304 |
+
/**
|
| 305 |
+
* Removes the element pointed to by iter.
|
| 306 |
+
* May invalidate any references, pointers, or iterators referring to contained elements.
|
| 307 |
+
* The iterator iter must be valid and dereferenceable. Thus the end() iterator (which is valid, but is not dereferenceable) cannot be used as a value for iter.
|
| 308 |
+
*/
|
| 309 |
+
void erase(iterator iter) const;
|
| 310 |
+
|
| 311 |
+
/**
|
| 312 |
+
* Removes the element with the given key, if it exists.
|
| 313 |
+
* May invalidate any references, pointers, or iterators referring to contained elements.
|
| 314 |
+
*
|
| 315 |
+
* @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't.
|
| 316 |
+
*/
|
| 317 |
+
C10_NODISCARD size_t erase(const Key& key) const;
|
| 318 |
+
|
| 319 |
+
/**
|
| 320 |
+
* Returns the mapped value of the element with key equivalent to key.
|
| 321 |
+
* If no such element exists, an exception of type std::out_of_range is thrown.
|
| 322 |
+
*/
|
| 323 |
+
Value at(const Key& key) const;
|
| 324 |
+
|
| 325 |
+
/**
|
| 326 |
+
* Finds an element with key equivalent to key.
|
| 327 |
+
*
|
| 328 |
+
* @return Iterator to an element with key equivalent to key.
|
| 329 |
+
* If no such element is found, past-the-end (see end()) iterator is returned.
|
| 330 |
+
*/
|
| 331 |
+
iterator find(const Key& key) const;
|
| 332 |
+
|
| 333 |
+
/**
|
| 334 |
+
* Checks if there is an element with key equivalent to key in the container.
|
| 335 |
+
*
|
| 336 |
+
* @return true if there is such an element, otherwise false.
|
| 337 |
+
*/
|
| 338 |
+
bool contains(const Key& key) const;
|
| 339 |
+
|
| 340 |
+
/**
|
| 341 |
+
* Increase the capacity so that at least count elements can be stored without
|
| 342 |
+
* having to reallocate or rehash.
|
| 343 |
+
*/
|
| 344 |
+
void reserve(size_type count) const;
|
| 345 |
+
|
| 346 |
+
/**
|
| 347 |
+
* Value equality comparison. This function implements Python-like semantics for
|
| 348 |
+
* equality: two dicts with the same identity (e.g. same pointer) trivially
|
| 349 |
+
* compare equal, otherwise each element is compared for equality.
|
| 350 |
+
*/
|
| 351 |
+
template <class Key_, class Value_>
|
| 352 |
+
friend bool operator==(
|
| 353 |
+
const Dict<Key_, Value_>& lhs,
|
| 354 |
+
const Dict<Key_, Value_>& rhs);
|
| 355 |
+
template <class Key_, class Value_>
|
| 356 |
+
friend bool operator!=(
|
| 357 |
+
const Dict<Key_, Value_>& lhs,
|
| 358 |
+
const Dict<Key_, Value_>& rhs);
|
| 359 |
+
|
| 360 |
+
/**
|
| 361 |
+
* Identity comparison. Returns true if and only if `rhs` represents the same
|
| 362 |
+
* Dict object as `this`.
|
| 363 |
+
*/
|
| 364 |
+
bool is(const Dict& rhs) const;
|
| 365 |
+
|
| 366 |
+
// private API for now because the return type will change to TypePtr
|
| 367 |
+
// instead of std::optional<TypePtr> once types are mandatory.
|
| 368 |
+
TypePtr keyType() const;
|
| 369 |
+
TypePtr valueType() const;
|
| 370 |
+
|
| 371 |
+
// [unsafe set type]
|
| 372 |
+
// These functions mutate the tagged type of this dictionary in place.
|
| 373 |
+
// There is no checking that the members of the dictionary are instances
|
| 374 |
+
// of the new types, nor is there a check that other IValues which
|
| 375 |
+
// hold references to this dictionary have the right static type.
|
| 376 |
+
// This functionality is used only in the unpickler, where at
|
| 377 |
+
// creation type the real type of the dictionary is unknown, but
|
| 378 |
+
// then later recovered from the static type information of the
|
| 379 |
+
// unpickled object.
|
| 380 |
+
void unsafeSetKeyType(TypePtr t);
|
| 381 |
+
void unsafeSetValueType(TypePtr t);
|
| 382 |
+
};
|
| 383 |
+
|
| 384 |
+
namespace impl {
|
| 385 |
+
// GenericDict is how IValue stores dicts. It is, however, not part of the
|
| 386 |
+
// public API. Kernels should use Dicts with concrete Key, Value types instead
|
| 387 |
+
// (maybe except for some internal prim ops).
|
| 388 |
+
using GenericDict = Dict<IValue, IValue>;
|
| 389 |
+
|
| 390 |
+
}
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
namespace torch {
|
| 394 |
+
template<class Key, class Value> using Dict = c10::Dict<Key, Value>;
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
#include <ATen/core/Dict_inl.h> // IWYU pragma: keep
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <c10/util/hash.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
namespace detail {
|
| 8 |
+
inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const {
|
| 9 |
+
if (lhs.isTensor() && rhs.isTensor()) {
|
| 10 |
+
// for tensors, we compare only by identity (following how it's done in Python).
|
| 11 |
+
return lhs.is(rhs);
|
| 12 |
+
}
|
| 13 |
+
// Otherwise, we first compare by identity for efficiency, then by value (see:
|
| 14 |
+
// [container equality])
|
| 15 |
+
return _fastEqualsForContainer(lhs, rhs);
|
| 16 |
+
}
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
template<class T> decltype(auto) getTypePtr();
|
| 20 |
+
std::string toString(const Type& type);
|
| 21 |
+
|
| 22 |
+
namespace impl {
|
| 23 |
+
|
| 24 |
+
template<class Key, class Value>
|
| 25 |
+
Dict<Key, Value> toTypedDict(GenericDict dict) {
|
| 26 |
+
TORCH_INTERNAL_ASSERT(*getTypePtr<Key>() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Key types mismatch.");
|
| 27 |
+
TORCH_INTERNAL_ASSERT(*getTypePtr<Value>() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Value types mismatch.");
|
| 28 |
+
|
| 29 |
+
return Dict<Key, Value>(std::move(dict.impl_));
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
template<class Key, class Value>
|
| 33 |
+
GenericDict toGenericDict(Dict<Key, Value> dict) {
|
| 34 |
+
return GenericDict(std::move(dict.impl_));
|
| 35 |
+
}
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
namespace detail {
|
| 39 |
+
|
| 40 |
+
inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
|
| 41 |
+
if (ivalue.isInt()) {
|
| 42 |
+
return std::hash<int64_t>()(ivalue.toInt());
|
| 43 |
+
} else if (ivalue.isString()) {
|
| 44 |
+
return std::hash<c10::string_view>()(ivalue.toStringView());
|
| 45 |
+
} else if (ivalue.isDouble()) {
|
| 46 |
+
return std::hash<double>()(ivalue.toDouble());
|
| 47 |
+
} else if (ivalue.isComplexDouble()) {
|
| 48 |
+
return c10::hash<c10::complex<double>>()(ivalue.toComplexDouble());
|
| 49 |
+
} else if (ivalue.isBool()) {
|
| 50 |
+
return std::hash<bool>()(ivalue.toBool());
|
| 51 |
+
} else if (ivalue.isTensor()) {
|
| 52 |
+
return std::hash<TensorImpl*>()(ivalue.toTensor().unsafeGetTensorImpl());
|
| 53 |
+
} else if (ivalue.isDevice()) {
|
| 54 |
+
return std::hash<Device>()(ivalue.toDevice());
|
| 55 |
+
} else {
|
| 56 |
+
throw std::runtime_error(
|
| 57 |
+
"Can't hash IValues with tag '" + ivalue.tagKind() + "'");
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
inline intrusive_ptr<DictImpl> DictImpl::copy() const {
|
| 62 |
+
return make_intrusive<DictImpl>(dict, elementTypes);
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
template<class Key, class Value>
|
| 68 |
+
Dict<Key, Value>::Dict()
|
| 69 |
+
:Dict(make_intrusive<detail::DictImpl>(
|
| 70 |
+
detail::DictImpl::dict_map_type(),
|
| 71 |
+
detail::DictImpl::DictElementTypes{getTypePtr<Key>(), getTypePtr<Value>()})) {
|
| 72 |
+
static_assert(!std::is_same<Key, IValue>::value, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
|
| 73 |
+
static_assert(!std::is_same<Value, IValue>::value, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
template<class Key, class Value>
|
| 77 |
+
Dict<Key, Value>::Dict(TypePtr keyType, TypePtr valueType)
|
| 78 |
+
: Dict(make_intrusive<detail::DictImpl>(
|
| 79 |
+
detail::DictImpl::dict_map_type(),
|
| 80 |
+
detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) {
|
| 81 |
+
static_assert(std::is_same<Key, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
|
| 82 |
+
static_assert(std::is_same<Value, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
template<class Key, class Value>
|
| 86 |
+
Dict<Key, Value>::Dict(c10::intrusive_ptr<detail::DictImpl>&& impl): impl_(std::move(impl)) {}
|
| 87 |
+
|
| 88 |
+
template<class Key, class Value>
|
| 89 |
+
Dict<Key, Value> Dict<Key, Value>::copy() const {
|
| 90 |
+
return Dict<Key, Value>(impl_->copy());
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template<class Key, class Value>
|
| 94 |
+
typename Dict<Key, Value>::iterator Dict<Key, Value>::begin() const {
|
| 95 |
+
return iterator{impl_->dict.begin()};
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
template<class Key, class Value>
|
| 99 |
+
typename Dict<Key, Value>::iterator Dict<Key, Value>::end() const {
|
| 100 |
+
return iterator{impl_->dict.end()};
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
template<class Key, class Value>
|
| 104 |
+
bool Dict<Key, Value>::empty() const {
|
| 105 |
+
return impl_->dict.empty();
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
template<class Key, class Value>
|
| 109 |
+
typename Dict<Key, Value>::size_type Dict<Key, Value>::size() const {
|
| 110 |
+
return impl_->dict.size();
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
template<class Key, class Value>
|
| 114 |
+
void Dict<Key, Value>::clear() const {
|
| 115 |
+
impl_->dict.clear();
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
template<class Key, class Value>
|
| 119 |
+
template<class Key_, class Value_>
|
| 120 |
+
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Key_&& key, Value_&& value) const {
|
| 121 |
+
static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert");
|
| 122 |
+
static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert");
|
| 123 |
+
auto inserted = impl_->dict.emplace(
|
| 124 |
+
Key(std::forward<Key_>(key)),
|
| 125 |
+
Value(std::forward<Value_>(value)));
|
| 126 |
+
return {iterator{inserted.first}, inserted.second};
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
template<class Key, class Value>
|
| 130 |
+
template<class Key_, class Value_>
|
| 131 |
+
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert_or_assign(Key_&& key, Value_&& value) const {
|
| 132 |
+
static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert_or_assign");
|
| 133 |
+
static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert_or_assign");
|
| 134 |
+
auto inserted = impl_->dict.insert_or_assign(
|
| 135 |
+
Key(std::forward<Key_>(key)),
|
| 136 |
+
Value(std::forward<Value_>(value)));
|
| 137 |
+
return {iterator{inserted.first}, inserted.second};
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template<class Key, class Value>
|
| 141 |
+
void Dict<Key, Value>::erase(iterator iter) const {
|
| 142 |
+
impl_->dict.erase(iter.entryRef_.iterator_);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
template<class Key, class Value>
|
| 146 |
+
C10_NODISCARD size_t Dict<Key, Value>::erase(const Key& key) const {
|
| 147 |
+
return impl_->dict.erase(key);
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
template<class Key, class Value>
|
| 151 |
+
Value Dict<Key, Value>::at(const Key& key) const {
|
| 152 |
+
return impl_->dict.at(key).template to<Value>();
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
template<class Key, class Value>
|
| 156 |
+
typename Dict<Key, Value>::iterator Dict<Key, Value>::find(const Key& key) const {
|
| 157 |
+
return iterator{impl_->dict.find(key)};
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
template<class Key, class Value>
|
| 161 |
+
bool Dict<Key, Value>::contains(const Key& key) const {
|
| 162 |
+
return end() != find(key);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
template<class Key, class Value>
|
| 166 |
+
void Dict<Key, Value>::reserve(size_type count) const {
|
| 167 |
+
impl_->dict.reserve(count);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
template<class Key, class Value>
|
| 171 |
+
TypePtr Dict<Key, Value>::keyType() const {
|
| 172 |
+
return impl_->elementTypes.keyType;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
template<class Key, class Value>
|
| 176 |
+
TypePtr Dict<Key, Value>::valueType() const {
|
| 177 |
+
return impl_->elementTypes.valueType;
|
| 178 |
+
}
|
| 179 |
+
template <class Key, class Value>
|
| 180 |
+
void Dict<Key, Value>::unsafeSetKeyType(TypePtr t) {
|
| 181 |
+
impl_->elementTypes.keyType = std::move(t);
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
template <class Key, class Value>
|
| 185 |
+
void Dict<Key, Value>::unsafeSetValueType(TypePtr t) {
|
| 186 |
+
impl_->elementTypes.valueType = std::move(t);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
template <class Key_, class Value_>
|
| 190 |
+
bool operator==(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
|
| 191 |
+
// Dicts with the same identity trivially compare equal.
|
| 192 |
+
if (lhs.impl_ == rhs.impl_) {
|
| 193 |
+
return true;
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
// Otherwise compare the values
|
| 197 |
+
return *lhs.impl_ == *rhs.impl_;
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
template <class Key_, class Value_>
|
| 201 |
+
bool operator!=(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
|
| 202 |
+
return !(lhs == rhs);
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
template <class Key, class Value>
|
| 206 |
+
bool Dict<Key, Value>::is(const Dict& rhs) const {
|
| 207 |
+
return this->impl_ == rhs.impl_;
|
| 208 |
+
}
|
| 209 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/DimVector.h>
|
| 3 |
+
|
| 4 |
+
namespace at {
|
| 5 |
+
|
| 6 |
+
// Re-declaring 'DimVector' type and size inside 'at' namespace.
|
| 7 |
+
// This is done to avoid modifying every use into their 'c10'
|
| 8 |
+
// equivalent.
|
| 9 |
+
|
| 10 |
+
using c10::kDimVectorStaticSize;
|
| 11 |
+
using c10::DimVector;
|
| 12 |
+
|
| 13 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/symbol.h>
|
| 4 |
+
#include <c10/util/ArrayRef.h>
|
| 5 |
+
#include <optional>
|
| 6 |
+
#include <ostream>
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
enum class NameType: uint8_t { BASIC, WILDCARD };
|
| 11 |
+
|
| 12 |
+
struct TORCH_API Dimname {
|
| 13 |
+
static Dimname fromSymbol(Symbol name);
|
| 14 |
+
static Dimname wildcard();
|
| 15 |
+
static bool isValidName(const std::string& name);
|
| 16 |
+
|
| 17 |
+
NameType type() const { return type_; }
|
| 18 |
+
Symbol symbol() const { return name_; }
|
| 19 |
+
|
| 20 |
+
bool isBasic() const { return type_ == NameType::BASIC; }
|
| 21 |
+
bool isWildcard() const { return type_ == NameType::WILDCARD; }
|
| 22 |
+
|
| 23 |
+
bool matches(Dimname other) const;
|
| 24 |
+
std::optional<Dimname> unify(Dimname other) const;
|
| 25 |
+
|
| 26 |
+
private:
|
| 27 |
+
Dimname(Symbol name)
|
| 28 |
+
: name_(name), type_(NameType::BASIC) {}
|
| 29 |
+
Dimname(Symbol name, NameType type)
|
| 30 |
+
: name_(name), type_(type) {}
|
| 31 |
+
|
| 32 |
+
Symbol name_;
|
| 33 |
+
NameType type_;
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
using DimnameList = c10::ArrayRef<Dimname>;
|
| 37 |
+
|
| 38 |
+
TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname);
|
| 39 |
+
|
| 40 |
+
inline bool operator==(const Dimname& lhs, const Dimname& rhs) {
|
| 41 |
+
return lhs.symbol() == rhs.symbol();
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
inline bool operator!=(const Dimname& lhs, const Dimname& rhs) {
|
| 45 |
+
return !(lhs == rhs);
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Array.h>
|
| 4 |
+
#include <ATen/core/TransformationHelper.h>
|
| 5 |
+
#include <c10/util/Half.h>
|
| 6 |
+
#include <c10/util/BFloat16.h>
|
| 7 |
+
#include <c10/util/MathConstants.h>
|
| 8 |
+
#include <c10/macros/Macros.h>
|
| 9 |
+
|
| 10 |
+
#include <cmath>
|
| 11 |
+
#include <limits>
|
| 12 |
+
#include <optional>
|
| 13 |
+
#include <type_traits>
|
| 14 |
+
|
| 15 |
+
/**
|
| 16 |
+
* Distributions kernel adapted from THRandom.cpp
|
| 17 |
+
* The kernels try to follow std::random distributions signature
|
| 18 |
+
* For instance: in ATen
|
| 19 |
+
* auto gen = at::detail::createCPUGenerator();
|
| 20 |
+
* at::uniform_real_distribution<double> uniform(0, 1);
|
| 21 |
+
* auto sample = uniform(gen.get());
|
| 22 |
+
*
|
| 23 |
+
* vs std::random
|
| 24 |
+
*
|
| 25 |
+
* std::mt19937 gen;
|
| 26 |
+
* std::uniform_real_distribution uniform(0, 1);
|
| 27 |
+
* auto sample = uniform(gen);
|
| 28 |
+
*/
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
namespace at {
|
| 32 |
+
namespace {
|
| 33 |
+
|
| 34 |
+
/**
|
| 35 |
+
* Samples a discrete uniform distribution in the range [base, base+range) of type T
|
| 36 |
+
*/
|
| 37 |
+
template <typename T>
|
| 38 |
+
struct uniform_int_from_to_distribution {
|
| 39 |
+
|
| 40 |
+
C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {}
|
| 41 |
+
|
| 42 |
+
template <typename RNG>
|
| 43 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 44 |
+
if ((
|
| 45 |
+
std::is_same<T, int64_t>::value ||
|
| 46 |
+
std::is_same<T, double>::value ||
|
| 47 |
+
std::is_same<T, float>::value ||
|
| 48 |
+
std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32)
|
| 49 |
+
{
|
| 50 |
+
return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_);
|
| 51 |
+
} else {
|
| 52 |
+
return transformation::uniform_int_from_to<T>(generator->random(), range_, base_);
|
| 53 |
+
}
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
private:
|
| 57 |
+
uint64_t range_;
|
| 58 |
+
int64_t base_;
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)]
|
| 63 |
+
*/
|
| 64 |
+
template <typename T>
|
| 65 |
+
struct uniform_int_full_range_distribution {
|
| 66 |
+
|
| 67 |
+
template <typename RNG>
|
| 68 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 69 |
+
return transformation::uniform_int_full_range<T>(generator->random64());
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
/**
|
| 75 |
+
* Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types
|
| 76 |
+
* and [0, 2^mantissa] for floating-point types.
|
| 77 |
+
*/
|
| 78 |
+
template <typename T>
|
| 79 |
+
struct uniform_int_distribution {
|
| 80 |
+
|
| 81 |
+
template <typename RNG>
|
| 82 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 83 |
+
if constexpr (std::is_same_v<T, double> || std::is_same_v<T, int64_t>) {
|
| 84 |
+
return transformation::uniform_int<T>(generator->random64());
|
| 85 |
+
} else {
|
| 86 |
+
return transformation::uniform_int<T>(generator->random());
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
/**
|
| 93 |
+
* Samples a uniform distribution in the range [from, to) of type T
|
| 94 |
+
*/
|
| 95 |
+
template <typename T>
|
| 96 |
+
struct uniform_real_distribution {
|
| 97 |
+
|
| 98 |
+
C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
|
| 99 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
|
| 100 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
|
| 101 |
+
from_ = from;
|
| 102 |
+
to_ = to;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
template <typename RNG>
|
| 106 |
+
C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
|
| 107 |
+
if constexpr (std::is_same_v<T, double>) {
|
| 108 |
+
return transformation::uniform_real<T>(generator->random64(), from_, to_);
|
| 109 |
+
} else {
|
| 110 |
+
return transformation::uniform_real<T>(generator->random(), from_, to_);
|
| 111 |
+
}
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
private:
|
| 115 |
+
T from_;
|
| 116 |
+
T to_;
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
// The SFINAE checks introduced in #39816 looks overcomplicated and must revisited
|
| 120 |
+
// https://github.com/pytorch/pytorch/issues/40052
|
| 121 |
+
#define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \
|
| 122 |
+
template <typename T> \
|
| 123 |
+
struct has_member_##member \
|
| 124 |
+
{ \
|
| 125 |
+
typedef char yes; \
|
| 126 |
+
typedef long no; \
|
| 127 |
+
template <typename U> static yes test(decltype(&U::member)); \
|
| 128 |
+
template <typename U> static no test(...); \
|
| 129 |
+
static constexpr bool value = sizeof(test<T>(0)) == sizeof(yes); \
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample);
|
| 133 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample);
|
| 134 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample);
|
| 135 |
+
DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample);
|
| 136 |
+
|
| 137 |
+
#define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \
|
| 138 |
+
\
|
| 139 |
+
template <typename RNG, typename ret_type, \
|
| 140 |
+
typename std::enable_if_t<( \
|
| 141 |
+
has_member_next_##TYPE##_normal_sample<RNG>::value && \
|
| 142 |
+
has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 143 |
+
), int> = 0> \
|
| 144 |
+
C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \
|
| 145 |
+
if (generator->next_##TYPE##_normal_sample()) { \
|
| 146 |
+
*ret = *(generator->next_##TYPE##_normal_sample()); \
|
| 147 |
+
generator->set_next_##TYPE##_normal_sample(std::optional<TYPE>()); \
|
| 148 |
+
return true; \
|
| 149 |
+
} \
|
| 150 |
+
return false; \
|
| 151 |
+
} \
|
| 152 |
+
\
|
| 153 |
+
template <typename RNG, typename ret_type, \
|
| 154 |
+
typename std::enable_if_t<( \
|
| 155 |
+
!has_member_next_##TYPE##_normal_sample<RNG>::value || \
|
| 156 |
+
!has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 157 |
+
), int> = 0> \
|
| 158 |
+
C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \
|
| 159 |
+
return false; \
|
| 160 |
+
} \
|
| 161 |
+
\
|
| 162 |
+
template <typename RNG, typename ret_type, \
|
| 163 |
+
typename std::enable_if_t<( \
|
| 164 |
+
has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 165 |
+
), int> = 0> \
|
| 166 |
+
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \
|
| 167 |
+
generator->set_next_##TYPE##_normal_sample(cache); \
|
| 168 |
+
} \
|
| 169 |
+
\
|
| 170 |
+
template <typename RNG, typename ret_type, \
|
| 171 |
+
typename std::enable_if_t<( \
|
| 172 |
+
!has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
| 173 |
+
), int> = 0> \
|
| 174 |
+
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double);
|
| 178 |
+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
|
| 179 |
+
|
| 180 |
+
/**
|
| 181 |
+
* Samples a normal distribution using the Box-Muller method
|
| 182 |
+
* Takes mean and standard deviation as inputs
|
| 183 |
+
* Note that Box-muller method returns two samples at a time.
|
| 184 |
+
* Hence, we cache the "next" sample in the CPUGeneratorImpl class.
|
| 185 |
+
*/
|
| 186 |
+
template <typename T>
|
| 187 |
+
struct normal_distribution {
|
| 188 |
+
|
| 189 |
+
C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
|
| 190 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
|
| 191 |
+
mean = mean_in;
|
| 192 |
+
stdv = stdv_in;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
template <typename RNG>
|
| 196 |
+
C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
|
| 197 |
+
dist_acctype<T> ret;
|
| 198 |
+
// return cached values if available
|
| 199 |
+
if constexpr (std::is_same_v<T, double>) {
|
| 200 |
+
if (maybe_get_next_double_normal_sample(generator, &ret)) {
|
| 201 |
+
return transformation::normal(ret, mean, stdv);
|
| 202 |
+
}
|
| 203 |
+
} else {
|
| 204 |
+
if (maybe_get_next_float_normal_sample(generator, &ret)) {
|
| 205 |
+
return transformation::normal(ret, mean, stdv);
|
| 206 |
+
}
|
| 207 |
+
}
|
| 208 |
+
// otherwise generate new normal values
|
| 209 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 210 |
+
const dist_acctype<T> u1 = uniform(generator);
|
| 211 |
+
const dist_acctype<T> u2 = uniform(generator);
|
| 212 |
+
const dist_acctype<T> r = ::sqrt(static_cast<T>(-2.0) * ::log1p(-u2));
|
| 213 |
+
const dist_acctype<T> theta = static_cast<T>(2.0) * c10::pi<T> * u1;
|
| 214 |
+
if constexpr (std::is_same_v<T, double>) {
|
| 215 |
+
maybe_set_next_double_normal_sample(generator, r * ::sin(theta));
|
| 216 |
+
} else {
|
| 217 |
+
maybe_set_next_float_normal_sample(generator, r * ::sin(theta));
|
| 218 |
+
}
|
| 219 |
+
ret = r * ::cos(theta);
|
| 220 |
+
return transformation::normal(ret, mean, stdv);
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
private:
|
| 224 |
+
T mean;
|
| 225 |
+
T stdv;
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
template <typename T>
|
| 229 |
+
struct DiscreteDistributionType { using type = float; };
|
| 230 |
+
|
| 231 |
+
template <> struct DiscreteDistributionType<double> { using type = double; };
|
| 232 |
+
|
| 233 |
+
/**
|
| 234 |
+
* Samples a bernoulli distribution given a probability input
|
| 235 |
+
*/
|
| 236 |
+
template <typename T>
|
| 237 |
+
struct bernoulli_distribution {
|
| 238 |
+
|
| 239 |
+
C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
|
| 240 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
|
| 241 |
+
p = p_in;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
template <typename RNG>
|
| 245 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 246 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 247 |
+
return transformation::bernoulli<T>(uniform(generator), p);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
private:
|
| 251 |
+
T p;
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
/**
|
| 255 |
+
* Samples a geometric distribution given a probability input
|
| 256 |
+
*/
|
| 257 |
+
template <typename T>
|
| 258 |
+
struct geometric_distribution {
|
| 259 |
+
|
| 260 |
+
C10_HOST_DEVICE inline geometric_distribution(T p_in) {
|
| 261 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
|
| 262 |
+
p = p_in;
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
template <typename RNG>
|
| 266 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 267 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 268 |
+
return transformation::geometric<T>(uniform(generator), p);
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
private:
|
| 272 |
+
T p;
|
| 273 |
+
};
|
| 274 |
+
|
| 275 |
+
/**
|
| 276 |
+
* Samples an exponential distribution given a lambda input
|
| 277 |
+
*/
|
| 278 |
+
template <typename T>
|
| 279 |
+
struct exponential_distribution {
|
| 280 |
+
|
| 281 |
+
C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {}
|
| 282 |
+
|
| 283 |
+
template <typename RNG>
|
| 284 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 285 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 286 |
+
return transformation::exponential<T>(uniform(generator), lambda);
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
private:
|
| 290 |
+
T lambda;
|
| 291 |
+
};
|
| 292 |
+
|
| 293 |
+
/**
|
| 294 |
+
* Samples a cauchy distribution given median and sigma as inputs
|
| 295 |
+
*/
|
| 296 |
+
template <typename T>
|
| 297 |
+
struct cauchy_distribution {
|
| 298 |
+
|
| 299 |
+
C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {}
|
| 300 |
+
|
| 301 |
+
template <typename RNG>
|
| 302 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
| 303 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
| 304 |
+
return transformation::cauchy<T>(uniform(generator), median, sigma);
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
private:
|
| 308 |
+
T median;
|
| 309 |
+
T sigma;
|
| 310 |
+
};
|
| 311 |
+
|
| 312 |
+
/**
|
| 313 |
+
* Samples a lognormal distribution
|
| 314 |
+
* Takes mean and standard deviation as inputs
|
| 315 |
+
* Outputs two samples at a time
|
| 316 |
+
*/
|
| 317 |
+
template <typename T>
|
| 318 |
+
struct lognormal_distribution {
|
| 319 |
+
|
| 320 |
+
C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
|
| 321 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
|
| 322 |
+
mean = mean_in;
|
| 323 |
+
stdv = stdv_in;
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
template<typename RNG>
|
| 327 |
+
C10_HOST_DEVICE inline T operator()(RNG generator){
|
| 328 |
+
normal_distribution<T> normal(mean, stdv);
|
| 329 |
+
return transformation::log_normal<T>(normal(generator));
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
private:
|
| 333 |
+
T mean;
|
| 334 |
+
T stdv;
|
| 335 |
+
};
|
| 336 |
+
}
|
| 337 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ostream>
|
| 4 |
+
#include <string>
|
| 5 |
+
|
| 6 |
+
#include <c10/core/Scalar.h>
|
| 7 |
+
#include <ATen/core/Tensor.h>
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
TORCH_API std::ostream& operator<<(std::ostream& out, Backend b);
|
| 11 |
+
TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s);
|
| 12 |
+
TORCH_API std::string toString(const Scalar& s);
|
| 13 |
+
}
|
| 14 |
+
namespace at {
|
| 15 |
+
|
| 16 |
+
TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
|
| 17 |
+
TORCH_API std::ostream& print(
|
| 18 |
+
std::ostream& stream,
|
| 19 |
+
const Tensor& tensor,
|
| 20 |
+
int64_t linesize);
|
| 21 |
+
inline std::ostream& operator<<(std::ostream & out, const Tensor & t) {
|
| 22 |
+
return print(out,t,80);
|
| 23 |
+
}
|
| 24 |
+
TORCH_API void print(const Tensor & t, int64_t linesize=80);
|
| 25 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <deque>
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <utility>
|
| 7 |
+
|
| 8 |
+
#include <c10/util/Exception.h>
|
| 9 |
+
#include <c10/util/intrusive_ptr.h>
|
| 10 |
+
#include <c10/core/Device.h>
|
| 11 |
+
#include <c10/core/DispatchKeySet.h>
|
| 12 |
+
|
| 13 |
+
// For the record I don't think this is a correct pimpl idiom.
|
| 14 |
+
// Including Impl header in interface header defeats the purpose
|
| 15 |
+
// because you can't change Impl private members without forcing
|
| 16 |
+
// everything that included the interface to rebuild.
|
| 17 |
+
// Impl should be forward-declared in the interface header instead.
|
| 18 |
+
#include <c10/core/GeneratorImpl.h>
|
| 19 |
+
|
| 20 |
+
/**
|
| 21 |
+
* Note [Generator]
|
| 22 |
+
* ~~~~~~~~~~~~~~~~
|
| 23 |
+
* A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm to
|
| 24 |
+
* generate a seemingly random sequence of numbers, that may be later be used in creating
|
| 25 |
+
* a random distribution. Such an engine almost always maintains a state and requires a
|
| 26 |
+
* seed to start off the creation of random numbers. Often times, users have
|
| 27 |
+
* found it beneficial to be able to explicitly create, retain, and destroy
|
| 28 |
+
* PRNG states and also be able to have control over the seed value.
|
| 29 |
+
*
|
| 30 |
+
* A Generator in ATen gives users the ability to read, write and modify a PRNG engine.
|
| 31 |
+
* For instance, it does so by letting users seed a PRNG engine, fork the state of the
|
| 32 |
+
* engine, etc.
|
| 33 |
+
*
|
| 34 |
+
* By default, there is one generator per device, and a device's generator is
|
| 35 |
+
* lazily created. A user can use the torch.Generator() api to create their own generator.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
/**
|
| 39 |
+
* Note [Acquire lock when using random generators]
|
| 40 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 41 |
+
* Generator and its derived classes are NOT thread-safe. Please note that most of the
|
| 42 |
+
* places where we have inserted locking for generators are historically based, and we
|
| 43 |
+
* haven't actually checked that everything is truly thread safe (and it probably isn't).
|
| 44 |
+
* Please use the public mutex_ when using any methods from these classes, except for the
|
| 45 |
+
* read-only methods. You can learn about the usage by looking into the unittests
|
| 46 |
+
* (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard.
|
| 47 |
+
*
|
| 48 |
+
* TODO: Look into changing the threading semantics of Generators in ATen (e.g., making
|
| 49 |
+
* them non-thread safe and instead making the generator state splittable, to accommodate
|
| 50 |
+
* forks into other threads).
|
| 51 |
+
*/
|
| 52 |
+
|
| 53 |
+
namespace at {
|
| 54 |
+
|
| 55 |
+
class Tensor;
|
| 56 |
+
|
| 57 |
+
struct TORCH_API Generator {
|
| 58 |
+
Generator() = default;
|
| 59 |
+
|
| 60 |
+
explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl)
|
| 61 |
+
: impl_(std::move(gen_impl)) {
|
| 62 |
+
if (impl_.get() == nullptr) {
|
| 63 |
+
throw std::runtime_error("GeneratorImpl with nullptr is not supported");
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
bool operator==(const Generator& rhs) const {
|
| 68 |
+
return this->impl_ == rhs.impl_;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
bool operator!=(const Generator& rhs) const {
|
| 72 |
+
return !((*this) == rhs);
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
bool defined() const {
|
| 76 |
+
return static_cast<bool>(impl_);
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
c10::GeneratorImpl* unsafeGetGeneratorImpl() const {
|
| 80 |
+
return impl_.get();
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
c10::GeneratorImpl* unsafeReleaseGeneratorImpl() {
|
| 84 |
+
return impl_.release();
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
const c10::intrusive_ptr<c10::GeneratorImpl>& getIntrusivePtr() const {
|
| 88 |
+
return impl_;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
void set_current_seed(uint64_t seed) { impl_->set_current_seed(seed); }
|
| 92 |
+
// Sets the offset of Generator state to the desired offset. This is currently
|
| 93 |
+
// supported for only Philox based Generators, i.e., CUDA and MPS.
|
| 94 |
+
void set_offset(uint64_t offset) { impl_->set_offset(offset); }
|
| 95 |
+
|
| 96 |
+
// Returns the offset of Generator state. This is currently supported for only
|
| 97 |
+
// Philox based Generators, i.e., CUDA and MPS.
|
| 98 |
+
uint64_t get_offset() const { return impl_->get_offset(); }
|
| 99 |
+
|
| 100 |
+
uint64_t current_seed() const { return impl_->current_seed(); }
|
| 101 |
+
|
| 102 |
+
uint64_t seed() { return impl_->seed(); }
|
| 103 |
+
|
| 104 |
+
// Implementation not inlined to prevent cycle reference between
|
| 105 |
+
// `ATen/core/Generator.h` and `ATen/core/Tensor.h`
|
| 106 |
+
void set_state(const at::Tensor& new_state);
|
| 107 |
+
|
| 108 |
+
at::Tensor get_state() const;
|
| 109 |
+
|
| 110 |
+
void graphsafe_set_state(const Generator& new_state);
|
| 111 |
+
|
| 112 |
+
Generator graphsafe_get_state() const;
|
| 113 |
+
|
| 114 |
+
std::mutex& mutex() {
|
| 115 |
+
return impl_->mutex_;
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
DispatchKeySet key_set() const {
|
| 119 |
+
return impl_->key_set();
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
Device device() const { return impl_->device(); }
|
| 123 |
+
|
| 124 |
+
inline void set_pyobj(PyObject* pyobj) const noexcept {
|
| 125 |
+
impl_->set_pyobj(pyobj);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
inline PyObject* pyobj() const noexcept {
|
| 129 |
+
return impl_->pyobj();
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
template<typename T>
|
| 133 |
+
T* get() const { return static_cast<T*>(impl_.get()); }
|
| 134 |
+
|
| 135 |
+
Generator clone() const {
|
| 136 |
+
return Generator(impl_->clone());
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
private:
|
| 140 |
+
c10::intrusive_ptr<c10::GeneratorImpl> impl_;
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
template<class Impl, class... Args>
|
| 144 |
+
Generator make_generator(Args&&... args) {
|
| 145 |
+
return Generator(c10::make_intrusive<Impl>(std::forward<Args>(args)...));
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
/**
|
| 149 |
+
* Utility function to static cast input Generator* to
|
| 150 |
+
* the backend generator type (CPU/CUDAGeneratorImpl etc.)
|
| 151 |
+
*/
|
| 152 |
+
template <typename T>
|
| 153 |
+
inline T * check_generator(std::optional<Generator> gen) {
|
| 154 |
+
TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
|
| 155 |
+
TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
|
| 156 |
+
TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
|
| 157 |
+
return gen->get<T>();
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
/**
|
| 161 |
+
* Utility function used in tensor implementations, which
|
| 162 |
+
* supplies the default generator to tensors, if an input generator
|
| 163 |
+
* is not supplied. The input Generator* is also static casted to
|
| 164 |
+
* the backend generator type (CPU/CUDAGeneratorImpl etc.)
|
| 165 |
+
*/
|
| 166 |
+
template <typename T>
|
| 167 |
+
inline T* get_generator_or_default(const std::optional<Generator>& gen, const Generator& default_gen) {
|
| 168 |
+
return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
namespace detail {
|
| 172 |
+
|
| 173 |
+
/**
|
| 174 |
+
* Helper function for checking the validity of new random generator
|
| 175 |
+
* state. Right now following conditions are checked:
|
| 176 |
+
*
|
| 177 |
+
* - The new state tensor must be a torch.ByteTensor
|
| 178 |
+
* - Data of the new state tensor must be contiguous
|
| 179 |
+
*/
|
| 180 |
+
inline void check_rng_state(const c10::TensorImpl& new_state) {
|
| 181 |
+
TORCH_CHECK_TYPE(
|
| 182 |
+
new_state.layout() == kStrided && new_state.device().type() == kCPU && new_state.dtype() == kByte,
|
| 183 |
+
"RNG state must be a torch.ByteTensor"
|
| 184 |
+
);
|
| 185 |
+
|
| 186 |
+
TORCH_CHECK(new_state.is_contiguous(), "RNG state must be contiguous");
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
} // namespace detail
|
| 190 |
+
|
| 191 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Generator.h>
|
| 4 |
+
#include <c10/util/intrusive_ptr.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>;
|
| 9 |
+
|
| 10 |
+
std::optional<GeneratorFuncType>& GetGeneratorPrivate();
|
| 11 |
+
|
| 12 |
+
class TORCH_API _GeneratorRegister {
|
| 13 |
+
public:
|
| 14 |
+
explicit _GeneratorRegister(const GeneratorFuncType& func);
|
| 15 |
+
};
|
| 16 |
+
|
| 17 |
+
TORCH_API at::Generator GetGeneratorForPrivateuse1(
|
| 18 |
+
c10::DeviceIndex device_index);
|
| 19 |
+
|
| 20 |
+
/**
|
| 21 |
+
* This is used to register Generator to PyTorch for `privateuse1` key.
|
| 22 |
+
*
|
| 23 |
+
* Usage: REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1)
|
| 24 |
+
*
|
| 25 |
+
* class CustomGeneratorImpl : public c10::GeneratorImpl {
|
| 26 |
+
* CustomGeneratorImpl(DeviceIndex device_index = -1);
|
| 27 |
+
* explicit ~CustomGeneratorImpl() override = default;
|
| 28 |
+
* ...
|
| 29 |
+
* };
|
| 30 |
+
*
|
| 31 |
+
* at::Generator MakeGeneratorForPrivateuse1(c10::DeviceIndex id) {
|
| 32 |
+
* return at::make_generator<CustomGeneratorImpl>(id);
|
| 33 |
+
* }
|
| 34 |
+
*/
|
| 35 |
+
|
| 36 |
+
#define REGISTER_GENERATOR_PRIVATEUSE1(GeneratorPrivate) \
|
| 37 |
+
static auto temp##GeneratorPrivate = at::_GeneratorRegister(GeneratorPrivate);
|
| 38 |
+
|
| 39 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/List.h>
|
| 4 |
+
#include <ATen/core/Tensor.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
class Tensor;
|
| 8 |
+
class OptionalTensorRef;
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
namespace c10::detail {
|
| 13 |
+
|
| 14 |
+
/*
|
| 15 |
+
* Specializations of `IListRefTagImplBase` that implement the default
|
| 16 |
+
* implementation for `IListRefTag::Unboxed`.
|
| 17 |
+
*/
|
| 18 |
+
template <typename T, typename ListElemT>
|
| 19 |
+
class IListRefTagImplBase<IListRefTag::Unboxed, T, ListElemT> {
|
| 20 |
+
public:
|
| 21 |
+
using elem_type = ListElemT;
|
| 22 |
+
using list_type = ArrayRef<elem_type>;
|
| 23 |
+
|
| 24 |
+
/*
|
| 25 |
+
* These `unwrap` static methods unwraps the inner containers out
|
| 26 |
+
* of `IListRef<T>` (and `IListRefIterator<T>`). They are required when
|
| 27 |
+
* the macro `TORCH_ILISTREF_UNWRAP` is called.
|
| 28 |
+
*/
|
| 29 |
+
static const list_type& unwrap(const IListRef<T>& ilist) {
|
| 30 |
+
return ilist.payload_.unboxed;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
|
| 34 |
+
return it.payload_.unboxed_iterator;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
static const typename list_type::const_iterator& unwrap(
|
| 38 |
+
const IListRefIterator<T>& it) {
|
| 39 |
+
return it.payload_.unboxed_iterator;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
/*
|
| 43 |
+
* We have these function (besides the `unwrap`s above) because the
|
| 44 |
+
* implementation for both `IListRef::operator[]` and `IListRefIterator::operator*`
|
| 45 |
+
* weren't syntatically equal for the existing tags at the time
|
| 46 |
+
* (`Unboxed` and `Boxed`).
|
| 47 |
+
*/
|
| 48 |
+
static IListRefConstRef<T> front(const list_type& lst) {
|
| 49 |
+
return lst.front();
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
static IListRefConstRef<T> iterator_get(
|
| 53 |
+
const typename list_type::const_iterator& it) {
|
| 54 |
+
return *it;
|
| 55 |
+
}
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
/*
|
| 59 |
+
* Specializations of `IListRefTagImplBase` that implement the default
|
| 60 |
+
* implementation for `IListRefTag::Boxed`.
|
| 61 |
+
*/
|
| 62 |
+
template <typename T, typename ListElemT>
|
| 63 |
+
class IListRefTagImplBase<IListRefTag::Boxed, T, ListElemT> {
|
| 64 |
+
public:
|
| 65 |
+
using elem_type = ListElemT;
|
| 66 |
+
using list_type = List<elem_type>;
|
| 67 |
+
|
| 68 |
+
static const list_type& unwrap(const IListRef<T>& ilist) {
|
| 69 |
+
return *ilist.payload_.boxed;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
|
| 73 |
+
return it.payload_.boxed_iterator;
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
static const typename list_type::const_iterator& unwrap(
|
| 77 |
+
const IListRefIterator<T>& it) {
|
| 78 |
+
return it.payload_.boxed_iterator;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
static IListRefConstRef<T> front(const list_type& lst) {
|
| 82 |
+
return lst[0];
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
static IListRefConstRef<T> iterator_get(
|
| 86 |
+
const typename list_type::const_iterator& it) {
|
| 87 |
+
return (*it).get().toTensor();
|
| 88 |
+
}
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
/*
|
| 92 |
+
* Specializations of `IListRefTagImplBase` that implement the default
|
| 93 |
+
* implementation for `IListRefTag::Materialized`.
|
| 94 |
+
*/
|
| 95 |
+
template <typename T>
|
| 96 |
+
class IListRefTagImplBase<IListRefTag::Materialized, T, MaterializedIListRefElem<T>> {
|
| 97 |
+
public:
|
| 98 |
+
using elem_type = MaterializedIListRefElem<T>;
|
| 99 |
+
using list_type = MaterializedIListRef<T>;
|
| 100 |
+
|
| 101 |
+
static const list_type& unwrap(const IListRef<T>& ilist) {
|
| 102 |
+
return *ilist.payload_.materialized;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
|
| 106 |
+
return it.payload_.materialized_iterator;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
static const typename list_type::const_iterator& unwrap(
|
| 110 |
+
const IListRefIterator<T>& it) {
|
| 111 |
+
return it.payload_.materialized_iterator;
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
static IListRefConstRef<T> front(const list_type& lst) {
|
| 115 |
+
return lst[0];
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
static IListRefConstRef<T> iterator_get(
|
| 119 |
+
const typename list_type::const_iterator& it) {
|
| 120 |
+
return *it;
|
| 121 |
+
}
|
| 122 |
+
};
|
| 123 |
+
|
| 124 |
+
/*
|
| 125 |
+
* [Note: ITensorListRef]
|
| 126 |
+
* Specializations necessary for `IListRef<at::Tensor>` type.
|
| 127 |
+
*
|
| 128 |
+
* Since the default implementations are usually done with supporting
|
| 129 |
+
* `Tensor` in mind, we only have to inherit from the base implementations.
|
| 130 |
+
*/
|
| 131 |
+
template <>
|
| 132 |
+
class IListRefTagImpl<IListRefTag::Unboxed, at::Tensor>
|
| 133 |
+
: public IListRefTagImplBase<IListRefTag::Unboxed, at::Tensor> {};
|
| 134 |
+
|
| 135 |
+
template <>
|
| 136 |
+
class IListRefTagImpl<IListRefTag::Boxed, at::Tensor>
|
| 137 |
+
: public IListRefTagImplBase<IListRefTag::Boxed, at::Tensor> {};
|
| 138 |
+
|
| 139 |
+
template <>
|
| 140 |
+
class IListRefTagImpl<IListRefTag::Materialized, at::Tensor>
|
| 141 |
+
: public IListRefTagImplBase<
|
| 142 |
+
IListRefTag::Materialized,
|
| 143 |
+
at::Tensor,
|
| 144 |
+
MaterializedIListRefElem<at::Tensor>> {};
|
| 145 |
+
|
| 146 |
+
/*
|
| 147 |
+
* [Note: IOptTensorListRef]
|
| 148 |
+
* Specializations necessary for `IListRef<at::OptionalTensorRef>` type.
|
| 149 |
+
*
|
| 150 |
+
* We can't get an `at::OptionalTensorRef` directly from an instance of
|
| 151 |
+
* `List<optional<Tensor>>` (the type that corresponds to the boxed world).
|
| 152 |
+
*
|
| 153 |
+
* So, the default implementation won't help us. Thus, we have to implement
|
| 154 |
+
* this method ourselves.
|
| 155 |
+
*/
|
| 156 |
+
template <>
|
| 157 |
+
class IListRefTagImpl<IListRefTag::Unboxed, at::OptionalTensorRef>
|
| 158 |
+
: public IListRefTagImplBase<IListRefTag::Unboxed, at::OptionalTensorRef> {};
|
| 159 |
+
|
| 160 |
+
template <>
|
| 161 |
+
class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef>
|
| 162 |
+
: public IListRefTagImplBase<IListRefTag::Boxed, at::OptionalTensorRef, std::optional<at::Tensor>> {
|
| 163 |
+
|
| 164 |
+
public:
|
| 165 |
+
/*
|
| 166 |
+
* Given an instance of the types corresponding to the `Boxed` tag, we override
|
| 167 |
+
* the default implementation, so that we can return a `at::OptionalTensorRef`.
|
| 168 |
+
*/
|
| 169 |
+
static IListRefConstRef<at::OptionalTensorRef> iterator_get(
|
| 170 |
+
const typename list_type::const_iterator& it) {
|
| 171 |
+
const auto& ivalue = (*it).get();
|
| 172 |
+
if (!ivalue.isNone()) {
|
| 173 |
+
const auto& tensor = ivalue.toTensor();
|
| 174 |
+
return (tensor.defined()) ? tensor : at::OptionalTensorRef{};
|
| 175 |
+
}
|
| 176 |
+
return {};
|
| 177 |
+
}
|
| 178 |
+
};
|
| 179 |
+
|
| 180 |
+
template <>
|
| 181 |
+
class IListRefTagImpl<IListRefTag::Materialized, at::OptionalTensorRef>
|
| 182 |
+
: public IListRefTagImplBase<
|
| 183 |
+
IListRefTag::Materialized,
|
| 184 |
+
at::OptionalTensorRef,
|
| 185 |
+
MaterializedIListRefElem<at::OptionalTensorRef>> {};
|
| 186 |
+
|
| 187 |
+
} // namespace c10::detail
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
namespace at {
|
| 191 |
+
|
| 192 |
+
// [Note: ITensorListRef]
|
| 193 |
+
using ITensorListRef = c10::IListRef<at::Tensor>;
|
| 194 |
+
using ITensorListRefIterator = c10::IListRefIterator<at::Tensor>;
|
| 195 |
+
using MaterializedITensorListRef = c10::detail::MaterializedIListRef<at::Tensor>;
|
| 196 |
+
// [Note: IOptTensorListRef]
|
| 197 |
+
using IOptTensorListRef = c10::IListRef<at::OptionalTensorRef>;
|
| 198 |
+
using IOptTensorListRefIterator = c10::IListRefIterator<at::OptionalTensorRef>;
|
| 199 |
+
using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef<at::OptionalTensorRef>;
|
| 200 |
+
|
| 201 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// The legacy mechanism for dispatching operators in ATen is a Type
|
| 4 |
+
// object, which is essentially a giant virtual dispatch table
|
| 5 |
+
// for every operation we support dynamically dispatching over.
|
| 6 |
+
//
|
| 7 |
+
// This has been deprecated in favor of ATenDispatch, and in the future,
|
| 8 |
+
// c10 dispatcher.
|
| 9 |
+
// TODO: Clean up what remains here
|
| 10 |
+
|
| 11 |
+
#include <c10/core/impl/LocalDispatchKeySet.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
|
| 15 |
+
// A RAII, thread local (!) guard that will disable dispatch to variable
|
| 16 |
+
// handler.
|
| 17 |
+
//
|
| 18 |
+
// NOTE [ Treating Variables as non-Variables in type dispatch ]
|
| 19 |
+
//
|
| 20 |
+
// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes
|
| 21 |
+
// dispatches on ATen functions to go to the non-variable implementation,
|
| 22 |
+
// bypassing autograd handling (and also profiling and tracing).
|
| 23 |
+
//
|
| 24 |
+
// To understand why this guard exists, it's helpful to understand the history
|
| 25 |
+
// behind how Variable was implemented. Previously, Variables were implemented
|
| 26 |
+
// as a wrapper on Tensors; so the act of processing a Variable involved
|
| 27 |
+
// unwrapping the underlying Tensor, and then calling the underlying base
|
| 28 |
+
// operation on /that/ operation
|
| 29 |
+
//
|
| 30 |
+
// However, after the Variable/Tensor merge, there is no concept of unwrapping
|
| 31 |
+
// a tensor anymore. If you just call the operation on the same variable
|
| 32 |
+
// again inside your VariableType handler, you'll dispatch back to
|
| 33 |
+
// VariableType, which is not what we want.
|
| 34 |
+
//
|
| 35 |
+
// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which
|
| 36 |
+
// when enabled will cause `legacyTensorType()` and `getType()` to always return
|
| 37 |
+
// non-Variable type, even if the tensor being called on is a variable.
|
| 38 |
+
|
| 39 |
+
/* Note [AutoDispatchBelowAutograd]
|
| 40 |
+
* AutoDispatchBelowAutograd is **INTERNAL ONLY** that it should be used
|
| 41 |
+
* for kernel implementations and customized C++ kernels.
|
| 42 |
+
* If you are looking for a guard to run workload in inference mode, please use
|
| 43 |
+
* c10::InferenceMode RAII which is user facing API.
|
| 44 |
+
* In the past AutoDispatchBelowAutograd(or its old version AutoNonVariableTypeMode)
|
| 45 |
+
* was used in the user code for inference-only workload, this was under risk of
|
| 46 |
+
* producing wrong results silently in some edge cases. For example:
|
| 47 |
+
* ```
|
| 48 |
+
* torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true);
|
| 49 |
+
* torch::Tensor out = s * s;
|
| 50 |
+
* {
|
| 51 |
+
* at::AutoDispatchBelowAutograd guard;
|
| 52 |
+
* s.add_(1); // Skips version bump on `s`.
|
| 53 |
+
* }
|
| 54 |
+
* // WRONG GRADIENT! s.grad() are now computed using `s` value after the
|
| 55 |
+
* // inplace update.
|
| 56 |
+
* out.backward(torch::ones_like(out));
|
| 57 |
+
* ```
|
| 58 |
+
* Users should use `c10::InferenceMode` here so that it'll properly throw an
|
| 59 |
+
* error saying "one of the variables needed for gradient computation has be modified."
|
| 60 |
+
*/
|
| 61 |
+
struct TORCH_API AutoDispatchBelowAutograd {
|
| 62 |
+
AutoDispatchBelowAutograd() :
|
| 63 |
+
autograd_guard_(c10::autograd_dispatch_keyset) {
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
// disable all autograd dispatch keys
|
| 67 |
+
c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
// TODO: AutoNonVariableTypeMode should be removed in release 1.10.
|
| 71 |
+
struct TORCH_API AutoNonVariableTypeMode {
|
| 72 |
+
AutoNonVariableTypeMode(bool enabled = true) :
|
| 73 |
+
autograd_guard_(c10::autograd_dispatch_keyset) {
|
| 74 |
+
TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. "
|
| 75 |
+
"For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, "
|
| 76 |
+
"If you are looking for a user facing API to enable running your inference-only "
|
| 77 |
+
"workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code "
|
| 78 |
+
"is under risk of producing silent wrong result in some edge cases. "
|
| 79 |
+
"See Note [AutoDispatchBelowAutograd] for more details.");
|
| 80 |
+
TORCH_INTERNAL_ASSERT(enabled);
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
// disable all autograd dispatch keys
|
| 84 |
+
c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
struct TORCH_API AutoDispatchSkipFunctionalize {
|
| 88 |
+
AutoDispatchSkipFunctionalize() :
|
| 89 |
+
dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) {
|
| 90 |
+
}
|
| 91 |
+
c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
/* Note [AutoDispatchBelowADInplaceOrView]
|
| 95 |
+
* AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode
|
| 96 |
+
* before we split inplace & view ops out of VariableType kernel.
|
| 97 |
+
* Note this guard is used in VariableType kernels for functional ops
|
| 98 |
+
* as well as ADInplaceOrView kernels for inplace/view ops to enforce the
|
| 99 |
+
* Invariant:
|
| 100 |
+
* Once you are in VariableType/ADInplaceOrView kernel for an op,
|
| 101 |
+
* you never go back to a kernel on same dispatch key until
|
| 102 |
+
* you finish the current op.
|
| 103 |
+
*/
|
| 104 |
+
struct TORCH_API AutoDispatchBelowADInplaceOrView {
|
| 105 |
+
AutoDispatchBelowADInplaceOrView() :
|
| 106 |
+
dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) {
|
| 107 |
+
}
|
| 108 |
+
// disable Autograd & ADInplaceOrView dispatch keys
|
| 109 |
+
c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
|
| 110 |
+
};
|
| 111 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/irange.h>
|
| 4 |
+
|
| 5 |
+
// define constants like M_PI and C keywords for MSVC
|
| 6 |
+
#ifdef _MSC_VER
|
| 7 |
+
#ifndef _USE_MATH_DEFINES
|
| 8 |
+
#define _USE_MATH_DEFINES
|
| 9 |
+
#endif
|
| 10 |
+
#include <math.h>
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
#include <array>
|
| 14 |
+
#include <cmath>
|
| 15 |
+
#include <cstdint>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
|
| 19 |
+
constexpr int MERSENNE_STATE_N = 624;
|
| 20 |
+
constexpr int MERSENNE_STATE_M = 397;
|
| 21 |
+
constexpr uint32_t MATRIX_A = 0x9908b0df;
|
| 22 |
+
constexpr uint32_t UMASK = 0x80000000;
|
| 23 |
+
constexpr uint32_t LMASK = 0x7fffffff;
|
| 24 |
+
|
| 25 |
+
/**
|
| 26 |
+
* Note [Mt19937 Engine implementation]
|
| 27 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 28 |
+
* Originally implemented in:
|
| 29 |
+
* http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c
|
| 30 |
+
* and modified with C++ constructs. Moreover the state array of the engine
|
| 31 |
+
* has been modified to hold 32 bit uints instead of 64 bits.
|
| 32 |
+
*
|
| 33 |
+
* Note that we reimplemented mt19937 instead of using std::mt19937 because,
|
| 34 |
+
* at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2
|
| 35 |
+
* by default and following are the benchmark numbers (benchmark code can be found at
|
| 36 |
+
* https://github.com/syed-ahmed/benchmark-rngs):
|
| 37 |
+
*
|
| 38 |
+
* with -O2
|
| 39 |
+
* Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s
|
| 40 |
+
* Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s
|
| 41 |
+
* Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s
|
| 42 |
+
* Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s
|
| 43 |
+
*
|
| 44 |
+
* std::mt19937 is faster when used in conjunction with std::uniform_real_distribution,
|
| 45 |
+
* however we can't use std::uniform_real_distribution because of this bug:
|
| 46 |
+
* http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used
|
| 47 |
+
* std::uniform_real_distribution and filtered out the 1's, it is a different algorithm
|
| 48 |
+
* than what's in pytorch currently and that messes up the tests in tests_distributions.py.
|
| 49 |
+
* The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower
|
| 50 |
+
* than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter.
|
| 51 |
+
*
|
| 52 |
+
* Copyright notice:
|
| 53 |
+
* A C-program for MT19937, with initialization improved 2002/2/10.
|
| 54 |
+
* Coded by Takuji Nishimura and Makoto Matsumoto.
|
| 55 |
+
* This is a faster version by taking Shawn Cokus's optimization,
|
| 56 |
+
* Matthe Bellew's simplification, Isaku Wada's real version.
|
| 57 |
+
*
|
| 58 |
+
* Before using, initialize the state by using init_genrand(seed)
|
| 59 |
+
* or init_by_array(init_key, key_length).
|
| 60 |
+
*
|
| 61 |
+
* Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
|
| 62 |
+
* All rights reserved.
|
| 63 |
+
*
|
| 64 |
+
* Redistribution and use in source and binary forms, with or without
|
| 65 |
+
* modification, are permitted provided that the following conditions
|
| 66 |
+
* are met:
|
| 67 |
+
*
|
| 68 |
+
* 1. Redistributions of source code must retain the above copyright
|
| 69 |
+
* notice, this list of conditions and the following disclaimer.
|
| 70 |
+
*
|
| 71 |
+
* 2. Redistributions in binary form must reproduce the above copyright
|
| 72 |
+
* notice, this list of conditions and the following disclaimer in the
|
| 73 |
+
* documentation and/or other materials provided with the distribution.
|
| 74 |
+
*
|
| 75 |
+
* 3. The names of its contributors may not be used to endorse or promote
|
| 76 |
+
* products derived from this software without specific prior written
|
| 77 |
+
* permission.
|
| 78 |
+
*
|
| 79 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 80 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 81 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 82 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
| 83 |
+
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
| 84 |
+
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
| 85 |
+
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
| 86 |
+
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
| 87 |
+
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 88 |
+
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 89 |
+
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 90 |
+
*
|
| 91 |
+
*
|
| 92 |
+
* Any feedback is very welcome.
|
| 93 |
+
* http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
|
| 94 |
+
* email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
|
| 95 |
+
*/
|
| 96 |
+
|
| 97 |
+
/**
|
| 98 |
+
* mt19937_data_pod is used to get POD data in and out
|
| 99 |
+
* of mt19937_engine. Used in torch.get_rng_state and
|
| 100 |
+
* torch.set_rng_state functions.
|
| 101 |
+
*/
|
| 102 |
+
struct mt19937_data_pod {
|
| 103 |
+
uint64_t seed_;
|
| 104 |
+
int left_;
|
| 105 |
+
bool seeded_;
|
| 106 |
+
uint32_t next_;
|
| 107 |
+
std::array<uint32_t, MERSENNE_STATE_N> state_;
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
class mt19937_engine {
|
| 111 |
+
public:
|
| 112 |
+
|
| 113 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 114 |
+
inline explicit mt19937_engine(uint64_t seed = 5489) {
|
| 115 |
+
init_with_uint32(seed);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
inline mt19937_data_pod data() const {
|
| 119 |
+
return data_;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
inline void set_data(const mt19937_data_pod& data) {
|
| 123 |
+
data_ = data;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
inline uint64_t seed() const {
|
| 127 |
+
return data_.seed_;
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
inline bool is_valid() {
|
| 131 |
+
if ((data_.seeded_ == true)
|
| 132 |
+
&& (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N)
|
| 133 |
+
&& (data_.next_ <= MERSENNE_STATE_N)) {
|
| 134 |
+
return true;
|
| 135 |
+
}
|
| 136 |
+
return false;
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
inline uint32_t operator()() {
|
| 140 |
+
if (--(data_.left_) == 0) {
|
| 141 |
+
next_state();
|
| 142 |
+
}
|
| 143 |
+
uint32_t y = *(data_.state_.data() + data_.next_++);
|
| 144 |
+
y ^= (y >> 11);
|
| 145 |
+
y ^= (y << 7) & 0x9d2c5680;
|
| 146 |
+
y ^= (y << 15) & 0xefc60000;
|
| 147 |
+
y ^= (y >> 18);
|
| 148 |
+
|
| 149 |
+
return y;
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
private:
|
| 153 |
+
mt19937_data_pod data_;
|
| 154 |
+
|
| 155 |
+
inline void init_with_uint32(uint64_t seed) {
|
| 156 |
+
data_.seed_ = seed;
|
| 157 |
+
data_.seeded_ = true;
|
| 158 |
+
data_.state_[0] = seed & 0xffffffff;
|
| 159 |
+
for (const auto j : c10::irange(1, MERSENNE_STATE_N)) {
|
| 160 |
+
data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j);
|
| 161 |
+
}
|
| 162 |
+
data_.left_ = 1;
|
| 163 |
+
data_.next_ = 0;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
inline uint32_t mix_bits(uint32_t u, uint32_t v) {
|
| 167 |
+
return (u & UMASK) | (v & LMASK);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
inline uint32_t twist(uint32_t u, uint32_t v) {
|
| 171 |
+
return (mix_bits(u,v) >> 1) ^ (v & 1 ? MATRIX_A : 0);
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
inline void next_state() {
|
| 175 |
+
uint32_t* p = data_.state_.data();
|
| 176 |
+
data_.left_ = MERSENNE_STATE_N;
|
| 177 |
+
data_.next_ = 0;
|
| 178 |
+
|
| 179 |
+
for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) {
|
| 180 |
+
*p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]);
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
for(int j = MERSENNE_STATE_M; --j; p++) {
|
| 184 |
+
*p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]);
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
*p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]);
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
};
|
| 191 |
+
|
| 192 |
+
typedef mt19937_engine mt19937;
|
| 193 |
+
|
| 194 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Dimname.h>
|
| 4 |
+
#include <c10/core/TensorImpl.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
class TensorBase;
|
| 9 |
+
|
| 10 |
+
// XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen.
|
| 11 |
+
// Due to the c10/ATen library split, TensorImpl cannot depend on Dimname,
|
| 12 |
+
// so we have a couple of workarounds.
|
| 13 |
+
//
|
| 14 |
+
// In the long term, we'll move Dimname to c10 and everything in this file
|
| 15 |
+
// can be refactored out. The main blocker for that is that "c10::Symbol"
|
| 16 |
+
// actually exists outside of c10 and needs to be moved in.
|
| 17 |
+
|
| 18 |
+
// TensorImpl has a unique_ptr<NamedTensorMetaInterface> field.
|
| 19 |
+
// XXX: Ideally we would just put std::optional<vector<Dimname>> into TensorImpl.
|
| 20 |
+
//
|
| 21 |
+
// This class has an important invariant: there must be at least ONE
|
| 22 |
+
// non-wildcard
|
| 23 |
+
struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
|
| 24 |
+
// This enum is to remind people that the invariant on constructors is that
|
| 25 |
+
// the list of dimnames must have at least one non-wildcard
|
| 26 |
+
enum HAS_NON_WILDCARD {
|
| 27 |
+
HasNonWildcard
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
|
| 31 |
+
: names_(names.vec()) {
|
| 32 |
+
check_invariants();
|
| 33 |
+
}
|
| 34 |
+
explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names)
|
| 35 |
+
: names_(std::move(names)) {
|
| 36 |
+
check_invariants();
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override {
|
| 40 |
+
return std::make_unique<NamedTensorMeta>(HasNonWildcard, names_);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
DimnameList names() const { return names_; }
|
| 44 |
+
|
| 45 |
+
// Used for an assertion in TensorImpl.h
|
| 46 |
+
int64_t slow_dim() const override {
|
| 47 |
+
return static_cast<int64_t>(names_.size());
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
void check_invariants() const {
|
| 51 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 52 |
+
std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
|
| 56 |
+
TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
|
| 57 |
+
std::copy(new_names.begin(), new_names.end(), names_.begin());
|
| 58 |
+
check_invariants();
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) {
|
| 62 |
+
TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
|
| 63 |
+
names_ = std::move(new_names);
|
| 64 |
+
check_invariants();
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
// INVARIANT: at least one Dimname is non-WILDCARD
|
| 68 |
+
std::vector<Dimname> names_;
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
// When NamesMode is disabled, then all operations ignore tensors' names fields.
|
| 72 |
+
// Concretely speaking, all tensors are treated as having nullopt names.
|
| 73 |
+
struct TORCH_API NamesMode {
|
| 74 |
+
static bool is_enabled();
|
| 75 |
+
static void set_enabled(bool enabled);
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
// A RAII, thread local (!) guard that enables or disables names upon
|
| 80 |
+
// construction, and sets it back to the original value upon destruction.
|
| 81 |
+
struct TORCH_API NoNamesGuard {
|
| 82 |
+
NoNamesGuard() : prev_mode(NamesMode::is_enabled()) {
|
| 83 |
+
NamesMode::set_enabled(false);
|
| 84 |
+
}
|
| 85 |
+
~NoNamesGuard() {
|
| 86 |
+
if (initialized) {
|
| 87 |
+
reset();
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
void reset() {
|
| 91 |
+
TORCH_INTERNAL_ASSERT(initialized);
|
| 92 |
+
NamesMode::set_enabled(prev_mode);
|
| 93 |
+
}
|
| 94 |
+
private:
|
| 95 |
+
bool prev_mode;
|
| 96 |
+
bool initialized{true};
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
void check_names_valid_for(const TensorBase& tensor, DimnameList names);
|
| 100 |
+
void check_names_valid_for(size_t tensor_dim, DimnameList names);
|
| 101 |
+
|
| 102 |
+
// Sets the names of `tensor` to be `names`.
|
| 103 |
+
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::optional<DimnameList> names);
|
| 104 |
+
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);
|
| 105 |
+
|
| 106 |
+
constexpr size_t kMaxNamedTensorDim = 64;
|
| 107 |
+
|
| 108 |
+
DimnameList default_names(size_t len);
|
| 109 |
+
|
| 110 |
+
namespace impl {
|
| 111 |
+
|
| 112 |
+
// Some helper functions on TensorImpl. Useful for working with names in TH.
|
| 113 |
+
// XXX: Ideally these would exist as methods on TensorImpl
|
| 114 |
+
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::optional<DimnameList> names, bool validate_names);
|
| 115 |
+
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
|
| 116 |
+
|
| 117 |
+
void check_names_valid_for(TensorImpl* impl, DimnameList names);
|
| 118 |
+
|
| 119 |
+
// Returns true if the tensor's names exist and are not all 'None'.
|
| 120 |
+
// Returns false if the tensor's names don't exist (were not allocated),
|
| 121 |
+
// or if all names are 'None'.
|
| 122 |
+
// We treat not-allocated-names the same as allocated names that are all 'None'.
|
| 123 |
+
TORCH_API bool has_names(const TensorImpl* impl);
|
| 124 |
+
|
| 125 |
+
// Returns the names of the tensor's dimensions.
|
| 126 |
+
// Unnamed tensors are treated as having 'None' in all dimension; this method
|
| 127 |
+
// would return a DimnameList of all 'None's for an unnamed tensor.
|
| 128 |
+
TORCH_API DimnameList get_names(const TensorImpl* impl);
|
| 129 |
+
|
| 130 |
+
// This is more of an implementation detail; one should use impl::get_names /
|
| 131 |
+
// Tensor::names() whenever possible because it provides a cleaner API.
|
| 132 |
+
// Returns the names of the tensor if they have been allocated; returns nullopt
|
| 133 |
+
// instead if the haven't been. The names of a tensor are not allocated if a
|
| 134 |
+
// tensor is constructed with names=None.
|
| 135 |
+
TORCH_API std::optional<DimnameList> get_opt_names(const TensorImpl* impl);
|
| 136 |
+
|
| 137 |
+
} // namespace impl
|
| 138 |
+
|
| 139 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ConstantSymNodeImpl.h>
|
| 4 |
+
#include <c10/core/SymNodeImpl.h>
|
| 5 |
+
#include <c10/macros/Export.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
#include <c10/util/intrusive_ptr.h>
|
| 8 |
+
#include <cstdint>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <string>
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
|
| 14 |
+
// The motivating usecase for this is to represent the ragged size structure
|
| 15 |
+
// of a jagged tensor [B, [s_0, s_1, s_2], D] as a single integer j0. This
|
| 16 |
+
// allows us to simply return [B, j0, D] if someone queries for the size of our
|
| 17 |
+
// tensor.
|
| 18 |
+
//
|
| 19 |
+
// Morally we define comparison between two nested ints to return true if
|
| 20 |
+
// that comparison holds for all corresponding elements of the arrays they
|
| 21 |
+
// represent. Comparison between a nested int and a plain int is defined
|
| 22 |
+
// similarly.
|
| 23 |
+
//
|
| 24 |
+
// To simulate this desired behavior but also avoid the O(N) cost of checking,
|
| 25 |
+
// we associate each raggedness pattern with an integer "id" that can be used as
|
| 26 |
+
// a proxy to evaluate equality. We also constrain the range of values for this
|
| 27 |
+
// as to enable inequality checks.
|
| 28 |
+
//
|
| 29 |
+
// We also support a positive integer scalar "coeff" that is used for computing
|
| 30 |
+
// strides. For example given, a [B, j0, D] tensor, it can be strided in two
|
| 31 |
+
// different ways: [D * j0, D, 1] and [j0, 1, sum(j0)]. The coeff is used to
|
| 32 |
+
// differentiate the two cases.
|
| 33 |
+
//
|
| 34 |
+
// During tracing the strides of the outputs need to be a function of the size
|
| 35 |
+
// and strides of the inputs so it is important that NestedIntSymNode itself is
|
| 36 |
+
// able to express this.
|
| 37 |
+
class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
|
| 38 |
+
public:
|
| 39 |
+
// CAUTION: you should probably not be constructing these directly; please
|
| 40 |
+
// the higher-level API in python instead (TODO: actually introduce that).
|
| 41 |
+
explicit NestedIntSymNodeImpl(int64_t val, int64_t coeff)
|
| 42 |
+
: val_(val), coeff_(coeff) {}
|
| 43 |
+
|
| 44 |
+
bool bool_() override {
|
| 45 |
+
return false;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
bool is_int() override {
|
| 49 |
+
return true;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
bool is_float() override {
|
| 53 |
+
return false;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
bool is_bool() override {
|
| 57 |
+
return false;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
bool is_nested_int() const override {
|
| 61 |
+
return true;
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
bool has_hint() override {
|
| 65 |
+
return true;
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
c10::SymNode wrap_int(int64_t num) override {
|
| 69 |
+
return SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(num));
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
int64_t guard_int(const char* file, int64_t line) override {
|
| 73 |
+
TORCH_CHECK(false);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
double guard_float(const char* file, int64_t line) override {
|
| 77 |
+
TORCH_CHECK(false, "not a float");
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
bool guard_bool(const char* file, int64_t line) override {
|
| 81 |
+
TORCH_CHECK(false, "not a bool");
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
int64_t int_() override {
|
| 85 |
+
TORCH_CHECK(false);
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
std::string str() override {
|
| 89 |
+
if (coeff_ == 1) {
|
| 90 |
+
return "j" + std::to_string(val_);
|
| 91 |
+
}
|
| 92 |
+
return std::to_string(coeff_) + "*j" + std::to_string(val_);
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
// NOTE [ Inequalities with nested int ]
|
| 96 |
+
//
|
| 97 |
+
// The semantics of nested int when it comes to relations is that it is
|
| 98 |
+
// treated as integer known to be within a certain range,
|
| 99 |
+
//
|
| 100 |
+
// j0 \in [2, int64_t::max]
|
| 101 |
+
//
|
| 102 |
+
// allowing us to answer queries like j0 >= 1 (True), and j0 == 0 (False).
|
| 103 |
+
// This is a useful default range for the raggedness pattern of a jagged
|
| 104 |
+
// tensor (1) since sizes are non-negative, and (2) we need to get past 0/1
|
| 105 |
+
// specialization checks.
|
| 106 |
+
//
|
| 107 |
+
// [ Indeterminate inequalities error out ]
|
| 108 |
+
//
|
| 109 |
+
// Given the semantic defined above, certain relations like j0 < 3 are thus
|
| 110 |
+
// indeterminable. In our impl today, evaluating such relations error
|
| 111 |
+
//
|
| 112 |
+
// It may seem convenient to just define indeterminate relations to return
|
| 113 |
+
// False, but the implementation we maintain in parallel using sympy does not
|
| 114 |
+
// allow this.
|
| 115 |
+
//
|
| 116 |
+
// Sympy only allows overriding of Ge. The other relations (Lt, Gt, Le) are,
|
| 117 |
+
// by consequence, all derived from Ge e.g., Lt(a, b) := !Ge(a, b). This
|
| 118 |
+
// would mean that means that if we define the indeterminate j0 >= 3 to be
|
| 119 |
+
// False, the also indeterminate j0 < 3 will be evaluated to be True!
|
| 120 |
+
//
|
| 121 |
+
// [ Coefficient are assumed positive ]
|
| 122 |
+
//
|
| 123 |
+
// For the purpose of computing inequalities, we consider the coefficient of
|
| 124 |
+
// the nested int to be a positive integer.
|
| 125 |
+
//
|
| 126 |
+
// Thus, no modifications are needed to the logic since
|
| 127 |
+
// j0 >= k implies coeff * j0 >= k
|
| 128 |
+
//
|
| 129 |
+
c10::SymNode eq(const c10::SymNode& other) override;
|
| 130 |
+
c10::SymNode ne(const c10::SymNode& other) override;
|
| 131 |
+
c10::SymNode ge(const c10::SymNode& other) override;
|
| 132 |
+
c10::SymNode gt(const c10::SymNode& other) override;
|
| 133 |
+
c10::SymNode lt(const c10::SymNode& other) override;
|
| 134 |
+
c10::SymNode le(const c10::SymNode& other) override;
|
| 135 |
+
c10::SymNode mul(const c10::SymNode& other) override;
|
| 136 |
+
|
| 137 |
+
std::optional<int64_t> nested_int() override {
|
| 138 |
+
return val_;
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
std::optional<int64_t> nested_int_coeff() override {
|
| 142 |
+
return coeff_;
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
bool is_symbolic() override {
|
| 146 |
+
return false;
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
c10::SymNode clone() override;
|
| 150 |
+
|
| 151 |
+
#define DEFINE_BINARY_NOT_SUPPORTED(name) \
|
| 152 |
+
c10::SymNode name(const c10::SymNode& other) override { \
|
| 153 |
+
TORCH_CHECK(false, #name " not supported by NestedIntSymNode"); \
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
DEFINE_BINARY_NOT_SUPPORTED(add)
|
| 157 |
+
DEFINE_BINARY_NOT_SUPPORTED(sub)
|
| 158 |
+
DEFINE_BINARY_NOT_SUPPORTED(truediv)
|
| 159 |
+
DEFINE_BINARY_NOT_SUPPORTED(pow)
|
| 160 |
+
DEFINE_BINARY_NOT_SUPPORTED(floordiv)
|
| 161 |
+
DEFINE_BINARY_NOT_SUPPORTED(mod)
|
| 162 |
+
DEFINE_BINARY_NOT_SUPPORTED(sym_min)
|
| 163 |
+
DEFINE_BINARY_NOT_SUPPORTED(sym_max)
|
| 164 |
+
DEFINE_BINARY_NOT_SUPPORTED(sym_and)
|
| 165 |
+
DEFINE_BINARY_NOT_SUPPORTED(sym_or)
|
| 166 |
+
|
| 167 |
+
#undef DEFINE_BINARY_NOT_SUPPORTED
|
| 168 |
+
|
| 169 |
+
#define DEFINE_NOT_SUPPORTED(name) \
|
| 170 |
+
c10::SymNode name() override { \
|
| 171 |
+
TORCH_CHECK(false, #name " is not supported by NestedIntSymNode"); \
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
DEFINE_NOT_SUPPORTED(sym_not)
|
| 175 |
+
DEFINE_NOT_SUPPORTED(ceil)
|
| 176 |
+
DEFINE_NOT_SUPPORTED(floor)
|
| 177 |
+
DEFINE_NOT_SUPPORTED(neg)
|
| 178 |
+
DEFINE_NOT_SUPPORTED(sym_float)
|
| 179 |
+
|
| 180 |
+
#undef DEFINE_NOT_SUPPORTED
|
| 181 |
+
|
| 182 |
+
private:
|
| 183 |
+
int64_t val_;
|
| 184 |
+
int64_t coeff_;
|
| 185 |
+
};
|
| 186 |
+
|
| 187 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/core/TorchDispatchUtils.h>
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
namespace at::impl {
|
| 6 |
+
|
| 7 |
+
struct TORCH_API RestorePythonTLSSnapshot {
|
| 8 |
+
RestorePythonTLSSnapshot();
|
| 9 |
+
~RestorePythonTLSSnapshot();
|
| 10 |
+
|
| 11 |
+
private:
|
| 12 |
+
c10::impl::LocalDispatchKeySet saved_;
|
| 13 |
+
c10::impl::ForceDispatchKeyGuard guard_;
|
| 14 |
+
};
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
// RAII guard to make working with the above TLS safer.
|
| 18 |
+
struct TORCH_API MaybeSetTLSOnEntryGuard {
|
| 19 |
+
public:
|
| 20 |
+
MaybeSetTLSOnEntryGuard();
|
| 21 |
+
~MaybeSetTLSOnEntryGuard();
|
| 22 |
+
|
| 23 |
+
private:
|
| 24 |
+
bool value_set_;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
} // namespace at::impl
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 4 |
+
|
| 5 |
+
// TODO: this can probably live in c10
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
namespace at::impl {
|
| 9 |
+
|
| 10 |
+
class TORCH_API PythonOpRegistrationTrampoline final {
|
| 11 |
+
static std::atomic<c10::impl::PyInterpreter*> interpreter_;
|
| 12 |
+
|
| 13 |
+
public:
|
| 14 |
+
// Returns true if you successfully registered yourself (that means
|
| 15 |
+
// you are in the hot seat for doing the operator registrations!)
|
| 16 |
+
static bool registerInterpreter(c10::impl::PyInterpreter*);
|
| 17 |
+
|
| 18 |
+
// Returns nullptr if no interpreter has been registered yet.
|
| 19 |
+
static c10::impl::PyInterpreter* getInterpreter();
|
| 20 |
+
};
|
| 21 |
+
|
| 22 |
+
} // namespace at::impl
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ScalarType.h>
|
| 4 |
+
#include <c10/core/QScheme.h>
|
| 5 |
+
#include <c10/util/intrusive_ptr.h>
|
| 6 |
+
|
| 7 |
+
namespace at {
|
| 8 |
+
|
| 9 |
+
class Tensor;
|
| 10 |
+
struct QTensorImpl;
|
| 11 |
+
struct Quantizer;
|
| 12 |
+
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
|
| 13 |
+
using QuantizerPtr = c10::intrusive_ptr<Quantizer>;
|
| 14 |
+
|
| 15 |
+
/**
|
| 16 |
+
* Quantizer is the class for storing all the information
|
| 17 |
+
* that's necessary to perform quantize and dequantize
|
| 18 |
+
* operation.
|
| 19 |
+
*
|
| 20 |
+
* We might have different types of quantization schemes and this is
|
| 21 |
+
* the base class for all quantizers.
|
| 22 |
+
*
|
| 23 |
+
* QTensorImpl will hold a pointer to Quantizer so that we can support
|
| 24 |
+
* different quantization schemes on Tensor.
|
| 25 |
+
*
|
| 26 |
+
* For example, the most common quantization scheme, Affine Quantization,
|
| 27 |
+
* requires scale and zero_point as parameters, we'll store scale and zero_point
|
| 28 |
+
* inside the instance and we can use it to quantize a float Tensor or
|
| 29 |
+
* dequantize a quantized Tensor.
|
| 30 |
+
*
|
| 31 |
+
* When you add new types of leaf Quantizer class, please also
|
| 32 |
+
* make sure to add a corresponding QScheme enum since
|
| 33 |
+
* they should have one to one mapping.
|
| 34 |
+
*
|
| 35 |
+
* Note about intrusive_ptr:
|
| 36 |
+
* Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can
|
| 37 |
+
* share the same Quantizer. Quantizer should be immutable.
|
| 38 |
+
*/
|
| 39 |
+
struct TORCH_API Quantizer : public c10::intrusive_ptr_target {
|
| 40 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 41 |
+
const ScalarType scalar_type_;
|
| 42 |
+
explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {}
|
| 43 |
+
~Quantizer() override;
|
| 44 |
+
|
| 45 |
+
// Copied from torch/csrc/jit/ir/scope.h
|
| 46 |
+
QuantizerPtr intrusive_from_this() {
|
| 47 |
+
c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
|
| 48 |
+
// from a raw `this` pointer
|
| 49 |
+
// so we need to bump the refcount
|
| 50 |
+
// to account for this ownership
|
| 51 |
+
return c10::intrusive_ptr<Quantizer>::reclaim(this);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* Each concrete Quantizer type should have a unique QScheme type.
|
| 56 |
+
*/
|
| 57 |
+
virtual QScheme qscheme() const = 0;
|
| 58 |
+
|
| 59 |
+
ScalarType scalar_type() const {
|
| 60 |
+
return scalar_type_;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
/**
|
| 64 |
+
* quantize a float Tensor into a quantized Tensor.
|
| 65 |
+
*/
|
| 66 |
+
virtual Tensor quantize(const Tensor& t) = 0;
|
| 67 |
+
|
| 68 |
+
/**
|
| 69 |
+
* dequantize a quantized Tensor into a float Tensor.
|
| 70 |
+
*/
|
| 71 |
+
virtual Tensor dequantize(const Tensor& t) = 0;
|
| 72 |
+
|
| 73 |
+
/**
|
| 74 |
+
* dequantize a quantized Tensor into a float Tensor, out= variant
|
| 75 |
+
*/
|
| 76 |
+
virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0;
|
| 77 |
+
|
| 78 |
+
/**
|
| 79 |
+
* Compare against `other` for equality.
|
| 80 |
+
*/
|
| 81 |
+
virtual bool equalTo(QuantizerPtr other) const = 0;
|
| 82 |
+
};
|
| 83 |
+
|
| 84 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Range.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <iosfwd>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
struct Range {
|
| 9 |
+
Range(int64_t begin, int64_t end)
|
| 10 |
+
: begin(begin)
|
| 11 |
+
, end(end) {}
|
| 12 |
+
|
| 13 |
+
int64_t size() const { return end - begin; }
|
| 14 |
+
|
| 15 |
+
Range operator/(int64_t divisor) {
|
| 16 |
+
return Range(begin / divisor, end / divisor);
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
int64_t begin;
|
| 20 |
+
int64_t end;
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
std::ostream& operator<<(std::ostream& out, const Range& range);
|
| 24 |
+
|
| 25 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace at::Reduction {
|
| 4 |
+
|
| 5 |
+
// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py
|
| 6 |
+
// These constants control the reduction behavior of loss functions.
|
| 7 |
+
// Ideally, this would be a scoped enum, but jit doesn't support that
|
| 8 |
+
enum Reduction {
|
| 9 |
+
None, // Do not reduce
|
| 10 |
+
Mean, // (Possibly weighted) mean of losses
|
| 11 |
+
Sum, // Sum losses
|
| 12 |
+
END
|
| 13 |
+
};
|
| 14 |
+
} // namespace at::Reduction
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/Scalar.h>
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/ScalarType.h>
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/TensorBody.h>
|
| 4 |
+
#include <c10/util/Exception.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
class TORCH_API OptionalTensorRef {
|
| 8 |
+
public:
|
| 9 |
+
OptionalTensorRef() = default;
|
| 10 |
+
|
| 11 |
+
~OptionalTensorRef() {
|
| 12 |
+
ref_.unsafeReleaseTensorImpl();
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
OptionalTensorRef(const TensorBase& src)
|
| 16 |
+
: ref_(Tensor::unsafe_borrow_t{}, src) {
|
| 17 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined());
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
OptionalTensorRef(const OptionalTensorRef& rhs)
|
| 21 |
+
: ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {}
|
| 22 |
+
|
| 23 |
+
OptionalTensorRef& operator=(OptionalTensorRef rhs) {
|
| 24 |
+
std::swap(ref_, rhs.ref_);
|
| 25 |
+
return *this;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
bool has_value() const {
|
| 29 |
+
return ref_.defined();
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
const Tensor& getTensorRef() const & {
|
| 33 |
+
return ref_;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
const Tensor& operator*() const & {
|
| 37 |
+
return ref_;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
const Tensor* operator->() const & {
|
| 41 |
+
return &ref_;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
operator bool() const {
|
| 45 |
+
return ref_.defined();
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
private:
|
| 49 |
+
Tensor ref_;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
// Use to convert a TensorBase (that may be undefined) to an at::Tensor
|
| 53 |
+
// without bumping refcount.
|
| 54 |
+
class TORCH_API TensorRef {
|
| 55 |
+
public:
|
| 56 |
+
~TensorRef() {
|
| 57 |
+
ref_.unsafeReleaseTensorImpl();
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
TensorRef(const TensorBase& src)
|
| 61 |
+
: ref_(Tensor::unsafe_borrow_t{}, src) {}
|
| 62 |
+
|
| 63 |
+
const Tensor& operator*() const & {
|
| 64 |
+
return ref_;
|
| 65 |
+
}
|
| 66 |
+
private:
|
| 67 |
+
Tensor ref_;
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
template <typename T>
|
| 71 |
+
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
|
| 72 |
+
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> {
|
| 73 |
+
// Return the grad argument in case of a hook with void return type to have an
|
| 74 |
+
// std::function with Tensor return type
|
| 75 |
+
static_assert(std::is_same<decltype(hook(Tensor())), void>::value,
|
| 76 |
+
"Expected hook to return void");
|
| 77 |
+
return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
|
| 78 |
+
TensorRef grad(grad_base);
|
| 79 |
+
fn(*grad);
|
| 80 |
+
return Tensor();
|
| 81 |
+
});
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
template <typename T>
|
| 85 |
+
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
|
| 86 |
+
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t<T> {
|
| 87 |
+
return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
|
| 88 |
+
TensorRef grad(grad_base);
|
| 89 |
+
Tensor ret = fn(*grad);
|
| 90 |
+
return TensorBase(std::move(ret));
|
| 91 |
+
});
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/util/ArrayRef.h>
|
| 5 |
+
#include <c10/util/Deprecated.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
#include <cstddef>
|
| 9 |
+
#include <cstdint>
|
| 10 |
+
#include <type_traits>
|
| 11 |
+
|
| 12 |
+
namespace at {
|
| 13 |
+
|
| 14 |
+
// The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor
|
| 15 |
+
// is used to enable the __restrict__ keyword/modifier for the data
|
| 16 |
+
// passed to cuda.
|
| 17 |
+
template <typename T>
|
| 18 |
+
struct DefaultPtrTraits {
|
| 19 |
+
typedef T* PtrType;
|
| 20 |
+
};
|
| 21 |
+
|
| 22 |
+
#if defined(__CUDACC__) || defined(__HIPCC__)
|
| 23 |
+
template <typename T>
|
| 24 |
+
struct RestrictPtrTraits {
|
| 25 |
+
typedef T* __restrict__ PtrType;
|
| 26 |
+
};
|
| 27 |
+
#endif
|
| 28 |
+
|
| 29 |
+
// TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors.
|
| 30 |
+
// For CUDA tensors it is used in device code (only). This means that we restrict ourselves
|
| 31 |
+
// to functions and types available there (e.g. IntArrayRef isn't).
|
| 32 |
+
|
| 33 |
+
// The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers.
|
| 34 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
| 35 |
+
class TensorAccessorBase {
|
| 36 |
+
public:
|
| 37 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
| 38 |
+
|
| 39 |
+
C10_HOST_DEVICE TensorAccessorBase(
|
| 40 |
+
PtrType data_,
|
| 41 |
+
const index_t* sizes_,
|
| 42 |
+
const index_t* strides_)
|
| 43 |
+
: data_(data_), sizes_(sizes_), strides_(strides_) {}
|
| 44 |
+
C10_HOST IntArrayRef sizes() const {
|
| 45 |
+
return IntArrayRef(sizes_,N);
|
| 46 |
+
}
|
| 47 |
+
C10_HOST IntArrayRef strides() const {
|
| 48 |
+
return IntArrayRef(strides_,N);
|
| 49 |
+
}
|
| 50 |
+
C10_HOST_DEVICE index_t stride(index_t i) const {
|
| 51 |
+
return strides_[i];
|
| 52 |
+
}
|
| 53 |
+
C10_HOST_DEVICE index_t size(index_t i) const {
|
| 54 |
+
return sizes_[i];
|
| 55 |
+
}
|
| 56 |
+
C10_HOST_DEVICE PtrType data() {
|
| 57 |
+
return data_;
|
| 58 |
+
}
|
| 59 |
+
C10_HOST_DEVICE const PtrType data() const {
|
| 60 |
+
return data_;
|
| 61 |
+
}
|
| 62 |
+
protected:
|
| 63 |
+
PtrType data_;
|
| 64 |
+
const index_t* sizes_;
|
| 65 |
+
const index_t* strides_;
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
// The `TensorAccessor` is typically instantiated for CPU `Tensor`s using
|
| 69 |
+
// `Tensor.accessor<T, N>()`.
|
| 70 |
+
// For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only
|
| 71 |
+
// indexing on the device uses `TensorAccessor`s.
|
| 72 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
| 73 |
+
class TensorAccessor : public TensorAccessorBase<T,N,PtrTraits,index_t> {
|
| 74 |
+
public:
|
| 75 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
| 76 |
+
|
| 77 |
+
C10_HOST_DEVICE TensorAccessor(
|
| 78 |
+
PtrType data_,
|
| 79 |
+
const index_t* sizes_,
|
| 80 |
+
const index_t* strides_)
|
| 81 |
+
: TensorAccessorBase<T, N, PtrTraits, index_t>(data_,sizes_,strides_) {}
|
| 82 |
+
|
| 83 |
+
C10_HOST_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
|
| 84 |
+
return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
C10_HOST_DEVICE const TensorAccessor<T, N-1, PtrTraits, index_t> operator[](index_t i) const {
|
| 88 |
+
return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
|
| 89 |
+
}
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
template<typename T, template <typename U> class PtrTraits, typename index_t>
|
| 93 |
+
class TensorAccessor<T,1,PtrTraits,index_t> : public TensorAccessorBase<T,1,PtrTraits,index_t> {
|
| 94 |
+
public:
|
| 95 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
| 96 |
+
|
| 97 |
+
C10_HOST_DEVICE TensorAccessor(
|
| 98 |
+
PtrType data_,
|
| 99 |
+
const index_t* sizes_,
|
| 100 |
+
const index_t* strides_)
|
| 101 |
+
: TensorAccessorBase<T, 1, PtrTraits, index_t>(data_,sizes_,strides_) {}
|
| 102 |
+
C10_HOST_DEVICE T & operator[](index_t i) {
|
| 103 |
+
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
|
| 104 |
+
return this->data_[this->strides_[0]*i];
|
| 105 |
+
}
|
| 106 |
+
C10_HOST_DEVICE const T & operator[](index_t i) const {
|
| 107 |
+
return this->data_[this->strides_[0]*i];
|
| 108 |
+
}
|
| 109 |
+
};
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
// GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used on for CUDA `Tensor`s on the host
|
| 113 |
+
// and as
|
| 114 |
+
// In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host)
|
| 115 |
+
// in order to transfer them on the device when calling kernels.
|
| 116 |
+
// On the device, indexing of multidimensional tensors gives to `TensorAccessor`s.
|
| 117 |
+
// Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__.
|
| 118 |
+
// Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available
|
| 119 |
+
// on the device, so those functions are host only.
|
| 120 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
| 121 |
+
class GenericPackedTensorAccessorBase {
|
| 122 |
+
public:
|
| 123 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
| 124 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 125 |
+
C10_HOST GenericPackedTensorAccessorBase(
|
| 126 |
+
PtrType data_,
|
| 127 |
+
const index_t* sizes_,
|
| 128 |
+
const index_t* strides_)
|
| 129 |
+
: data_(data_) {
|
| 130 |
+
std::copy(sizes_, sizes_ + N, std::begin(this->sizes_));
|
| 131 |
+
std::copy(strides_, strides_ + N, std::begin(this->strides_));
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
// if index_t is not int64_t, we want to have an int64_t constructor
|
| 135 |
+
template <typename source_index_t, class = std::enable_if_t<std::is_same_v<source_index_t, int64_t>>>
|
| 136 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 137 |
+
C10_HOST GenericPackedTensorAccessorBase(
|
| 138 |
+
PtrType data_,
|
| 139 |
+
const source_index_t* sizes_,
|
| 140 |
+
const source_index_t* strides_)
|
| 141 |
+
: data_(data_) {
|
| 142 |
+
for (const auto i : c10::irange(N)) {
|
| 143 |
+
this->sizes_[i] = sizes_[i];
|
| 144 |
+
this->strides_[i] = strides_[i];
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
C10_HOST_DEVICE index_t stride(index_t i) const {
|
| 149 |
+
return strides_[i];
|
| 150 |
+
}
|
| 151 |
+
C10_HOST_DEVICE index_t size(index_t i) const {
|
| 152 |
+
return sizes_[i];
|
| 153 |
+
}
|
| 154 |
+
C10_HOST_DEVICE PtrType data() {
|
| 155 |
+
return data_;
|
| 156 |
+
}
|
| 157 |
+
C10_HOST_DEVICE const PtrType data() const {
|
| 158 |
+
return data_;
|
| 159 |
+
}
|
| 160 |
+
protected:
|
| 161 |
+
PtrType data_;
|
| 162 |
+
// NOLINTNEXTLINE(*c-arrays*)
|
| 163 |
+
index_t sizes_[N];
|
| 164 |
+
// NOLINTNEXTLINE(*c-arrays*)
|
| 165 |
+
index_t strides_[N];
|
| 166 |
+
C10_HOST void bounds_check_(index_t i) const {
|
| 167 |
+
TORCH_CHECK_INDEX(
|
| 168 |
+
0 <= i && i < index_t{N},
|
| 169 |
+
"Index ",
|
| 170 |
+
i,
|
| 171 |
+
" is not within bounds of a tensor of dimension ",
|
| 172 |
+
N);
|
| 173 |
+
}
|
| 174 |
+
};
|
| 175 |
+
|
| 176 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
| 177 |
+
class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase<T,N,PtrTraits,index_t> {
|
| 178 |
+
public:
|
| 179 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
| 180 |
+
|
| 181 |
+
C10_HOST GenericPackedTensorAccessor(
|
| 182 |
+
PtrType data_,
|
| 183 |
+
const index_t* sizes_,
|
| 184 |
+
const index_t* strides_)
|
| 185 |
+
: GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
| 186 |
+
|
| 187 |
+
// if index_t is not int64_t, we want to have an int64_t constructor
|
| 188 |
+
template <typename source_index_t, class = std::enable_if_t<std::is_same_v<source_index_t, int64_t>>>
|
| 189 |
+
C10_HOST GenericPackedTensorAccessor(
|
| 190 |
+
PtrType data_,
|
| 191 |
+
const source_index_t* sizes_,
|
| 192 |
+
const source_index_t* strides_)
|
| 193 |
+
: GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
| 194 |
+
|
| 195 |
+
C10_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
|
| 196 |
+
index_t* new_sizes = this->sizes_ + 1;
|
| 197 |
+
index_t* new_strides = this->strides_ + 1;
|
| 198 |
+
return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
C10_DEVICE const TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) const {
|
| 202 |
+
const index_t* new_sizes = this->sizes_ + 1;
|
| 203 |
+
const index_t* new_strides = this->strides_ + 1;
|
| 204 |
+
return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
/// Returns a PackedTensorAccessor of the same dimension after transposing the
|
| 208 |
+
/// two dimensions given. Does not actually move elements; transposition is
|
| 209 |
+
/// made by permuting the size/stride arrays. If the dimensions are not valid,
|
| 210 |
+
/// asserts.
|
| 211 |
+
C10_HOST GenericPackedTensorAccessor<T, N, PtrTraits, index_t> transpose(
|
| 212 |
+
index_t dim1,
|
| 213 |
+
index_t dim2) const {
|
| 214 |
+
this->bounds_check_(dim1);
|
| 215 |
+
this->bounds_check_(dim2);
|
| 216 |
+
GenericPackedTensorAccessor<T, N, PtrTraits, index_t> result(
|
| 217 |
+
this->data_, this->sizes_, this->strides_);
|
| 218 |
+
std::swap(result.strides_[dim1], result.strides_[dim2]);
|
| 219 |
+
std::swap(result.sizes_[dim1], result.sizes_[dim2]);
|
| 220 |
+
return result;
|
| 221 |
+
}
|
| 222 |
+
};
|
| 223 |
+
|
| 224 |
+
template<typename T, template <typename U> class PtrTraits, typename index_t>
|
| 225 |
+
class GenericPackedTensorAccessor<T,1,PtrTraits,index_t> : public GenericPackedTensorAccessorBase<T,1,PtrTraits,index_t> {
|
| 226 |
+
public:
|
| 227 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
| 228 |
+
C10_HOST GenericPackedTensorAccessor(
|
| 229 |
+
PtrType data_,
|
| 230 |
+
const index_t* sizes_,
|
| 231 |
+
const index_t* strides_)
|
| 232 |
+
: GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
| 233 |
+
|
| 234 |
+
// if index_t is not int64_t, we want to have an int64_t constructor
|
| 235 |
+
template <typename source_index_t, class = std::enable_if_t<std::is_same_v<source_index_t, int64_t>>>
|
| 236 |
+
C10_HOST GenericPackedTensorAccessor(
|
| 237 |
+
PtrType data_,
|
| 238 |
+
const source_index_t* sizes_,
|
| 239 |
+
const source_index_t* strides_)
|
| 240 |
+
: GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
| 241 |
+
|
| 242 |
+
C10_DEVICE T & operator[](index_t i) {
|
| 243 |
+
return this->data_[this->strides_[0] * i];
|
| 244 |
+
}
|
| 245 |
+
C10_DEVICE const T& operator[](index_t i) const {
|
| 246 |
+
return this->data_[this->strides_[0]*i];
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
// Same as in the general N-dimensional case, but note that in the
|
| 250 |
+
// 1-dimensional case the returned PackedTensorAccessor will always be an
|
| 251 |
+
// identical copy of the original
|
| 252 |
+
C10_HOST GenericPackedTensorAccessor<T, 1, PtrTraits, index_t> transpose(
|
| 253 |
+
index_t dim1,
|
| 254 |
+
index_t dim2) const {
|
| 255 |
+
this->bounds_check_(dim1);
|
| 256 |
+
this->bounds_check_(dim2);
|
| 257 |
+
return GenericPackedTensorAccessor<T, 1, PtrTraits, index_t>(
|
| 258 |
+
this->data_, this->sizes_, this->strides_);
|
| 259 |
+
}
|
| 260 |
+
};
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
// Can't put this directly into the macro function args because of commas
|
| 264 |
+
#define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t>
|
| 265 |
+
|
| 266 |
+
// Old name for `GenericPackedTensorAccessor`
|
| 267 |
+
template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
| 268 |
+
C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X)
|
| 269 |
+
|
| 270 |
+
#undef AT_X
|
| 271 |
+
|
| 272 |
+
template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
| 273 |
+
using PackedTensorAccessor32 = GenericPackedTensorAccessor<T, N, PtrTraits, int32_t>;
|
| 274 |
+
|
| 275 |
+
template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
| 276 |
+
using PackedTensorAccessor64 = GenericPackedTensorAccessor<T, N, PtrTraits, int64_t>;
|
| 277 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 4 |
+
#include <c10/core/impl/TorchDispatchModeTLS.h>
|
| 5 |
+
#include <c10/util/ArrayRef.h>
|
| 6 |
+
#include <torch/library.h>
|
| 7 |
+
#include <optional>
|
| 8 |
+
|
| 9 |
+
namespace at::impl {
|
| 10 |
+
|
| 11 |
+
TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
|
| 12 |
+
TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
|
| 13 |
+
TORCH_API bool tensorlist_has_dispatch(
|
| 14 |
+
const c10::List<std::optional<at::Tensor>>& li);
|
| 15 |
+
using c10::impl::dispatch_mode_enabled;
|
| 16 |
+
|
| 17 |
+
} // namespace at::impl
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/NumericUtils.h>
|
| 2 |
+
#include <c10/macros/Macros.h>
|
| 3 |
+
#include <c10/util/Half.h>
|
| 4 |
+
#include <c10/util/BFloat16.h>
|
| 5 |
+
#include <c10/util/MathConstants.h>
|
| 6 |
+
#include <cmath>
|
| 7 |
+
#include <cstdint>
|
| 8 |
+
#include <cassert>
|
| 9 |
+
#include <limits>
|
| 10 |
+
#include <type_traits>
|
| 11 |
+
|
| 12 |
+
namespace at {
|
| 13 |
+
|
| 14 |
+
// Using DistAccumType in accumulate types for distributions.
|
| 15 |
+
// Note: Ideally we'd be using ATen/AccumulateType.h but looks
|
| 16 |
+
// like the there is some inconsistency in how accumulate types
|
| 17 |
+
// are mapped currently, e.g. for the cpu side, float is mapped
|
| 18 |
+
// to double.
|
| 19 |
+
template <typename T>
|
| 20 |
+
struct DistAccumType { };
|
| 21 |
+
|
| 22 |
+
#if defined(__CUDACC__) || defined(__HIPCC__)
|
| 23 |
+
template <> struct DistAccumType<half> { using type = float; };
|
| 24 |
+
#endif
|
| 25 |
+
template <> struct DistAccumType<BFloat16> { using type = float; };
|
| 26 |
+
template <> struct DistAccumType<Half> { using type = float; };
|
| 27 |
+
template <> struct DistAccumType<float> { using type = float; };
|
| 28 |
+
template <> struct DistAccumType<double> { using type = double; };
|
| 29 |
+
|
| 30 |
+
template <typename T>
|
| 31 |
+
using dist_acctype = typename DistAccumType<T>::type;
|
| 32 |
+
|
| 33 |
+
namespace transformation {
|
| 34 |
+
|
| 35 |
+
/**
|
| 36 |
+
* A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified.
|
| 37 |
+
* `range` is `to - from`
|
| 38 |
+
* `base` is `from`
|
| 39 |
+
*/
|
| 40 |
+
template <typename T, typename V>
|
| 41 |
+
C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) {
|
| 42 |
+
return static_cast<T>(static_cast<int64_t>((val % range) + base));
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
/**
|
| 46 |
+
* A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None
|
| 47 |
+
*/
|
| 48 |
+
template <typename T, typename V>
|
| 49 |
+
C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
|
| 50 |
+
return static_cast<T>(static_cast<int64_t>(val));
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
/**
|
| 54 |
+
* A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`.
|
| 55 |
+
* In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double
|
| 56 |
+
* in this overloaded version
|
| 57 |
+
*/
|
| 58 |
+
template <typename T, typename V>
|
| 59 |
+
C10_HOST_DEVICE inline std::enable_if_t<!(std::is_floating_point_v<T>), T>uniform_int(V val) {
|
| 60 |
+
if constexpr (std::is_same_v<T, bool>) {
|
| 61 |
+
return static_cast<bool>(val & 1);
|
| 62 |
+
} else if constexpr (std::is_same_v<T, int64_t>) {
|
| 63 |
+
return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
|
| 64 |
+
} else if constexpr (std::is_same_v<T, at::Half> || std::is_same_v<T, at::BFloat16>) {
|
| 65 |
+
return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
|
| 66 |
+
} else if constexpr (std::is_integral_v<T>) {
|
| 67 |
+
return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
|
| 68 |
+
} else {
|
| 69 |
+
assert(false);
|
| 70 |
+
return 0;
|
| 71 |
+
}
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
/**
|
| 75 |
+
* An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`,
|
| 76 |
+
* added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version.
|
| 77 |
+
*/
|
| 78 |
+
template<typename T, typename V>
|
| 79 |
+
C10_HOST_DEVICE inline std::enable_if_t<std::is_floating_point_v<T>, T>uniform_int(V val) {
|
| 80 |
+
return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
template <typename T, typename V>
|
| 84 |
+
C10_HOST_DEVICE inline dist_acctype<T> uniform_real(V val, T from, T to) {
|
| 85 |
+
constexpr auto MASK = static_cast<V>((static_cast<uint64_t>(1) << std::numeric_limits<T>::digits) - 1);
|
| 86 |
+
constexpr auto DIVISOR = static_cast<dist_acctype<T>>(1) / (static_cast<uint64_t>(1) << std::numeric_limits<T>::digits);
|
| 87 |
+
dist_acctype<T> x = (val & MASK) * DIVISOR;
|
| 88 |
+
return (x * (to - from) + from);
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
/**
|
| 92 |
+
* Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to
|
| 93 |
+
* normally distributed with `mean` and standard deviation `std`.
|
| 94 |
+
*/
|
| 95 |
+
template <typename T>
|
| 96 |
+
C10_HOST_DEVICE inline T normal(T val, T mean, T std) {
|
| 97 |
+
return val * std + mean;
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
/**
|
| 101 |
+
* Transforms uniformly distributed `val` between 0.0 and 1.0 to
|
| 102 |
+
* Cauchy distribution with location parameter `median` and scale parameter `sigma`.
|
| 103 |
+
*/
|
| 104 |
+
template <typename T>
|
| 105 |
+
C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) {
|
| 106 |
+
// https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
|
| 107 |
+
// __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps),
|
| 108 |
+
// thus we clip those values.
|
| 109 |
+
constexpr T eps = std::numeric_limits<T>::epsilon();
|
| 110 |
+
constexpr T one_minus_eps = 1 - eps;
|
| 111 |
+
constexpr T zero_plus_eps = 0 + eps;
|
| 112 |
+
val = (val > one_minus_eps ? one_minus_eps : val);
|
| 113 |
+
val = (val < zero_plus_eps ? zero_plus_eps : val);
|
| 114 |
+
return median + sigma * at::tan(c10::pi<T> * (val - static_cast<T>(0.5)));
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
template <>
|
| 118 |
+
C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) {
|
| 119 |
+
// https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
|
| 120 |
+
return median + sigma * at::tan(c10::pi<double> * (val - static_cast<double>(0.5)));
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
/**
|
| 124 |
+
* Transforms uniformly distributed `val` between 0.0 and 1.0 to
|
| 125 |
+
* exponentially distributed with `lambda` parameter of the distribution.
|
| 126 |
+
*/
|
| 127 |
+
template <typename T>
|
| 128 |
+
C10_HOST_DEVICE inline T exponential(T val, T lambda) {
|
| 129 |
+
// https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates
|
| 130 |
+
// Different implementations for CUDA and CPU to preserve original logic
|
| 131 |
+
// TODO: must be investigated and unified!!!
|
| 132 |
+
// https://github.com/pytorch/pytorch/issues/38662
|
| 133 |
+
#if defined(__CUDACC__) || defined(__HIPCC__)
|
| 134 |
+
// BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
|
| 135 |
+
// curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
|
| 136 |
+
// we need log to be not 0, and not underflow when converted to half
|
| 137 |
+
// fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args
|
| 138 |
+
auto log = val >= static_cast<T>(1.) - std::numeric_limits<T>::epsilon() / 2
|
| 139 |
+
? -std::numeric_limits<T>::epsilon() / 2
|
| 140 |
+
: at::log(val);
|
| 141 |
+
return static_cast<T>(-1.0) / lambda * log;
|
| 142 |
+
#else
|
| 143 |
+
return static_cast<T>(-1.0) / lambda * at::log1p(-val);
|
| 144 |
+
#endif
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
/**
|
| 148 |
+
* Transforms uniformly distributed `val` between 0.0 and 1.0 to
|
| 149 |
+
* geometrically distributed with success probability `p`.
|
| 150 |
+
*/
|
| 151 |
+
template <typename T>
|
| 152 |
+
C10_HOST_DEVICE inline T geometric(T val, T p) {
|
| 153 |
+
// https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions
|
| 154 |
+
return static_cast<T>(::ceil(at::log(val) / at::log1p(-p)));
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
/**
|
| 158 |
+
* Transforms normally distributed `val` to log-normally distributed.
|
| 159 |
+
*/
|
| 160 |
+
template <typename T>
|
| 161 |
+
C10_HOST_DEVICE inline T log_normal(T val) {
|
| 162 |
+
// https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles
|
| 163 |
+
return at::exp(val);
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
/**
|
| 167 |
+
* Transforms uniformly distributed `val` between 0.0 and 1.0 to
|
| 168 |
+
* bernoulli distributed with success probability `p`.
|
| 169 |
+
*/
|
| 170 |
+
template <typename T>
|
| 171 |
+
C10_HOST_DEVICE inline T bernoulli(T val, T p) {
|
| 172 |
+
return val < p;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
}} // namespace at::transformation
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
#include <c10/core/UndefinedTensorImpl.h>
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <c10/macros/Export.h>
|
| 5 |
+
|
| 6 |
+
// A little explanation about why this file exists at all. We have
|
| 7 |
+
// a few methods on Tensor class which require access to reified access to
|
| 8 |
+
// AutogradMeta. In open source, this isn't a big deal: we just access
|
| 9 |
+
// torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and
|
| 10 |
+
// we can put the definitions inline. This is because everything gets balled
|
| 11 |
+
// into a single dynamic library in the end.
|
| 12 |
+
//
|
| 13 |
+
// However, inside our Facebook internal version of our build system, we
|
| 14 |
+
// have a split between aten and torch/csrc. So we cannot simply just
|
| 15 |
+
// cross this boundary. "Now wait," you might say, "Why don't we just
|
| 16 |
+
// merge the libraries inside Facebook". Well, the problem is that there
|
| 17 |
+
// are some downstream applications which are at binary size limit, and
|
| 18 |
+
// incorporating all of the extra code from libtorch would push them
|
| 19 |
+
// over (admarket/adreview/service:adreviewservice, see also
|
| 20 |
+
// https://github.com/pytorch/pytorch/pull/29299) So if you want to do that,
|
| 21 |
+
// we have to fix all of the services like this.
|
| 22 |
+
//
|
| 23 |
+
// I didn't want to block eliminating Tensor-Variable on this work, so I
|
| 24 |
+
// had to introduce another dynamic dispatch to get to the variable
|
| 25 |
+
// implementations (which live in torch/csrc/autograd/variable.cpp, FYI).
|
| 26 |
+
//
|
| 27 |
+
// I also considered using our existing dynamic dispatch mechanism, c10
|
| 28 |
+
// dispatcher, to do this. However, (1) some of the functions on Tensor
|
| 29 |
+
// have weird signatures that are not supported by autograd, and (2)
|
| 30 |
+
// see this bug https://github.com/pytorch/pytorch/issues/30102
|
| 31 |
+
|
| 32 |
+
namespace torch::autograd {
|
| 33 |
+
|
| 34 |
+
struct Node;
|
| 35 |
+
|
| 36 |
+
} // namespace torch::autograd
|
| 37 |
+
|
| 38 |
+
namespace at::impl {
|
| 39 |
+
|
| 40 |
+
struct TORCH_API VariableHooksInterface {
|
| 41 |
+
virtual ~VariableHooksInterface() = default;
|
| 42 |
+
virtual TensorBase tensor_data(const TensorBase&) const = 0;
|
| 43 |
+
virtual TensorBase variable_data(const TensorBase&) const = 0;
|
| 44 |
+
virtual const std::shared_ptr<torch::autograd::Node>& grad_fn(
|
| 45 |
+
const TensorBase&) const = 0;
|
| 46 |
+
virtual unsigned _register_hook(
|
| 47 |
+
const TensorBase&,
|
| 48 |
+
std::function<TensorBase(const TensorBase&)> hook) const = 0;
|
| 49 |
+
virtual void remove_hook(const TensorBase&, unsigned pos) const = 0;
|
| 50 |
+
virtual bool is_view(const TensorBase&) const = 0;
|
| 51 |
+
virtual const TensorBase& base(const TensorBase&) const = 0;
|
| 52 |
+
virtual const std::string& name(const TensorBase&) const = 0;
|
| 53 |
+
virtual bool is_leaf(const TensorBase&) const = 0;
|
| 54 |
+
virtual int64_t output_nr(const TensorBase&) const = 0;
|
| 55 |
+
virtual void set_data(const TensorBase&, const TensorBase&) const = 0;
|
| 56 |
+
virtual TensorBase data(const TensorBase&) const = 0;
|
| 57 |
+
virtual int64_t _version(const TensorBase&) const = 0;
|
| 58 |
+
virtual void retain_grad(const TensorBase&) const = 0;
|
| 59 |
+
virtual bool retains_grad(const TensorBase&) const = 0;
|
| 60 |
+
virtual void _backward(
|
| 61 |
+
const Tensor&,
|
| 62 |
+
TensorList,
|
| 63 |
+
const std::optional<Tensor>&,
|
| 64 |
+
std::optional<bool>,
|
| 65 |
+
bool) const = 0;
|
| 66 |
+
virtual void requires_grad_(const TensorBase&, bool) const = 0;
|
| 67 |
+
virtual void basic_autograd_not_implemented_fallback(
|
| 68 |
+
const c10::OperatorHandle& op,
|
| 69 |
+
c10::DispatchKeySet dispatch_keys,
|
| 70 |
+
torch::jit::Stack* stack) const = 0;
|
| 71 |
+
};
|
| 72 |
+
|
| 73 |
+
TORCH_API void SetVariableHooks(VariableHooksInterface* hooks);
|
| 74 |
+
TORCH_API VariableHooksInterface* GetVariableHooks();
|
| 75 |
+
TORCH_API bool HasVariableHooks();
|
| 76 |
+
|
| 77 |
+
struct TORCH_API VariableHooksRegisterer {
|
| 78 |
+
explicit VariableHooksRegisterer(VariableHooksInterface* hooks) {
|
| 79 |
+
SetVariableHooks(hooks);
|
| 80 |
+
}
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
} // namespace at::impl
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <utility>
|
| 4 |
+
|
| 5 |
+
#include <c10/util/ArrayRef.h>
|
| 6 |
+
#include <ATen/core/List.h>
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
|
| 10 |
+
// This class allows you to write variadic functions which
|
| 11 |
+
// call a (possibly overloaded) function on each argument,
|
| 12 |
+
// in order. This is most commonly used in autogenerated code,
|
| 13 |
+
// where it is convenient to have a function that can uniformly
|
| 14 |
+
// take arguments of different types. If your arguments
|
| 15 |
+
// are homogenous consider using a std::initializer_list instead.
|
| 16 |
+
//
|
| 17 |
+
// For examples of this in use, see torch/csrc/utils/variadic.h
|
| 18 |
+
template <typename F>
|
| 19 |
+
struct IterArgs {
|
| 20 |
+
template <typename... Args>
|
| 21 |
+
inline F& apply() {
|
| 22 |
+
return self();
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
// NB: Use perfect forwarding here, otherwise we'll make value
|
| 26 |
+
// copies of all arguments!
|
| 27 |
+
template <typename T, typename... Args>
|
| 28 |
+
inline F& apply(T&& arg, Args&&... args) {
|
| 29 |
+
self()(std::forward<T>(arg));
|
| 30 |
+
if (self().short_circuit()) {
|
| 31 |
+
return self();
|
| 32 |
+
} else {
|
| 33 |
+
return apply(std::forward<Args>(args)...);
|
| 34 |
+
}
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
// Here are some handy overloads which provide sensible
|
| 38 |
+
// defaults for container-like structures that one might
|
| 39 |
+
// be interested in recursing into. You can enable them
|
| 40 |
+
// by adding:
|
| 41 |
+
//
|
| 42 |
+
// using IterArgs<YourStructName>::operator()
|
| 43 |
+
//
|
| 44 |
+
// to your struct. These are not enabled by default because
|
| 45 |
+
// you may be able to process these structures more efficiently
|
| 46 |
+
// than handling them one-by-one.
|
| 47 |
+
|
| 48 |
+
template <typename T>
|
| 49 |
+
void operator()(c10::IListRef<T> args) {
|
| 50 |
+
for (const auto& arg : args) {
|
| 51 |
+
self()(arg);
|
| 52 |
+
if (self().short_circuit())
|
| 53 |
+
return;
|
| 54 |
+
}
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
template <typename T>
|
| 58 |
+
void operator()(at::ArrayRef<T> args) {
|
| 59 |
+
for (const auto& arg : args) {
|
| 60 |
+
self()(arg);
|
| 61 |
+
if (self().short_circuit())
|
| 62 |
+
return;
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
template <typename T>
|
| 67 |
+
void operator()(const torch::List<T>& args) {
|
| 68 |
+
for (const auto& arg : args) {
|
| 69 |
+
self()(arg);
|
| 70 |
+
if (self().short_circuit())
|
| 71 |
+
return;
|
| 72 |
+
}
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
// NB: we need to specify std::vector manually as C++ won't
|
| 76 |
+
// do an implicit conversion to make a template deduction go through.
|
| 77 |
+
template <typename T>
|
| 78 |
+
void operator()(const std::vector<T>& args) {
|
| 79 |
+
self()(at::ArrayRef<T>{args});
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
constexpr bool short_circuit() const {
|
| 83 |
+
return false;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
private:
|
| 87 |
+
inline F& self() {
|
| 88 |
+
return *static_cast<F*>(this);
|
| 89 |
+
}
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
} // namespace torch
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ostream>
|
| 3 |
+
#include <sstream>
|
| 4 |
+
#include <unordered_map>
|
| 5 |
+
|
| 6 |
+
#include <c10/core/impl/LocalDispatchKeySet.h>
|
| 7 |
+
|
| 8 |
+
namespace at::vitals {
|
| 9 |
+
|
| 10 |
+
TORCH_API bool torchVitalEnabled();
|
| 11 |
+
|
| 12 |
+
struct TORCH_API TorchVitalAttr {
|
| 13 |
+
// always initialized to empty
|
| 14 |
+
std::string value = "";
|
| 15 |
+
template <typename T>
|
| 16 |
+
TorchVitalAttr& operator<<(const T& t) {
|
| 17 |
+
if (torchVitalEnabled()) {
|
| 18 |
+
std::stringstream ss;
|
| 19 |
+
ss << t;
|
| 20 |
+
value += ss.str();
|
| 21 |
+
}
|
| 22 |
+
return *this;
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
template <typename T>
|
| 26 |
+
void write(const T& t, bool force) {
|
| 27 |
+
if (force || torchVitalEnabled()) {
|
| 28 |
+
std::stringstream ss;
|
| 29 |
+
ss << t;
|
| 30 |
+
value = ss.str();
|
| 31 |
+
}
|
| 32 |
+
}
|
| 33 |
+
};
|
| 34 |
+
|
| 35 |
+
struct TORCH_API TorchVital {
|
| 36 |
+
std::string name;
|
| 37 |
+
std::unordered_map<std::string, TorchVitalAttr> attrs;
|
| 38 |
+
|
| 39 |
+
explicit TorchVital(std::string n) : name(std::move(n)) {}
|
| 40 |
+
TorchVital(const TorchVital&) = default;
|
| 41 |
+
TorchVital(TorchVital&&) = default;
|
| 42 |
+
TorchVital() = delete;
|
| 43 |
+
|
| 44 |
+
TorchVitalAttr& create(const std::string& attr);
|
| 45 |
+
TorchVitalAttr& create(const std::string& attr, bool force);
|
| 46 |
+
friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt);
|
| 47 |
+
|
| 48 |
+
~TorchVital();
|
| 49 |
+
};
|
| 50 |
+
|
| 51 |
+
std::ostream& operator<<(std::ostream& os, TorchVital const& tv);
|
| 52 |
+
|
| 53 |
+
// A way to access vitals by string names instead of by global reference.
|
| 54 |
+
// This enables access to vitals from the PythonAPI.
|
| 55 |
+
class TORCH_API APIVitals {
|
| 56 |
+
public:
|
| 57 |
+
bool vitals_enabled;
|
| 58 |
+
|
| 59 |
+
// Set any vital sign that was added to the map.
|
| 60 |
+
bool setVital(
|
| 61 |
+
const std::string& vital_name,
|
| 62 |
+
const std::string& attr_name,
|
| 63 |
+
const std::string& value,
|
| 64 |
+
bool force = false);
|
| 65 |
+
std::string readVitals();
|
| 66 |
+
|
| 67 |
+
APIVitals();
|
| 68 |
+
|
| 69 |
+
// Ensure this stays a singleton
|
| 70 |
+
APIVitals(APIVitals const& other) = delete;
|
| 71 |
+
APIVitals(APIVitals&& other) = delete;
|
| 72 |
+
APIVitals& operator=(const APIVitals&) = delete;
|
| 73 |
+
APIVitals& operator=(APIVitals&&) = delete;
|
| 74 |
+
|
| 75 |
+
private:
|
| 76 |
+
std::unordered_map<std::string, TorchVital> name_map_;
|
| 77 |
+
};
|
| 78 |
+
|
| 79 |
+
extern TORCH_API APIVitals VitalsAPI;
|
| 80 |
+
|
| 81 |
+
} // namespace at::vitals
|
| 82 |
+
|
| 83 |
+
#define TORCH_VITAL_DECLARE(name) \
|
| 84 |
+
TORCH_API at::vitals::TorchVital TorchVital_##name;
|
| 85 |
+
|
| 86 |
+
#define TORCH_VITAL_DEFINE(name) \
|
| 87 |
+
TORCH_API at::vitals::TorchVital TorchVital_##name(#name);
|
| 88 |
+
|
| 89 |
+
#define TORCH_VITAL_BASE(name) TorchVital_##name
|
| 90 |
+
|
| 91 |
+
#define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr)
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h
ADDED
|
@@ -0,0 +1,2264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from aten_interned_strings.h
|
| 4 |
+
|
| 5 |
+
#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
| 6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
| 7 |
+
meaning the file will need to be re-compiled every time an operator \
|
| 8 |
+
is changed or added. Consider if including <ATen/core/symbol.h> for \
|
| 9 |
+
the c10::Symbol class would be sufficient, or if your change would be \
|
| 10 |
+
better placed in another file.
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
// ATen symbols correspond exactly to operators defined in ATen. Every
|
| 14 |
+
// symbol here corresponds exactly to an ATen operation defined in
|
| 15 |
+
// native_functions.yaml; attributes are in one-to-one correspondence
|
| 16 |
+
// with their ATen name.
|
| 17 |
+
|
| 18 |
+
#define FORALL_ATEN_BASE_SYMBOLS(_) \
|
| 19 |
+
_(aten, __and__) \
|
| 20 |
+
_(aten, __iand__) \
|
| 21 |
+
_(aten, __ilshift__) \
|
| 22 |
+
_(aten, __ior__) \
|
| 23 |
+
_(aten, __irshift__) \
|
| 24 |
+
_(aten, __ixor__) \
|
| 25 |
+
_(aten, __lshift__) \
|
| 26 |
+
_(aten, __or__) \
|
| 27 |
+
_(aten, __rshift__) \
|
| 28 |
+
_(aten, __xor__) \
|
| 29 |
+
_(aten, _adaptive_avg_pool2d) \
|
| 30 |
+
_(aten, _adaptive_avg_pool2d_backward) \
|
| 31 |
+
_(aten, _adaptive_avg_pool3d) \
|
| 32 |
+
_(aten, _adaptive_avg_pool3d_backward) \
|
| 33 |
+
_(aten, _add_batch_dim) \
|
| 34 |
+
_(aten, _add_relu) \
|
| 35 |
+
_(aten, _add_relu_) \
|
| 36 |
+
_(aten, _addmm_activation) \
|
| 37 |
+
_(aten, _aminmax) \
|
| 38 |
+
_(aten, _amp_foreach_non_finite_check_and_unscale) \
|
| 39 |
+
_(aten, _amp_foreach_non_finite_check_and_unscale_) \
|
| 40 |
+
_(aten, _amp_update_scale) \
|
| 41 |
+
_(aten, _amp_update_scale_) \
|
| 42 |
+
_(aten, _assert_async) \
|
| 43 |
+
_(aten, _assert_scalar) \
|
| 44 |
+
_(aten, _assert_tensor_metadata) \
|
| 45 |
+
_(aten, _autocast_to_full_precision) \
|
| 46 |
+
_(aten, _autocast_to_reduced_precision) \
|
| 47 |
+
_(aten, _backward) \
|
| 48 |
+
_(aten, _batch_norm_impl_index) \
|
| 49 |
+
_(aten, _batch_norm_impl_index_backward) \
|
| 50 |
+
_(aten, _batch_norm_no_update) \
|
| 51 |
+
_(aten, _batch_norm_with_update) \
|
| 52 |
+
_(aten, _batch_norm_with_update_functional) \
|
| 53 |
+
_(aten, _cast_Byte) \
|
| 54 |
+
_(aten, _cast_Char) \
|
| 55 |
+
_(aten, _cast_Double) \
|
| 56 |
+
_(aten, _cast_Float) \
|
| 57 |
+
_(aten, _cast_Half) \
|
| 58 |
+
_(aten, _cast_Int) \
|
| 59 |
+
_(aten, _cast_Long) \
|
| 60 |
+
_(aten, _cast_Short) \
|
| 61 |
+
_(aten, _cdist_backward) \
|
| 62 |
+
_(aten, _cdist_forward) \
|
| 63 |
+
_(aten, _cholesky_solve_helper) \
|
| 64 |
+
_(aten, _choose_qparams_per_tensor) \
|
| 65 |
+
_(aten, _chunk_cat) \
|
| 66 |
+
_(aten, _coalesce) \
|
| 67 |
+
_(aten, _coalesced) \
|
| 68 |
+
_(aten, _coalesced_) \
|
| 69 |
+
_(aten, _compute_linear_combination) \
|
| 70 |
+
_(aten, _conj) \
|
| 71 |
+
_(aten, _conj_copy) \
|
| 72 |
+
_(aten, _conj_physical) \
|
| 73 |
+
_(aten, _conv_depthwise2d) \
|
| 74 |
+
_(aten, _convert_indices_from_coo_to_csr) \
|
| 75 |
+
_(aten, _convert_indices_from_csr_to_coo) \
|
| 76 |
+
_(aten, _convert_weight_to_int4pack) \
|
| 77 |
+
_(aten, _convolution) \
|
| 78 |
+
_(aten, _convolution_double_backward) \
|
| 79 |
+
_(aten, _convolution_mode) \
|
| 80 |
+
_(aten, _copy_from) \
|
| 81 |
+
_(aten, _copy_from_and_resize) \
|
| 82 |
+
_(aten, _cslt_compress) \
|
| 83 |
+
_(aten, _cslt_sparse_mm) \
|
| 84 |
+
_(aten, _cslt_sparse_mm_search) \
|
| 85 |
+
_(aten, _ctc_loss) \
|
| 86 |
+
_(aten, _ctc_loss_backward) \
|
| 87 |
+
_(aten, _cudnn_ctc_loss) \
|
| 88 |
+
_(aten, _cudnn_init_dropout_state) \
|
| 89 |
+
_(aten, _cudnn_rnn) \
|
| 90 |
+
_(aten, _cudnn_rnn_backward) \
|
| 91 |
+
_(aten, _cudnn_rnn_flatten_weight) \
|
| 92 |
+
_(aten, _cufft_clear_plan_cache) \
|
| 93 |
+
_(aten, _cufft_get_plan_cache_max_size) \
|
| 94 |
+
_(aten, _cufft_get_plan_cache_size) \
|
| 95 |
+
_(aten, _cufft_set_plan_cache_max_size) \
|
| 96 |
+
_(aten, _cummax_helper) \
|
| 97 |
+
_(aten, _cummin_helper) \
|
| 98 |
+
_(aten, _debug_has_internal_overlap) \
|
| 99 |
+
_(aten, _dimI) \
|
| 100 |
+
_(aten, _dimV) \
|
| 101 |
+
_(aten, _dim_arange) \
|
| 102 |
+
_(aten, _dirichlet_grad) \
|
| 103 |
+
_(aten, _efficient_attention_backward) \
|
| 104 |
+
_(aten, _efficient_attention_forward) \
|
| 105 |
+
_(aten, _efficientzerotensor) \
|
| 106 |
+
_(aten, _embedding_bag) \
|
| 107 |
+
_(aten, _embedding_bag_backward) \
|
| 108 |
+
_(aten, _embedding_bag_dense_backward) \
|
| 109 |
+
_(aten, _embedding_bag_forward_only) \
|
| 110 |
+
_(aten, _embedding_bag_per_sample_weights_backward) \
|
| 111 |
+
_(aten, _embedding_bag_sparse_backward) \
|
| 112 |
+
_(aten, _empty_affine_quantized) \
|
| 113 |
+
_(aten, _empty_per_channel_affine_quantized) \
|
| 114 |
+
_(aten, _euclidean_dist) \
|
| 115 |
+
_(aten, _fake_quantize_learnable_per_channel_affine) \
|
| 116 |
+
_(aten, _fake_quantize_learnable_per_channel_affine_backward) \
|
| 117 |
+
_(aten, _fake_quantize_learnable_per_tensor_affine) \
|
| 118 |
+
_(aten, _fake_quantize_learnable_per_tensor_affine_backward) \
|
| 119 |
+
_(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) \
|
| 120 |
+
_(aten, _fft_c2c) \
|
| 121 |
+
_(aten, _fft_c2r) \
|
| 122 |
+
_(aten, _fft_r2c) \
|
| 123 |
+
_(aten, _fill_mem_eff_dropout_mask) \
|
| 124 |
+
_(aten, _fill_mem_eff_dropout_mask_) \
|
| 125 |
+
_(aten, _flash_attention_backward) \
|
| 126 |
+
_(aten, _flash_attention_forward) \
|
| 127 |
+
_(aten, _foobar) \
|
| 128 |
+
_(aten, _foreach_abs) \
|
| 129 |
+
_(aten, _foreach_abs_) \
|
| 130 |
+
_(aten, _foreach_acos) \
|
| 131 |
+
_(aten, _foreach_acos_) \
|
| 132 |
+
_(aten, _foreach_add) \
|
| 133 |
+
_(aten, _foreach_add_) \
|
| 134 |
+
_(aten, _foreach_addcdiv) \
|
| 135 |
+
_(aten, _foreach_addcdiv_) \
|
| 136 |
+
_(aten, _foreach_addcmul) \
|
| 137 |
+
_(aten, _foreach_addcmul_) \
|
| 138 |
+
_(aten, _foreach_asin) \
|
| 139 |
+
_(aten, _foreach_asin_) \
|
| 140 |
+
_(aten, _foreach_atan) \
|
| 141 |
+
_(aten, _foreach_atan_) \
|
| 142 |
+
_(aten, _foreach_ceil) \
|
| 143 |
+
_(aten, _foreach_ceil_) \
|
| 144 |
+
_(aten, _foreach_clamp_max) \
|
| 145 |
+
_(aten, _foreach_clamp_max_) \
|
| 146 |
+
_(aten, _foreach_clamp_min) \
|
| 147 |
+
_(aten, _foreach_clamp_min_) \
|
| 148 |
+
_(aten, _foreach_copy) \
|
| 149 |
+
_(aten, _foreach_copy_) \
|
| 150 |
+
_(aten, _foreach_cos) \
|
| 151 |
+
_(aten, _foreach_cos_) \
|
| 152 |
+
_(aten, _foreach_cosh) \
|
| 153 |
+
_(aten, _foreach_cosh_) \
|
| 154 |
+
_(aten, _foreach_div) \
|
| 155 |
+
_(aten, _foreach_div_) \
|
| 156 |
+
_(aten, _foreach_erf) \
|
| 157 |
+
_(aten, _foreach_erf_) \
|
| 158 |
+
_(aten, _foreach_erfc) \
|
| 159 |
+
_(aten, _foreach_erfc_) \
|
| 160 |
+
_(aten, _foreach_exp) \
|
| 161 |
+
_(aten, _foreach_exp_) \
|
| 162 |
+
_(aten, _foreach_expm1) \
|
| 163 |
+
_(aten, _foreach_expm1_) \
|
| 164 |
+
_(aten, _foreach_floor) \
|
| 165 |
+
_(aten, _foreach_floor_) \
|
| 166 |
+
_(aten, _foreach_frac) \
|
| 167 |
+
_(aten, _foreach_frac_) \
|
| 168 |
+
_(aten, _foreach_lerp) \
|
| 169 |
+
_(aten, _foreach_lerp_) \
|
| 170 |
+
_(aten, _foreach_lgamma) \
|
| 171 |
+
_(aten, _foreach_lgamma_) \
|
| 172 |
+
_(aten, _foreach_log) \
|
| 173 |
+
_(aten, _foreach_log10) \
|
| 174 |
+
_(aten, _foreach_log10_) \
|
| 175 |
+
_(aten, _foreach_log1p) \
|
| 176 |
+
_(aten, _foreach_log1p_) \
|
| 177 |
+
_(aten, _foreach_log2) \
|
| 178 |
+
_(aten, _foreach_log2_) \
|
| 179 |
+
_(aten, _foreach_log_) \
|
| 180 |
+
_(aten, _foreach_max) \
|
| 181 |
+
_(aten, _foreach_maximum) \
|
| 182 |
+
_(aten, _foreach_maximum_) \
|
| 183 |
+
_(aten, _foreach_minimum) \
|
| 184 |
+
_(aten, _foreach_minimum_) \
|
| 185 |
+
_(aten, _foreach_mul) \
|
| 186 |
+
_(aten, _foreach_mul_) \
|
| 187 |
+
_(aten, _foreach_neg) \
|
| 188 |
+
_(aten, _foreach_neg_) \
|
| 189 |
+
_(aten, _foreach_norm) \
|
| 190 |
+
_(aten, _foreach_pow) \
|
| 191 |
+
_(aten, _foreach_pow_) \
|
| 192 |
+
_(aten, _foreach_reciprocal) \
|
| 193 |
+
_(aten, _foreach_reciprocal_) \
|
| 194 |
+
_(aten, _foreach_round) \
|
| 195 |
+
_(aten, _foreach_round_) \
|
| 196 |
+
_(aten, _foreach_sigmoid) \
|
| 197 |
+
_(aten, _foreach_sigmoid_) \
|
| 198 |
+
_(aten, _foreach_sign) \
|
| 199 |
+
_(aten, _foreach_sign_) \
|
| 200 |
+
_(aten, _foreach_sin) \
|
| 201 |
+
_(aten, _foreach_sin_) \
|
| 202 |
+
_(aten, _foreach_sinh) \
|
| 203 |
+
_(aten, _foreach_sinh_) \
|
| 204 |
+
_(aten, _foreach_sqrt) \
|
| 205 |
+
_(aten, _foreach_sqrt_) \
|
| 206 |
+
_(aten, _foreach_sub) \
|
| 207 |
+
_(aten, _foreach_sub_) \
|
| 208 |
+
_(aten, _foreach_tan) \
|
| 209 |
+
_(aten, _foreach_tan_) \
|
| 210 |
+
_(aten, _foreach_tanh) \
|
| 211 |
+
_(aten, _foreach_tanh_) \
|
| 212 |
+
_(aten, _foreach_trunc) \
|
| 213 |
+
_(aten, _foreach_trunc_) \
|
| 214 |
+
_(aten, _foreach_zero) \
|
| 215 |
+
_(aten, _foreach_zero_) \
|
| 216 |
+
_(aten, _functional_assert_async) \
|
| 217 |
+
_(aten, _functional_assert_scalar) \
|
| 218 |
+
_(aten, _functional_sym_constrain_range) \
|
| 219 |
+
_(aten, _functional_sym_constrain_range_for_size) \
|
| 220 |
+
_(aten, _fused_adagrad) \
|
| 221 |
+
_(aten, _fused_adagrad_) \
|
| 222 |
+
_(aten, _fused_adam) \
|
| 223 |
+
_(aten, _fused_adam_) \
|
| 224 |
+
_(aten, _fused_adamw) \
|
| 225 |
+
_(aten, _fused_adamw_) \
|
| 226 |
+
_(aten, _fused_dropout) \
|
| 227 |
+
_(aten, _fused_moving_avg_obs_fq_helper) \
|
| 228 |
+
_(aten, _fused_moving_avg_obs_fq_helper_functional) \
|
| 229 |
+
_(aten, _fused_sdp_choice) \
|
| 230 |
+
_(aten, _fused_sgd) \
|
| 231 |
+
_(aten, _fused_sgd_) \
|
| 232 |
+
_(aten, _fw_primal) \
|
| 233 |
+
_(aten, _fw_primal_copy) \
|
| 234 |
+
_(aten, _gather_sparse_backward) \
|
| 235 |
+
_(aten, _grid_sampler_2d_cpu_fallback) \
|
| 236 |
+
_(aten, _grid_sampler_2d_cpu_fallback_backward) \
|
| 237 |
+
_(aten, _has_compatible_shallow_copy_type) \
|
| 238 |
+
_(aten, _has_same_storage_numel) \
|
| 239 |
+
_(aten, _histogramdd_bin_edges) \
|
| 240 |
+
_(aten, _histogramdd_from_bin_cts) \
|
| 241 |
+
_(aten, _histogramdd_from_bin_tensors) \
|
| 242 |
+
_(aten, _index_put_impl) \
|
| 243 |
+
_(aten, _index_put_impl_) \
|
| 244 |
+
_(aten, _indices) \
|
| 245 |
+
_(aten, _indices_copy) \
|
| 246 |
+
_(aten, _int_mm) \
|
| 247 |
+
_(aten, _is_all_true) \
|
| 248 |
+
_(aten, _is_any_true) \
|
| 249 |
+
_(aten, _is_zerotensor) \
|
| 250 |
+
_(aten, _jagged_to_padded_dense_forward) \
|
| 251 |
+
_(aten, _lazy_clone) \
|
| 252 |
+
_(aten, _linalg_check_errors) \
|
| 253 |
+
_(aten, _linalg_det) \
|
| 254 |
+
_(aten, _linalg_eigh) \
|
| 255 |
+
_(aten, _linalg_eigvals) \
|
| 256 |
+
_(aten, _linalg_slogdet) \
|
| 257 |
+
_(aten, _linalg_solve_ex) \
|
| 258 |
+
_(aten, _linalg_svd) \
|
| 259 |
+
_(aten, _local_scalar_dense) \
|
| 260 |
+
_(aten, _log_softmax) \
|
| 261 |
+
_(aten, _log_softmax_backward_data) \
|
| 262 |
+
_(aten, _logcumsumexp) \
|
| 263 |
+
_(aten, _lstm_mps) \
|
| 264 |
+
_(aten, _lu_with_info) \
|
| 265 |
+
_(aten, _make_dep_token) \
|
| 266 |
+
_(aten, _make_dual) \
|
| 267 |
+
_(aten, _make_dual_copy) \
|
| 268 |
+
_(aten, _make_per_channel_quantized_tensor) \
|
| 269 |
+
_(aten, _make_per_tensor_quantized_tensor) \
|
| 270 |
+
_(aten, _masked_scale) \
|
| 271 |
+
_(aten, _masked_softmax) \
|
| 272 |
+
_(aten, _masked_softmax_backward) \
|
| 273 |
+
_(aten, _mixed_dtypes_linear) \
|
| 274 |
+
_(aten, _mkldnn_reshape) \
|
| 275 |
+
_(aten, _mkldnn_transpose) \
|
| 276 |
+
_(aten, _mkldnn_transpose_) \
|
| 277 |
+
_(aten, _mps_convolution) \
|
| 278 |
+
_(aten, _mps_convolution_transpose) \
|
| 279 |
+
_(aten, _native_batch_norm_legit) \
|
| 280 |
+
_(aten, _native_batch_norm_legit_functional) \
|
| 281 |
+
_(aten, _native_batch_norm_legit_no_training) \
|
| 282 |
+
_(aten, _native_multi_head_attention) \
|
| 283 |
+
_(aten, _neg_view) \
|
| 284 |
+
_(aten, _neg_view_copy) \
|
| 285 |
+
_(aten, _nested_compute_contiguous_strides_offsets) \
|
| 286 |
+
_(aten, _nested_from_padded) \
|
| 287 |
+
_(aten, _nested_from_padded_and_nested_example) \
|
| 288 |
+
_(aten, _nested_get_jagged_dummy) \
|
| 289 |
+
_(aten, _nested_get_lengths) \
|
| 290 |
+
_(aten, _nested_get_max_seqlen) \
|
| 291 |
+
_(aten, _nested_get_min_seqlen) \
|
| 292 |
+
_(aten, _nested_get_offsets) \
|
| 293 |
+
_(aten, _nested_get_ragged_idx) \
|
| 294 |
+
_(aten, _nested_get_values) \
|
| 295 |
+
_(aten, _nested_get_values_copy) \
|
| 296 |
+
_(aten, _nested_select_backward) \
|
| 297 |
+
_(aten, _nested_sum_backward) \
|
| 298 |
+
_(aten, _nested_tensor_from_mask) \
|
| 299 |
+
_(aten, _nested_tensor_from_mask_left_aligned) \
|
| 300 |
+
_(aten, _nested_tensor_from_tensor_list) \
|
| 301 |
+
_(aten, _nested_tensor_size) \
|
| 302 |
+
_(aten, _nested_tensor_softmax_with_shape) \
|
| 303 |
+
_(aten, _nested_tensor_storage_offsets) \
|
| 304 |
+
_(aten, _nested_tensor_strides) \
|
| 305 |
+
_(aten, _nested_view_from_buffer) \
|
| 306 |
+
_(aten, _nested_view_from_buffer_copy) \
|
| 307 |
+
_(aten, _nested_view_from_jagged) \
|
| 308 |
+
_(aten, _nested_view_from_jagged_copy) \
|
| 309 |
+
_(aten, _new_zeros_with_same_feature_meta) \
|
| 310 |
+
_(aten, _nnpack_available) \
|
| 311 |
+
_(aten, _nnpack_spatial_convolution) \
|
| 312 |
+
_(aten, _nnz) \
|
| 313 |
+
_(aten, _pack_padded_sequence) \
|
| 314 |
+
_(aten, _pack_padded_sequence_backward) \
|
| 315 |
+
_(aten, _pad_circular) \
|
| 316 |
+
_(aten, _pad_enum) \
|
| 317 |
+
_(aten, _pad_packed_sequence) \
|
| 318 |
+
_(aten, _padded_dense_to_jagged_forward) \
|
| 319 |
+
_(aten, _pdist_backward) \
|
| 320 |
+
_(aten, _pdist_forward) \
|
| 321 |
+
_(aten, _pin_memory) \
|
| 322 |
+
_(aten, _prelu_kernel) \
|
| 323 |
+
_(aten, _prelu_kernel_backward) \
|
| 324 |
+
_(aten, _print) \
|
| 325 |
+
_(aten, _propagate_xla_data) \
|
| 326 |
+
_(aten, _remove_batch_dim) \
|
| 327 |
+
_(aten, _reshape_alias) \
|
| 328 |
+
_(aten, _reshape_alias_copy) \
|
| 329 |
+
_(aten, _reshape_copy) \
|
| 330 |
+
_(aten, _reshape_from_tensor) \
|
| 331 |
+
_(aten, _resize_output) \
|
| 332 |
+
_(aten, _resize_output_) \
|
| 333 |
+
_(aten, _rowwise_prune) \
|
| 334 |
+
_(aten, _safe_softmax) \
|
| 335 |
+
_(aten, _sample_dirichlet) \
|
| 336 |
+
_(aten, _saturate_weight_to_fp16) \
|
| 337 |
+
_(aten, _scaled_dot_product_attention_math) \
|
| 338 |
+
_(aten, _scaled_dot_product_attention_math_for_mps) \
|
| 339 |
+
_(aten, _scaled_dot_product_cudnn_attention) \
|
| 340 |
+
_(aten, _scaled_dot_product_cudnn_attention_backward) \
|
| 341 |
+
_(aten, _scaled_dot_product_efficient_attention) \
|
| 342 |
+
_(aten, _scaled_dot_product_efficient_attention_backward) \
|
| 343 |
+
_(aten, _scaled_dot_product_flash_attention) \
|
| 344 |
+
_(aten, _scaled_dot_product_flash_attention_backward) \
|
| 345 |
+
_(aten, _scaled_dot_product_flash_attention_for_cpu) \
|
| 346 |
+
_(aten, _scaled_dot_product_flash_attention_for_cpu_backward) \
|
| 347 |
+
_(aten, _scaled_dot_product_fused_attention_overrideable) \
|
| 348 |
+
_(aten, _scaled_dot_product_fused_attention_overrideable_backward) \
|
| 349 |
+
_(aten, _scaled_mm) \
|
| 350 |
+
_(aten, _segment_reduce_backward) \
|
| 351 |
+
_(aten, _shape_as_tensor) \
|
| 352 |
+
_(aten, _slow_conv2d_backward) \
|
| 353 |
+
_(aten, _slow_conv2d_forward) \
|
| 354 |
+
_(aten, _sobol_engine_draw) \
|
| 355 |
+
_(aten, _sobol_engine_ff) \
|
| 356 |
+
_(aten, _sobol_engine_ff_) \
|
| 357 |
+
_(aten, _sobol_engine_initialize_state) \
|
| 358 |
+
_(aten, _sobol_engine_initialize_state_) \
|
| 359 |
+
_(aten, _sobol_engine_scramble) \
|
| 360 |
+
_(aten, _sobol_engine_scramble_) \
|
| 361 |
+
_(aten, _softmax) \
|
| 362 |
+
_(aten, _softmax_backward_data) \
|
| 363 |
+
_(aten, _sparse_addmm) \
|
| 364 |
+
_(aten, _sparse_broadcast_to) \
|
| 365 |
+
_(aten, _sparse_broadcast_to_copy) \
|
| 366 |
+
_(aten, _sparse_bsc_tensor_unsafe) \
|
| 367 |
+
_(aten, _sparse_bsr_tensor_unsafe) \
|
| 368 |
+
_(aten, _sparse_compressed_tensor_unsafe) \
|
| 369 |
+
_(aten, _sparse_compressed_tensor_with_dims) \
|
| 370 |
+
_(aten, _sparse_coo_tensor_unsafe) \
|
| 371 |
+
_(aten, _sparse_coo_tensor_with_dims) \
|
| 372 |
+
_(aten, _sparse_coo_tensor_with_dims_and_tensors) \
|
| 373 |
+
_(aten, _sparse_csc_tensor_unsafe) \
|
| 374 |
+
_(aten, _sparse_csr_prod) \
|
| 375 |
+
_(aten, _sparse_csr_sum) \
|
| 376 |
+
_(aten, _sparse_csr_tensor_unsafe) \
|
| 377 |
+
_(aten, _sparse_log_softmax) \
|
| 378 |
+
_(aten, _sparse_log_softmax_backward_data) \
|
| 379 |
+
_(aten, _sparse_mask_projection) \
|
| 380 |
+
_(aten, _sparse_mm) \
|
| 381 |
+
_(aten, _sparse_mm_reduce_impl) \
|
| 382 |
+
_(aten, _sparse_mm_reduce_impl_backward) \
|
| 383 |
+
_(aten, _sparse_semi_structured_addmm) \
|
| 384 |
+
_(aten, _sparse_semi_structured_apply) \
|
| 385 |
+
_(aten, _sparse_semi_structured_apply_dense) \
|
| 386 |
+
_(aten, _sparse_semi_structured_linear) \
|
| 387 |
+
_(aten, _sparse_semi_structured_mm) \
|
| 388 |
+
_(aten, _sparse_semi_structured_tile) \
|
| 389 |
+
_(aten, _sparse_softmax) \
|
| 390 |
+
_(aten, _sparse_softmax_backward_data) \
|
| 391 |
+
_(aten, _sparse_sparse_matmul) \
|
| 392 |
+
_(aten, _sparse_sum) \
|
| 393 |
+
_(aten, _sparse_sum_backward) \
|
| 394 |
+
_(aten, _spdiags) \
|
| 395 |
+
_(aten, _spsolve) \
|
| 396 |
+
_(aten, _stack) \
|
| 397 |
+
_(aten, _standard_gamma) \
|
| 398 |
+
_(aten, _standard_gamma_grad) \
|
| 399 |
+
_(aten, _test_ambiguous_defaults) \
|
| 400 |
+
_(aten, _test_autograd_multiple_dispatch) \
|
| 401 |
+
_(aten, _test_autograd_multiple_dispatch_view) \
|
| 402 |
+
_(aten, _test_autograd_multiple_dispatch_view_copy) \
|
| 403 |
+
_(aten, _test_check_tensor) \
|
| 404 |
+
_(aten, _test_functorch_fallback) \
|
| 405 |
+
_(aten, _test_optional_filled_intlist) \
|
| 406 |
+
_(aten, _test_optional_floatlist) \
|
| 407 |
+
_(aten, _test_optional_intlist) \
|
| 408 |
+
_(aten, _test_parallel_materialize) \
|
| 409 |
+
_(aten, _test_serialization_subcmul) \
|
| 410 |
+
_(aten, _test_string_default) \
|
| 411 |
+
_(aten, _test_warn_in_autograd) \
|
| 412 |
+
_(aten, _thnn_differentiable_gru_cell_backward) \
|
| 413 |
+
_(aten, _thnn_differentiable_lstm_cell_backward) \
|
| 414 |
+
_(aten, _thnn_fused_gru_cell) \
|
| 415 |
+
_(aten, _thnn_fused_gru_cell_backward) \
|
| 416 |
+
_(aten, _thnn_fused_lstm_cell) \
|
| 417 |
+
_(aten, _thnn_fused_lstm_cell_backward) \
|
| 418 |
+
_(aten, _thnn_fused_lstm_cell_backward_impl) \
|
| 419 |
+
_(aten, _to_copy) \
|
| 420 |
+
_(aten, _to_cpu) \
|
| 421 |
+
_(aten, _to_dense) \
|
| 422 |
+
_(aten, _to_sparse) \
|
| 423 |
+
_(aten, _to_sparse_bsc) \
|
| 424 |
+
_(aten, _to_sparse_bsr) \
|
| 425 |
+
_(aten, _to_sparse_csc) \
|
| 426 |
+
_(aten, _to_sparse_csr) \
|
| 427 |
+
_(aten, _to_sparse_semi_structured) \
|
| 428 |
+
_(aten, _transform_bias_rescale_qkv) \
|
| 429 |
+
_(aten, _transformer_encoder_layer_fwd) \
|
| 430 |
+
_(aten, _trilinear) \
|
| 431 |
+
_(aten, _triton_multi_head_attention) \
|
| 432 |
+
_(aten, _triton_scaled_dot_attention) \
|
| 433 |
+
_(aten, _unique) \
|
| 434 |
+
_(aten, _unique2) \
|
| 435 |
+
_(aten, _unpack_dual) \
|
| 436 |
+
_(aten, _unsafe_index) \
|
| 437 |
+
_(aten, _unsafe_index_put) \
|
| 438 |
+
_(aten, _unsafe_masked_index) \
|
| 439 |
+
_(aten, _unsafe_masked_index_put_accumulate) \
|
| 440 |
+
_(aten, _unsafe_view) \
|
| 441 |
+
_(aten, _upsample_bicubic2d_aa) \
|
| 442 |
+
_(aten, _upsample_bicubic2d_aa_backward) \
|
| 443 |
+
_(aten, _upsample_bilinear2d_aa) \
|
| 444 |
+
_(aten, _upsample_bilinear2d_aa_backward) \
|
| 445 |
+
_(aten, _upsample_nearest_exact1d) \
|
| 446 |
+
_(aten, _upsample_nearest_exact1d_backward) \
|
| 447 |
+
_(aten, _upsample_nearest_exact2d) \
|
| 448 |
+
_(aten, _upsample_nearest_exact2d_backward) \
|
| 449 |
+
_(aten, _upsample_nearest_exact3d) \
|
| 450 |
+
_(aten, _upsample_nearest_exact3d_backward) \
|
| 451 |
+
_(aten, _use_cudnn_ctc_loss) \
|
| 452 |
+
_(aten, _use_cudnn_rnn_flatten_weight) \
|
| 453 |
+
_(aten, _validate_compressed_sparse_indices) \
|
| 454 |
+
_(aten, _validate_sparse_bsc_tensor_args) \
|
| 455 |
+
_(aten, _validate_sparse_bsr_tensor_args) \
|
| 456 |
+
_(aten, _validate_sparse_compressed_tensor_args) \
|
| 457 |
+
_(aten, _validate_sparse_coo_tensor_args) \
|
| 458 |
+
_(aten, _validate_sparse_csc_tensor_args) \
|
| 459 |
+
_(aten, _validate_sparse_csr_tensor_args) \
|
| 460 |
+
_(aten, _values) \
|
| 461 |
+
_(aten, _values_copy) \
|
| 462 |
+
_(aten, _version) \
|
| 463 |
+
_(aten, _weight_int4pack_mm) \
|
| 464 |
+
_(aten, _weight_int8pack_mm) \
|
| 465 |
+
_(aten, _weight_norm) \
|
| 466 |
+
_(aten, _weight_norm_differentiable_backward) \
|
| 467 |
+
_(aten, _weight_norm_interface) \
|
| 468 |
+
_(aten, _weight_norm_interface_backward) \
|
| 469 |
+
_(aten, _wrapped_linear_prepack) \
|
| 470 |
+
_(aten, _wrapped_quantized_linear_prepacked) \
|
| 471 |
+
_(aten, abs) \
|
| 472 |
+
_(aten, abs_) \
|
| 473 |
+
_(aten, absolute) \
|
| 474 |
+
_(aten, absolute_) \
|
| 475 |
+
_(aten, acos) \
|
| 476 |
+
_(aten, acos_) \
|
| 477 |
+
_(aten, acosh) \
|
| 478 |
+
_(aten, acosh_) \
|
| 479 |
+
_(aten, adaptive_avg_pool1d) \
|
| 480 |
+
_(aten, adaptive_avg_pool2d) \
|
| 481 |
+
_(aten, adaptive_avg_pool3d) \
|
| 482 |
+
_(aten, adaptive_avg_pool3d_backward) \
|
| 483 |
+
_(aten, adaptive_max_pool1d) \
|
| 484 |
+
_(aten, adaptive_max_pool2d) \
|
| 485 |
+
_(aten, adaptive_max_pool2d_backward) \
|
| 486 |
+
_(aten, adaptive_max_pool3d) \
|
| 487 |
+
_(aten, adaptive_max_pool3d_backward) \
|
| 488 |
+
_(aten, add) \
|
| 489 |
+
_(aten, add_) \
|
| 490 |
+
_(aten, addbmm) \
|
| 491 |
+
_(aten, addbmm_) \
|
| 492 |
+
_(aten, addcdiv) \
|
| 493 |
+
_(aten, addcdiv_) \
|
| 494 |
+
_(aten, addcmul) \
|
| 495 |
+
_(aten, addcmul_) \
|
| 496 |
+
_(aten, addmm) \
|
| 497 |
+
_(aten, addmm_) \
|
| 498 |
+
_(aten, addmv) \
|
| 499 |
+
_(aten, addmv_) \
|
| 500 |
+
_(aten, addr) \
|
| 501 |
+
_(aten, addr_) \
|
| 502 |
+
_(aten, adjoint) \
|
| 503 |
+
_(aten, affine_grid_generator) \
|
| 504 |
+
_(aten, affine_grid_generator_backward) \
|
| 505 |
+
_(aten, alias) \
|
| 506 |
+
_(aten, alias_copy) \
|
| 507 |
+
_(aten, align_as) \
|
| 508 |
+
_(aten, align_tensors) \
|
| 509 |
+
_(aten, align_to) \
|
| 510 |
+
_(aten, all) \
|
| 511 |
+
_(aten, allclose) \
|
| 512 |
+
_(aten, alpha_dropout) \
|
| 513 |
+
_(aten, alpha_dropout_) \
|
| 514 |
+
_(aten, amax) \
|
| 515 |
+
_(aten, amin) \
|
| 516 |
+
_(aten, aminmax) \
|
| 517 |
+
_(aten, angle) \
|
| 518 |
+
_(aten, any) \
|
| 519 |
+
_(aten, arange) \
|
| 520 |
+
_(aten, arccos) \
|
| 521 |
+
_(aten, arccos_) \
|
| 522 |
+
_(aten, arccosh) \
|
| 523 |
+
_(aten, arccosh_) \
|
| 524 |
+
_(aten, arcsin) \
|
| 525 |
+
_(aten, arcsin_) \
|
| 526 |
+
_(aten, arcsinh) \
|
| 527 |
+
_(aten, arcsinh_) \
|
| 528 |
+
_(aten, arctan) \
|
| 529 |
+
_(aten, arctan2) \
|
| 530 |
+
_(aten, arctan2_) \
|
| 531 |
+
_(aten, arctan_) \
|
| 532 |
+
_(aten, arctanh) \
|
| 533 |
+
_(aten, arctanh_) \
|
| 534 |
+
_(aten, argmax) \
|
| 535 |
+
_(aten, argmin) \
|
| 536 |
+
_(aten, argsort) \
|
| 537 |
+
_(aten, argwhere) \
|
| 538 |
+
_(aten, as_strided) \
|
| 539 |
+
_(aten, as_strided_) \
|
| 540 |
+
_(aten, as_strided_copy) \
|
| 541 |
+
_(aten, as_strided_scatter) \
|
| 542 |
+
_(aten, asin) \
|
| 543 |
+
_(aten, asin_) \
|
| 544 |
+
_(aten, asinh) \
|
| 545 |
+
_(aten, asinh_) \
|
| 546 |
+
_(aten, atan) \
|
| 547 |
+
_(aten, atan2) \
|
| 548 |
+
_(aten, atan2_) \
|
| 549 |
+
_(aten, atan_) \
|
| 550 |
+
_(aten, atanh) \
|
| 551 |
+
_(aten, atanh_) \
|
| 552 |
+
_(aten, atleast_1d) \
|
| 553 |
+
_(aten, atleast_2d) \
|
| 554 |
+
_(aten, atleast_3d) \
|
| 555 |
+
_(aten, avg_pool1d) \
|
| 556 |
+
_(aten, avg_pool2d) \
|
| 557 |
+
_(aten, avg_pool2d_backward) \
|
| 558 |
+
_(aten, avg_pool3d) \
|
| 559 |
+
_(aten, avg_pool3d_backward) \
|
| 560 |
+
_(aten, baddbmm) \
|
| 561 |
+
_(aten, baddbmm_) \
|
| 562 |
+
_(aten, bartlett_window) \
|
| 563 |
+
_(aten, batch_norm) \
|
| 564 |
+
_(aten, batch_norm_backward) \
|
| 565 |
+
_(aten, batch_norm_backward_elemt) \
|
| 566 |
+
_(aten, batch_norm_backward_reduce) \
|
| 567 |
+
_(aten, batch_norm_elemt) \
|
| 568 |
+
_(aten, batch_norm_gather_stats) \
|
| 569 |
+
_(aten, batch_norm_gather_stats_with_counts) \
|
| 570 |
+
_(aten, batch_norm_stats) \
|
| 571 |
+
_(aten, batch_norm_update_stats) \
|
| 572 |
+
_(aten, bernoulli) \
|
| 573 |
+
_(aten, bernoulli_) \
|
| 574 |
+
_(aten, bilinear) \
|
| 575 |
+
_(aten, binary_cross_entropy) \
|
| 576 |
+
_(aten, binary_cross_entropy_backward) \
|
| 577 |
+
_(aten, binary_cross_entropy_with_logits) \
|
| 578 |
+
_(aten, bincount) \
|
| 579 |
+
_(aten, binomial) \
|
| 580 |
+
_(aten, bitwise_and) \
|
| 581 |
+
_(aten, bitwise_and_) \
|
| 582 |
+
_(aten, bitwise_left_shift) \
|
| 583 |
+
_(aten, bitwise_left_shift_) \
|
| 584 |
+
_(aten, bitwise_not) \
|
| 585 |
+
_(aten, bitwise_not_) \
|
| 586 |
+
_(aten, bitwise_or) \
|
| 587 |
+
_(aten, bitwise_or_) \
|
| 588 |
+
_(aten, bitwise_right_shift) \
|
| 589 |
+
_(aten, bitwise_right_shift_) \
|
| 590 |
+
_(aten, bitwise_xor) \
|
| 591 |
+
_(aten, bitwise_xor_) \
|
| 592 |
+
_(aten, blackman_window) \
|
| 593 |
+
_(aten, block_diag) \
|
| 594 |
+
_(aten, bmm) \
|
| 595 |
+
_(aten, broadcast_tensors) \
|
| 596 |
+
_(aten, broadcast_to) \
|
| 597 |
+
_(aten, bucketize) \
|
| 598 |
+
_(aten, can_cast) \
|
| 599 |
+
_(aten, cartesian_prod) \
|
| 600 |
+
_(aten, cat) \
|
| 601 |
+
_(aten, cauchy) \
|
| 602 |
+
_(aten, cauchy_) \
|
| 603 |
+
_(aten, ccol_indices) \
|
| 604 |
+
_(aten, ccol_indices_copy) \
|
| 605 |
+
_(aten, cdist) \
|
| 606 |
+
_(aten, ceil) \
|
| 607 |
+
_(aten, ceil_) \
|
| 608 |
+
_(aten, celu) \
|
| 609 |
+
_(aten, celu_) \
|
| 610 |
+
_(aten, chain_matmul) \
|
| 611 |
+
_(aten, chalf) \
|
| 612 |
+
_(aten, channel_shuffle) \
|
| 613 |
+
_(aten, cholesky) \
|
| 614 |
+
_(aten, cholesky_inverse) \
|
| 615 |
+
_(aten, cholesky_solve) \
|
| 616 |
+
_(aten, choose_qparams_optimized) \
|
| 617 |
+
_(aten, chunk) \
|
| 618 |
+
_(aten, clamp) \
|
| 619 |
+
_(aten, clamp_) \
|
| 620 |
+
_(aten, clamp_max) \
|
| 621 |
+
_(aten, clamp_max_) \
|
| 622 |
+
_(aten, clamp_min) \
|
| 623 |
+
_(aten, clamp_min_) \
|
| 624 |
+
_(aten, clip) \
|
| 625 |
+
_(aten, clip_) \
|
| 626 |
+
_(aten, clone) \
|
| 627 |
+
_(aten, coalesce) \
|
| 628 |
+
_(aten, col2im) \
|
| 629 |
+
_(aten, col_indices) \
|
| 630 |
+
_(aten, col_indices_copy) \
|
| 631 |
+
_(aten, column_stack) \
|
| 632 |
+
_(aten, combinations) \
|
| 633 |
+
_(aten, complex) \
|
| 634 |
+
_(aten, concat) \
|
| 635 |
+
_(aten, concatenate) \
|
| 636 |
+
_(aten, conj) \
|
| 637 |
+
_(aten, conj_physical) \
|
| 638 |
+
_(aten, conj_physical_) \
|
| 639 |
+
_(aten, constant_pad_nd) \
|
| 640 |
+
_(aten, contiguous) \
|
| 641 |
+
_(aten, conv1d) \
|
| 642 |
+
_(aten, conv2d) \
|
| 643 |
+
_(aten, conv3d) \
|
| 644 |
+
_(aten, conv_depthwise3d) \
|
| 645 |
+
_(aten, conv_tbc) \
|
| 646 |
+
_(aten, conv_tbc_backward) \
|
| 647 |
+
_(aten, conv_transpose1d) \
|
| 648 |
+
_(aten, conv_transpose2d) \
|
| 649 |
+
_(aten, conv_transpose3d) \
|
| 650 |
+
_(aten, convolution) \
|
| 651 |
+
_(aten, convolution_backward) \
|
| 652 |
+
_(aten, convolution_backward_overrideable) \
|
| 653 |
+
_(aten, convolution_overrideable) \
|
| 654 |
+
_(aten, copy) \
|
| 655 |
+
_(aten, copy_) \
|
| 656 |
+
_(aten, copy_sparse_to_sparse) \
|
| 657 |
+
_(aten, copy_sparse_to_sparse_) \
|
| 658 |
+
_(aten, copysign) \
|
| 659 |
+
_(aten, copysign_) \
|
| 660 |
+
_(aten, corrcoef) \
|
| 661 |
+
_(aten, cos) \
|
| 662 |
+
_(aten, cos_) \
|
| 663 |
+
_(aten, cosh) \
|
| 664 |
+
_(aten, cosh_) \
|
| 665 |
+
_(aten, cosine_embedding_loss) \
|
| 666 |
+
_(aten, cosine_similarity) \
|
| 667 |
+
_(aten, count_nonzero) \
|
| 668 |
+
_(aten, cov) \
|
| 669 |
+
_(aten, cross) \
|
| 670 |
+
_(aten, cross_entropy_loss) \
|
| 671 |
+
_(aten, crow_indices) \
|
| 672 |
+
_(aten, crow_indices_copy) \
|
| 673 |
+
_(aten, ctc_loss) \
|
| 674 |
+
_(aten, cudnn_affine_grid_generator) \
|
| 675 |
+
_(aten, cudnn_affine_grid_generator_backward) \
|
| 676 |
+
_(aten, cudnn_batch_norm) \
|
| 677 |
+
_(aten, cudnn_batch_norm_backward) \
|
| 678 |
+
_(aten, cudnn_convolution) \
|
| 679 |
+
_(aten, cudnn_convolution_add_relu) \
|
| 680 |
+
_(aten, cudnn_convolution_relu) \
|
| 681 |
+
_(aten, cudnn_convolution_transpose) \
|
| 682 |
+
_(aten, cudnn_grid_sampler) \
|
| 683 |
+
_(aten, cudnn_grid_sampler_backward) \
|
| 684 |
+
_(aten, cudnn_is_acceptable) \
|
| 685 |
+
_(aten, cummax) \
|
| 686 |
+
_(aten, cummaxmin_backward) \
|
| 687 |
+
_(aten, cummin) \
|
| 688 |
+
_(aten, cumprod) \
|
| 689 |
+
_(aten, cumprod_) \
|
| 690 |
+
_(aten, cumprod_backward) \
|
| 691 |
+
_(aten, cumsum) \
|
| 692 |
+
_(aten, cumsum_) \
|
| 693 |
+
_(aten, cumulative_trapezoid) \
|
| 694 |
+
_(aten, data) \
|
| 695 |
+
_(aten, deg2rad) \
|
| 696 |
+
_(aten, deg2rad_) \
|
| 697 |
+
_(aten, dense_dim) \
|
| 698 |
+
_(aten, dequantize) \
|
| 699 |
+
_(aten, det) \
|
| 700 |
+
_(aten, detach) \
|
| 701 |
+
_(aten, detach_) \
|
| 702 |
+
_(aten, detach_copy) \
|
| 703 |
+
_(aten, diag) \
|
| 704 |
+
_(aten, diag_embed) \
|
| 705 |
+
_(aten, diagflat) \
|
| 706 |
+
_(aten, diagonal) \
|
| 707 |
+
_(aten, diagonal_backward) \
|
| 708 |
+
_(aten, diagonal_copy) \
|
| 709 |
+
_(aten, diagonal_scatter) \
|
| 710 |
+
_(aten, diff) \
|
| 711 |
+
_(aten, digamma) \
|
| 712 |
+
_(aten, digamma_) \
|
| 713 |
+
_(aten, dist) \
|
| 714 |
+
_(aten, div) \
|
| 715 |
+
_(aten, div_) \
|
| 716 |
+
_(aten, divide) \
|
| 717 |
+
_(aten, divide_) \
|
| 718 |
+
_(aten, dot) \
|
| 719 |
+
_(aten, dropout) \
|
| 720 |
+
_(aten, dropout_) \
|
| 721 |
+
_(aten, dsplit) \
|
| 722 |
+
_(aten, dstack) \
|
| 723 |
+
_(aten, einsum) \
|
| 724 |
+
_(aten, elu) \
|
| 725 |
+
_(aten, elu_) \
|
| 726 |
+
_(aten, elu_backward) \
|
| 727 |
+
_(aten, embedding) \
|
| 728 |
+
_(aten, embedding_backward) \
|
| 729 |
+
_(aten, embedding_bag) \
|
| 730 |
+
_(aten, embedding_dense_backward) \
|
| 731 |
+
_(aten, embedding_renorm) \
|
| 732 |
+
_(aten, embedding_renorm_) \
|
| 733 |
+
_(aten, embedding_sparse_backward) \
|
| 734 |
+
_(aten, empty) \
|
| 735 |
+
_(aten, empty_like) \
|
| 736 |
+
_(aten, empty_permuted) \
|
| 737 |
+
_(aten, empty_quantized) \
|
| 738 |
+
_(aten, empty_strided) \
|
| 739 |
+
_(aten, eq) \
|
| 740 |
+
_(aten, eq_) \
|
| 741 |
+
_(aten, equal) \
|
| 742 |
+
_(aten, erf) \
|
| 743 |
+
_(aten, erf_) \
|
| 744 |
+
_(aten, erfc) \
|
| 745 |
+
_(aten, erfc_) \
|
| 746 |
+
_(aten, erfinv) \
|
| 747 |
+
_(aten, erfinv_) \
|
| 748 |
+
_(aten, exp) \
|
| 749 |
+
_(aten, exp2) \
|
| 750 |
+
_(aten, exp2_) \
|
| 751 |
+
_(aten, exp_) \
|
| 752 |
+
_(aten, expand) \
|
| 753 |
+
_(aten, expand_as) \
|
| 754 |
+
_(aten, expand_copy) \
|
| 755 |
+
_(aten, expm1) \
|
| 756 |
+
_(aten, expm1_) \
|
| 757 |
+
_(aten, exponential) \
|
| 758 |
+
_(aten, exponential_) \
|
| 759 |
+
_(aten, eye) \
|
| 760 |
+
_(aten, fake_quantize_per_channel_affine) \
|
| 761 |
+
_(aten, fake_quantize_per_channel_affine_cachemask) \
|
| 762 |
+
_(aten, fake_quantize_per_channel_affine_cachemask_backward) \
|
| 763 |
+
_(aten, fake_quantize_per_tensor_affine) \
|
| 764 |
+
_(aten, fake_quantize_per_tensor_affine_cachemask) \
|
| 765 |
+
_(aten, fake_quantize_per_tensor_affine_cachemask_backward) \
|
| 766 |
+
_(aten, fbgemm_linear_fp16_weight) \
|
| 767 |
+
_(aten, fbgemm_linear_fp16_weight_fp32_activation) \
|
| 768 |
+
_(aten, fbgemm_linear_int8_weight) \
|
| 769 |
+
_(aten, fbgemm_linear_int8_weight_fp32_activation) \
|
| 770 |
+
_(aten, fbgemm_linear_quantize_weight) \
|
| 771 |
+
_(aten, fbgemm_pack_gemm_matrix_fp16) \
|
| 772 |
+
_(aten, fbgemm_pack_quantized_matrix) \
|
| 773 |
+
_(aten, feature_alpha_dropout) \
|
| 774 |
+
_(aten, feature_alpha_dropout_) \
|
| 775 |
+
_(aten, feature_dropout) \
|
| 776 |
+
_(aten, feature_dropout_) \
|
| 777 |
+
_(aten, fft_fft) \
|
| 778 |
+
_(aten, fft_fft2) \
|
| 779 |
+
_(aten, fft_fftfreq) \
|
| 780 |
+
_(aten, fft_fftn) \
|
| 781 |
+
_(aten, fft_fftshift) \
|
| 782 |
+
_(aten, fft_hfft) \
|
| 783 |
+
_(aten, fft_hfft2) \
|
| 784 |
+
_(aten, fft_hfftn) \
|
| 785 |
+
_(aten, fft_ifft) \
|
| 786 |
+
_(aten, fft_ifft2) \
|
| 787 |
+
_(aten, fft_ifftn) \
|
| 788 |
+
_(aten, fft_ifftshift) \
|
| 789 |
+
_(aten, fft_ihfft) \
|
| 790 |
+
_(aten, fft_ihfft2) \
|
| 791 |
+
_(aten, fft_ihfftn) \
|
| 792 |
+
_(aten, fft_irfft) \
|
| 793 |
+
_(aten, fft_irfft2) \
|
| 794 |
+
_(aten, fft_irfftn) \
|
| 795 |
+
_(aten, fft_rfft) \
|
| 796 |
+
_(aten, fft_rfft2) \
|
| 797 |
+
_(aten, fft_rfftfreq) \
|
| 798 |
+
_(aten, fft_rfftn) \
|
| 799 |
+
_(aten, fill) \
|
| 800 |
+
_(aten, fill_) \
|
| 801 |
+
_(aten, fill_diagonal) \
|
| 802 |
+
_(aten, fill_diagonal_) \
|
| 803 |
+
_(aten, fix) \
|
| 804 |
+
_(aten, fix_) \
|
| 805 |
+
_(aten, flatten) \
|
| 806 |
+
_(aten, flatten_dense_tensors) \
|
| 807 |
+
_(aten, flip) \
|
| 808 |
+
_(aten, fliplr) \
|
| 809 |
+
_(aten, flipud) \
|
| 810 |
+
_(aten, float_power) \
|
| 811 |
+
_(aten, float_power_) \
|
| 812 |
+
_(aten, floor) \
|
| 813 |
+
_(aten, floor_) \
|
| 814 |
+
_(aten, floor_divide) \
|
| 815 |
+
_(aten, floor_divide_) \
|
| 816 |
+
_(aten, fmax) \
|
| 817 |
+
_(aten, fmin) \
|
| 818 |
+
_(aten, fmod) \
|
| 819 |
+
_(aten, fmod_) \
|
| 820 |
+
_(aten, frac) \
|
| 821 |
+
_(aten, frac_) \
|
| 822 |
+
_(aten, fractional_max_pool2d) \
|
| 823 |
+
_(aten, fractional_max_pool2d_backward) \
|
| 824 |
+
_(aten, fractional_max_pool3d) \
|
| 825 |
+
_(aten, fractional_max_pool3d_backward) \
|
| 826 |
+
_(aten, frexp) \
|
| 827 |
+
_(aten, frobenius_norm) \
|
| 828 |
+
_(aten, from_file) \
|
| 829 |
+
_(aten, full) \
|
| 830 |
+
_(aten, full_like) \
|
| 831 |
+
_(aten, fused_moving_avg_obs_fake_quant) \
|
| 832 |
+
_(aten, gather) \
|
| 833 |
+
_(aten, gather_backward) \
|
| 834 |
+
_(aten, gcd) \
|
| 835 |
+
_(aten, gcd_) \
|
| 836 |
+
_(aten, ge) \
|
| 837 |
+
_(aten, ge_) \
|
| 838 |
+
_(aten, gelu) \
|
| 839 |
+
_(aten, gelu_) \
|
| 840 |
+
_(aten, gelu_backward) \
|
| 841 |
+
_(aten, geometric) \
|
| 842 |
+
_(aten, geometric_) \
|
| 843 |
+
_(aten, geqrf) \
|
| 844 |
+
_(aten, ger) \
|
| 845 |
+
_(aten, glu) \
|
| 846 |
+
_(aten, glu_backward) \
|
| 847 |
+
_(aten, glu_backward_jvp) \
|
| 848 |
+
_(aten, glu_jvp) \
|
| 849 |
+
_(aten, gradient) \
|
| 850 |
+
_(aten, greater) \
|
| 851 |
+
_(aten, greater_) \
|
| 852 |
+
_(aten, greater_equal) \
|
| 853 |
+
_(aten, greater_equal_) \
|
| 854 |
+
_(aten, grid_sampler) \
|
| 855 |
+
_(aten, grid_sampler_2d) \
|
| 856 |
+
_(aten, grid_sampler_2d_backward) \
|
| 857 |
+
_(aten, grid_sampler_3d) \
|
| 858 |
+
_(aten, grid_sampler_3d_backward) \
|
| 859 |
+
_(aten, group_norm) \
|
| 860 |
+
_(aten, gru) \
|
| 861 |
+
_(aten, gru_cell) \
|
| 862 |
+
_(aten, gt) \
|
| 863 |
+
_(aten, gt_) \
|
| 864 |
+
_(aten, hamming_window) \
|
| 865 |
+
_(aten, hann_window) \
|
| 866 |
+
_(aten, hardshrink) \
|
| 867 |
+
_(aten, hardshrink_backward) \
|
| 868 |
+
_(aten, hardsigmoid) \
|
| 869 |
+
_(aten, hardsigmoid_) \
|
| 870 |
+
_(aten, hardsigmoid_backward) \
|
| 871 |
+
_(aten, hardswish) \
|
| 872 |
+
_(aten, hardswish_) \
|
| 873 |
+
_(aten, hardswish_backward) \
|
| 874 |
+
_(aten, hardtanh) \
|
| 875 |
+
_(aten, hardtanh_) \
|
| 876 |
+
_(aten, hardtanh_backward) \
|
| 877 |
+
_(aten, heaviside) \
|
| 878 |
+
_(aten, heaviside_) \
|
| 879 |
+
_(aten, hinge_embedding_loss) \
|
| 880 |
+
_(aten, histc) \
|
| 881 |
+
_(aten, histogram) \
|
| 882 |
+
_(aten, histogramdd) \
|
| 883 |
+
_(aten, hsplit) \
|
| 884 |
+
_(aten, hspmm) \
|
| 885 |
+
_(aten, hstack) \
|
| 886 |
+
_(aten, huber_loss) \
|
| 887 |
+
_(aten, huber_loss_backward) \
|
| 888 |
+
_(aten, hypot) \
|
| 889 |
+
_(aten, hypot_) \
|
| 890 |
+
_(aten, i0) \
|
| 891 |
+
_(aten, i0_) \
|
| 892 |
+
_(aten, igamma) \
|
| 893 |
+
_(aten, igamma_) \
|
| 894 |
+
_(aten, igammac) \
|
| 895 |
+
_(aten, igammac_) \
|
| 896 |
+
_(aten, im2col) \
|
| 897 |
+
_(aten, imag) \
|
| 898 |
+
_(aten, index) \
|
| 899 |
+
_(aten, index_add) \
|
| 900 |
+
_(aten, index_add_) \
|
| 901 |
+
_(aten, index_copy) \
|
| 902 |
+
_(aten, index_copy_) \
|
| 903 |
+
_(aten, index_fill) \
|
| 904 |
+
_(aten, index_fill_) \
|
| 905 |
+
_(aten, index_put) \
|
| 906 |
+
_(aten, index_put_) \
|
| 907 |
+
_(aten, index_reduce) \
|
| 908 |
+
_(aten, index_reduce_) \
|
| 909 |
+
_(aten, index_select) \
|
| 910 |
+
_(aten, index_select_backward) \
|
| 911 |
+
_(aten, indices) \
|
| 912 |
+
_(aten, indices_copy) \
|
| 913 |
+
_(aten, infinitely_differentiable_gelu_backward) \
|
| 914 |
+
_(aten, inner) \
|
| 915 |
+
_(aten, instance_norm) \
|
| 916 |
+
_(aten, int_repr) \
|
| 917 |
+
_(aten, inverse) \
|
| 918 |
+
_(aten, is_coalesced) \
|
| 919 |
+
_(aten, is_complex) \
|
| 920 |
+
_(aten, is_conj) \
|
| 921 |
+
_(aten, is_distributed) \
|
| 922 |
+
_(aten, is_floating_point) \
|
| 923 |
+
_(aten, is_inference) \
|
| 924 |
+
_(aten, is_leaf) \
|
| 925 |
+
_(aten, is_neg) \
|
| 926 |
+
_(aten, is_nonzero) \
|
| 927 |
+
_(aten, is_pinned) \
|
| 928 |
+
_(aten, is_same_size) \
|
| 929 |
+
_(aten, is_set_to) \
|
| 930 |
+
_(aten, is_signed) \
|
| 931 |
+
_(aten, is_vulkan_available) \
|
| 932 |
+
_(aten, isclose) \
|
| 933 |
+
_(aten, isfinite) \
|
| 934 |
+
_(aten, isin) \
|
| 935 |
+
_(aten, isinf) \
|
| 936 |
+
_(aten, isnan) \
|
| 937 |
+
_(aten, isneginf) \
|
| 938 |
+
_(aten, isposinf) \
|
| 939 |
+
_(aten, isreal) \
|
| 940 |
+
_(aten, istft) \
|
| 941 |
+
_(aten, item) \
|
| 942 |
+
_(aten, kaiser_window) \
|
| 943 |
+
_(aten, kl_div) \
|
| 944 |
+
_(aten, kron) \
|
| 945 |
+
_(aten, kthvalue) \
|
| 946 |
+
_(aten, l1_loss) \
|
| 947 |
+
_(aten, layer_norm) \
|
| 948 |
+
_(aten, lcm) \
|
| 949 |
+
_(aten, lcm_) \
|
| 950 |
+
_(aten, ldexp) \
|
| 951 |
+
_(aten, ldexp_) \
|
| 952 |
+
_(aten, le) \
|
| 953 |
+
_(aten, le_) \
|
| 954 |
+
_(aten, leaky_relu) \
|
| 955 |
+
_(aten, leaky_relu_) \
|
| 956 |
+
_(aten, leaky_relu_backward) \
|
| 957 |
+
_(aten, lerp) \
|
| 958 |
+
_(aten, lerp_) \
|
| 959 |
+
_(aten, less) \
|
| 960 |
+
_(aten, less_) \
|
| 961 |
+
_(aten, less_equal) \
|
| 962 |
+
_(aten, less_equal_) \
|
| 963 |
+
_(aten, lgamma) \
|
| 964 |
+
_(aten, lgamma_) \
|
| 965 |
+
_(aten, lift) \
|
| 966 |
+
_(aten, lift_fresh) \
|
| 967 |
+
_(aten, lift_fresh_copy) \
|
| 968 |
+
_(aten, linalg_cholesky) \
|
| 969 |
+
_(aten, linalg_cholesky_ex) \
|
| 970 |
+
_(aten, linalg_cond) \
|
| 971 |
+
_(aten, linalg_cross) \
|
| 972 |
+
_(aten, linalg_det) \
|
| 973 |
+
_(aten, linalg_diagonal) \
|
| 974 |
+
_(aten, linalg_eig) \
|
| 975 |
+
_(aten, linalg_eigh) \
|
| 976 |
+
_(aten, linalg_eigvals) \
|
| 977 |
+
_(aten, linalg_eigvalsh) \
|
| 978 |
+
_(aten, linalg_householder_product) \
|
| 979 |
+
_(aten, linalg_inv) \
|
| 980 |
+
_(aten, linalg_inv_ex) \
|
| 981 |
+
_(aten, linalg_ldl_factor) \
|
| 982 |
+
_(aten, linalg_ldl_factor_ex) \
|
| 983 |
+
_(aten, linalg_ldl_solve) \
|
| 984 |
+
_(aten, linalg_lstsq) \
|
| 985 |
+
_(aten, linalg_lu) \
|
| 986 |
+
_(aten, linalg_lu_factor) \
|
| 987 |
+
_(aten, linalg_lu_factor_ex) \
|
| 988 |
+
_(aten, linalg_lu_solve) \
|
| 989 |
+
_(aten, linalg_matmul) \
|
| 990 |
+
_(aten, linalg_matrix_exp) \
|
| 991 |
+
_(aten, linalg_matrix_norm) \
|
| 992 |
+
_(aten, linalg_matrix_power) \
|
| 993 |
+
_(aten, linalg_matrix_rank) \
|
| 994 |
+
_(aten, linalg_multi_dot) \
|
| 995 |
+
_(aten, linalg_norm) \
|
| 996 |
+
_(aten, linalg_pinv) \
|
| 997 |
+
_(aten, linalg_qr) \
|
| 998 |
+
_(aten, linalg_slogdet) \
|
| 999 |
+
_(aten, linalg_solve) \
|
| 1000 |
+
_(aten, linalg_solve_ex) \
|
| 1001 |
+
_(aten, linalg_solve_triangular) \
|
| 1002 |
+
_(aten, linalg_svd) \
|
| 1003 |
+
_(aten, linalg_svdvals) \
|
| 1004 |
+
_(aten, linalg_tensorinv) \
|
| 1005 |
+
_(aten, linalg_tensorsolve) \
|
| 1006 |
+
_(aten, linalg_vander) \
|
| 1007 |
+
_(aten, linalg_vecdot) \
|
| 1008 |
+
_(aten, linalg_vector_norm) \
|
| 1009 |
+
_(aten, linear) \
|
| 1010 |
+
_(aten, linear_backward) \
|
| 1011 |
+
_(aten, linspace) \
|
| 1012 |
+
_(aten, log) \
|
| 1013 |
+
_(aten, log10) \
|
| 1014 |
+
_(aten, log10_) \
|
| 1015 |
+
_(aten, log1p) \
|
| 1016 |
+
_(aten, log1p_) \
|
| 1017 |
+
_(aten, log2) \
|
| 1018 |
+
_(aten, log2_) \
|
| 1019 |
+
_(aten, log_) \
|
| 1020 |
+
_(aten, log_normal) \
|
| 1021 |
+
_(aten, log_normal_) \
|
| 1022 |
+
_(aten, log_sigmoid) \
|
| 1023 |
+
_(aten, log_sigmoid_backward) \
|
| 1024 |
+
_(aten, log_sigmoid_forward) \
|
| 1025 |
+
_(aten, log_softmax) \
|
| 1026 |
+
_(aten, logaddexp) \
|
| 1027 |
+
_(aten, logaddexp2) \
|
| 1028 |
+
_(aten, logcumsumexp) \
|
| 1029 |
+
_(aten, logdet) \
|
| 1030 |
+
_(aten, logical_and) \
|
| 1031 |
+
_(aten, logical_and_) \
|
| 1032 |
+
_(aten, logical_not) \
|
| 1033 |
+
_(aten, logical_not_) \
|
| 1034 |
+
_(aten, logical_or) \
|
| 1035 |
+
_(aten, logical_or_) \
|
| 1036 |
+
_(aten, logical_xor) \
|
| 1037 |
+
_(aten, logical_xor_) \
|
| 1038 |
+
_(aten, logit) \
|
| 1039 |
+
_(aten, logit_) \
|
| 1040 |
+
_(aten, logit_backward) \
|
| 1041 |
+
_(aten, logspace) \
|
| 1042 |
+
_(aten, logsumexp) \
|
| 1043 |
+
_(aten, lshift) \
|
| 1044 |
+
_(aten, lstm) \
|
| 1045 |
+
_(aten, lstm_cell) \
|
| 1046 |
+
_(aten, lstm_mps_backward) \
|
| 1047 |
+
_(aten, lt) \
|
| 1048 |
+
_(aten, lt_) \
|
| 1049 |
+
_(aten, lu_solve) \
|
| 1050 |
+
_(aten, lu_unpack) \
|
| 1051 |
+
_(aten, mH) \
|
| 1052 |
+
_(aten, mT) \
|
| 1053 |
+
_(aten, margin_ranking_loss) \
|
| 1054 |
+
_(aten, masked_fill) \
|
| 1055 |
+
_(aten, masked_fill_) \
|
| 1056 |
+
_(aten, masked_scatter) \
|
| 1057 |
+
_(aten, masked_scatter_) \
|
| 1058 |
+
_(aten, masked_scatter_backward) \
|
| 1059 |
+
_(aten, masked_select) \
|
| 1060 |
+
_(aten, masked_select_backward) \
|
| 1061 |
+
_(aten, matmul) \
|
| 1062 |
+
_(aten, matmul_backward) \
|
| 1063 |
+
_(aten, matrix_H) \
|
| 1064 |
+
_(aten, matrix_exp) \
|
| 1065 |
+
_(aten, matrix_exp_backward) \
|
| 1066 |
+
_(aten, matrix_power) \
|
| 1067 |
+
_(aten, max) \
|
| 1068 |
+
_(aten, max_pool1d) \
|
| 1069 |
+
_(aten, max_pool1d_with_indices) \
|
| 1070 |
+
_(aten, max_pool2d) \
|
| 1071 |
+
_(aten, max_pool2d_backward) \
|
| 1072 |
+
_(aten, max_pool2d_with_indices) \
|
| 1073 |
+
_(aten, max_pool2d_with_indices_backward) \
|
| 1074 |
+
_(aten, max_pool3d) \
|
| 1075 |
+
_(aten, max_pool3d_with_indices) \
|
| 1076 |
+
_(aten, max_pool3d_with_indices_backward) \
|
| 1077 |
+
_(aten, max_unpool2d) \
|
| 1078 |
+
_(aten, max_unpool3d) \
|
| 1079 |
+
_(aten, maximum) \
|
| 1080 |
+
_(aten, mean) \
|
| 1081 |
+
_(aten, median) \
|
| 1082 |
+
_(aten, meshgrid) \
|
| 1083 |
+
_(aten, min) \
|
| 1084 |
+
_(aten, minimum) \
|
| 1085 |
+
_(aten, miopen_batch_norm) \
|
| 1086 |
+
_(aten, miopen_batch_norm_backward) \
|
| 1087 |
+
_(aten, miopen_convolution) \
|
| 1088 |
+
_(aten, miopen_convolution_add_relu) \
|
| 1089 |
+
_(aten, miopen_convolution_relu) \
|
| 1090 |
+
_(aten, miopen_convolution_transpose) \
|
| 1091 |
+
_(aten, miopen_depthwise_convolution) \
|
| 1092 |
+
_(aten, miopen_rnn) \
|
| 1093 |
+
_(aten, miopen_rnn_backward) \
|
| 1094 |
+
_(aten, mish) \
|
| 1095 |
+
_(aten, mish_) \
|
| 1096 |
+
_(aten, mish_backward) \
|
| 1097 |
+
_(aten, mkldnn_adaptive_avg_pool2d) \
|
| 1098 |
+
_(aten, mkldnn_adaptive_avg_pool2d_backward) \
|
| 1099 |
+
_(aten, mkldnn_convolution) \
|
| 1100 |
+
_(aten, mkldnn_linear) \
|
| 1101 |
+
_(aten, mkldnn_linear_backward) \
|
| 1102 |
+
_(aten, mkldnn_linear_backward_input) \
|
| 1103 |
+
_(aten, mkldnn_linear_backward_weights) \
|
| 1104 |
+
_(aten, mkldnn_max_pool2d) \
|
| 1105 |
+
_(aten, mkldnn_max_pool2d_backward) \
|
| 1106 |
+
_(aten, mkldnn_max_pool3d) \
|
| 1107 |
+
_(aten, mkldnn_max_pool3d_backward) \
|
| 1108 |
+
_(aten, mkldnn_reorder_conv2d_weight) \
|
| 1109 |
+
_(aten, mkldnn_reorder_conv3d_weight) \
|
| 1110 |
+
_(aten, mkldnn_rnn_layer) \
|
| 1111 |
+
_(aten, mkldnn_rnn_layer_backward) \
|
| 1112 |
+
_(aten, mm) \
|
| 1113 |
+
_(aten, mode) \
|
| 1114 |
+
_(aten, moveaxis) \
|
| 1115 |
+
_(aten, movedim) \
|
| 1116 |
+
_(aten, mps_convolution_backward) \
|
| 1117 |
+
_(aten, mps_convolution_transpose_backward) \
|
| 1118 |
+
_(aten, mse_loss) \
|
| 1119 |
+
_(aten, mse_loss_backward) \
|
| 1120 |
+
_(aten, msort) \
|
| 1121 |
+
_(aten, mul) \
|
| 1122 |
+
_(aten, mul_) \
|
| 1123 |
+
_(aten, multi_margin_loss) \
|
| 1124 |
+
_(aten, multi_margin_loss_backward) \
|
| 1125 |
+
_(aten, multilabel_margin_loss) \
|
| 1126 |
+
_(aten, multilabel_margin_loss_backward) \
|
| 1127 |
+
_(aten, multilabel_margin_loss_forward) \
|
| 1128 |
+
_(aten, multinomial) \
|
| 1129 |
+
_(aten, multiply) \
|
| 1130 |
+
_(aten, multiply_) \
|
| 1131 |
+
_(aten, mv) \
|
| 1132 |
+
_(aten, mvlgamma) \
|
| 1133 |
+
_(aten, mvlgamma_) \
|
| 1134 |
+
_(aten, nan_to_num) \
|
| 1135 |
+
_(aten, nan_to_num_) \
|
| 1136 |
+
_(aten, nanmean) \
|
| 1137 |
+
_(aten, nanmedian) \
|
| 1138 |
+
_(aten, nanquantile) \
|
| 1139 |
+
_(aten, nansum) \
|
| 1140 |
+
_(aten, narrow) \
|
| 1141 |
+
_(aten, narrow_copy) \
|
| 1142 |
+
_(aten, native_batch_norm) \
|
| 1143 |
+
_(aten, native_batch_norm_backward) \
|
| 1144 |
+
_(aten, native_channel_shuffle) \
|
| 1145 |
+
_(aten, native_dropout) \
|
| 1146 |
+
_(aten, native_dropout_backward) \
|
| 1147 |
+
_(aten, native_group_norm) \
|
| 1148 |
+
_(aten, native_group_norm_backward) \
|
| 1149 |
+
_(aten, native_layer_norm) \
|
| 1150 |
+
_(aten, native_layer_norm_backward) \
|
| 1151 |
+
_(aten, native_norm) \
|
| 1152 |
+
_(aten, ne) \
|
| 1153 |
+
_(aten, ne_) \
|
| 1154 |
+
_(aten, neg) \
|
| 1155 |
+
_(aten, neg_) \
|
| 1156 |
+
_(aten, negative) \
|
| 1157 |
+
_(aten, negative_) \
|
| 1158 |
+
_(aten, nested_to_padded_tensor) \
|
| 1159 |
+
_(aten, new_empty) \
|
| 1160 |
+
_(aten, new_empty_strided) \
|
| 1161 |
+
_(aten, new_full) \
|
| 1162 |
+
_(aten, new_ones) \
|
| 1163 |
+
_(aten, new_zeros) \
|
| 1164 |
+
_(aten, nextafter) \
|
| 1165 |
+
_(aten, nextafter_) \
|
| 1166 |
+
_(aten, nll_loss) \
|
| 1167 |
+
_(aten, nll_loss2d) \
|
| 1168 |
+
_(aten, nll_loss2d_backward) \
|
| 1169 |
+
_(aten, nll_loss2d_forward) \
|
| 1170 |
+
_(aten, nll_loss_backward) \
|
| 1171 |
+
_(aten, nll_loss_forward) \
|
| 1172 |
+
_(aten, nll_loss_nd) \
|
| 1173 |
+
_(aten, nonzero) \
|
| 1174 |
+
_(aten, nonzero_numpy) \
|
| 1175 |
+
_(aten, nonzero_static) \
|
| 1176 |
+
_(aten, norm) \
|
| 1177 |
+
_(aten, norm_except_dim) \
|
| 1178 |
+
_(aten, normal) \
|
| 1179 |
+
_(aten, normal_) \
|
| 1180 |
+
_(aten, normal_functional) \
|
| 1181 |
+
_(aten, not_equal) \
|
| 1182 |
+
_(aten, not_equal_) \
|
| 1183 |
+
_(aten, nuclear_norm) \
|
| 1184 |
+
_(aten, numpy_T) \
|
| 1185 |
+
_(aten, one_hot) \
|
| 1186 |
+
_(aten, ones) \
|
| 1187 |
+
_(aten, ones_like) \
|
| 1188 |
+
_(aten, orgqr) \
|
| 1189 |
+
_(aten, ormqr) \
|
| 1190 |
+
_(aten, outer) \
|
| 1191 |
+
_(aten, output_nr) \
|
| 1192 |
+
_(aten, pad) \
|
| 1193 |
+
_(aten, pad_sequence) \
|
| 1194 |
+
_(aten, pairwise_distance) \
|
| 1195 |
+
_(aten, pdist) \
|
| 1196 |
+
_(aten, permute) \
|
| 1197 |
+
_(aten, permute_copy) \
|
| 1198 |
+
_(aten, pin_memory) \
|
| 1199 |
+
_(aten, pinverse) \
|
| 1200 |
+
_(aten, pixel_shuffle) \
|
| 1201 |
+
_(aten, pixel_unshuffle) \
|
| 1202 |
+
_(aten, poisson) \
|
| 1203 |
+
_(aten, poisson_nll_loss) \
|
| 1204 |
+
_(aten, polar) \
|
| 1205 |
+
_(aten, polygamma) \
|
| 1206 |
+
_(aten, polygamma_) \
|
| 1207 |
+
_(aten, positive) \
|
| 1208 |
+
_(aten, pow) \
|
| 1209 |
+
_(aten, pow_) \
|
| 1210 |
+
_(aten, prelu) \
|
| 1211 |
+
_(aten, prod) \
|
| 1212 |
+
_(aten, promote_types) \
|
| 1213 |
+
_(aten, put) \
|
| 1214 |
+
_(aten, put_) \
|
| 1215 |
+
_(aten, q_per_channel_axis) \
|
| 1216 |
+
_(aten, q_per_channel_scales) \
|
| 1217 |
+
_(aten, q_per_channel_zero_points) \
|
| 1218 |
+
_(aten, q_scale) \
|
| 1219 |
+
_(aten, q_zero_point) \
|
| 1220 |
+
_(aten, qr) \
|
| 1221 |
+
_(aten, qscheme) \
|
| 1222 |
+
_(aten, quantile) \
|
| 1223 |
+
_(aten, quantize_per_channel) \
|
| 1224 |
+
_(aten, quantize_per_tensor) \
|
| 1225 |
+
_(aten, quantize_per_tensor_dynamic) \
|
| 1226 |
+
_(aten, quantized_batch_norm) \
|
| 1227 |
+
_(aten, quantized_gru_cell) \
|
| 1228 |
+
_(aten, quantized_lstm_cell) \
|
| 1229 |
+
_(aten, quantized_max_pool1d) \
|
| 1230 |
+
_(aten, quantized_max_pool2d) \
|
| 1231 |
+
_(aten, quantized_max_pool3d) \
|
| 1232 |
+
_(aten, quantized_rnn_relu_cell) \
|
| 1233 |
+
_(aten, quantized_rnn_tanh_cell) \
|
| 1234 |
+
_(aten, rad2deg) \
|
| 1235 |
+
_(aten, rad2deg_) \
|
| 1236 |
+
_(aten, rand) \
|
| 1237 |
+
_(aten, rand_like) \
|
| 1238 |
+
_(aten, randint) \
|
| 1239 |
+
_(aten, randint_like) \
|
| 1240 |
+
_(aten, randn) \
|
| 1241 |
+
_(aten, randn_like) \
|
| 1242 |
+
_(aten, random) \
|
| 1243 |
+
_(aten, random_) \
|
| 1244 |
+
_(aten, randperm) \
|
| 1245 |
+
_(aten, range) \
|
| 1246 |
+
_(aten, ravel) \
|
| 1247 |
+
_(aten, real) \
|
| 1248 |
+
_(aten, reciprocal) \
|
| 1249 |
+
_(aten, reciprocal_) \
|
| 1250 |
+
_(aten, record_stream) \
|
| 1251 |
+
_(aten, refine_names) \
|
| 1252 |
+
_(aten, reflection_pad1d) \
|
| 1253 |
+
_(aten, reflection_pad1d_backward) \
|
| 1254 |
+
_(aten, reflection_pad2d) \
|
| 1255 |
+
_(aten, reflection_pad2d_backward) \
|
| 1256 |
+
_(aten, reflection_pad3d) \
|
| 1257 |
+
_(aten, reflection_pad3d_backward) \
|
| 1258 |
+
_(aten, relu) \
|
| 1259 |
+
_(aten, relu6) \
|
| 1260 |
+
_(aten, relu6_) \
|
| 1261 |
+
_(aten, relu_) \
|
| 1262 |
+
_(aten, remainder) \
|
| 1263 |
+
_(aten, remainder_) \
|
| 1264 |
+
_(aten, rename) \
|
| 1265 |
+
_(aten, rename_) \
|
| 1266 |
+
_(aten, renorm) \
|
| 1267 |
+
_(aten, renorm_) \
|
| 1268 |
+
_(aten, repeat) \
|
| 1269 |
+
_(aten, repeat_interleave) \
|
| 1270 |
+
_(aten, replication_pad1d) \
|
| 1271 |
+
_(aten, replication_pad1d_backward) \
|
| 1272 |
+
_(aten, replication_pad2d) \
|
| 1273 |
+
_(aten, replication_pad2d_backward) \
|
| 1274 |
+
_(aten, replication_pad3d) \
|
| 1275 |
+
_(aten, replication_pad3d_backward) \
|
| 1276 |
+
_(aten, requires_grad) \
|
| 1277 |
+
_(aten, requires_grad_) \
|
| 1278 |
+
_(aten, reshape) \
|
| 1279 |
+
_(aten, reshape_as) \
|
| 1280 |
+
_(aten, resize) \
|
| 1281 |
+
_(aten, resize_) \
|
| 1282 |
+
_(aten, resize_as) \
|
| 1283 |
+
_(aten, resize_as_) \
|
| 1284 |
+
_(aten, resize_as_sparse) \
|
| 1285 |
+
_(aten, resize_as_sparse_) \
|
| 1286 |
+
_(aten, resolve_conj) \
|
| 1287 |
+
_(aten, resolve_neg) \
|
| 1288 |
+
_(aten, result_type) \
|
| 1289 |
+
_(aten, retain_grad) \
|
| 1290 |
+
_(aten, retains_grad) \
|
| 1291 |
+
_(aten, rms_norm) \
|
| 1292 |
+
_(aten, rnn_relu) \
|
| 1293 |
+
_(aten, rnn_relu_cell) \
|
| 1294 |
+
_(aten, rnn_tanh) \
|
| 1295 |
+
_(aten, rnn_tanh_cell) \
|
| 1296 |
+
_(aten, roll) \
|
| 1297 |
+
_(aten, rot90) \
|
| 1298 |
+
_(aten, round) \
|
| 1299 |
+
_(aten, round_) \
|
| 1300 |
+
_(aten, row_indices) \
|
| 1301 |
+
_(aten, row_indices_copy) \
|
| 1302 |
+
_(aten, row_stack) \
|
| 1303 |
+
_(aten, rrelu) \
|
| 1304 |
+
_(aten, rrelu_) \
|
| 1305 |
+
_(aten, rrelu_with_noise) \
|
| 1306 |
+
_(aten, rrelu_with_noise_) \
|
| 1307 |
+
_(aten, rrelu_with_noise_backward) \
|
| 1308 |
+
_(aten, rshift) \
|
| 1309 |
+
_(aten, rsqrt) \
|
| 1310 |
+
_(aten, rsqrt_) \
|
| 1311 |
+
_(aten, rsub) \
|
| 1312 |
+
_(aten, scalar_tensor) \
|
| 1313 |
+
_(aten, scaled_dot_product_attention) \
|
| 1314 |
+
_(aten, scatter) \
|
| 1315 |
+
_(aten, scatter_) \
|
| 1316 |
+
_(aten, scatter_add) \
|
| 1317 |
+
_(aten, scatter_add_) \
|
| 1318 |
+
_(aten, scatter_reduce) \
|
| 1319 |
+
_(aten, scatter_reduce_) \
|
| 1320 |
+
_(aten, searchsorted) \
|
| 1321 |
+
_(aten, segment_reduce) \
|
| 1322 |
+
_(aten, select) \
|
| 1323 |
+
_(aten, select_backward) \
|
| 1324 |
+
_(aten, select_copy) \
|
| 1325 |
+
_(aten, select_scatter) \
|
| 1326 |
+
_(aten, selu) \
|
| 1327 |
+
_(aten, selu_) \
|
| 1328 |
+
_(aten, set) \
|
| 1329 |
+
_(aten, set_) \
|
| 1330 |
+
_(aten, set_data) \
|
| 1331 |
+
_(aten, sgn) \
|
| 1332 |
+
_(aten, sgn_) \
|
| 1333 |
+
_(aten, sigmoid) \
|
| 1334 |
+
_(aten, sigmoid_) \
|
| 1335 |
+
_(aten, sigmoid_backward) \
|
| 1336 |
+
_(aten, sign) \
|
| 1337 |
+
_(aten, sign_) \
|
| 1338 |
+
_(aten, signbit) \
|
| 1339 |
+
_(aten, silu) \
|
| 1340 |
+
_(aten, silu_) \
|
| 1341 |
+
_(aten, silu_backward) \
|
| 1342 |
+
_(aten, sin) \
|
| 1343 |
+
_(aten, sin_) \
|
| 1344 |
+
_(aten, sinc) \
|
| 1345 |
+
_(aten, sinc_) \
|
| 1346 |
+
_(aten, sinh) \
|
| 1347 |
+
_(aten, sinh_) \
|
| 1348 |
+
_(aten, size) \
|
| 1349 |
+
_(aten, slice) \
|
| 1350 |
+
_(aten, slice_backward) \
|
| 1351 |
+
_(aten, slice_copy) \
|
| 1352 |
+
_(aten, slice_inverse) \
|
| 1353 |
+
_(aten, slice_scatter) \
|
| 1354 |
+
_(aten, slogdet) \
|
| 1355 |
+
_(aten, slow_conv3d) \
|
| 1356 |
+
_(aten, slow_conv3d_forward) \
|
| 1357 |
+
_(aten, slow_conv_dilated2d) \
|
| 1358 |
+
_(aten, slow_conv_dilated3d) \
|
| 1359 |
+
_(aten, slow_conv_transpose2d) \
|
| 1360 |
+
_(aten, slow_conv_transpose3d) \
|
| 1361 |
+
_(aten, smm) \
|
| 1362 |
+
_(aten, smooth_l1_loss) \
|
| 1363 |
+
_(aten, smooth_l1_loss_backward) \
|
| 1364 |
+
_(aten, soft_margin_loss) \
|
| 1365 |
+
_(aten, soft_margin_loss_backward) \
|
| 1366 |
+
_(aten, softmax) \
|
| 1367 |
+
_(aten, softplus) \
|
| 1368 |
+
_(aten, softplus_backward) \
|
| 1369 |
+
_(aten, softshrink) \
|
| 1370 |
+
_(aten, softshrink_backward) \
|
| 1371 |
+
_(aten, sort) \
|
| 1372 |
+
_(aten, sparse_bsc_tensor) \
|
| 1373 |
+
_(aten, sparse_bsr_tensor) \
|
| 1374 |
+
_(aten, sparse_compressed_tensor) \
|
| 1375 |
+
_(aten, sparse_coo_tensor) \
|
| 1376 |
+
_(aten, sparse_csc_tensor) \
|
| 1377 |
+
_(aten, sparse_csr_tensor) \
|
| 1378 |
+
_(aten, sparse_dim) \
|
| 1379 |
+
_(aten, sparse_mask) \
|
| 1380 |
+
_(aten, sparse_resize) \
|
| 1381 |
+
_(aten, sparse_resize_) \
|
| 1382 |
+
_(aten, sparse_resize_and_clear) \
|
| 1383 |
+
_(aten, sparse_resize_and_clear_) \
|
| 1384 |
+
_(aten, sparse_sampled_addmm) \
|
| 1385 |
+
_(aten, special_airy_ai) \
|
| 1386 |
+
_(aten, special_bessel_j0) \
|
| 1387 |
+
_(aten, special_bessel_j1) \
|
| 1388 |
+
_(aten, special_bessel_y0) \
|
| 1389 |
+
_(aten, special_bessel_y1) \
|
| 1390 |
+
_(aten, special_chebyshev_polynomial_t) \
|
| 1391 |
+
_(aten, special_chebyshev_polynomial_u) \
|
| 1392 |
+
_(aten, special_chebyshev_polynomial_v) \
|
| 1393 |
+
_(aten, special_chebyshev_polynomial_w) \
|
| 1394 |
+
_(aten, special_digamma) \
|
| 1395 |
+
_(aten, special_entr) \
|
| 1396 |
+
_(aten, special_erf) \
|
| 1397 |
+
_(aten, special_erfc) \
|
| 1398 |
+
_(aten, special_erfcx) \
|
| 1399 |
+
_(aten, special_erfinv) \
|
| 1400 |
+
_(aten, special_exp2) \
|
| 1401 |
+
_(aten, special_expit) \
|
| 1402 |
+
_(aten, special_expm1) \
|
| 1403 |
+
_(aten, special_gammainc) \
|
| 1404 |
+
_(aten, special_gammaincc) \
|
| 1405 |
+
_(aten, special_gammaln) \
|
| 1406 |
+
_(aten, special_hermite_polynomial_h) \
|
| 1407 |
+
_(aten, special_hermite_polynomial_he) \
|
| 1408 |
+
_(aten, special_i0) \
|
| 1409 |
+
_(aten, special_i0e) \
|
| 1410 |
+
_(aten, special_i1) \
|
| 1411 |
+
_(aten, special_i1e) \
|
| 1412 |
+
_(aten, special_laguerre_polynomial_l) \
|
| 1413 |
+
_(aten, special_legendre_polynomial_p) \
|
| 1414 |
+
_(aten, special_log1p) \
|
| 1415 |
+
_(aten, special_log_ndtr) \
|
| 1416 |
+
_(aten, special_log_softmax) \
|
| 1417 |
+
_(aten, special_logit) \
|
| 1418 |
+
_(aten, special_logsumexp) \
|
| 1419 |
+
_(aten, special_modified_bessel_i0) \
|
| 1420 |
+
_(aten, special_modified_bessel_i1) \
|
| 1421 |
+
_(aten, special_modified_bessel_k0) \
|
| 1422 |
+
_(aten, special_modified_bessel_k1) \
|
| 1423 |
+
_(aten, special_multigammaln) \
|
| 1424 |
+
_(aten, special_ndtr) \
|
| 1425 |
+
_(aten, special_ndtri) \
|
| 1426 |
+
_(aten, special_polygamma) \
|
| 1427 |
+
_(aten, special_psi) \
|
| 1428 |
+
_(aten, special_round) \
|
| 1429 |
+
_(aten, special_scaled_modified_bessel_k0) \
|
| 1430 |
+
_(aten, special_scaled_modified_bessel_k1) \
|
| 1431 |
+
_(aten, special_shifted_chebyshev_polynomial_t) \
|
| 1432 |
+
_(aten, special_shifted_chebyshev_polynomial_u) \
|
| 1433 |
+
_(aten, special_shifted_chebyshev_polynomial_v) \
|
| 1434 |
+
_(aten, special_shifted_chebyshev_polynomial_w) \
|
| 1435 |
+
_(aten, special_sinc) \
|
| 1436 |
+
_(aten, special_softmax) \
|
| 1437 |
+
_(aten, special_spherical_bessel_j0) \
|
| 1438 |
+
_(aten, special_xlog1py) \
|
| 1439 |
+
_(aten, special_xlogy) \
|
| 1440 |
+
_(aten, special_zeta) \
|
| 1441 |
+
_(aten, split) \
|
| 1442 |
+
_(aten, split_copy) \
|
| 1443 |
+
_(aten, split_with_sizes) \
|
| 1444 |
+
_(aten, split_with_sizes_copy) \
|
| 1445 |
+
_(aten, sqrt) \
|
| 1446 |
+
_(aten, sqrt_) \
|
| 1447 |
+
_(aten, square) \
|
| 1448 |
+
_(aten, square_) \
|
| 1449 |
+
_(aten, squeeze) \
|
| 1450 |
+
_(aten, squeeze_) \
|
| 1451 |
+
_(aten, squeeze_copy) \
|
| 1452 |
+
_(aten, sspaddmm) \
|
| 1453 |
+
_(aten, stack) \
|
| 1454 |
+
_(aten, std) \
|
| 1455 |
+
_(aten, std_mean) \
|
| 1456 |
+
_(aten, stft) \
|
| 1457 |
+
_(aten, stride) \
|
| 1458 |
+
_(aten, sub) \
|
| 1459 |
+
_(aten, sub_) \
|
| 1460 |
+
_(aten, subtract) \
|
| 1461 |
+
_(aten, subtract_) \
|
| 1462 |
+
_(aten, sum) \
|
| 1463 |
+
_(aten, sum_to_size) \
|
| 1464 |
+
_(aten, svd) \
|
| 1465 |
+
_(aten, swapaxes) \
|
| 1466 |
+
_(aten, swapaxes_) \
|
| 1467 |
+
_(aten, swapdims) \
|
| 1468 |
+
_(aten, swapdims_) \
|
| 1469 |
+
_(aten, sym_constrain_range) \
|
| 1470 |
+
_(aten, sym_constrain_range_for_size) \
|
| 1471 |
+
_(aten, sym_numel) \
|
| 1472 |
+
_(aten, sym_size) \
|
| 1473 |
+
_(aten, sym_storage_offset) \
|
| 1474 |
+
_(aten, sym_stride) \
|
| 1475 |
+
_(aten, t) \
|
| 1476 |
+
_(aten, t_) \
|
| 1477 |
+
_(aten, t_copy) \
|
| 1478 |
+
_(aten, take) \
|
| 1479 |
+
_(aten, take_along_dim) \
|
| 1480 |
+
_(aten, tan) \
|
| 1481 |
+
_(aten, tan_) \
|
| 1482 |
+
_(aten, tanh) \
|
| 1483 |
+
_(aten, tanh_) \
|
| 1484 |
+
_(aten, tanh_backward) \
|
| 1485 |
+
_(aten, tensor_split) \
|
| 1486 |
+
_(aten, tensordot) \
|
| 1487 |
+
_(aten, thnn_conv2d) \
|
| 1488 |
+
_(aten, threshold) \
|
| 1489 |
+
_(aten, threshold_) \
|
| 1490 |
+
_(aten, threshold_backward) \
|
| 1491 |
+
_(aten, tile) \
|
| 1492 |
+
_(aten, to) \
|
| 1493 |
+
_(aten, to_dense) \
|
| 1494 |
+
_(aten, to_dense_backward) \
|
| 1495 |
+
_(aten, to_mkldnn) \
|
| 1496 |
+
_(aten, to_mkldnn_backward) \
|
| 1497 |
+
_(aten, to_padded_tensor) \
|
| 1498 |
+
_(aten, to_sparse) \
|
| 1499 |
+
_(aten, to_sparse_bsc) \
|
| 1500 |
+
_(aten, to_sparse_bsr) \
|
| 1501 |
+
_(aten, to_sparse_csc) \
|
| 1502 |
+
_(aten, to_sparse_csr) \
|
| 1503 |
+
_(aten, topk) \
|
| 1504 |
+
_(aten, trace) \
|
| 1505 |
+
_(aten, trace_backward) \
|
| 1506 |
+
_(aten, transpose) \
|
| 1507 |
+
_(aten, transpose_) \
|
| 1508 |
+
_(aten, transpose_copy) \
|
| 1509 |
+
_(aten, trapezoid) \
|
| 1510 |
+
_(aten, trapz) \
|
| 1511 |
+
_(aten, triangular_solve) \
|
| 1512 |
+
_(aten, tril) \
|
| 1513 |
+
_(aten, tril_) \
|
| 1514 |
+
_(aten, tril_indices) \
|
| 1515 |
+
_(aten, triplet_margin_loss) \
|
| 1516 |
+
_(aten, triu) \
|
| 1517 |
+
_(aten, triu_) \
|
| 1518 |
+
_(aten, triu_indices) \
|
| 1519 |
+
_(aten, true_divide) \
|
| 1520 |
+
_(aten, true_divide_) \
|
| 1521 |
+
_(aten, trunc) \
|
| 1522 |
+
_(aten, trunc_) \
|
| 1523 |
+
_(aten, type_as) \
|
| 1524 |
+
_(aten, unbind) \
|
| 1525 |
+
_(aten, unbind_copy) \
|
| 1526 |
+
_(aten, unflatten) \
|
| 1527 |
+
_(aten, unflatten_dense_tensors) \
|
| 1528 |
+
_(aten, unfold) \
|
| 1529 |
+
_(aten, unfold_backward) \
|
| 1530 |
+
_(aten, unfold_copy) \
|
| 1531 |
+
_(aten, uniform) \
|
| 1532 |
+
_(aten, uniform_) \
|
| 1533 |
+
_(aten, unique_consecutive) \
|
| 1534 |
+
_(aten, unique_dim) \
|
| 1535 |
+
_(aten, unique_dim_consecutive) \
|
| 1536 |
+
_(aten, unsafe_chunk) \
|
| 1537 |
+
_(aten, unsafe_split) \
|
| 1538 |
+
_(aten, unsafe_split_with_sizes) \
|
| 1539 |
+
_(aten, unsqueeze) \
|
| 1540 |
+
_(aten, unsqueeze_) \
|
| 1541 |
+
_(aten, unsqueeze_copy) \
|
| 1542 |
+
_(aten, upsample_bicubic2d) \
|
| 1543 |
+
_(aten, upsample_bicubic2d_backward) \
|
| 1544 |
+
_(aten, upsample_bilinear2d) \
|
| 1545 |
+
_(aten, upsample_bilinear2d_backward) \
|
| 1546 |
+
_(aten, upsample_linear1d) \
|
| 1547 |
+
_(aten, upsample_linear1d_backward) \
|
| 1548 |
+
_(aten, upsample_nearest1d) \
|
| 1549 |
+
_(aten, upsample_nearest1d_backward) \
|
| 1550 |
+
_(aten, upsample_nearest2d) \
|
| 1551 |
+
_(aten, upsample_nearest2d_backward) \
|
| 1552 |
+
_(aten, upsample_nearest3d) \
|
| 1553 |
+
_(aten, upsample_nearest3d_backward) \
|
| 1554 |
+
_(aten, upsample_trilinear3d) \
|
| 1555 |
+
_(aten, upsample_trilinear3d_backward) \
|
| 1556 |
+
_(aten, value_selecting_reduction_backward) \
|
| 1557 |
+
_(aten, values) \
|
| 1558 |
+
_(aten, values_copy) \
|
| 1559 |
+
_(aten, vander) \
|
| 1560 |
+
_(aten, var) \
|
| 1561 |
+
_(aten, var_mean) \
|
| 1562 |
+
_(aten, vdot) \
|
| 1563 |
+
_(aten, view) \
|
| 1564 |
+
_(aten, view_as) \
|
| 1565 |
+
_(aten, view_as_complex) \
|
| 1566 |
+
_(aten, view_as_complex_copy) \
|
| 1567 |
+
_(aten, view_as_real) \
|
| 1568 |
+
_(aten, view_as_real_copy) \
|
| 1569 |
+
_(aten, view_copy) \
|
| 1570 |
+
_(aten, vsplit) \
|
| 1571 |
+
_(aten, vstack) \
|
| 1572 |
+
_(aten, where) \
|
| 1573 |
+
_(aten, xlogy) \
|
| 1574 |
+
_(aten, xlogy_) \
|
| 1575 |
+
_(aten, zero) \
|
| 1576 |
+
_(aten, zero_) \
|
| 1577 |
+
_(aten, zeros) \
|
| 1578 |
+
_(aten, zeros_like)
|
| 1579 |
+
|
| 1580 |
+
#define FORALL_ATTR_BASE_SYMBOLS(_) \
|
| 1581 |
+
_(attr, A) \
|
| 1582 |
+
_(attr, B) \
|
| 1583 |
+
_(attr, C) \
|
| 1584 |
+
_(attr, H) \
|
| 1585 |
+
_(attr, HxW) \
|
| 1586 |
+
_(attr, K) \
|
| 1587 |
+
_(attr, L) \
|
| 1588 |
+
_(attr, LD) \
|
| 1589 |
+
_(attr, LU) \
|
| 1590 |
+
_(attr, LU_data) \
|
| 1591 |
+
_(attr, LU_pivots) \
|
| 1592 |
+
_(attr, M) \
|
| 1593 |
+
_(attr, N) \
|
| 1594 |
+
_(attr, P) \
|
| 1595 |
+
_(attr, Q) \
|
| 1596 |
+
_(attr, R) \
|
| 1597 |
+
_(attr, S) \
|
| 1598 |
+
_(attr, U) \
|
| 1599 |
+
_(attr, UPLO) \
|
| 1600 |
+
_(attr, V) \
|
| 1601 |
+
_(attr, Vh) \
|
| 1602 |
+
_(attr, W) \
|
| 1603 |
+
_(attr, X) \
|
| 1604 |
+
_(attr, a) \
|
| 1605 |
+
_(attr, abs) \
|
| 1606 |
+
_(attr, accumulate) \
|
| 1607 |
+
_(attr, accumulate_matches) \
|
| 1608 |
+
_(attr, activation) \
|
| 1609 |
+
_(attr, addends) \
|
| 1610 |
+
_(attr, adjoint) \
|
| 1611 |
+
_(attr, alg_id) \
|
| 1612 |
+
_(attr, algorithm) \
|
| 1613 |
+
_(attr, alibi_slopes) \
|
| 1614 |
+
_(attr, align_corners) \
|
| 1615 |
+
_(attr, allow_tf32) \
|
| 1616 |
+
_(attr, alpha) \
|
| 1617 |
+
_(attr, amsgrad) \
|
| 1618 |
+
_(attr, anchor) \
|
| 1619 |
+
_(attr, angle) \
|
| 1620 |
+
_(attr, any) \
|
| 1621 |
+
_(attr, api_name) \
|
| 1622 |
+
_(attr, append) \
|
| 1623 |
+
_(attr, approximate) \
|
| 1624 |
+
_(attr, arg1) \
|
| 1625 |
+
_(attr, arg2) \
|
| 1626 |
+
_(attr, arg3) \
|
| 1627 |
+
_(attr, arg_out) \
|
| 1628 |
+
_(attr, assert_msg) \
|
| 1629 |
+
_(attr, assume_unique) \
|
| 1630 |
+
_(attr, atol) \
|
| 1631 |
+
_(attr, attn_bias) \
|
| 1632 |
+
_(attr, attn_mask) \
|
| 1633 |
+
_(attr, average_attn_weights) \
|
| 1634 |
+
_(attr, averaging_const) \
|
| 1635 |
+
_(attr, aweights) \
|
| 1636 |
+
_(attr, axis) \
|
| 1637 |
+
_(attr, axis0) \
|
| 1638 |
+
_(attr, axis1) \
|
| 1639 |
+
_(attr, b) \
|
| 1640 |
+
_(attr, b_hh) \
|
| 1641 |
+
_(attr, b_ih) \
|
| 1642 |
+
_(attr, bag_size) \
|
| 1643 |
+
_(attr, base) \
|
| 1644 |
+
_(attr, batch1) \
|
| 1645 |
+
_(attr, batch2) \
|
| 1646 |
+
_(attr, batch_dim) \
|
| 1647 |
+
_(attr, batch_first) \
|
| 1648 |
+
_(attr, batch_size) \
|
| 1649 |
+
_(attr, batch_sizes) \
|
| 1650 |
+
_(attr, benchmark) \
|
| 1651 |
+
_(attr, beta) \
|
| 1652 |
+
_(attr, beta1) \
|
| 1653 |
+
_(attr, beta2) \
|
| 1654 |
+
_(attr, bias) \
|
| 1655 |
+
_(attr, bias_defined) \
|
| 1656 |
+
_(attr, bias_g) \
|
| 1657 |
+
_(attr, bias_requires_grad) \
|
| 1658 |
+
_(attr, bias_sizes) \
|
| 1659 |
+
_(attr, bidirectional) \
|
| 1660 |
+
_(attr, bin_edges) \
|
| 1661 |
+
_(attr, bins) \
|
| 1662 |
+
_(attr, bit_width) \
|
| 1663 |
+
_(attr, blank) \
|
| 1664 |
+
_(attr, blocksize) \
|
| 1665 |
+
_(attr, boundaries) \
|
| 1666 |
+
_(attr, buffer) \
|
| 1667 |
+
_(attr, ccol_indices) \
|
| 1668 |
+
_(attr, cdim) \
|
| 1669 |
+
_(attr, cdist) \
|
| 1670 |
+
_(attr, ceil_mode) \
|
| 1671 |
+
_(attr, cell_state_fwd) \
|
| 1672 |
+
_(attr, center) \
|
| 1673 |
+
_(attr, ch_axis) \
|
| 1674 |
+
_(attr, check_errors) \
|
| 1675 |
+
_(attr, chunks) \
|
| 1676 |
+
_(attr, coalesced) \
|
| 1677 |
+
_(attr, coefficients) \
|
| 1678 |
+
_(attr, col) \
|
| 1679 |
+
_(attr, col_indices) \
|
| 1680 |
+
_(attr, col_offsets) \
|
| 1681 |
+
_(attr, col_offsets_hh) \
|
| 1682 |
+
_(attr, col_offsets_ih) \
|
| 1683 |
+
_(attr, compressed_A) \
|
| 1684 |
+
_(attr, compressed_idx) \
|
| 1685 |
+
_(attr, compressed_indices) \
|
| 1686 |
+
_(attr, compressed_indices_dtype) \
|
| 1687 |
+
_(attr, compute_log_sumexp) \
|
| 1688 |
+
_(attr, compute_mode) \
|
| 1689 |
+
_(attr, compute_uv) \
|
| 1690 |
+
_(attr, compute_v) \
|
| 1691 |
+
_(attr, condition) \
|
| 1692 |
+
_(attr, copy) \
|
| 1693 |
+
_(attr, correction) \
|
| 1694 |
+
_(attr, count) \
|
| 1695 |
+
_(attr, count_include_pad) \
|
| 1696 |
+
_(attr, counts) \
|
| 1697 |
+
_(attr, cpu_dtype) \
|
| 1698 |
+
_(attr, cpu_enabled) \
|
| 1699 |
+
_(attr, cpu_nested_shape_example) \
|
| 1700 |
+
_(attr, create_graph) \
|
| 1701 |
+
_(attr, crow_indices) \
|
| 1702 |
+
_(attr, cu_seqlens_k) \
|
| 1703 |
+
_(attr, cu_seqlens_q) \
|
| 1704 |
+
_(attr, cuda_dtype) \
|
| 1705 |
+
_(attr, cuda_enabled) \
|
| 1706 |
+
_(attr, cudnn_enable) \
|
| 1707 |
+
_(attr, cudnn_enabled) \
|
| 1708 |
+
_(attr, cum_seq_k) \
|
| 1709 |
+
_(attr, cum_seq_q) \
|
| 1710 |
+
_(attr, custom_mask_type) \
|
| 1711 |
+
_(attr, cx) \
|
| 1712 |
+
_(attr, cx_) \
|
| 1713 |
+
_(attr, cx_tmp) \
|
| 1714 |
+
_(attr, cy) \
|
| 1715 |
+
_(attr, cy_) \
|
| 1716 |
+
_(attr, d) \
|
| 1717 |
+
_(attr, dampening) \
|
| 1718 |
+
_(attr, data) \
|
| 1719 |
+
_(attr, decimals) \
|
| 1720 |
+
_(attr, delta) \
|
| 1721 |
+
_(attr, dense) \
|
| 1722 |
+
_(attr, dense_B) \
|
| 1723 |
+
_(attr, dense_dim) \
|
| 1724 |
+
_(attr, density) \
|
| 1725 |
+
_(attr, dep_token) \
|
| 1726 |
+
_(attr, descending) \
|
| 1727 |
+
_(attr, destination) \
|
| 1728 |
+
_(attr, deterministic) \
|
| 1729 |
+
_(attr, device) \
|
| 1730 |
+
_(attr, device_index) \
|
| 1731 |
+
_(attr, dgrad_glu) \
|
| 1732 |
+
_(attr, diagonal) \
|
| 1733 |
+
_(attr, diagonals) \
|
| 1734 |
+
_(attr, dilation) \
|
| 1735 |
+
_(attr, dim) \
|
| 1736 |
+
_(attr, dim0) \
|
| 1737 |
+
_(attr, dim1) \
|
| 1738 |
+
_(attr, dim2) \
|
| 1739 |
+
_(attr, dimension) \
|
| 1740 |
+
_(attr, dims) \
|
| 1741 |
+
_(attr, dims_other) \
|
| 1742 |
+
_(attr, dims_self) \
|
| 1743 |
+
_(attr, divisor_override) \
|
| 1744 |
+
_(attr, downscale_factor) \
|
| 1745 |
+
_(attr, driver) \
|
| 1746 |
+
_(attr, dropout) \
|
| 1747 |
+
_(attr, dropout_mask) \
|
| 1748 |
+
_(attr, dropout_p) \
|
| 1749 |
+
_(attr, dropout_seed) \
|
| 1750 |
+
_(attr, dropout_state) \
|
| 1751 |
+
_(attr, dst) \
|
| 1752 |
+
_(attr, dtype) \
|
| 1753 |
+
_(attr, dual) \
|
| 1754 |
+
_(attr, dummy) \
|
| 1755 |
+
_(attr, dx) \
|
| 1756 |
+
_(attr, edge_order) \
|
| 1757 |
+
_(attr, eigenvalues) \
|
| 1758 |
+
_(attr, eigenvectors) \
|
| 1759 |
+
_(attr, eigvals) \
|
| 1760 |
+
_(attr, eigvecs) \
|
| 1761 |
+
_(attr, element) \
|
| 1762 |
+
_(attr, elements) \
|
| 1763 |
+
_(attr, ellipsis_idx) \
|
| 1764 |
+
_(attr, embed_dim) \
|
| 1765 |
+
_(attr, enable_gqa) \
|
| 1766 |
+
_(attr, end) \
|
| 1767 |
+
_(attr, end_dim) \
|
| 1768 |
+
_(attr, eps) \
|
| 1769 |
+
_(attr, epsilon) \
|
| 1770 |
+
_(attr, equal_nan) \
|
| 1771 |
+
_(attr, equation) \
|
| 1772 |
+
_(attr, exp_avg_sqs) \
|
| 1773 |
+
_(attr, exp_avgs) \
|
| 1774 |
+
_(attr, expand1) \
|
| 1775 |
+
_(attr, expand2) \
|
| 1776 |
+
_(attr, expand3) \
|
| 1777 |
+
_(attr, exponent) \
|
| 1778 |
+
_(attr, exponential_average_factor) \
|
| 1779 |
+
_(attr, fake_quant_enabled) \
|
| 1780 |
+
_(attr, fake_quant_on) \
|
| 1781 |
+
_(attr, ffn_bias_1) \
|
| 1782 |
+
_(attr, ffn_bias_2) \
|
| 1783 |
+
_(attr, ffn_weight_1) \
|
| 1784 |
+
_(attr, ffn_weight_2) \
|
| 1785 |
+
_(attr, filename) \
|
| 1786 |
+
_(attr, fill) \
|
| 1787 |
+
_(attr, fill_value) \
|
| 1788 |
+
_(attr, flat) \
|
| 1789 |
+
_(attr, forward) \
|
| 1790 |
+
_(attr, found_inf) \
|
| 1791 |
+
_(attr, from) \
|
| 1792 |
+
_(attr, from_) \
|
| 1793 |
+
_(attr, full) \
|
| 1794 |
+
_(attr, full_matrices) \
|
| 1795 |
+
_(attr, fuse_transform_0213) \
|
| 1796 |
+
_(attr, fweights) \
|
| 1797 |
+
_(attr, g) \
|
| 1798 |
+
_(attr, gO) \
|
| 1799 |
+
_(attr, generator) \
|
| 1800 |
+
_(attr, ggI) \
|
| 1801 |
+
_(attr, ggW) \
|
| 1802 |
+
_(attr, ggb) \
|
| 1803 |
+
_(attr, glu) \
|
| 1804 |
+
_(attr, grad) \
|
| 1805 |
+
_(attr, grad_bias) \
|
| 1806 |
+
_(attr, grad_cy) \
|
| 1807 |
+
_(attr, grad_factor) \
|
| 1808 |
+
_(attr, grad_glu) \
|
| 1809 |
+
_(attr, grad_hy) \
|
| 1810 |
+
_(attr, grad_in) \
|
| 1811 |
+
_(attr, grad_input) \
|
| 1812 |
+
_(attr, grad_input_mask) \
|
| 1813 |
+
_(attr, grad_out) \
|
| 1814 |
+
_(attr, grad_out_) \
|
| 1815 |
+
_(attr, grad_output) \
|
| 1816 |
+
_(attr, grad_scale) \
|
| 1817 |
+
_(attr, grad_w) \
|
| 1818 |
+
_(attr, grad_weight) \
|
| 1819 |
+
_(attr, grad_x) \
|
| 1820 |
+
_(attr, grad_y) \
|
| 1821 |
+
_(attr, gradient) \
|
| 1822 |
+
_(attr, grads) \
|
| 1823 |
+
_(attr, grid) \
|
| 1824 |
+
_(attr, group) \
|
| 1825 |
+
_(attr, groups) \
|
| 1826 |
+
_(attr, growth_interval) \
|
| 1827 |
+
_(attr, growth_tracker) \
|
| 1828 |
+
_(attr, half_to_float) \
|
| 1829 |
+
_(attr, has_bias) \
|
| 1830 |
+
_(attr, has_biases) \
|
| 1831 |
+
_(attr, hermitian) \
|
| 1832 |
+
_(attr, hidden_bias) \
|
| 1833 |
+
_(attr, hidden_gates) \
|
| 1834 |
+
_(attr, hidden_size) \
|
| 1835 |
+
_(attr, high) \
|
| 1836 |
+
_(attr, hist) \
|
| 1837 |
+
_(attr, hop_length) \
|
| 1838 |
+
_(attr, hx) \
|
| 1839 |
+
_(attr, hx_) \
|
| 1840 |
+
_(attr, hy_) \
|
| 1841 |
+
_(attr, i1) \
|
| 1842 |
+
_(attr, i2) \
|
| 1843 |
+
_(attr, i3) \
|
| 1844 |
+
_(attr, ignore_index) \
|
| 1845 |
+
_(attr, imag) \
|
| 1846 |
+
_(attr, impl_index) \
|
| 1847 |
+
_(attr, implicit) \
|
| 1848 |
+
_(attr, include_last_offset) \
|
| 1849 |
+
_(attr, include_self) \
|
| 1850 |
+
_(attr, increasing) \
|
| 1851 |
+
_(attr, ind) \
|
| 1852 |
+
_(attr, index) \
|
| 1853 |
+
_(attr, index_dtype) \
|
| 1854 |
+
_(attr, indexing) \
|
| 1855 |
+
_(attr, indices) \
|
| 1856 |
+
_(attr, info) \
|
| 1857 |
+
_(attr, initial) \
|
| 1858 |
+
_(attr, innerKTiles) \
|
| 1859 |
+
_(attr, input) \
|
| 1860 |
+
_(attr, input1) \
|
| 1861 |
+
_(attr, input2) \
|
| 1862 |
+
_(attr, input3) \
|
| 1863 |
+
_(attr, input_bias) \
|
| 1864 |
+
_(attr, input_dtype) \
|
| 1865 |
+
_(attr, input_g) \
|
| 1866 |
+
_(attr, input_gates) \
|
| 1867 |
+
_(attr, input_lengths) \
|
| 1868 |
+
_(attr, input_scale) \
|
| 1869 |
+
_(attr, input_size) \
|
| 1870 |
+
_(attr, input_sizes) \
|
| 1871 |
+
_(attr, input_zero_point) \
|
| 1872 |
+
_(attr, inputs) \
|
| 1873 |
+
_(attr, interpolation) \
|
| 1874 |
+
_(attr, interpolation_mode) \
|
| 1875 |
+
_(attr, inv_scale) \
|
| 1876 |
+
_(attr, inverse) \
|
| 1877 |
+
_(attr, invert) \
|
| 1878 |
+
_(attr, invstd) \
|
| 1879 |
+
_(attr, is_causal) \
|
| 1880 |
+
_(attr, is_coalesced) \
|
| 1881 |
+
_(attr, is_crow) \
|
| 1882 |
+
_(attr, is_first_step) \
|
| 1883 |
+
_(attr, is_matrix) \
|
| 1884 |
+
_(attr, is_result) \
|
| 1885 |
+
_(attr, is_target) \
|
| 1886 |
+
_(attr, k) \
|
| 1887 |
+
_(attr, keepdim) \
|
| 1888 |
+
_(attr, kernel_size) \
|
| 1889 |
+
_(attr, key) \
|
| 1890 |
+
_(attr, label_smoothing) \
|
| 1891 |
+
_(attr, lambd) \
|
| 1892 |
+
_(attr, largest) \
|
| 1893 |
+
_(attr, last_dim_size) \
|
| 1894 |
+
_(attr, layersOutputs) \
|
| 1895 |
+
_(attr, layout) \
|
| 1896 |
+
_(attr, left) \
|
| 1897 |
+
_(attr, length) \
|
| 1898 |
+
_(attr, lengths) \
|
| 1899 |
+
_(attr, level) \
|
| 1900 |
+
_(attr, like) \
|
| 1901 |
+
_(attr, list) \
|
| 1902 |
+
_(attr, log_alpha) \
|
| 1903 |
+
_(attr, log_input) \
|
| 1904 |
+
_(attr, log_probs) \
|
| 1905 |
+
_(attr, log_target) \
|
| 1906 |
+
_(attr, logabsdet) \
|
| 1907 |
+
_(attr, logsumexp) \
|
| 1908 |
+
_(attr, low) \
|
| 1909 |
+
_(attr, lower) \
|
| 1910 |
+
_(attr, lr) \
|
| 1911 |
+
_(attr, lr_decay) \
|
| 1912 |
+
_(attr, ltm) \
|
| 1913 |
+
_(attr, m) \
|
| 1914 |
+
_(attr, mantissa) \
|
| 1915 |
+
_(attr, margin) \
|
| 1916 |
+
_(attr, mask) \
|
| 1917 |
+
_(attr, mask_check) \
|
| 1918 |
+
_(attr, mask_type) \
|
| 1919 |
+
_(attr, masked_grad) \
|
| 1920 |
+
_(attr, mat) \
|
| 1921 |
+
_(attr, mat1) \
|
| 1922 |
+
_(attr, mat1_meta) \
|
| 1923 |
+
_(attr, mat2) \
|
| 1924 |
+
_(attr, matrices) \
|
| 1925 |
+
_(attr, max) \
|
| 1926 |
+
_(attr, max_exp_avg_sqs) \
|
| 1927 |
+
_(attr, max_k) \
|
| 1928 |
+
_(attr, max_lengths) \
|
| 1929 |
+
_(attr, max_norm) \
|
| 1930 |
+
_(attr, max_q) \
|
| 1931 |
+
_(attr, max_seqlen) \
|
| 1932 |
+
_(attr, max_seqlen_k) \
|
| 1933 |
+
_(attr, max_seqlen_q) \
|
| 1934 |
+
_(attr, max_size) \
|
| 1935 |
+
_(attr, max_val) \
|
| 1936 |
+
_(attr, max_values) \
|
| 1937 |
+
_(attr, maximize) \
|
| 1938 |
+
_(attr, maximum_indices) \
|
| 1939 |
+
_(attr, maxnorm) \
|
| 1940 |
+
_(attr, mean) \
|
| 1941 |
+
_(attr, median) \
|
| 1942 |
+
_(attr, memory_format) \
|
| 1943 |
+
_(attr, meta) \
|
| 1944 |
+
_(attr, min) \
|
| 1945 |
+
_(attr, min_indices) \
|
| 1946 |
+
_(attr, min_seqlen) \
|
| 1947 |
+
_(attr, min_val) \
|
| 1948 |
+
_(attr, minlength) \
|
| 1949 |
+
_(attr, mode) \
|
| 1950 |
+
_(attr, momentum) \
|
| 1951 |
+
_(attr, momentum_buffer_list) \
|
| 1952 |
+
_(attr, n) \
|
| 1953 |
+
_(attr, n_bins) \
|
| 1954 |
+
_(attr, n_fft) \
|
| 1955 |
+
_(attr, names) \
|
| 1956 |
+
_(attr, nan) \
|
| 1957 |
+
_(attr, need_weights) \
|
| 1958 |
+
_(attr, neg_log_likelihood) \
|
| 1959 |
+
_(attr, negative) \
|
| 1960 |
+
_(attr, negative_slope) \
|
| 1961 |
+
_(attr, neginf) \
|
| 1962 |
+
_(attr, nested_size) \
|
| 1963 |
+
_(attr, nested_strides) \
|
| 1964 |
+
_(attr, nesterov) \
|
| 1965 |
+
_(attr, new_data) \
|
| 1966 |
+
_(attr, nnz) \
|
| 1967 |
+
_(attr, noise) \
|
| 1968 |
+
_(attr, non_blocking) \
|
| 1969 |
+
_(attr, norm) \
|
| 1970 |
+
_(attr, norm_bias_1) \
|
| 1971 |
+
_(attr, norm_bias_2) \
|
| 1972 |
+
_(attr, norm_first) \
|
| 1973 |
+
_(attr, norm_type) \
|
| 1974 |
+
_(attr, norm_weight_1) \
|
| 1975 |
+
_(attr, norm_weight_2) \
|
| 1976 |
+
_(attr, normalization) \
|
| 1977 |
+
_(attr, normalized) \
|
| 1978 |
+
_(attr, normalized_shape) \
|
| 1979 |
+
_(attr, nt_example) \
|
| 1980 |
+
_(attr, num_chunks) \
|
| 1981 |
+
_(attr, num_classes) \
|
| 1982 |
+
_(attr, num_generated) \
|
| 1983 |
+
_(attr, num_groups) \
|
| 1984 |
+
_(attr, num_head) \
|
| 1985 |
+
_(attr, num_heads) \
|
| 1986 |
+
_(attr, num_layers) \
|
| 1987 |
+
_(attr, num_parallel) \
|
| 1988 |
+
_(attr, num_samples) \
|
| 1989 |
+
_(attr, num_splits_key) \
|
| 1990 |
+
_(attr, num_weights) \
|
| 1991 |
+
_(attr, numel) \
|
| 1992 |
+
_(attr, observer_on) \
|
| 1993 |
+
_(attr, offset) \
|
| 1994 |
+
_(attr, offset2bag) \
|
| 1995 |
+
_(attr, offsets) \
|
| 1996 |
+
_(attr, onesided) \
|
| 1997 |
+
_(attr, ord) \
|
| 1998 |
+
_(attr, order) \
|
| 1999 |
+
_(attr, other) \
|
| 2000 |
+
_(attr, out) \
|
| 2001 |
+
_(attr, out0) \
|
| 2002 |
+
_(attr, out1) \
|
| 2003 |
+
_(attr, out2) \
|
| 2004 |
+
_(attr, out3) \
|
| 2005 |
+
_(attr, out4) \
|
| 2006 |
+
_(attr, out5) \
|
| 2007 |
+
_(attr, out6) \
|
| 2008 |
+
_(attr, out_channel) \
|
| 2009 |
+
_(attr, out_dim) \
|
| 2010 |
+
_(attr, out_dtype) \
|
| 2011 |
+
_(attr, out_int32) \
|
| 2012 |
+
_(attr, outdim) \
|
| 2013 |
+
_(attr, output) \
|
| 2014 |
+
_(attr, output_mask) \
|
| 2015 |
+
_(attr, output_padding) \
|
| 2016 |
+
_(attr, output_scale) \
|
| 2017 |
+
_(attr, output_size) \
|
| 2018 |
+
_(attr, output_zero_point) \
|
| 2019 |
+
_(attr, p) \
|
| 2020 |
+
_(attr, packed) \
|
| 2021 |
+
_(attr, packed_hh) \
|
| 2022 |
+
_(attr, packed_ih) \
|
| 2023 |
+
_(attr, packed_weight) \
|
| 2024 |
+
_(attr, pad) \
|
| 2025 |
+
_(attr, pad_mode) \
|
| 2026 |
+
_(attr, padded) \
|
| 2027 |
+
_(attr, padding) \
|
| 2028 |
+
_(attr, padding_idx) \
|
| 2029 |
+
_(attr, padding_mode) \
|
| 2030 |
+
_(attr, padding_side) \
|
| 2031 |
+
_(attr, padding_value) \
|
| 2032 |
+
_(attr, params) \
|
| 2033 |
+
_(attr, path) \
|
| 2034 |
+
_(attr, pdist) \
|
| 2035 |
+
_(attr, per_row_fake_quant) \
|
| 2036 |
+
_(attr, per_sample_weights) \
|
| 2037 |
+
_(attr, periodic) \
|
| 2038 |
+
_(attr, philox_offset) \
|
| 2039 |
+
_(attr, philox_seed) \
|
| 2040 |
+
_(attr, physical_layout) \
|
| 2041 |
+
_(attr, pin_memory) \
|
| 2042 |
+
_(attr, pivot) \
|
| 2043 |
+
_(attr, pivots) \
|
| 2044 |
+
_(attr, plain_idx) \
|
| 2045 |
+
_(attr, plain_indices) \
|
| 2046 |
+
_(attr, pos_weight) \
|
| 2047 |
+
_(attr, posinf) \
|
| 2048 |
+
_(attr, positive) \
|
| 2049 |
+
_(attr, pow) \
|
| 2050 |
+
_(attr, prepend) \
|
| 2051 |
+
_(attr, primal) \
|
| 2052 |
+
_(attr, prob) \
|
| 2053 |
+
_(attr, proj_bias) \
|
| 2054 |
+
_(attr, proj_size) \
|
| 2055 |
+
_(attr, proj_weight) \
|
| 2056 |
+
_(attr, q) \
|
| 2057 |
+
_(attr, qGroupSize) \
|
| 2058 |
+
_(attr, qScaleAndZeros) \
|
| 2059 |
+
_(attr, qkv) \
|
| 2060 |
+
_(attr, qkv_bias) \
|
| 2061 |
+
_(attr, qkv_weight) \
|
| 2062 |
+
_(attr, qtensor) \
|
| 2063 |
+
_(attr, quant_max) \
|
| 2064 |
+
_(attr, quant_min) \
|
| 2065 |
+
_(attr, quasi) \
|
| 2066 |
+
_(attr, query) \
|
| 2067 |
+
_(attr, r) \
|
| 2068 |
+
_(attr, ragged_idx) \
|
| 2069 |
+
_(attr, random_samples) \
|
| 2070 |
+
_(attr, range) \
|
| 2071 |
+
_(attr, rank) \
|
| 2072 |
+
_(attr, ratio) \
|
| 2073 |
+
_(attr, rcond) \
|
| 2074 |
+
_(attr, real) \
|
| 2075 |
+
_(attr, reduce) \
|
| 2076 |
+
_(attr, reduce_range) \
|
| 2077 |
+
_(attr, reduction) \
|
| 2078 |
+
_(attr, repeats) \
|
| 2079 |
+
_(attr, replacement) \
|
| 2080 |
+
_(attr, requires_grad) \
|
| 2081 |
+
_(attr, reserve) \
|
| 2082 |
+
_(attr, reserveSpace) \
|
| 2083 |
+
_(attr, reservedSpace) \
|
| 2084 |
+
_(attr, residuals) \
|
| 2085 |
+
_(attr, result) \
|
| 2086 |
+
_(attr, retain_graph) \
|
| 2087 |
+
_(attr, return_complex) \
|
| 2088 |
+
_(attr, return_counts) \
|
| 2089 |
+
_(attr, return_debug_mask) \
|
| 2090 |
+
_(attr, return_inverse) \
|
| 2091 |
+
_(attr, reverse) \
|
| 2092 |
+
_(attr, right) \
|
| 2093 |
+
_(attr, rounding_mode) \
|
| 2094 |
+
_(attr, row) \
|
| 2095 |
+
_(attr, row_indices) \
|
| 2096 |
+
_(attr, rstd) \
|
| 2097 |
+
_(attr, rtol) \
|
| 2098 |
+
_(attr, running_max) \
|
| 2099 |
+
_(attr, running_mean) \
|
| 2100 |
+
_(attr, running_min) \
|
| 2101 |
+
_(attr, running_var) \
|
| 2102 |
+
_(attr, s) \
|
| 2103 |
+
_(attr, save_invstd) \
|
| 2104 |
+
_(attr, save_mean) \
|
| 2105 |
+
_(attr, save_var) \
|
| 2106 |
+
_(attr, save_var_transform) \
|
| 2107 |
+
_(attr, saved_g) \
|
| 2108 |
+
_(attr, saved_norms) \
|
| 2109 |
+
_(attr, saved_v) \
|
| 2110 |
+
_(attr, scalar) \
|
| 2111 |
+
_(attr, scalar1) \
|
| 2112 |
+
_(attr, scalar2) \
|
| 2113 |
+
_(attr, scalars) \
|
| 2114 |
+
_(attr, scale) \
|
| 2115 |
+
_(attr, scale_a) \
|
| 2116 |
+
_(attr, scale_b) \
|
| 2117 |
+
_(attr, scale_backoff_factor) \
|
| 2118 |
+
_(attr, scale_factors) \
|
| 2119 |
+
_(attr, scale_grad_by_freq) \
|
| 2120 |
+
_(attr, scale_growth_factor) \
|
| 2121 |
+
_(attr, scale_hh) \
|
| 2122 |
+
_(attr, scale_ih) \
|
| 2123 |
+
_(attr, scale_result) \
|
| 2124 |
+
_(attr, scales) \
|
| 2125 |
+
_(attr, scales_d) \
|
| 2126 |
+
_(attr, scales_h) \
|
| 2127 |
+
_(attr, scales_w) \
|
| 2128 |
+
_(attr, sections) \
|
| 2129 |
+
_(attr, seed) \
|
| 2130 |
+
_(attr, self) \
|
| 2131 |
+
_(attr, self_is_result) \
|
| 2132 |
+
_(attr, self_num_batch_dims) \
|
| 2133 |
+
_(attr, self_or_result) \
|
| 2134 |
+
_(attr, self_sizes) \
|
| 2135 |
+
_(attr, seqlen_k) \
|
| 2136 |
+
_(attr, sequences) \
|
| 2137 |
+
_(attr, seqused_k) \
|
| 2138 |
+
_(attr, shape) \
|
| 2139 |
+
_(attr, shared) \
|
| 2140 |
+
_(attr, shared_storage_dqdkdv) \
|
| 2141 |
+
_(attr, shifts) \
|
| 2142 |
+
_(attr, side) \
|
| 2143 |
+
_(attr, sigma) \
|
| 2144 |
+
_(attr, sign) \
|
| 2145 |
+
_(attr, singular_values) \
|
| 2146 |
+
_(attr, size) \
|
| 2147 |
+
_(attr, sizes) \
|
| 2148 |
+
_(attr, skip_first) \
|
| 2149 |
+
_(attr, sobolstate) \
|
| 2150 |
+
_(attr, solution) \
|
| 2151 |
+
_(attr, some) \
|
| 2152 |
+
_(attr, sorted) \
|
| 2153 |
+
_(attr, sorted_sequence) \
|
| 2154 |
+
_(attr, sorter) \
|
| 2155 |
+
_(attr, source) \
|
| 2156 |
+
_(attr, spacing) \
|
| 2157 |
+
_(attr, sparse) \
|
| 2158 |
+
_(attr, sparse_dim) \
|
| 2159 |
+
_(attr, sparse_grad) \
|
| 2160 |
+
_(attr, split_size) \
|
| 2161 |
+
_(attr, split_sizes) \
|
| 2162 |
+
_(attr, src) \
|
| 2163 |
+
_(attr, stable) \
|
| 2164 |
+
_(attr, start) \
|
| 2165 |
+
_(attr, start_dim) \
|
| 2166 |
+
_(attr, state_steps) \
|
| 2167 |
+
_(attr, state_sums) \
|
| 2168 |
+
_(attr, std) \
|
| 2169 |
+
_(attr, step) \
|
| 2170 |
+
_(attr, steps) \
|
| 2171 |
+
_(attr, storage_offset) \
|
| 2172 |
+
_(attr, stride) \
|
| 2173 |
+
_(attr, sum_dy) \
|
| 2174 |
+
_(attr, sum_dy_xmu) \
|
| 2175 |
+
_(attr, sumdim) \
|
| 2176 |
+
_(attr, swap) \
|
| 2177 |
+
_(attr, symmetric_quant) \
|
| 2178 |
+
_(attr, t) \
|
| 2179 |
+
_(attr, tangent) \
|
| 2180 |
+
_(attr, target) \
|
| 2181 |
+
_(attr, target_lengths) \
|
| 2182 |
+
_(attr, targets) \
|
| 2183 |
+
_(attr, tau) \
|
| 2184 |
+
_(attr, tensor) \
|
| 2185 |
+
_(attr, tensor1) \
|
| 2186 |
+
_(attr, tensor2) \
|
| 2187 |
+
_(attr, tensor_indices_or_sections) \
|
| 2188 |
+
_(attr, tensors) \
|
| 2189 |
+
_(attr, tensors1) \
|
| 2190 |
+
_(attr, test_element) \
|
| 2191 |
+
_(attr, test_elements) \
|
| 2192 |
+
_(attr, the_template) \
|
| 2193 |
+
_(attr, theta) \
|
| 2194 |
+
_(attr, thread_masks) \
|
| 2195 |
+
_(attr, threshold) \
|
| 2196 |
+
_(attr, to) \
|
| 2197 |
+
_(attr, tol) \
|
| 2198 |
+
_(attr, total) \
|
| 2199 |
+
_(attr, total_L) \
|
| 2200 |
+
_(attr, total_length) \
|
| 2201 |
+
_(attr, total_weight) \
|
| 2202 |
+
_(attr, train) \
|
| 2203 |
+
_(attr, training) \
|
| 2204 |
+
_(attr, transpose) \
|
| 2205 |
+
_(attr, transpose_result) \
|
| 2206 |
+
_(attr, transposed) \
|
| 2207 |
+
_(attr, type1) \
|
| 2208 |
+
_(attr, type2) \
|
| 2209 |
+
_(attr, unbiased) \
|
| 2210 |
+
_(attr, unitriangular) \
|
| 2211 |
+
_(attr, unpack_data) \
|
| 2212 |
+
_(attr, unpack_pivots) \
|
| 2213 |
+
_(attr, unroll_dim) \
|
| 2214 |
+
_(attr, unsafe) \
|
| 2215 |
+
_(attr, update) \
|
| 2216 |
+
_(attr, upper) \
|
| 2217 |
+
_(attr, upscale_factor) \
|
| 2218 |
+
_(attr, use_cutlass) \
|
| 2219 |
+
_(attr, use_fast_accum) \
|
| 2220 |
+
_(attr, use_gelu) \
|
| 2221 |
+
_(attr, use_input_stats) \
|
| 2222 |
+
_(attr, v) \
|
| 2223 |
+
_(attr, value) \
|
| 2224 |
+
_(attr, values) \
|
| 2225 |
+
_(attr, var) \
|
| 2226 |
+
_(attr, vec) \
|
| 2227 |
+
_(attr, vec1) \
|
| 2228 |
+
_(attr, vec2) \
|
| 2229 |
+
_(attr, w_hh) \
|
| 2230 |
+
_(attr, w_ih) \
|
| 2231 |
+
_(attr, weight) \
|
| 2232 |
+
_(attr, weight0) \
|
| 2233 |
+
_(attr, weight1) \
|
| 2234 |
+
_(attr, weight2) \
|
| 2235 |
+
_(attr, weight3) \
|
| 2236 |
+
_(attr, weight4) \
|
| 2237 |
+
_(attr, weight_arr) \
|
| 2238 |
+
_(attr, weight_buf) \
|
| 2239 |
+
_(attr, weight_decay) \
|
| 2240 |
+
_(attr, weight_g) \
|
| 2241 |
+
_(attr, weight_scale) \
|
| 2242 |
+
_(attr, weight_stride0) \
|
| 2243 |
+
_(attr, weight_zero_point) \
|
| 2244 |
+
_(attr, weights) \
|
| 2245 |
+
_(attr, win_length) \
|
| 2246 |
+
_(attr, window) \
|
| 2247 |
+
_(attr, window_length) \
|
| 2248 |
+
_(attr, window_size) \
|
| 2249 |
+
_(attr, window_size_left) \
|
| 2250 |
+
_(attr, window_size_right) \
|
| 2251 |
+
_(attr, with_replacement) \
|
| 2252 |
+
_(attr, workspace) \
|
| 2253 |
+
_(attr, wrap) \
|
| 2254 |
+
_(attr, x) \
|
| 2255 |
+
_(attr, x1) \
|
| 2256 |
+
_(attr, x2) \
|
| 2257 |
+
_(attr, y) \
|
| 2258 |
+
_(attr, z) \
|
| 2259 |
+
_(attr, z_state) \
|
| 2260 |
+
_(attr, zero_infinity) \
|
| 2261 |
+
_(attr, zero_point) \
|
| 2262 |
+
_(attr, zero_point_hh) \
|
| 2263 |
+
_(attr, zero_point_ih) \
|
| 2264 |
+
_(attr, zero_points)
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/blob.h
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <type_traits>
|
| 4 |
+
|
| 5 |
+
#include <c10/util/intrusive_ptr.h>
|
| 6 |
+
#include <c10/util/typeid.h>
|
| 7 |
+
#include <c10/macros/Macros.h>
|
| 8 |
+
|
| 9 |
+
namespace caffe2 {
|
| 10 |
+
|
| 11 |
+
class Tensor;
|
| 12 |
+
|
| 13 |
+
/**
|
| 14 |
+
* @brief Blob is a general container that hosts a typed pointer.
|
| 15 |
+
*
|
| 16 |
+
* A Blob hosts a pointer as well as its type, and takes charge of deleting it
|
| 17 |
+
* properly when the blob is deallocated or re-allocated with a new type. A blob
|
| 18 |
+
* could contain anything, although the most common case is to contain a Tensor.
|
| 19 |
+
*/
|
| 20 |
+
class TORCH_API Blob final : public c10::intrusive_ptr_target {
|
| 21 |
+
public:
|
| 22 |
+
/**
|
| 23 |
+
* Initializes an empty Blob.
|
| 24 |
+
*/
|
| 25 |
+
Blob() noexcept : meta_() {}
|
| 26 |
+
~Blob() override {
|
| 27 |
+
Reset();
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
Blob(Blob&& other) noexcept : Blob() {
|
| 31 |
+
swap(other);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
Blob& operator=(Blob&& other) noexcept {
|
| 35 |
+
Blob(std::move(other)).swap(*this);
|
| 36 |
+
return *this;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
/**
|
| 40 |
+
* Checks if the content stored in the blob is of type T.
|
| 41 |
+
*/
|
| 42 |
+
template <class T>
|
| 43 |
+
bool IsType() const noexcept {
|
| 44 |
+
return meta_.Match<T>();
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
/**
|
| 48 |
+
* Returns the meta info of the blob.
|
| 49 |
+
*/
|
| 50 |
+
const TypeMeta meta() const noexcept {
|
| 51 |
+
return meta_;
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* Returns a printable typename of the blob.
|
| 56 |
+
*/
|
| 57 |
+
c10::string_view TypeName() const noexcept {
|
| 58 |
+
return meta_.name();
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* @brief Gets the const reference of the stored object. The code checks if
|
| 63 |
+
* the stored object is of the desired type.
|
| 64 |
+
*/
|
| 65 |
+
// TODO(jerryzh): add a Get(c10::DeviceType) function?
|
| 66 |
+
template <class T>
|
| 67 |
+
const T& Get() const {
|
| 68 |
+
TORCH_INTERNAL_ASSERT(
|
| 69 |
+
IsType<T>(),
|
| 70 |
+
"wrong type for the Blob instance. Blob contains ",
|
| 71 |
+
meta_.name(),
|
| 72 |
+
" while caller expects ",
|
| 73 |
+
TypeMeta::TypeName<T>());
|
| 74 |
+
// TODO: after we add Get<Tensor>(c10::DeviceType)
|
| 75 |
+
// and changed all the callsites, we can add
|
| 76 |
+
// a static assert here to enforce T != Tensor
|
| 77 |
+
return *static_cast<const T*>(pointer_);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
const void* GetRaw() const noexcept {
|
| 81 |
+
return pointer_;
|
| 82 |
+
}
|
| 83 |
+
void* GetRaw() noexcept {
|
| 84 |
+
return pointer_;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/**
|
| 88 |
+
* @brief Gets a mutable pointer to the stored object.
|
| 89 |
+
*
|
| 90 |
+
* If the current object is not of the right type, a new object is created
|
| 91 |
+
* and the old object is freed. Note that type T should have a default
|
| 92 |
+
* constructor. Otherwise, create the object yourself first, and use
|
| 93 |
+
* Reset().
|
| 94 |
+
*/
|
| 95 |
+
template <class T>
|
| 96 |
+
T* GetMutable() {
|
| 97 |
+
static_assert(
|
| 98 |
+
std::is_default_constructible<T>::value,
|
| 99 |
+
"GetMutable can't be called with non-default-constructible types. "
|
| 100 |
+
"Try using specialized methods");
|
| 101 |
+
if (IsType<T>()) {
|
| 102 |
+
return static_cast<T*>(pointer_);
|
| 103 |
+
} else {
|
| 104 |
+
// TODO Re-enable logging
|
| 105 |
+
// VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>();
|
| 106 |
+
return Reset<T>(new T());
|
| 107 |
+
}
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
template <class T>
|
| 111 |
+
T* GetMutableOrNull() {
|
| 112 |
+
if (IsType<T>()) {
|
| 113 |
+
return static_cast<T*>(pointer_);
|
| 114 |
+
} else {
|
| 115 |
+
return nullptr;
|
| 116 |
+
}
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
/**
|
| 120 |
+
* Sets the underlying object to the allocated one. The Blob then takes over
|
| 121 |
+
* the ownership of the passed in pointer. If there is already an object in
|
| 122 |
+
* the Blob, the old object is freed.
|
| 123 |
+
*
|
| 124 |
+
* This is used when the underlying class T does not have a default ctor, or
|
| 125 |
+
* complex initializations needs to be done outside the blob.
|
| 126 |
+
*/
|
| 127 |
+
template <class T>
|
| 128 |
+
T* Reset(T* allocated) {
|
| 129 |
+
free_();
|
| 130 |
+
meta_ = TypeMeta::Make<T>();
|
| 131 |
+
pointer_ = static_cast<void*>(allocated);
|
| 132 |
+
has_ownership_ = true;
|
| 133 |
+
return allocated;
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
/**
|
| 137 |
+
* Sets the underlying object to the allocated one, but does not take over
|
| 138 |
+
* the ownership of the passed in pointer. If there is already an object in
|
| 139 |
+
* the Blob, the old object is freed.
|
| 140 |
+
*
|
| 141 |
+
* Unlike Reset, this does not take over the ownership of the pointer and the
|
| 142 |
+
* caller is responsible for making sure that the lifetime of the allocated
|
| 143 |
+
* blob outlasts the lifetime of any access to this blob, until another Reset
|
| 144 |
+
* call is made or the blob is destructed.
|
| 145 |
+
*/
|
| 146 |
+
template <class T>
|
| 147 |
+
std::remove_const_t<T>* ShareExternal(
|
| 148 |
+
std::remove_const_t<T>* allocated) {
|
| 149 |
+
return static_cast<T*>(ShareExternal(
|
| 150 |
+
static_cast<void*>(allocated),
|
| 151 |
+
TypeMeta::Make<std::remove_const_t<T>>()));
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
void* ShareExternal(void* allocated, const TypeMeta meta) {
|
| 155 |
+
free_();
|
| 156 |
+
meta_ = meta;
|
| 157 |
+
pointer_ = allocated;
|
| 158 |
+
has_ownership_ = false;
|
| 159 |
+
return allocated;
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
/**
|
| 163 |
+
* Resets the Blob to an empty one.
|
| 164 |
+
*/
|
| 165 |
+
void Reset() {
|
| 166 |
+
free_();
|
| 167 |
+
pointer_ = nullptr;
|
| 168 |
+
meta_ = TypeMeta();
|
| 169 |
+
has_ownership_ = false;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
/**
|
| 173 |
+
* @brief Swaps the underlying storage of two blobs.
|
| 174 |
+
*/
|
| 175 |
+
void swap(Blob& rhs) noexcept {
|
| 176 |
+
using std::swap;
|
| 177 |
+
swap(meta_, rhs.meta_);
|
| 178 |
+
swap(pointer_, rhs.pointer_);
|
| 179 |
+
swap(has_ownership_, rhs.has_ownership_);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
private:
|
| 183 |
+
void free_() {
|
| 184 |
+
if (has_ownership_ && pointer_ != nullptr) {
|
| 185 |
+
(*meta_.deleteFn())(pointer_);
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
TypeMeta meta_;
|
| 190 |
+
void* pointer_{nullptr};
|
| 191 |
+
bool has_ownership_{false};
|
| 192 |
+
|
| 193 |
+
C10_DISABLE_COPY_AND_ASSIGN(Blob);
|
| 194 |
+
};
|
| 195 |
+
|
| 196 |
+
inline void swap(Blob& lhs, Blob& rhs) noexcept {
|
| 197 |
+
lhs.swap(rhs);
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
inline std::ostream& operator<<(std::ostream& out, const Blob& v) {
|
| 201 |
+
return out << "Blob[" << v.TypeName() << "]";
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
} // namespace caffe2
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace c10 {
|
| 4 |
+
|
| 5 |
+
inline BoxedKernel::BoxedKernel()
|
| 6 |
+
: functor_()
|
| 7 |
+
, boxed_kernel_func_(nullptr)
|
| 8 |
+
{}
|
| 9 |
+
|
| 10 |
+
inline BoxedKernel::BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func)
|
| 11 |
+
: functor_(std::move(functor))
|
| 12 |
+
, boxed_kernel_func_(boxed_kernel_func)
|
| 13 |
+
{}
|
| 14 |
+
|
| 15 |
+
template<BoxedKernel::BoxedKernelFunction* func>
|
| 16 |
+
inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) {
|
| 17 |
+
// Note that we're dropping the DispatchKeySet argument.
|
| 18 |
+
// See Note [Plumbing Keys Through The Dispatcher 2] for details.
|
| 19 |
+
func(opHandle, stack);
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
|
| 23 |
+
inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) {
|
| 24 |
+
// See Note [Plumbing Keys Through The Dispatcher 2] for details.
|
| 25 |
+
func(opHandle, ks, stack);
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
inline bool BoxedKernel::isValid() const {
|
| 29 |
+
return boxed_kernel_func_ != nullptr;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
inline bool BoxedKernel::isFallthrough() const {
|
| 33 |
+
return boxed_kernel_func_ == &fallthrough_kernel;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
|
| 37 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 38 |
+
boxed_kernel_func_ != nullptr,
|
| 39 |
+
"Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel."
|
| 40 |
+
);
|
| 41 |
+
(*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
template<BoxedKernel::BoxedKernelFunction* func>
|
| 45 |
+
inline BoxedKernel BoxedKernel::makeFromFunction() {
|
| 46 |
+
return BoxedKernel(
|
| 47 |
+
nullptr, // no functor_ object
|
| 48 |
+
&make_boxed_function<func>
|
| 49 |
+
);
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
|
| 53 |
+
inline BoxedKernel BoxedKernel::makeFromFunction() {
|
| 54 |
+
return BoxedKernel(
|
| 55 |
+
nullptr, // no functor_ object
|
| 56 |
+
&make_boxed_function<func>
|
| 57 |
+
);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
inline BoxedKernel BoxedKernel::makeFallthrough() {
|
| 61 |
+
return BoxedKernel(
|
| 62 |
+
nullptr, // no functor_ object
|
| 63 |
+
&fallthrough_kernel
|
| 64 |
+
);
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() {
|
| 68 |
+
return BoxedKernel(
|
| 69 |
+
nullptr, // no functor_ object
|
| 70 |
+
&ambiguous_autogradother_kernel
|
| 71 |
+
);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
|
| 75 |
+
return BoxedKernel(
|
| 76 |
+
nullptr, // no functor_ object
|
| 77 |
+
&named_not_supported_kernel
|
| 78 |
+
);
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template<class KernelFunctor>
|
| 82 |
+
inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
|
| 83 |
+
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
|
| 84 |
+
return BoxedKernel(
|
| 85 |
+
std::move(kernelFunctor),
|
| 86 |
+
[](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {
|
| 87 |
+
(*static_cast<KernelFunctor*>(kernel))(op, ks, stack);
|
| 88 |
+
}
|
| 89 |
+
);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
inline OperatorKernel* BoxedKernel::getFunctor() const {
|
| 93 |
+
return functor_.get();
|
| 94 |
+
}
|
| 95 |
+
inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const {
|
| 96 |
+
return boxed_kernel_func_;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/intrusive_ptr.h>
|
| 3 |
+
|
| 4 |
+
namespace c10 {
|
| 5 |
+
|
| 6 |
+
/**
|
| 7 |
+
* Inherit from OperatorKernel to implement a c10 kernel.
|
| 8 |
+
*
|
| 9 |
+
* Example:
|
| 10 |
+
* > namespace {
|
| 11 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 12 |
+
* > public:
|
| 13 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 14 |
+
* > };
|
| 15 |
+
* > }
|
| 16 |
+
*
|
| 17 |
+
* The kernel class is allowed to have members but these are equivalent
|
| 18 |
+
* to global variables. The kernel implementation is responsible for
|
| 19 |
+
* preventing race conditions on them.
|
| 20 |
+
*
|
| 21 |
+
* See below for how to register this kernel with PyTorch.
|
| 22 |
+
*/
|
| 23 |
+
struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target {
|
| 24 |
+
~OperatorKernel() override = default;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <memory>
|
| 5 |
+
#include <type_traits>
|
| 6 |
+
|
| 7 |
+
#include <ATen/core/jit_type_base.h>
|
| 8 |
+
#include <optional>
|
| 9 |
+
|
| 10 |
+
namespace c10 {
|
| 11 |
+
|
| 12 |
+
using DynamicTypeBits = std::uint32_t;
|
| 13 |
+
#define DYNAMIC_TYPE_BIT(x) (1u << x)
|
| 14 |
+
|
| 15 |
+
constexpr DynamicTypeBits kDynamicCovariantTypeBit = DYNAMIC_TYPE_BIT(31);
|
| 16 |
+
constexpr DynamicTypeBits kDynamicAnyTypeBit = DYNAMIC_TYPE_BIT(30);
|
| 17 |
+
|
| 18 |
+
constexpr DynamicTypeBits kDynamicNoneTypeBit = DYNAMIC_TYPE_BIT(1);
|
| 19 |
+
constexpr DynamicTypeBits kDynamicIntTypeBit = DYNAMIC_TYPE_BIT(3);
|
| 20 |
+
constexpr DynamicTypeBits kDynamicFloatTypeBit = DYNAMIC_TYPE_BIT(4);
|
| 21 |
+
constexpr DynamicTypeBits kDynamicComplexTypeBit = DYNAMIC_TYPE_BIT(5);
|
| 22 |
+
constexpr DynamicTypeBits kDynamicListTypeBit = DYNAMIC_TYPE_BIT(7);
|
| 23 |
+
constexpr DynamicTypeBits kDynamicTupleTypeBit = DYNAMIC_TYPE_BIT(8);
|
| 24 |
+
constexpr DynamicTypeBits kDynamicClassTypeBit = DYNAMIC_TYPE_BIT(10);
|
| 25 |
+
|
| 26 |
+
#define FORALL_DYNAMIC_TYPES(_) \
|
| 27 |
+
_(Tensor, DYNAMIC_TYPE_BIT(0), 1) \
|
| 28 |
+
_(None, kDynamicNoneTypeBit, 1) \
|
| 29 |
+
_(Bool, DYNAMIC_TYPE_BIT(2), 1) \
|
| 30 |
+
_(Int, kDynamicIntTypeBit, 1) \
|
| 31 |
+
_(Float, kDynamicFloatTypeBit, 1) \
|
| 32 |
+
_(Complex, kDynamicComplexTypeBit, 1) \
|
| 33 |
+
_(Number, \
|
| 34 |
+
(kDynamicIntTypeBit | kDynamicFloatTypeBit | kDynamicComplexTypeBit), \
|
| 35 |
+
1) \
|
| 36 |
+
_(String, DYNAMIC_TYPE_BIT(6), 1) \
|
| 37 |
+
_(List, kDynamicListTypeBit, 0) \
|
| 38 |
+
_(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0) \
|
| 39 |
+
_(Dict, DYNAMIC_TYPE_BIT(9), 0) \
|
| 40 |
+
_(Class, kDynamicClassTypeBit, 0) \
|
| 41 |
+
_(Optional, \
|
| 42 |
+
(DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), \
|
| 43 |
+
0) \
|
| 44 |
+
_(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1) \
|
| 45 |
+
_(AnyTuple, \
|
| 46 |
+
(kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit), \
|
| 47 |
+
1) \
|
| 48 |
+
_(DeviceObj, DYNAMIC_TYPE_BIT(12), 1) \
|
| 49 |
+
_(StreamObj, DYNAMIC_TYPE_BIT(13), 1) \
|
| 50 |
+
_(Capsule, DYNAMIC_TYPE_BIT(14), 1) \
|
| 51 |
+
_(Generator, DYNAMIC_TYPE_BIT(15), 1) \
|
| 52 |
+
_(Storage, DYNAMIC_TYPE_BIT(16), 1) \
|
| 53 |
+
_(Var, DYNAMIC_TYPE_BIT(17), 0) \
|
| 54 |
+
_(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1) \
|
| 55 |
+
_(QScheme, DYNAMIC_TYPE_BIT(18), 1) \
|
| 56 |
+
_(Quantizer, DYNAMIC_TYPE_BIT(19), 1) \
|
| 57 |
+
_(AnyEnum, DYNAMIC_TYPE_BIT(20), 1) \
|
| 58 |
+
_(RRef, DYNAMIC_TYPE_BIT(21), 0) \
|
| 59 |
+
_(Future, DYNAMIC_TYPE_BIT(22), 0) \
|
| 60 |
+
_(Await, DYNAMIC_TYPE_BIT(23), 0) \
|
| 61 |
+
_(Any, 0xffffffff, 1)
|
| 62 |
+
|
| 63 |
+
#define FORALL_DYNAMIC_TYPES_FAKE(_) \
|
| 64 |
+
_(ScalarType, kDynamicIntTypeBit, 1) \
|
| 65 |
+
_(Layout, kDynamicIntTypeBit, 1) \
|
| 66 |
+
_(SymInt, kDynamicIntTypeBit, 1) \
|
| 67 |
+
_(MemoryFormat, kDynamicIntTypeBit, 1)
|
| 68 |
+
|
| 69 |
+
#define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type;
|
| 70 |
+
FORALL_DYNAMIC_TYPES(FORWARD_DECL_TYPE)
|
| 71 |
+
FORALL_DYNAMIC_TYPES_FAKE(FORWARD_DECL_TYPE)
|
| 72 |
+
#undef FORWARD_DECL_TYPE
|
| 73 |
+
|
| 74 |
+
class DynamicType;
|
| 75 |
+
using DynamicTypePtr = std::shared_ptr<DynamicType>;
|
| 76 |
+
|
| 77 |
+
/**
|
| 78 |
+
* DynamicType is designed as a low dependency type system for TorchScript. The
|
| 79 |
+
* existing JIT types are used for both compilation and runtime, which makes
|
| 80 |
+
* sense for server contexts because we often compile and run the model in
|
| 81 |
+
* the same process, however this doesn't hold for mobile devices where we
|
| 82 |
+
* always compiles a model ahead of time, therefore there will be dependencies
|
| 83 |
+
* which are not needed, but built with mobile runtime causing binary size
|
| 84 |
+
* bloat, by design. Every basic type like Int, Bool or String will bring their
|
| 85 |
+
* vtable, typeinfo, constructor, destructor and even more data from their
|
| 86 |
+
* specializations for STL types to the binary causing a long tail bloat.
|
| 87 |
+
*
|
| 88 |
+
* The core problem is about the complexity to implement and maintain a single
|
| 89 |
+
* type system for both analysis and execution purposes. Although they should
|
| 90 |
+
* have the exactly same semantics, in practice implement a unified abstraction
|
| 91 |
+
* adds conceptual and representational overhead for both sides of the world.
|
| 92 |
+
*
|
| 93 |
+
* To address the issues, DynamicType implements a minimal subset of JIT types
|
| 94 |
+
* and uses a generic algorithm to test all subtyping relations. To achieve
|
| 95 |
+
* this, we assign each dynamic type a single integer tag to represent its
|
| 96 |
+
* semantics. More specifically, a dynamic type is defined as a set of "control
|
| 97 |
+
* bits" and "data bits", where control bits describe the special behavior when
|
| 98 |
+
* testing a type and data bits map to identity of each nominal type. We use bit
|
| 99 |
+
* operations to perform all the tests.
|
| 100 |
+
*
|
| 101 |
+
* For example, a "covariant bit" is a control bit used to describe if a type
|
| 102 |
+
* is covariant, right now the most used one is tuple type, and in addition to
|
| 103 |
+
* the control bit, tuple type's data bit is the 8th bit from the LSB. Control
|
| 104 |
+
* bits start from MSB and data bits start from LSB.
|
| 105 |
+
*
|
| 106 |
+
* If two types are equal, then they are subtype of each other, also if the bits
|
| 107 |
+
* from one type tag is subset of the other tag, it automatically becomes a
|
| 108 |
+
* subtype of the other. This simplifies the subtyping logic a lot, and over the
|
| 109 |
+
* long term it is possible to adopt this scheme on the server side as well.
|
| 110 |
+
* Special cases can be added but they generally should not take too much code
|
| 111 |
+
* size.
|
| 112 |
+
*
|
| 113 |
+
* DynamicType may or may not inherit from c10::Type because it's not the core
|
| 114 |
+
* requirement of DynamicType to interface with existing JIT types, but we might
|
| 115 |
+
* want to inherit from c10::Type to reduce the migration cost.
|
| 116 |
+
*/
|
| 117 |
+
class DynamicType : public SharedType {
|
| 118 |
+
using ClassTypePtr = std::shared_ptr<const c10::ClassType>;
|
| 119 |
+
|
| 120 |
+
/**
|
| 121 |
+
* A implementation detail to support NamedTuple.
|
| 122 |
+
*/
|
| 123 |
+
struct LabeledDynamicType {
|
| 124 |
+
std::optional<std::string> label;
|
| 125 |
+
DynamicTypePtr ty;
|
| 126 |
+
explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {}
|
| 127 |
+
|
| 128 |
+
bool equals(const LabeledDynamicType& other) const;
|
| 129 |
+
bool isSubtypeOf(const LabeledDynamicType& other) const;
|
| 130 |
+
};
|
| 131 |
+
|
| 132 |
+
public:
|
| 133 |
+
// TODO Change Ptr to DynamicTypePtr when all migrations are done.
|
| 134 |
+
using Ptr = TypePtr;
|
| 135 |
+
using ElementType = DynamicType;
|
| 136 |
+
~DynamicType() override;
|
| 137 |
+
|
| 138 |
+
struct Arguments {
|
| 139 |
+
Arguments() = default;
|
| 140 |
+
Arguments(c10::ArrayRef<TypePtr>);
|
| 141 |
+
Arguments(const std::vector<c10::string_view>&, c10::ArrayRef<TypePtr>);
|
| 142 |
+
std::vector<LabeledDynamicType> elems;
|
| 143 |
+
};
|
| 144 |
+
|
| 145 |
+
enum class Tag : DynamicTypeBits {
|
| 146 |
+
#define DYNAMIC_TYPE_ITEM(NAME, VAL, _) NAME = VAL,
|
| 147 |
+
FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_ITEM)
|
| 148 |
+
FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_ITEM)
|
| 149 |
+
#undef DYNAMIC_TYPE_ITEM
|
| 150 |
+
};
|
| 151 |
+
|
| 152 |
+
bool equals(const Type& rhs) const override;
|
| 153 |
+
bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
|
| 154 |
+
std::string str() const override;
|
| 155 |
+
static const TypeKind Kind = TypeKind::DynamicType;
|
| 156 |
+
static TORCH_API DynamicTypePtr create(Type& ty);
|
| 157 |
+
|
| 158 |
+
explicit DynamicType(Tag, Arguments);
|
| 159 |
+
explicit DynamicType(Tag, c10::string_view, Arguments);
|
| 160 |
+
|
| 161 |
+
TypePtr containedType(size_t) const override;
|
| 162 |
+
size_t containedTypeSize() const override;
|
| 163 |
+
Tag tag() const {
|
| 164 |
+
return tag_;
|
| 165 |
+
}
|
| 166 |
+
const std::optional<std::string>& name() const {
|
| 167 |
+
return name_;
|
| 168 |
+
}
|
| 169 |
+
const Arguments& arguments() const {
|
| 170 |
+
return arguments_;
|
| 171 |
+
}
|
| 172 |
+
TORCH_API TypeKind dynamicKind() const;
|
| 173 |
+
|
| 174 |
+
// Should be used only on the server side to restore static type information.
|
| 175 |
+
#ifndef C10_MOBILE
|
| 176 |
+
TORCH_API
|
| 177 |
+
#endif
|
| 178 |
+
TypePtr fallback() const;
|
| 179 |
+
|
| 180 |
+
private:
|
| 181 |
+
bool symmetric() const override {
|
| 182 |
+
return false;
|
| 183 |
+
}
|
| 184 |
+
friend struct Type;
|
| 185 |
+
static std::shared_ptr<const DynamicType> create(const Type& ty);
|
| 186 |
+
DynamicType(const Type& other);
|
| 187 |
+
bool equals(const DynamicType& other) const;
|
| 188 |
+
|
| 189 |
+
template <typename F>
|
| 190 |
+
bool compareArguments(const DynamicType& other, const F& f) const {
|
| 191 |
+
if (arguments_.elems.size() != other.arguments_.elems.size()) {
|
| 192 |
+
return false;
|
| 193 |
+
}
|
| 194 |
+
for (size_t i = 0; i < arguments_.elems.size(); i++) {
|
| 195 |
+
if (!f(arguments_.elems[i], other.arguments_.elems[i])) {
|
| 196 |
+
return false;
|
| 197 |
+
}
|
| 198 |
+
}
|
| 199 |
+
return true;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
Tag tag_;
|
| 203 |
+
std::optional<std::string> name_;
|
| 204 |
+
union {
|
| 205 |
+
Arguments arguments_;
|
| 206 |
+
ClassTypePtr class_;
|
| 207 |
+
};
|
| 208 |
+
};
|
| 209 |
+
|
| 210 |
+
template <typename T>
|
| 211 |
+
struct DynamicTypeTrait {
|
| 212 |
+
C10_NOINLINE static auto tagValue() {
|
| 213 |
+
TORCH_CHECK(false);
|
| 214 |
+
return DynamicType::Tag::Any;
|
| 215 |
+
}
|
| 216 |
+
};
|
| 217 |
+
|
| 218 |
+
namespace detail {
|
| 219 |
+
C10_NOINLINE DynamicTypePtr makeBaseType(DynamicType::Tag tag);
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
#define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE) \
|
| 223 |
+
template <> \
|
| 224 |
+
struct TORCH_API DynamicTypeTrait<NAME##Type> { \
|
| 225 |
+
C10_ERASE static auto tagValue() { \
|
| 226 |
+
return DynamicType::Tag::NAME; \
|
| 227 |
+
} \
|
| 228 |
+
static constexpr bool isBaseType = IS_BASE_TYPE; \
|
| 229 |
+
template <typename T = const DynamicTypePtr&> \
|
| 230 |
+
static std::enable_if_t<isBaseType, T> getBaseType() { \
|
| 231 |
+
static auto type = detail::makeBaseType(tagValue()); \
|
| 232 |
+
return type; \
|
| 233 |
+
} \
|
| 234 |
+
}; // namespace c10
|
| 235 |
+
FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_TAG_VALUE)
|
| 236 |
+
FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_TAG_VALUE)
|
| 237 |
+
#undef DYNAMIC_TYPE_TAG_VALUE
|
| 238 |
+
|
| 239 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/function.h
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/function_schema.h>
|
| 4 |
+
#include <ATen/core/ivalue.h>
|
| 5 |
+
#include <ATen/core/qualified_name.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
#include <c10/util/FunctionRef.h>
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
struct FunctionSchema;
|
| 11 |
+
};
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
TORCH_API void launch(std::function<void()> func);
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
namespace torch::jit {
|
| 18 |
+
|
| 19 |
+
struct Graph;
|
| 20 |
+
struct Code;
|
| 21 |
+
|
| 22 |
+
namespace mobile {
|
| 23 |
+
struct Code;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
using Stack = std::vector<at::IValue>;
|
| 27 |
+
using Kwargs = std::unordered_map<std::string, at::IValue>;
|
| 28 |
+
struct RecursiveMethodCallError : public std::exception {};
|
| 29 |
+
using TaskLauncher = std::function<void(std::function<void()>)>;
|
| 30 |
+
|
| 31 |
+
TORCH_API void preoptimizeGraph(
|
| 32 |
+
std::shared_ptr<Graph>& graph,
|
| 33 |
+
bool disable_autocast = false);
|
| 34 |
+
|
| 35 |
+
// A Function is a pure Graph with no implicit `self` object bound.
|
| 36 |
+
// It contains schema information and the executor that manages the
|
| 37 |
+
// execution of the function. Method is a wrapper around an
|
| 38 |
+
// underlying Function that also provides a `self` object.
|
| 39 |
+
struct TORCH_API Function {
|
| 40 |
+
Function() = default;
|
| 41 |
+
Function(const Function&) = default;
|
| 42 |
+
Function& operator=(const Function&) = default;
|
| 43 |
+
Function(Function&&) noexcept = default;
|
| 44 |
+
Function& operator=(Function&&) noexcept = default;
|
| 45 |
+
virtual c10::string_view doc_string() const {
|
| 46 |
+
static constexpr c10::string_view no_doc_string = "";
|
| 47 |
+
return no_doc_string;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
virtual bool isGraphFunction() const {
|
| 51 |
+
return false;
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
virtual void run(Stack& stack) = 0;
|
| 55 |
+
|
| 56 |
+
virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync(
|
| 57 |
+
Stack& /*stack*/,
|
| 58 |
+
// NOLINTNEXTLINE(performance-unnecessary-value-param)
|
| 59 |
+
C10_UNUSED TaskLauncher taskLauncher = at::launch) {
|
| 60 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
|
| 61 |
+
return {};
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
at::IValue operator()(Stack stack, const Kwargs& kwargs = Kwargs()) {
|
| 65 |
+
getSchema().checkAndNormalizeInputs(stack, kwargs);
|
| 66 |
+
run(stack);
|
| 67 |
+
return stack.front();
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
virtual const c10::QualifiedName& qualname() const = 0;
|
| 71 |
+
|
| 72 |
+
const std::string& name() const {
|
| 73 |
+
return qualname().name();
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// if this isn't yet defined, run its method_creator function
|
| 77 |
+
virtual void ensure_defined() = 0;
|
| 78 |
+
|
| 79 |
+
virtual const c10::FunctionSchema& getSchema() const = 0;
|
| 80 |
+
|
| 81 |
+
virtual size_t num_inputs() const = 0;
|
| 82 |
+
|
| 83 |
+
virtual Function& setSchema(c10::FunctionSchema schema) = 0;
|
| 84 |
+
|
| 85 |
+
// call() defines how different interpreter implementations interacts with
|
| 86 |
+
// Function objects. Basically interpreters need to provide a callback to
|
| 87 |
+
// communicate to Functions what to do if provided a Code object.
|
| 88 |
+
// Alternatively we could design the signature to return an optional Code
|
| 89 |
+
// object, but that requires special handling the null case in interpreter
|
| 90 |
+
// and the fallback behavior is not well defined by interpreter but rather
|
| 91 |
+
// Function themselves, so a callback approach is more reasonable than
|
| 92 |
+
// returning values.
|
| 93 |
+
// If call() returns true, then callback completes successfully, otherwise
|
| 94 |
+
// call() returns false.
|
| 95 |
+
|
| 96 |
+
// Overload for server interpreter, a bailout size is needed for graph
|
| 97 |
+
// executor.
|
| 98 |
+
virtual bool call(
|
| 99 |
+
Stack&,
|
| 100 |
+
std::optional<size_t>,
|
| 101 |
+
c10::function_ref<void(const Code&)>) {
|
| 102 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
|
| 103 |
+
return false;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
// Overload for mobile interpreter.
|
| 107 |
+
virtual bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) {
|
| 108 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
|
| 109 |
+
return false;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
virtual ~Function() = default;
|
| 113 |
+
};
|
| 114 |
+
} // namespace torch::jit
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h
ADDED
|
@@ -0,0 +1,687 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/StringUtil.h>
|
| 4 |
+
#include <c10/util/string_view.h>
|
| 5 |
+
#include <c10/util/irange.h>
|
| 6 |
+
#include <ATen/core/jit_type.h>
|
| 7 |
+
#include <ATen/core/symbol.h>
|
| 8 |
+
#include <ATen/core/ivalue.h>
|
| 9 |
+
#include <ATen/core/alias_info.h>
|
| 10 |
+
#include <ATen/core/operator_name.h>
|
| 11 |
+
#include <ATen/core/dispatch/OperatorOptions.h>
|
| 12 |
+
#include <unordered_map>
|
| 13 |
+
#include <utility>
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
|
| 17 |
+
// schema as used in the compiler for resolving function calls and reporting
|
| 18 |
+
// errors. These objects should be constructed from C10 schema once those
|
| 19 |
+
// are available.
|
| 20 |
+
|
| 21 |
+
struct Argument;
|
| 22 |
+
struct FunctionSchema;
|
| 23 |
+
|
| 24 |
+
using AliasTypeSet = std::vector<TypePtr>;
|
| 25 |
+
|
| 26 |
+
bool operator==(const Argument& lhs, const Argument& rhs);
|
| 27 |
+
|
| 28 |
+
struct TORCH_API Argument {
|
| 29 |
+
Argument(
|
| 30 |
+
std::string name = "",
|
| 31 |
+
const TypePtr& type = nullptr,
|
| 32 |
+
std::optional<int32_t> N = std::nullopt,
|
| 33 |
+
std::optional<IValue> default_value = std::nullopt,
|
| 34 |
+
bool kwarg_only = false,
|
| 35 |
+
std::optional<AliasInfo> alias_info = std::nullopt)
|
| 36 |
+
: Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
|
| 37 |
+
|
| 38 |
+
Argument(
|
| 39 |
+
std::string name,
|
| 40 |
+
TypePtr fake_type,
|
| 41 |
+
TypePtr real_type,
|
| 42 |
+
std::optional<int32_t> N = std::nullopt,
|
| 43 |
+
std::optional<IValue> default_value = std::nullopt,
|
| 44 |
+
bool kwarg_only = false,
|
| 45 |
+
std::optional<AliasInfo> alias_info = std::nullopt)
|
| 46 |
+
: name_(std::move(name)),
|
| 47 |
+
type_(fake_type ? std::move(fake_type) : TensorType::get()),
|
| 48 |
+
real_type_(real_type ? std::move(real_type) : type_),
|
| 49 |
+
N_(N),
|
| 50 |
+
default_value_(std::move(default_value)),
|
| 51 |
+
alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
|
| 52 |
+
kwarg_only_(kwarg_only) {
|
| 53 |
+
// this is an softly-enforced invariant for out arguments.
|
| 54 |
+
bool is_alias = alias_info_ != nullptr && alias_info_->isWrite();
|
| 55 |
+
is_out_ = kwarg_only_ && is_alias;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
Argument(Argument&& rhs) noexcept = default;
|
| 59 |
+
|
| 60 |
+
Argument(const Argument& rhs)
|
| 61 |
+
: name_(rhs.name_),
|
| 62 |
+
type_(rhs.type_),
|
| 63 |
+
real_type_(rhs.real_type_),
|
| 64 |
+
N_(rhs.N_),
|
| 65 |
+
default_value_(rhs.default_value_),
|
| 66 |
+
alias_info_(rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr),
|
| 67 |
+
kwarg_only_(rhs.kwarg_only_),
|
| 68 |
+
is_out_(rhs.is_out_) {}
|
| 69 |
+
|
| 70 |
+
Argument& operator=(Argument&& rhs) = default;
|
| 71 |
+
|
| 72 |
+
Argument& operator=(const Argument& rhs) {
|
| 73 |
+
if (this != &rhs) {
|
| 74 |
+
name_ = rhs.name_;
|
| 75 |
+
type_ = rhs.type_;
|
| 76 |
+
real_type_ = rhs.real_type_;
|
| 77 |
+
N_ = rhs.N_;
|
| 78 |
+
default_value_ = rhs.default_value_;
|
| 79 |
+
alias_info_ = rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr;
|
| 80 |
+
kwarg_only_ = rhs.kwarg_only_;
|
| 81 |
+
is_out_ = rhs.is_out_;
|
| 82 |
+
}
|
| 83 |
+
return *this;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
const std::string& name() const {
|
| 87 |
+
return name_;
|
| 88 |
+
}
|
| 89 |
+
const TypePtr& type() const {
|
| 90 |
+
return type_;
|
| 91 |
+
}
|
| 92 |
+
// if type() is non-null, this is guaranteed to be non-null (if no real
|
| 93 |
+
// type was provided, this takes on type()'s value)
|
| 94 |
+
const TypePtr& real_type() const {
|
| 95 |
+
return real_type_;
|
| 96 |
+
}
|
| 97 |
+
std::optional<int32_t> N() const {
|
| 98 |
+
return N_;
|
| 99 |
+
}
|
| 100 |
+
const std::optional<IValue>& default_value() const {
|
| 101 |
+
return default_value_;
|
| 102 |
+
}
|
| 103 |
+
bool kwarg_only() const {
|
| 104 |
+
return kwarg_only_;
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
bool is_out() const {
|
| 108 |
+
return is_out_;
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
C10_NODISCARD const AliasInfo* alias_info() const {
|
| 112 |
+
return alias_info_.get();
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
bool is_inferred_type() const {
|
| 116 |
+
bool is_inferred_type = false;
|
| 117 |
+
TORCH_INTERNAL_ASSERT(type_);
|
| 118 |
+
if (auto pt = type_->cast<TensorType>()) {
|
| 119 |
+
if (pt->isInferredType()) {
|
| 120 |
+
is_inferred_type = true;
|
| 121 |
+
}
|
| 122 |
+
}
|
| 123 |
+
return is_inferred_type;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
std::string formatTypeMismatchMsg(const std::string& actual_type) const {
|
| 127 |
+
std::string inferred_type_hint;
|
| 128 |
+
if (is_inferred_type()) {
|
| 129 |
+
inferred_type_hint = c10::str(
|
| 130 |
+
"Inferred '",
|
| 131 |
+
name(),
|
| 132 |
+
"' to be of type 'Tensor' ",
|
| 133 |
+
"because it was not annotated with an explicit type.\n");
|
| 134 |
+
}
|
| 135 |
+
return c10::str(
|
| 136 |
+
"Expected a value of type '",
|
| 137 |
+
type()->repr_str(),
|
| 138 |
+
"' for argument '",
|
| 139 |
+
name(),
|
| 140 |
+
"' but instead found type '",
|
| 141 |
+
actual_type,
|
| 142 |
+
"'.\n",
|
| 143 |
+
inferred_type_hint);
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
Argument cloneWithType(const TypePtr& new_type) const {
|
| 147 |
+
return Argument(
|
| 148 |
+
name_,
|
| 149 |
+
new_type,
|
| 150 |
+
N_,
|
| 151 |
+
default_value_,
|
| 152 |
+
kwarg_only_,
|
| 153 |
+
alias_info_ ? std::optional<AliasInfo>(*alias_info_) : std::nullopt);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
// this function checks whether this Argument is backward compatible with
|
| 157 |
+
// the old one. we consider the following cases are backward compatible:
|
| 158 |
+
// 1) two arguments are equal
|
| 159 |
+
// 2) this arg's type should be subtype of old
|
| 160 |
+
// 3) this arg must provide the same default value if old arg has one,
|
| 161 |
+
bool isBackwardCompatibleWith(
|
| 162 |
+
const Argument& old,
|
| 163 |
+
std::ostream* why_not=nullptr) const;
|
| 164 |
+
|
| 165 |
+
// this function checks whether this Argument is forward compatible with
|
| 166 |
+
// the old one. we consider the following cases are forward compatible:
|
| 167 |
+
// 1) two arguments are equal
|
| 168 |
+
// 2) this arg's type should be subtype of old
|
| 169 |
+
// 3) this arg must provide the same default value if old arg has one,
|
| 170 |
+
bool isForwardCompatibleWith(
|
| 171 |
+
const Argument& old,
|
| 172 |
+
std::ostream* why_not = nullptr) const;
|
| 173 |
+
|
| 174 |
+
private:
|
| 175 |
+
std::string name_;
|
| 176 |
+
TypePtr type_;
|
| 177 |
+
TypePtr real_type_; // this is ScalarType, not int, e.g.
|
| 178 |
+
// for list types, an optional statically known length for the list
|
| 179 |
+
// e.g. for int[3]: type = ListType::ofInts(), N = 3
|
| 180 |
+
// If present, this will allow scalars to be broadcast to this length to
|
| 181 |
+
// become a list.
|
| 182 |
+
std::optional<int32_t> N_;
|
| 183 |
+
|
| 184 |
+
std::optional<IValue> default_value_;
|
| 185 |
+
// AliasInfo is huge, so let's only allocate memory for it if
|
| 186 |
+
// necessary (which it isn't during schema parsing on startup, to
|
| 187 |
+
// give a pertinent example).
|
| 188 |
+
std::unique_ptr<AliasInfo> alias_info_;
|
| 189 |
+
// is this only specifiable as a keyword argument?
|
| 190 |
+
bool kwarg_only_;
|
| 191 |
+
// marks if the argument is out variant of the schema
|
| 192 |
+
bool is_out_;
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
inline bool operator==(const Argument& lhs, const Argument& rhs) {
|
| 196 |
+
return lhs.name() == rhs.name()
|
| 197 |
+
&& *lhs.type() == *rhs.type()
|
| 198 |
+
&& lhs.N() == rhs.N()
|
| 199 |
+
&& lhs.default_value() == rhs.default_value()
|
| 200 |
+
&& lhs.kwarg_only() == rhs.kwarg_only()
|
| 201 |
+
&& (lhs.alias_info() == rhs.alias_info()
|
| 202 |
+
|| (lhs.alias_info() != nullptr && rhs.alias_info() != nullptr
|
| 203 |
+
&& *lhs.alias_info() == *rhs.alias_info()));
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
inline bool operator!=(const Argument& lhs, const Argument& rhs) {
|
| 207 |
+
return !(lhs == rhs);
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
enum struct TORCH_API SchemaArgType { input, output };
|
| 211 |
+
|
| 212 |
+
/**
|
| 213 |
+
* struct SchemaArgument
|
| 214 |
+
*
|
| 215 |
+
* Structure used to represent arguments or returns for a schema.
|
| 216 |
+
*/
|
| 217 |
+
struct TORCH_API SchemaArgument {
|
| 218 |
+
SchemaArgType type;
|
| 219 |
+
size_t index;
|
| 220 |
+
SchemaArgument(SchemaArgType tpe, size_t idx) : type(tpe), index(idx) {}
|
| 221 |
+
bool operator==(const SchemaArgument& rhs) const {
|
| 222 |
+
return type == rhs.type && index == rhs.index;
|
| 223 |
+
}
|
| 224 |
+
};
|
| 225 |
+
|
| 226 |
+
bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs);
|
| 227 |
+
|
| 228 |
+
struct TORCH_API FunctionSchema {
|
| 229 |
+
FunctionSchema(
|
| 230 |
+
std::string name,
|
| 231 |
+
std::string overload_name,
|
| 232 |
+
std::vector<Argument> arguments,
|
| 233 |
+
std::vector<Argument> returns,
|
| 234 |
+
bool is_vararg = false,
|
| 235 |
+
bool is_varret = false)
|
| 236 |
+
: name_({std::move(name), std::move(overload_name)}),
|
| 237 |
+
arguments_(std::move(arguments)),
|
| 238 |
+
returns_(std::move(returns)),
|
| 239 |
+
is_vararg_(is_vararg),
|
| 240 |
+
is_varret_(is_varret) {
|
| 241 |
+
checkSchema();
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
FunctionSchema(
|
| 245 |
+
Symbol name,
|
| 246 |
+
std::string overload_name,
|
| 247 |
+
std::vector<Argument> arguments,
|
| 248 |
+
std::vector<Argument> returns,
|
| 249 |
+
bool is_vararg = false,
|
| 250 |
+
bool is_varret = false)
|
| 251 |
+
: FunctionSchema(
|
| 252 |
+
name.toQualString(),
|
| 253 |
+
std::move(overload_name),
|
| 254 |
+
std::move(arguments),
|
| 255 |
+
std::move(returns),
|
| 256 |
+
is_vararg,
|
| 257 |
+
is_varret) {
|
| 258 |
+
checkSchema();
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
// Checks whether this schema is backward compatible with the old one.
|
| 262 |
+
// The following conditions must be true:
|
| 263 |
+
// [Function structure] The new schema's name, overload-name, varargs, and
|
| 264 |
+
// return arity are the same.
|
| 265 |
+
// [Output Narrowing] The new schema's output type must be the same class
|
| 266 |
+
// or inherit from the old schema's output type.
|
| 267 |
+
// [Argument count] The new schema must have at least as many arguments as
|
| 268 |
+
// the old schema (considering the list of positional and kwargs).
|
| 269 |
+
// [Arg Compatibility] Every argument in the old schema has a corresponding
|
| 270 |
+
// argument in the new schema that:
|
| 271 |
+
// * is at the same position.
|
| 272 |
+
// * has the same name.
|
| 273 |
+
// * is either positional, or kwarg and the old argument was kwarg.
|
| 274 |
+
// * has the same type, or the old argument's type inherits from the
|
| 275 |
+
// new argument's type.
|
| 276 |
+
// [Default Values] Every new argument must have a default value.
|
| 277 |
+
// E.g.
|
| 278 |
+
// OK f_new(a, b, c=1) => f_old(a, b)
|
| 279 |
+
// NOK f_new(a, c=1, *, b) => f_old(a, *, b)
|
| 280 |
+
// OK f_new(a, b, *, c) => f_old(a, *, b, c)
|
| 281 |
+
// NOK f_new(a, *, b, c) -> f_old(a, b, *, c)
|
| 282 |
+
// NOK f_new(a, *, c, b) => f_old(a, *, b, c)
|
| 283 |
+
// OK f_new(a, *, b, c, d=1) => f_old(a, *, b, c)
|
| 284 |
+
bool isBackwardCompatibleWith(
|
| 285 |
+
const FunctionSchema& old,
|
| 286 |
+
std::ostream* why_not = nullptr) const;
|
| 287 |
+
|
| 288 |
+
// Checks whether this schema is forward compatible with the old one.
|
| 289 |
+
// The following conditions must be true:
|
| 290 |
+
// [Function structure] The new schema's name, overload-name, varargs, and
|
| 291 |
+
// return arity are the same.
|
| 292 |
+
// [Output Narrowing] The new schema's output type must be the same class
|
| 293 |
+
// or inherit from the old schema's output type.
|
| 294 |
+
// [Arg Compatibility] Every argument in the old schema has a corresponding
|
| 295 |
+
// argument in the new schema that:
|
| 296 |
+
// * is at the same position.
|
| 297 |
+
// * has the same name.
|
| 298 |
+
// * is either positional, or kwarg and the old argument was kwarg.
|
| 299 |
+
// * has the same type, or the old argument's type inherits from the
|
| 300 |
+
// new argument's type.
|
| 301 |
+
// [Default Values] Every new argument must have a default value.
|
| 302 |
+
// Each default value type should NOT be a container type.
|
| 303 |
+
// [Positioning] All defaults arguments MUST go after either old
|
| 304 |
+
// default arguments or the end of positional arguments
|
| 305 |
+
// and right BEFORE all out arguments
|
| 306 |
+
bool isForwardCompatibleWith(
|
| 307 |
+
const FunctionSchema& old,
|
| 308 |
+
std::ostringstream& why_not) const;
|
| 309 |
+
|
| 310 |
+
private:
|
| 311 |
+
OperatorName name_;
|
| 312 |
+
std::vector<Argument> arguments_;
|
| 313 |
+
std::vector<Argument> returns_;
|
| 314 |
+
// if true then this schema takes an arbitrary number of additional arguments
|
| 315 |
+
// after the argument specified in arguments
|
| 316 |
+
// currently this is used primarily to represent 'primitive' operators whose
|
| 317 |
+
// arguments are not checked by schema
|
| 318 |
+
bool is_vararg_;
|
| 319 |
+
bool is_varret_;
|
| 320 |
+
|
| 321 |
+
// if no alias information is directly specified, what kind of "default"
|
| 322 |
+
// alias information should we infer?
|
| 323 |
+
// NB: due to alias analysis kind merging, this may be nullopt. Eventually
|
| 324 |
+
// this should always be set no matter what
|
| 325 |
+
std::optional<AliasAnalysisKind> alias_kind_;
|
| 326 |
+
|
| 327 |
+
template <typename T>
|
| 328 |
+
void checkArg(const IValue& value, const Argument& argument, std::optional<size_t> pos) const;
|
| 329 |
+
|
| 330 |
+
void checkSchema() const {
|
| 331 |
+
bool seen_default_arg = false;
|
| 332 |
+
for (const auto& arg : arguments()) {
|
| 333 |
+
if (arg.default_value()) {
|
| 334 |
+
seen_default_arg = true;
|
| 335 |
+
} else {
|
| 336 |
+
// we have historically serialized broadcasting lists wo/default values,
|
| 337 |
+
// so to not break BC allow lists here
|
| 338 |
+
if (arg.type()->kind() == ListType::Kind) {
|
| 339 |
+
continue;
|
| 340 |
+
}
|
| 341 |
+
TORCH_INTERNAL_ASSERT(
|
| 342 |
+
!seen_default_arg || arg.kwarg_only(),
|
| 343 |
+
"Non-default positional argument follows default argument. Parameter ",
|
| 344 |
+
arg.name(),
|
| 345 |
+
" in ",
|
| 346 |
+
*this);
|
| 347 |
+
}
|
| 348 |
+
}
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
public:
|
| 352 |
+
|
| 353 |
+
void dump() const;
|
| 354 |
+
|
| 355 |
+
const OperatorName& operator_name() const {
|
| 356 |
+
return name_;
|
| 357 |
+
}
|
| 358 |
+
const std::string& name() const {
|
| 359 |
+
return name_.name;
|
| 360 |
+
}
|
| 361 |
+
const std::string& overload_name() const {
|
| 362 |
+
return name_.overload_name;
|
| 363 |
+
}
|
| 364 |
+
const std::vector<Argument>& arguments() const {
|
| 365 |
+
return arguments_;
|
| 366 |
+
}
|
| 367 |
+
const std::vector<Argument>& returns() const {
|
| 368 |
+
return returns_;
|
| 369 |
+
}
|
| 370 |
+
bool is_vararg() const {
|
| 371 |
+
return is_vararg_;
|
| 372 |
+
}
|
| 373 |
+
bool is_varret() const {
|
| 374 |
+
return is_varret_;
|
| 375 |
+
}
|
| 376 |
+
bool is_aliasing(const c10::SchemaArgument &argument) const {
|
| 377 |
+
TORCH_INTERNAL_ASSERT(
|
| 378 |
+
argument.index < getCorrectList(argument.type).size(),
|
| 379 |
+
"Invalid index for schema.");
|
| 380 |
+
const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
|
| 381 |
+
return aliasInfo;
|
| 382 |
+
}
|
| 383 |
+
bool is_mutable() const {
|
| 384 |
+
return std::any_of(
|
| 385 |
+
arguments_.cbegin(), arguments_.cend(), [](const Argument& arg) {
|
| 386 |
+
const AliasInfo* aliasInfo = arg.alias_info();
|
| 387 |
+
return aliasInfo && aliasInfo->isWrite();
|
| 388 |
+
});
|
| 389 |
+
}
|
| 390 |
+
bool is_mutable(const c10::SchemaArgument &argument) const {
|
| 391 |
+
TORCH_INTERNAL_ASSERT(
|
| 392 |
+
argument.index < getCorrectList(argument.type).size(),
|
| 393 |
+
"Invalid index for schema.");
|
| 394 |
+
const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
|
| 395 |
+
return aliasInfo && aliasInfo->isWrite();
|
| 396 |
+
}
|
| 397 |
+
bool is_mutable(c10::string_view name) const {
|
| 398 |
+
std::optional<int> index = argumentIndexWithName(name);
|
| 399 |
+
TORCH_INTERNAL_ASSERT(
|
| 400 |
+
index != std::nullopt, "Schema has no argument named ", name);
|
| 401 |
+
|
| 402 |
+
return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)});
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
// Returns whether lhs and rhs may alias directly.
|
| 406 |
+
// This does not account for cases where lhs or rhs are a container that
|
| 407 |
+
// may contain elements that alias the other argument.
|
| 408 |
+
// FunctionSchema::may_contain_alias will include that functionality.
|
| 409 |
+
bool may_alias(const SchemaArgument& lhs, const SchemaArgument& rhs) const;
|
| 410 |
+
|
| 411 |
+
// Returns whether lhs and rhs may alias directly or whether lhs/rhs are a container
|
| 412 |
+
// that may contain elements that alias the other argument.
|
| 413 |
+
// bidirectional = false only returns whether lhs may contain an alias of rhs
|
| 414 |
+
// while bidirectional = true returns both directions.
|
| 415 |
+
bool may_contain_alias(const SchemaArgument& lhs, const SchemaArgument& rhs, bool bidirectional = true) const;
|
| 416 |
+
|
| 417 |
+
// Returns whether the two AliasTypeSets contain any similarities
|
| 418 |
+
// ie: whether the two type sets can alias.
|
| 419 |
+
bool canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const std::optional<AliasTypeSet> &rhs) const;
|
| 420 |
+
|
| 421 |
+
// Recursively Finds all contained types within the AliasTypeSet.
|
| 422 |
+
std::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const std::optional<AliasTypeSet> &aliasTypeSet) const;
|
| 423 |
+
|
| 424 |
+
// Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
|
| 425 |
+
// Used to map types to a type such that all types that can alias will be mapped to the same type.
|
| 426 |
+
// For example, calling this method on 'Optional[List[int]]' is the same as calling this method
|
| 427 |
+
// on 'List[int]'.
|
| 428 |
+
std::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
|
| 429 |
+
|
| 430 |
+
// Returns either arguments() or returns() depending on the SchemaArgType
|
| 431 |
+
// output => returns(), input => arguments()
|
| 432 |
+
const std::vector<Argument>& getCorrectList(SchemaArgType type) const;
|
| 433 |
+
|
| 434 |
+
std::optional<int> argumentIndexWithName(c10::string_view name) const {
|
| 435 |
+
for (const auto i : c10::irange(arguments().size())) {
|
| 436 |
+
if(name == arguments()[i].name())
|
| 437 |
+
return i;
|
| 438 |
+
}
|
| 439 |
+
return std::nullopt;
|
| 440 |
+
}
|
| 441 |
+
FunctionSchema cloneWithName(std::string name, std::string overload_name) const {
|
| 442 |
+
return FunctionSchema(
|
| 443 |
+
std::move(name),
|
| 444 |
+
std::move(overload_name),
|
| 445 |
+
arguments(),
|
| 446 |
+
returns(),
|
| 447 |
+
is_vararg(),
|
| 448 |
+
is_varret()
|
| 449 |
+
);
|
| 450 |
+
}
|
| 451 |
+
FunctionSchema cloneWithArguments(std::vector<Argument> new_arguments) const {
|
| 452 |
+
return FunctionSchema(
|
| 453 |
+
name(),
|
| 454 |
+
overload_name(),
|
| 455 |
+
std::move(new_arguments),
|
| 456 |
+
returns(),
|
| 457 |
+
is_vararg(),
|
| 458 |
+
is_varret());
|
| 459 |
+
}
|
| 460 |
+
FunctionSchema cloneWithReturns(std::vector<Argument> new_returns) const {
|
| 461 |
+
return FunctionSchema(
|
| 462 |
+
name(),
|
| 463 |
+
overload_name(),
|
| 464 |
+
arguments(),
|
| 465 |
+
std::move(new_returns),
|
| 466 |
+
is_vararg(),
|
| 467 |
+
is_varret());
|
| 468 |
+
}
|
| 469 |
+
|
| 470 |
+
std::string formatTypeMismatchMsg(
|
| 471 |
+
const Argument& expected,
|
| 472 |
+
const std::string& actual_type,
|
| 473 |
+
std::optional<size_t> position = std::nullopt,
|
| 474 |
+
std::optional<std::string> value = std::nullopt) const;
|
| 475 |
+
|
| 476 |
+
FunctionSchema cloneWithRemappedTypes(
|
| 477 |
+
const std::function<TypePtr(TypePtr)> type_map) const;
|
| 478 |
+
|
| 479 |
+
FunctionSchema cloneWithRealTypes(bool with_symint=true) const;
|
| 480 |
+
|
| 481 |
+
// Check that inputs have the correct types and appends any missing default
|
| 482 |
+
// values.
|
| 483 |
+
template <typename T = c10::PlatformType>
|
| 484 |
+
void checkAndNormalizeInputs(
|
| 485 |
+
std::vector<IValue>& inputs,
|
| 486 |
+
const std::unordered_map<std::string, IValue>& kwargs =
|
| 487 |
+
std::unordered_map<std::string, IValue>{}) const;
|
| 488 |
+
|
| 489 |
+
std::string findErrorInKwargs(const std::vector<std::string>& kwargs) const;
|
| 490 |
+
|
| 491 |
+
bool hasAnyAliasInfo() const {
|
| 492 |
+
for (const auto& arg : arguments_) {
|
| 493 |
+
if (arg.alias_info() != nullptr) {
|
| 494 |
+
return true;
|
| 495 |
+
}
|
| 496 |
+
}
|
| 497 |
+
for (const auto& ret : returns_) {
|
| 498 |
+
if (ret.alias_info() != nullptr) {
|
| 499 |
+
return true;
|
| 500 |
+
}
|
| 501 |
+
}
|
| 502 |
+
return false;
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
// TODO remove the mutation here
|
| 507 |
+
bool isDefaultAliasAnalysisKind() const {
|
| 508 |
+
return !alias_kind_;
|
| 509 |
+
}
|
| 510 |
+
// Alias-analysis kind for this schema; falls back to CONSERVATIVE when
// none has been set explicitly.
AliasAnalysisKind aliasAnalysis() const {
  return alias_kind_.has_value() ? *alias_kind_ : AliasAnalysisKind::CONSERVATIVE;
}
|
| 513 |
+
// Record the alias-analysis kind for this schema; after this call
// isDefaultAliasAnalysisKind() returns false.
void setAliasAnalysis(AliasAnalysisKind v) {
  alias_kind_ = v;
}
|
| 516 |
+
|
| 517 |
+
// Namespace portion of the operator name, if one is present
// (delegates to OperatorName::getNamespace()).
std::optional<c10::string_view> getNamespace() const {
  return name_.getNamespace();
}
|
| 520 |
+
|
| 521 |
+
// Returns true if we successfully set the namespace (as there
// was none set, and false otherwise)
bool setNamespaceIfNotSet(const char* ns) {
  return name_.setNamespaceIfNotSet(ns);
}
|
| 526 |
+
|
| 527 |
+
// can a function with this schema be substituted for a function of rhs's
|
| 528 |
+
// schema and have the program typecheck?
|
| 529 |
+
// as_method - if true, treat this schema as a method and ignore
|
| 530 |
+
// the first argument, which will be the object in both cases
|
| 531 |
+
bool isSubtypeOf(const FunctionSchema& rhs, bool as_method, std::ostream* why_not=nullptr) const;
|
| 532 |
+
};
|
| 533 |
+
|
| 534 |
+
// Two schemas are equal only when every observable component matches:
// name, overload name, arguments, returns and the vararg/varret flags.
inline bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs) {
  if (lhs.name() != rhs.name()) {
    return false;
  }
  if (lhs.overload_name() != rhs.overload_name()) {
    return false;
  }
  if (lhs.arguments() != rhs.arguments()) {
    return false;
  }
  if (lhs.returns() != rhs.returns()) {
    return false;
  }
  if (lhs.is_vararg() != rhs.is_vararg()) {
    return false;
  }
  return lhs.is_varret() == rhs.is_varret();
}
|
| 542 |
+
|
| 543 |
+
// Inequality is defined in terms of operator== so the two stay consistent.
inline bool operator!=(const FunctionSchema& lhs, const FunctionSchema& rhs) {
  const bool equal = (lhs == rhs);
  return !equal;
}
|
| 546 |
+
|
| 547 |
+
// print out Argument, which is compatible with FunctionSchema parser
// full format: Type(alias)? name=default_value
inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {

  // for adjusting the ? position.
  // in schema, we have Tensor?(a!) input, and t(a!)?.
  // however, t?(a!) doesn't work with schema parser.
  // so we always use Type(alias)? format
  // real_type versus fake_type: in order to be compatible with FunctionSchema
  // parser, printing an argument with either MemoryFormat or Layout type should
  // give us the original schema string, hence printing out real_type.
  auto type = arg.real_type();
  bool is_opt = type->kind() == OptionalType::Kind;
  auto unopt_type = is_opt ? type->castRaw<OptionalType>()->getElementType() : type;

  if (unopt_type->kind() == ListType::Kind) {
    // sized lists get size N from arg, not type:
    // print "elem[N]" plus any contained alias set.
    auto list = unopt_type->cast<c10::ListType>();
    out << list->getElementType()->str();
    if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){
      out << arg.alias_info()->containedTypes()[0];
    }
    std::string N = "";
    if (arg.N()) {
      N = std::to_string(*arg.N());
    }
    out << "[" << N << "]";
  } else {
    out << unopt_type->str();
  }

  // print alias info if it has beforeSets.
  if (arg.alias_info() && !arg.alias_info()->beforeSets().empty()) {
    out << *arg.alias_info();
  }

  // Optional marker comes after the alias annotation: Type(alias)?
  if (is_opt) {
    out << "?";
  }

  if (!arg.name().empty()) {
    out << " " << arg.name();
  }

  if (arg.default_value()) {
    out << "=";
    // String defaults are quoted so the parser can round-trip them.
    if ((type->kind() == c10::TypeKind::StringType ||
        unopt_type->kind() == c10::TypeKind::StringType) &&
        arg.default_value().value().isString()) {
      printQuotedString(out, arg.default_value().value().toStringRef());
    } else if (type->kind() == TypeKind::ListType && type->castRaw<ListType>()->getElementType()->kind() == c10::TypeKind::IntType) {
      // We want to faithfully replicate JIT schema.
      // in native_functions.yaml defaults for int arrays with a single value always look like
      //   int[2] stride=1
      // instead of
      //   int[2] stride=[1, 1]
      auto default_val = arg.default_value().value().toIntList();
      if (default_val.size() > 1) {
        auto all_defaults_the_same = true;
        for (const auto i : c10::irange(1, default_val.size())) {
          if (default_val[0] != default_val[i]) all_defaults_the_same = false;
        }
        if (all_defaults_the_same) {
          out << default_val[0];
        } else {
          out << arg.default_value().value();
        }
      } else {
        // Zero- or one-element int lists print via IValue's own formatting.
        out << arg.default_value().value();
      }
    } else {
      out << arg.default_value().value();
    }
  }

  return out;
}
|
| 624 |
+
|
| 625 |
+
TORCH_API std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema);
|
| 626 |
+
|
| 627 |
+
// Render a schema to a std::string via its stream operator.
inline std::string toString(const FunctionSchema& schema) {
  std::ostringstream oss;
  oss << schema;
  return oss.str();
}
|
| 632 |
+
|
| 633 |
+
} // namespace c10
|
| 634 |
+
|
| 635 |
+
namespace std {
|
| 636 |
+
template<>
|
| 637 |
+
struct hash<c10::SchemaArgument> {
|
| 638 |
+
size_t operator()(const c10::SchemaArgument& arg) const
|
| 639 |
+
{
|
| 640 |
+
return c10::hash_combine(std::hash<size_t>()(arg.index), std::hash<size_t>()(static_cast<std::size_t>(arg.type)));
|
| 641 |
+
}
|
| 642 |
+
};
|
| 643 |
+
template<>
struct hash<c10::Argument> {
  // Hash over name, type and kwarg-only flag, folding in the optional
  // fields (default value, list length N, alias info) only when present.
  // NOTE(review): presumably mirrors Argument's operator== — keep the two
  // in sync if either changes.
  size_t operator()(const c10::Argument& arg) const
  {
    auto hash = std::hash<std::string>{}(arg.name());
    auto type_hash = std::hash<c10::TypePtr>{}(arg.type());
    auto kwarg_only_hash = std::hash<bool>{}(arg.kwarg_only());
    hash = c10::hash_combine(hash, type_hash);
    hash = c10::hash_combine(hash, kwarg_only_hash);
    // hashing optional fields if they exist
    if (arg.default_value()) {
      auto default_value_hash = c10::hash<c10::IValue>{}(arg.default_value().value());
      hash = c10::hash_combine(hash, default_value_hash);
    }
    if (arg.N()) {
      auto N_hash = std::hash<int64_t>{}(*arg.N());
      hash = c10::hash_combine(hash, N_hash);
    }
    if (arg.alias_info()) {
      auto alias_info_hash = std::hash<c10::AliasInfo>{}(*arg.alias_info());
      hash = c10::hash_combine(hash, alias_info_hash);
    }
    return hash;
  }
};
|
| 668 |
+
template<>
|
| 669 |
+
struct hash<c10::FunctionSchema> {
|
| 670 |
+
size_t operator()(const c10::FunctionSchema& schema) const
|
| 671 |
+
{
|
| 672 |
+
auto hash = std::hash<c10::OperatorName>{}(schema.operator_name());
|
| 673 |
+
auto args_hash = c10::hash<std::vector<c10::Argument>>{}(schema.arguments());
|
| 674 |
+
auto returns_hash = c10::hash<std::vector<c10::Argument>>{}(schema.returns());
|
| 675 |
+
auto is_vararg_hash = std::hash<bool>{}(schema.is_vararg());
|
| 676 |
+
auto is_varret_hash = std::hash<bool>{}(schema.is_varret());
|
| 677 |
+
hash = c10::hash_combine(hash, args_hash);
|
| 678 |
+
hash = c10::hash_combine(hash, returns_hash);
|
| 679 |
+
hash = c10::hash_combine(hash, is_vararg_hash);
|
| 680 |
+
hash = c10::hash_combine(hash, is_varret_hash);
|
| 681 |
+
return hash;
|
| 682 |
+
}
|
| 683 |
+
};
|
| 684 |
+
} // namespace std
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
#include <ATen/core/function_schema_inl.h> // IWYU pragma: keep
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ostream>
|
| 3 |
+
#include <sstream>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
// Verify that `value` conforms to `argument`'s declared type.
// `pos` (when present) is the positional index, used only to build the
// error message. Fails via TORCH_CHECK on a type mismatch.
template<typename T>
inline void FunctionSchema::checkArg(
    const IValue& value,
    const Argument& argument,
    std::optional<size_t> pos) const {
  if (value.isTensor() && argument.type() == TensorType::get()) {
    // Fast-path for the common case
    return;
  }
  if (!value.type<T>()->isSubtypeOf(*argument.type())) {
    TORCH_CHECK(
        false,
        formatTypeMismatchMsg(
            argument, value.type<T>()->repr_str(), pos));
  }
}
|
| 23 |
+
|
| 24 |
+
// Check that the positional `inputs` plus `kwargs` satisfy this schema,
// appending kwarg and default values so that, on success, `inputs`
// holds one type-checked value per schema argument, in declaration
// order. Throws on too many positional args, a type mismatch, a missing
// required argument, or unknown/duplicate kwargs.
template <typename T>
inline void FunctionSchema::checkAndNormalizeInputs(
    std::vector<IValue>& inputs,
    const std::unordered_map<std::string, IValue>& kwargs) const {
  // Do we have more inputs than the schema accepts?
  TORCH_CHECK(
      inputs.size() <= arguments().size(),
      "Expected at most ",
      arguments().size(),
      " argument(s) for operator '",
      name(),
      "', but received ",
      inputs.size(),
      " argument(s). Declaration: ",
      *this);

  size_t consumed_kwargs = 0;
  for (const auto pos : c10::irange(arguments().size())) {
    const auto& argument = arguments()[pos];
    if (pos < inputs.size()) {
      // Caller supplied this value positionally: type-check in place.
      checkArg<T>(inputs[pos], argument, pos);
      continue;
    }
    auto it = kwargs.find(argument.name());
    if (it != kwargs.end()) {
      // Supplied as a keyword: check it and append positionally.
      checkArg<T>(it->second, argument, std::nullopt);
      inputs.push_back(it->second);
      consumed_kwargs++;
      continue;
    }
    if (argument.default_value()) {
      // Not supplied at all: fall back to the declared default.
      inputs.push_back(*argument.default_value());
      continue;
    }
    AT_ERROR(
        name(),
        "() is missing value for argument '",
        argument.name(),
        "'. Declaration: ",
        *this);
  }
  // Any kwarg not consumed above is either unknown to the schema or a
  // duplicate of a positional argument; build a descriptive error.
  if (consumed_kwargs != kwargs.size()) {
    std::vector<std::string> names;
    names.reserve(kwargs.size());
    for(const auto& k : kwargs) {
      names.emplace_back(k.first);
    }
    throw std::runtime_error(findErrorInKwargs(names));
  }
}
|
| 74 |
+
|
| 75 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/functional.h
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <c10/util/ArrayRef.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
|
| 8 |
+
// The passed in function must take T by value (T), or by
|
| 9 |
+
// const reference (const T&); taking T by non-const reference
|
| 10 |
+
// will result in an error like:
|
| 11 |
+
//
|
| 12 |
+
// error: no type named 'type' in 'class std::invoke_result<foobar::__lambda, T>'
|
| 13 |
+
//
|
| 14 |
+
// No explicit template parameters are required.
|
| 15 |
+
|
| 16 |
+
// Overload for explicit function and ArrayRef
|
| 17 |
+
template<class F, class T>
|
| 18 |
+
inline auto fmap(const T& inputs, const F& fn) -> std::vector<decltype(fn(*inputs.begin()))> {
|
| 19 |
+
std::vector<decltype(fn(*inputs.begin()))> r;
|
| 20 |
+
r.reserve(inputs.size());
|
| 21 |
+
for(const auto & input : inputs)
|
| 22 |
+
r.push_back(fn(input));
|
| 23 |
+
return r;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
// C++ forbids taking an address of a constructor, so here's a workaround...
|
| 27 |
+
// Overload for constructor (R) application
|
| 28 |
+
// Constructor-application overload: C++ forbids taking the address of a
// constructor, so this spells out R(elem) for each element.
template<typename R, typename T>
inline std::vector<R> fmap(const T& inputs) {
  std::vector<R> constructed;
  constructed.reserve(inputs.size());
  for (const auto& elem : inputs) {
    constructed.push_back(R(elem));
  }
  return constructed;
}
|
| 36 |
+
|
| 37 |
+
template<typename F, typename T>
|
| 38 |
+
inline std::vector<T> filter(at::ArrayRef<T> inputs, const F& fn) {
|
| 39 |
+
std::vector<T> r;
|
| 40 |
+
r.reserve(inputs.size());
|
| 41 |
+
for(auto & input : inputs) {
|
| 42 |
+
if (fn(input)) {
|
| 43 |
+
r.push_back(input);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
return r;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
template<typename F, typename T>
|
| 50 |
+
inline std::vector<T> filter(const std::vector<T>& inputs, const F& fn) {
|
| 51 |
+
return filter<F, T>(static_cast<at::ArrayRef<T>>(inputs), fn);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/core/GradMode.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
// Re-export the c10 autograd-mode utilities under the at:: namespace
// for backward compatibility with code that uses the ATen spellings.
using GradMode = c10::GradMode;
using AutoGradMode = c10::AutoGradMode;
using NoGradGuard = c10::NoGradGuard;
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings_class.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <mutex>
|
| 2 |
+
#include <string>
|
| 3 |
+
#include <unordered_map>
|
| 4 |
+
#include <vector>
|
| 5 |
+
#include <ATen/core/symbol.h>
|
| 6 |
+
#include <c10/util/Exception.h>
|
| 7 |
+
|
| 8 |
+
namespace c10 {
|
| 9 |
+
|
| 10 |
+
// Registry mapping interned strings to Symbols and back. Lookups are
// guarded by mutex_, so the table is shared across threads.
struct TORCH_API InternedStrings {
  InternedStrings();
  // Look up (creating if necessary) the Symbol for a qualified string.
  Symbol symbol(const std::string& s);
  // (qualified, unqualified) C-string pair for a symbol.
  std::pair<const char*, const char*> string(Symbol sym);
  // Namespace symbol of the given symbol.
  Symbol ns(Symbol sym);

private:
  // prereq - holding mutex_
  Symbol _symbol(const std::string& s);
  std::pair<const char*, const char*> customString(Symbol sym);
  // Forward map: qualified string -> symbol.
  std::unordered_map<std::string, Symbol> string_to_sym_;

  // Reverse-map entry: everything known about one symbol.
  struct SymbolInfo {
    Symbol ns;
    std::string qual_name;
    std::string unqual_name;
  };
  // Indexed by symbol value; grows as symbols are interned.
  std::vector<SymbolInfo> sym_to_info_;

  std::mutex mutex_;
};
|
| 31 |
+
|
| 32 |
+
} // namespace c10
|