ZTWHHH committed on
Commit
c9a0738
·
verified ·
1 Parent(s): bc30b98

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h +13 -0
  2. videochat2/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h +48 -0
  3. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h +125 -0
  4. videochat2/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h +31 -0
  5. videochat2/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h +573 -0
  6. videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h +216 -0
  7. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h +59 -0
  8. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h +256 -0
  9. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h +164 -0
  10. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h +190 -0
  11. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h +24 -0
  12. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h +31 -0
  13. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h +499 -0
  14. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h +96 -0
  15. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h +164 -0
  16. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAException.h +100 -0
  17. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMacros.h +51 -0
  18. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMiscFunctions.h +12 -0
  19. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h +249 -0
  20. videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h +9 -0
  21. videochat2/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h +176 -0
  22. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Array.h +18 -0
  23. videochat2/lib/python3.10/site-packages/torch/include/c10/util/ArrayRef.h +380 -0
  24. videochat2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h +361 -0
  25. videochat2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h +292 -0
  26. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h +31 -0
  27. videochat2/lib/python3.10/site-packages/torch/include/c10/util/C++17.h +142 -0
  28. videochat2/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h +67 -0
  29. videochat2/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h +48 -0
  30. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h +102 -0
  31. videochat2/lib/python3.10/site-packages/torch/include/c10/util/DimVector.h +17 -0
  32. videochat2/lib/python3.10/site-packages/torch/include/c10/util/DynamicCounter.h +49 -0
  33. videochat2/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h +29 -0
  34. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Flags.h +226 -0
  35. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h +240 -0
  36. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h +139 -0
  37. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h +286 -0
  38. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz-inl.h +285 -0
  39. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_fnuz_cvt.h +64 -0
  40. videochat2/lib/python3.10/site-packages/torch/include/c10/util/FunctionRef.h +73 -0
  41. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h +350 -0
  42. videochat2/lib/python3.10/site-packages/torch/include/c10/util/MathConstants.h +142 -0
  43. videochat2/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h +237 -0
  44. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Metaprogramming.h +224 -0
  45. videochat2/lib/python3.10/site-packages/torch/include/c10/util/Optional.h +48 -0
  46. videochat2/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h +236 -0
  47. videochat2/lib/python3.10/site-packages/torch/include/c10/util/SmallVector.h +1467 -0
  48. videochat2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h +153 -0
  49. videochat2/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h +195 -0
  50. videochat2/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h +196 -0
videochat2/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/macros/Export.h>
#include <cstdint>

// A simple thread local enumeration, used to link forward and backward pass
// ops and is used by autograd and observers framework
namespace at::sequence_number {

// Returns the current thread-local sequence number without advancing it.
TORCH_API uint64_t peek();

// Returns the current thread-local sequence number and then advances it,
// so successive calls yield increasing values on the same thread.
TORCH_API uint64_t get_and_increment();

} // namespace at::sequence_number
videochat2/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <cstddef>

namespace c10 {

// Signature of a raw byte-copy routine between two (possibly different)
// devices: copies `nbytes` bytes from `src` on `src_device` to `dst` on
// `dst_device`.
using CopyBytesFunction = void (*)(
    size_t nbytes,
    const void* src,
    Device src_device,
    void* dst,
    Device dst_device);

// Registration helper: constructing an instance registers a synchronous
// (and optionally an asynchronous) copy function for the (from, to)
// device-type pair. Use via REGISTER_COPY_BYTES_FUNCTION below rather than
// instantiating directly.
struct C10_API _CopyBytesFunctionRegisterer {
  _CopyBytesFunctionRegisterer(
      DeviceType from,
      DeviceType to,
      CopyBytesFunction func_sync,
      CopyBytesFunction func_async = nullptr);
};

// Registers copy function(s) for a (from, to) device-type pair at static
// initialization time, by creating a file-local registerer instance inside
// an anonymous namespace.
#define REGISTER_COPY_BYTES_FUNCTION(from, to, ...)           \
  namespace {                                                 \
  static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \
      g_copy_function)(from, to, __VA_ARGS__);                \
  }

/*
 * WARNING: Implementations for this function are currently registered from
 * ATen and caffe2, not yet from c10. Don't use this if not either ATen
 * or caffe2 is present as well.
 * We can't move them yet, because the CUDA implementations aren't unified yet
 * between ATen and caffe2.
 * We're planning to move the implementations into c10/backend/xxx
 * to make c10 self contained again.
 */
// Dispatches to the registered copy function for (src_device, dst_device);
// `async` selects the asynchronous variant when one was registered.
C10_API void CopyBytes(
    size_t nbytes,
    const void* src,
    Device src_device,
    void* dst,
    Device dst_device,
    bool async);
} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/core/ScalarType.h>
#include <c10/macros/Macros.h>
#include <c10/util/Load.h>
#include <c10/util/TypeCast.h>

namespace c10 {

// Dynamic type casting utils:
// - fetch_and_cast
// - cast_and_store
//
// fetch_and_cast fetch a value with dynamic type specified by a ScalarType
// from a void pointer and cast it to a static type.
//
// cast_and_store casts a static typed value into dynamic type specified
// by a ScalarType, and store it into a void pointer.
//
// NOTE:
//
// Dynamic casting allows us to support type promotion without blowing up
// the combination space: For example, without dynamic cast, in order to
// implement `add_` with type promotion, we would need something like
//
// AT_DISPATCH_ALL_TYPES(output.dtype(),
//     AT_DISPATCH_ALL_TYPES(input1.dtype(),
//        AT_DISPATCH_ALL_TYPES(input2.dtype(),
//           [](arg0_t a, arg1_t b) -> out_t { return a + b; }
//        )
//     )
// )
//
// If we support N dtypes, the above code would generate the a+b kernel for
// all the N * N * N different supported types, the compilation time and
// binary size would become horrible.
//
// Dynamic casting might sounds like a bad idea in terms of performance.
// Especially if you ever do it in a loop, you are going to do a billion tests.
// But in practice it is not as bad as it might look:
//
// - on CPU, this is a branch that always has the same outcome, therefore
//   hopefully the branch predictor could do the job pretty well
// - on GPU, these branches will not diverge, so we could still have the same
//   warp executing the same line of code
// - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to
//   check an integer does not hurt the performance much because the ALUs would
//   wait for load instructions anyway.
//
// For the discussion and benchmark, refer to:
// - https://github.com/pytorch/pytorch/pull/28343
// - https://github.com/pytorch/pytorch/pull/28344
// - https://github.com/pytorch/pytorch/pull/28345
//

// Error reporting for an unsupported ScalarType in the switches below.
// NOTE(review): this keys off whether C10_HOST_DEVICE is defined at all; if
// c10/macros/Macros.h defines C10_HOST_DEVICE even for pure host builds, the
// TORCH_CHECK branch may effectively be dead — verify against c10/macros.
#ifdef C10_HOST_DEVICE
#define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false);
#else
#define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
#endif

// Fetch a value with dynamic type src_type from ptr, and cast it to static type
// dest_t.
#define FETCH_AND_CAST_CASE(type, scalartype) \
  case ScalarType::scalartype:                \
    return c10::convert<dest_t>(c10::load<type>(ptr));

template <typename dest_t>
C10_HOST_DEVICE inline dest_t fetch_and_cast(
    const ScalarType src_type,
    const void* ptr) {
  switch (src_type) {
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE)
    FETCH_AND_CAST_CASE(uint16_t, UInt16)
    FETCH_AND_CAST_CASE(uint32_t, UInt32)
    FETCH_AND_CAST_CASE(uint64_t, UInt64)
    default:
      ERROR_UNSUPPORTED_CAST
  }
  return dest_t(0); // just to avoid compiler warning
}

// Cast a value with static type src_t into dynamic dest_type, and store it to
// ptr.
#define CAST_AND_STORE_CASE(type, scalartype) \
  case ScalarType::scalartype:                \
    *(type*)ptr = c10::convert<type>(value);  \
    return;
template <typename src_t>
C10_HOST_DEVICE inline void cast_and_store(
    const ScalarType dest_type,
    void* ptr,
    src_t value) {
  switch (dest_type) {
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE)
    CAST_AND_STORE_CASE(uint16_t, UInt16)
    CAST_AND_STORE_CASE(uint32_t, UInt32)
    CAST_AND_STORE_CASE(uint64_t, UInt64)
    default:;
  }
  // Falls through here (instead of a default case above) so that the
  // successful cases can simply `return` from inside the switch.
  ERROR_UNSUPPORTED_CAST
}

// Quantized types do not participate in dynamic casting: for them the
// specializations below require the dynamic type to match T exactly
// (asserted), and the value is passed through unchanged.
#define DEFINE_UNCASTABLE(T, scalartype_)                     \
  template <>                                                 \
  C10_HOST_DEVICE inline T fetch_and_cast<T>(                 \
      const ScalarType src_type, const void* ptr) {           \
    CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type);  \
    return c10::load<T>(ptr);                                 \
  }                                                           \
  template <>                                                 \
  C10_HOST_DEVICE inline void cast_and_store<T>(              \
      const ScalarType dest_type, void* ptr, T value) {       \
    CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
    *(T*)ptr = value;                                         \
  }

AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)

#undef FETCH_AND_CAST_CASE
#undef CAST_AND_STORE_CASE
#undef DEFINE_UNCASTABLE
#undef ERROR_UNSUPPORTED_CAST

} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

namespace c10 {

// A non-owning optional reference to a T: either refers to a value that
// lives elsewhere, or is empty. Unlike std::optional<T>, the referent is
// never copied, so it must outlive this object.
template <typename T>
class OptionalRef {
 public:
  // Empty: refers to nothing.
  OptionalRef() : ptr_(nullptr) {}
  // Wraps a pointer that must be non-null (asserted in debug builds).
  OptionalRef(const T* data) : ptr_(data) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ptr_);
  }
  // Wraps a reference, which is always valid.
  OptionalRef(const T& data) : ptr_(&data) {}

  // True when a referent is present.
  bool has_value() const {
    return ptr_ != nullptr;
  }

  // Returns the referent; only valid when has_value() is true
  // (checked in debug builds).
  const T& get() const {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ptr_);
    return *ptr_;
  }

  // Boolean-context shorthand for has_value().
  operator bool() const {
    return has_value();
  }

 private:
  const T* ptr_;
};

} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Deprecated.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Float8_e4m3fn.h>
7
+ #include <c10/util/Float8_e4m3fnuz.h>
8
+ #include <c10/util/Float8_e5m2.h>
9
+ #include <c10/util/Float8_e5m2fnuz.h>
10
+ #include <c10/util/Half.h>
11
+ #include <c10/util/bits.h>
12
+ #include <c10/util/complex.h>
13
+ #include <c10/util/qint32.h>
14
+ #include <c10/util/qint8.h>
15
+ #include <c10/util/quint2x4.h>
16
+ #include <c10/util/quint4x2.h>
17
+ #include <c10/util/quint8.h>
18
+
19
+ #include <array>
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <limits>
23
+ #include <ostream>
24
+ #include <type_traits>
25
+ #include <unordered_map>
26
+
27
+ namespace c10 {
28
+
29
+ // dummy struct for uint1 to uint7, actual functionality
30
+ // of these dtypes will be implemented in python with Tensor subclass
31
+ template <unsigned int N>
32
+ struct dummy_uint1_7_t {};
33
+
34
+ // For the macros below:
35
+ //
36
+ // For users: If you want to macro some code for all non-QInt scalar types
37
+ // (i.e. types with complete information, you probably want one of the
38
+ // AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND macros below, which are
39
+ // designed to behave similarly to the Dispatch macros with the same name.
40
+ //
41
+ // For adding a new dtype: In the beginning, we had an idea that there was a
42
+ // list of all scalar types, and you could use AT_FORALL_SCALAR_TYPES to
43
+ // iterate over them. But over the years we added weird types which couldn't
44
+ // be handled uniformly everywhere and so in the end we ended up with some
45
+ // mish-mosh of some helper macros, but mostly use sites making a call about
46
+ // what dtypes they can or can't support. So if you want to add a new dtype,
47
+ // the preferred resolution is to find a dtype similar to what you want,
48
+ // grep for it and edit all the sites you find this way. If you need to add
49
+ // a completely new kind of dtype, you're going to have to laboriously audit
50
+ // all of the sites everywhere to figure out how it should work. Consulting
51
+ // some old PRs where we added new dtypes (check history of this file) can
52
+ // help give you an idea where to start.
53
+
54
+ // NB: Order matters for this macro; it is relied upon in
55
+ // _promoteTypesLookup and the serialization format.
56
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \
57
+ _(uint8_t, Byte) /* 0 */ \
58
+ _(int8_t, Char) /* 1 */ \
59
+ _(int16_t, Short) /* 2 */ \
60
+ _(int, Int) /* 3 */ \
61
+ _(int64_t, Long) /* 4 */ \
62
+ _(at::Half, Half) /* 5 */ \
63
+ _(float, Float) /* 6 */ \
64
+ _(double, Double) /* 7 */ \
65
+ _(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
66
+ _(c10::complex<float>, ComplexFloat) /* 9 */ \
67
+ _(c10::complex<double>, ComplexDouble) /* 10 */ \
68
+ _(bool, Bool) /* 11 */ \
69
+ _(c10::qint8, QInt8) /* 12 */ \
70
+ _(c10::quint8, QUInt8) /* 13 */ \
71
+ _(c10::qint32, QInt32) /* 14 */ \
72
+ _(at::BFloat16, BFloat16) /* 15 */ \
73
+ _(c10::quint4x2, QUInt4x2) /* 16 */ \
74
+ _(c10::quint2x4, QUInt2x4) /* 17 */ \
75
+ _(c10::bits1x8, Bits1x8) /* 18 */ \
76
+ _(c10::bits2x4, Bits2x4) /* 19 */ \
77
+ _(c10::bits4x2, Bits4x2) /* 20 */ \
78
+ _(c10::bits8, Bits8) /* 21 */ \
79
+ _(c10::bits16, Bits16) /* 22 */ \
80
+ _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ \
81
+ _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ \
82
+ _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ \
83
+ _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */ \
84
+ _(uint16_t, UInt16) /* 27 */ \
85
+ _(uint32_t, UInt32) /* 28 */ \
86
+ _(uint64_t, UInt64) /* 29 */ \
87
+ _(c10::dummy_uint1_7_t<1>, UInt1) /* 30 */ \
88
+ _(c10::dummy_uint1_7_t<2>, UInt2) /* 31 */ \
89
+ _(c10::dummy_uint1_7_t<3>, UInt3) /* 32 */ \
90
+ _(c10::dummy_uint1_7_t<4>, UInt4) /* 33 */ \
91
+ _(c10::dummy_uint1_7_t<5>, UInt5) /* 34 */ \
92
+ _(c10::dummy_uint1_7_t<6>, UInt6) /* 35 */ \
93
+ _(c10::dummy_uint1_7_t<7>, UInt7) /* 36 */
94
+
95
+ // If you want to support ComplexHalf for real, add ComplexHalf
96
+ // into this macro (and change the name). But beware: convert()
97
+ // doesn't work for all the conversions you need...
98
+ //
99
+ // TODO: To add unsigned int types here, we must define accumulate type.
100
+ // But uint8 currently accumulates into int64, so we would have to make
101
+ // an inconsistent choice for the larger types. Difficult.
102
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) \
103
+ _(uint8_t, Byte) \
104
+ _(int8_t, Char) \
105
+ _(int16_t, Short) \
106
+ _(int, Int) \
107
+ _(int64_t, Long) \
108
+ _(at::Half, Half) \
109
+ _(float, Float) \
110
+ _(double, Double) \
111
+ _(c10::complex<float>, ComplexFloat) \
112
+ _(c10::complex<double>, ComplexDouble) \
113
+ _(bool, Bool) \
114
+ _(at::BFloat16, BFloat16) \
115
+ _(at::Float8_e5m2, Float8_e5m2) \
116
+ _(at::Float8_e4m3fn, Float8_e4m3fn)
117
+
118
+ // This macro controls many of our C++ APIs, including constructors
119
+ // for Scalar as well as the data() and item() accessors on Tensor
120
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
121
+ _(uint8_t, Byte) \
122
+ _(int8_t, Char) \
123
+ _(int16_t, Short) \
124
+ _(int, Int) \
125
+ _(int64_t, Long) \
126
+ _(at::Half, Half) \
127
+ _(float, Float) \
128
+ _(double, Double) \
129
+ _(c10::complex<c10::Half>, ComplexHalf) \
130
+ _(c10::complex<float>, ComplexFloat) \
131
+ _(c10::complex<double>, ComplexDouble) \
132
+ _(bool, Bool) \
133
+ _(at::BFloat16, BFloat16) \
134
+ _(at::Float8_e5m2, Float8_e5m2) \
135
+ _(at::Float8_e4m3fn, Float8_e4m3fn) \
136
+ _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) \
137
+ _(at::Float8_e4m3fnuz, Float8_e4m3fnuz)
138
+
139
+ enum class ScalarType : int8_t {
140
+ #define DEFINE_ST_ENUM_VAL_(_1, n) n,
141
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ST_ENUM_VAL_)
142
+ #undef DEFINE_ENUM_ST_ENUM_VAL_
143
+ Undefined,
144
+ NumOptions
145
+ };
146
+
147
+ constexpr uint16_t NumScalarTypes =
148
+ static_cast<uint16_t>(ScalarType::NumOptions);
149
+
150
+ namespace impl {
151
+
152
+ // These are used to map ScalarTypes to C++ types.
153
+
154
+ template <c10::ScalarType N>
155
+ struct ScalarTypeToCPPType;
156
+
157
+ #define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \
158
+ template <> \
159
+ struct ScalarTypeToCPPType<c10::ScalarType::scalar_type> { \
160
+ using type = cpp_type; \
161
+ \
162
+ /* This is a workaround for the CUDA bug which prevents */ \
163
+ /* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
164
+ /* ambiguous reference which can't to be resolved. For some reason it */ \
165
+ /* can't pick between at::detail and at::cuda::detail. */ \
166
+ /* For repro example, please see: */ \
167
+ /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
168
+ /* TODO: remove once the bug is fixed. */ \
169
+ static type t; \
170
+ };
171
+
172
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType)
173
+
174
+ #undef SPECIALIZE_ScalarTypeToCPPType
175
+
176
+ template <c10::ScalarType N>
177
+ using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType<N>::type;
178
+
179
+ } // namespace impl
180
+
181
+ template <typename T>
182
+ struct CppTypeToScalarType;
183
+
184
+ #define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \
185
+ template <> \
186
+ struct CppTypeToScalarType<cpp_type> \
187
+ : std:: \
188
+ integral_constant<c10::ScalarType, c10::ScalarType::scalar_type> { \
189
+ };
190
+
191
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType)
192
+
193
+ #undef SPECIALIZE_CppTypeToScalarType
194
+
195
+ // NB: despite its generic sounding name, the macros that don't take _AND
196
+ // are mostly only used by tensorexpr
197
+ #define AT_FORALL_INT_TYPES(_) \
198
+ _(uint8_t, Byte) \
199
+ _(int8_t, Char) \
200
+ _(int16_t, Short) \
201
+ _(int, Int) \
202
+ _(int64_t, Long)
203
+
204
+ #define AT_FORALL_SCALAR_TYPES(_) \
205
+ _(uint8_t, Byte) \
206
+ _(int8_t, Char) \
207
+ _(int16_t, Short) \
208
+ _(int, Int) \
209
+ _(int64_t, Long) \
210
+ _(float, Float) \
211
+ _(double, Double)
212
+
213
+ // These macros are often controlling how many template instantiations we
214
+ // create for kernels. It is typically inappropriate to add new dtypes here,
215
+ // instead, new types should be added to use sites on a case-by-case basis.
216
+ // We generally are not accepting new dtypes due to binary size concerns.
217
+
218
+ #define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \
219
+ _(uint8_t, Byte) \
220
+ _(int8_t, Char) \
221
+ _(int16_t, Short) \
222
+ _(int, Int) \
223
+ _(int64_t, Long) \
224
+ _(float, Float) \
225
+ _(double, Double) \
226
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
227
+ ::c10::ScalarType::SCALARTYPE>::t), \
228
+ SCALARTYPE)
229
+
230
+ #define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \
231
+ _(uint8_t, Byte) \
232
+ _(int8_t, Char) \
233
+ _(int16_t, Short) \
234
+ _(int, Int) \
235
+ _(int64_t, Long) \
236
+ _(float, Float) \
237
+ _(double, Double) \
238
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
239
+ ::c10::ScalarType::SCALARTYPE1>::t), \
240
+ SCALARTYPE1) \
241
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
242
+ ::c10::ScalarType::SCALARTYPE2>::t), \
243
+ SCALARTYPE2)
244
+
245
+ #define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \
246
+ _(uint8_t, Byte) \
247
+ _(int8_t, Char) \
248
+ _(int16_t, Short) \
249
+ _(int, Int) \
250
+ _(int64_t, Long) \
251
+ _(float, Float) \
252
+ _(double, Double) \
253
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
254
+ ::c10::ScalarType::SCALARTYPE1>::t), \
255
+ SCALARTYPE1) \
256
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
257
+ ::c10::ScalarType::SCALARTYPE2>::t), \
258
+ SCALARTYPE2) \
259
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
260
+ ::c10::ScalarType::SCALARTYPE3>::t), \
261
+ SCALARTYPE3)
262
+
263
+ #define AT_FORALL_SCALAR_TYPES_AND7( \
264
+ SCALARTYPE1, \
265
+ SCALARTYPE2, \
266
+ SCALARTYPE3, \
267
+ SCALARTYPE4, \
268
+ SCALARTYPE5, \
269
+ SCALARTYPE6, \
270
+ SCALARTYPE7, \
271
+ _) \
272
+ _(uint8_t, Byte) \
273
+ _(int8_t, Char) \
274
+ _(int16_t, Short) \
275
+ _(int, Int) \
276
+ _(int64_t, Long) \
277
+ _(float, Float) \
278
+ _(double, Double) \
279
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
280
+ ::c10::ScalarType::SCALARTYPE1>::t), \
281
+ SCALARTYPE1) \
282
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
283
+ ::c10::ScalarType::SCALARTYPE2>::t), \
284
+ SCALARTYPE2) \
285
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
286
+ ::c10::ScalarType::SCALARTYPE3>::t), \
287
+ SCALARTYPE3) \
288
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
289
+ ::c10::ScalarType::SCALARTYPE4>::t), \
290
+ SCALARTYPE4) \
291
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
292
+ ::c10::ScalarType::SCALARTYPE5>::t), \
293
+ SCALARTYPE5) \
294
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
295
+ ::c10::ScalarType::SCALARTYPE6>::t), \
296
+ SCALARTYPE6) \
297
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
298
+ ::c10::ScalarType::SCALARTYPE7>::t), \
299
+ SCALARTYPE7)
300
+
301
+ #define AT_FORALL_QINT_TYPES(_) \
302
+ _(c10::qint8, QInt8) \
303
+ _(c10::quint8, QUInt8) \
304
+ _(c10::qint32, QInt32) \
305
+ _(c10::quint4x2, QUInt4x2) \
306
+ _(c10::quint2x4, QUInt2x4)
307
+
308
+ #define AT_FORALL_COMPLEX_TYPES(_) \
309
+ _(c10::complex<float>, ComplexFloat) \
310
+ _(c10::complex<double>, ComplexDouble)
311
+
312
+ #define DEFINE_CONSTANT(_, name) \
313
+ constexpr ScalarType k##name = ScalarType::name;
314
+
315
+ // NOLINTNEXTLINE(clang-diagnostic-unused-const-variable)
316
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT)
317
+ #undef DEFINE_CONSTANT
318
+
319
+ inline const char* toString(ScalarType t) {
320
+ #define DEFINE_CASE(_, name) \
321
+ case ScalarType::name: \
322
+ return #name;
323
+
324
+ switch (t) {
325
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE)
326
+ default:
327
+ return "UNKNOWN_SCALAR";
328
+ }
329
+ #undef DEFINE_CASE
330
+ }
331
+
332
+ inline size_t elementSize(ScalarType t) {
333
+ #define CASE_ELEMENTSIZE_CASE(ctype, name) \
334
+ case ScalarType::name: \
335
+ return sizeof(ctype);
336
+
337
+ switch (t) {
338
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE)
339
+ default:
340
+ TORCH_CHECK(false, "Unknown ScalarType");
341
+ }
342
+ #undef CASE_ELEMENTSIZE_CASE
343
+ }
344
+
345
+ inline bool isIntegralType(ScalarType t, bool includeBool) {
346
+ bool isIntegral =
347
+ (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int ||
348
+ t == ScalarType::Long || t == ScalarType::Short ||
349
+ t == ScalarType::UInt16 || t == ScalarType::UInt32 ||
350
+ t == ScalarType::UInt64);
351
+
352
+ return isIntegral || (includeBool && t == ScalarType::Bool);
353
+ }
354
+
355
+ C10_DEPRECATED_MESSAGE(
356
+ "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
357
+ inline bool isIntegralType(ScalarType t) {
358
+ return isIntegralType(t, /*includeBool=*/false);
359
+ }
360
+
361
+ inline bool isFloat8Type(ScalarType t) {
362
+ return t == ScalarType::Float8_e5m2 || t == ScalarType::Float8_e5m2fnuz ||
363
+ t == ScalarType::Float8_e4m3fn || t == ScalarType::Float8_e4m3fnuz;
364
+ }
365
+
366
+ inline bool isReducedFloatingType(ScalarType t) {
367
+ return t == ScalarType::Half || t == ScalarType::BFloat16 || isFloat8Type(t);
368
+ }
369
+
370
+ inline bool isFloatingType(ScalarType t) {
371
+ return t == ScalarType::Double || t == ScalarType::Float ||
372
+ isReducedFloatingType(t);
373
+ }
374
+
375
+ inline bool isComplexType(ScalarType t) {
376
+ return (
377
+ t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat ||
378
+ t == ScalarType::ComplexDouble);
379
+ }
380
+
381
+ inline bool isQIntType(ScalarType t) {
382
+ // Don't forget to extend this when adding new QInt types
383
+ return t == ScalarType::QInt8 || t == ScalarType::QUInt8 ||
384
+ t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 ||
385
+ t == ScalarType::QUInt2x4;
386
+ }
387
+
388
+ inline bool isBitsType(ScalarType t) {
389
+ return t == ScalarType::Bits1x8 || t == ScalarType::Bits2x4 ||
390
+ t == ScalarType::Bits4x2 || t == ScalarType::Bits8 ||
391
+ t == ScalarType::Bits16;
392
+ }
393
+
394
+ inline bool isBarebonesUnsignedType(ScalarType t) {
395
+ return t == ScalarType::UInt1 || t == ScalarType::UInt2 ||
396
+ t == ScalarType::UInt3 || t == ScalarType::UInt4 ||
397
+ t == ScalarType::UInt5 || t == ScalarType::UInt6 ||
398
+ t == ScalarType::UInt7 || t == ScalarType::UInt16 ||
399
+ t == ScalarType::UInt32 || t == ScalarType::UInt64;
400
+ }
401
+
402
+ inline ScalarType toQIntType(ScalarType t) {
403
+ switch (t) {
404
+ case ScalarType::Byte:
405
+ return ScalarType::QUInt8;
406
+ case ScalarType::Char:
407
+ return ScalarType::QInt8;
408
+ case ScalarType::Int:
409
+ return ScalarType::QInt32;
410
+ default:
411
+ return t;
412
+ }
413
+ }
414
+
415
+ inline ScalarType toUnderlying(ScalarType t) {
416
+ switch (t) {
417
+ case ScalarType::QUInt8:
418
+ case ScalarType::QUInt4x2:
419
+ [[fallthrough]];
420
+ case ScalarType::QUInt2x4:
421
+ return ScalarType::Byte;
422
+ case ScalarType::QInt8:
423
+ return ScalarType::Char;
424
+ case ScalarType::QInt32:
425
+ return ScalarType::Int;
426
+ default:
427
+ return t;
428
+ }
429
+ }
430
+
431
+ inline bool isSignedType(ScalarType t) {
432
+ #define CASE_ISSIGNED(name) \
433
+ case ScalarType::name: \
434
+ return std::numeric_limits< \
435
+ ::c10::impl::ScalarTypeToCPPTypeT<ScalarType::name>>::is_signed;
436
+
437
+ switch (t) {
438
+ case ScalarType::QInt8:
439
+ case ScalarType::QUInt8:
440
+ case ScalarType::QInt32:
441
+ case ScalarType::QUInt4x2:
442
+ case ScalarType::QUInt2x4:
443
+ TORCH_CHECK(false, "isSignedType not supported for quantized types");
444
+ case ScalarType::Bits1x8:
445
+ case ScalarType::Bits2x4:
446
+ case ScalarType::Bits4x2:
447
+ case ScalarType::Bits8:
448
+ case ScalarType::Bits16:
449
+ TORCH_CHECK(false, "Bits types are undefined");
450
+ CASE_ISSIGNED(UInt16);
451
+ CASE_ISSIGNED(UInt32);
452
+ CASE_ISSIGNED(UInt64);
453
+ CASE_ISSIGNED(BFloat16);
454
+ CASE_ISSIGNED(Float8_e5m2);
455
+ CASE_ISSIGNED(Float8_e5m2fnuz);
456
+ CASE_ISSIGNED(Float8_e4m3fn);
457
+ CASE_ISSIGNED(Float8_e4m3fnuz);
458
+ CASE_ISSIGNED(Byte);
459
+ CASE_ISSIGNED(Char);
460
+ CASE_ISSIGNED(Short);
461
+ CASE_ISSIGNED(Int);
462
+ CASE_ISSIGNED(Long);
463
+ CASE_ISSIGNED(Half);
464
+ CASE_ISSIGNED(Float);
465
+ CASE_ISSIGNED(Double);
466
+ CASE_ISSIGNED(ComplexHalf);
467
+ CASE_ISSIGNED(ComplexFloat);
468
+ CASE_ISSIGNED(ComplexDouble);
469
+ CASE_ISSIGNED(Bool);
470
+ case ScalarType::UInt1:
471
+ case ScalarType::UInt2:
472
+ case ScalarType::UInt3:
473
+ case ScalarType::UInt4:
474
+ case ScalarType::UInt5:
475
+ case ScalarType::UInt6:
476
+ case ScalarType::UInt7:
477
+ return true;
478
+ case ScalarType::Undefined:
479
+ case ScalarType::NumOptions:
480
+ break;
481
+ // Do not add default here, but rather define behavior of every new entry
482
+ // here. `-Wswitch-enum` would raise a warning in those cases.
483
+ }
484
+ TORCH_CHECK(false, "Unknown ScalarType ", t);
485
+ #undef CASE_ISSIGNED
486
+ }
487
+
488
+ inline bool isUnderlying(ScalarType type, ScalarType qtype) {
489
+ return type == toUnderlying(qtype);
490
+ }
491
+
492
+ inline ScalarType toRealValueType(ScalarType t) {
493
+ switch (t) {
494
+ case ScalarType::ComplexHalf:
495
+ return ScalarType::Half;
496
+ case ScalarType::ComplexFloat:
497
+ return ScalarType::Float;
498
+ case ScalarType::ComplexDouble:
499
+ return ScalarType::Double;
500
+ default:
501
+ return t;
502
+ }
503
+ }
504
+
505
+ inline ScalarType toComplexType(ScalarType t) {
506
+ switch (t) {
507
+ case ScalarType::BFloat16:
508
+ // BFloat16 has range equivalent to Float,
509
+ // so we map it to ComplexFloat.
510
+ return ScalarType::ComplexFloat;
511
+ case ScalarType::Half:
512
+ return ScalarType::ComplexHalf;
513
+ case ScalarType::Float:
514
+ return ScalarType::ComplexFloat;
515
+ case ScalarType::Double:
516
+ return ScalarType::ComplexDouble;
517
+ case ScalarType::ComplexHalf:
518
+ return ScalarType::ComplexHalf;
519
+ case ScalarType::ComplexFloat:
520
+ return ScalarType::ComplexFloat;
521
+ case ScalarType::ComplexDouble:
522
+ return ScalarType::ComplexDouble;
523
+ default:
524
+ TORCH_CHECK(false, "Unknown Complex ScalarType for ", t);
525
+ }
526
+ }
527
+
528
+ // see tensor_attributes.rst for detailed explanation and examples
529
+ // of casting rules.
530
+ inline bool canCast(const ScalarType from, const ScalarType to) {
531
+ // We disallow complex -> non complex, e.g., float_tensor *= complex is
532
+ // disallowed.
533
+ if (isComplexType(from) && !isComplexType(to)) {
534
+ return false;
535
+ }
536
+ // We disallow float -> integral, e.g., int_tensor *= float is disallowed.
537
+ if (isFloatingType(from) && isIntegralType(to, false)) {
538
+ return false;
539
+ }
540
+
541
+ // Treat bool as a distinct "category," to be consistent with type promotion
542
+ // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same
543
+ // category as `bool_tensor`, we would not promote. Differing categories
544
+ // implies `bool_tensor += 5` is disallowed.
545
+ //
546
+ // NB: numpy distinguishes "unsigned" as a category to get the desired
547
+ // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because:
548
+ // * We don't want the performance hit of checking the runtime sign of
549
+ // Scalars.
550
+ // * `uint8_tensor + 5 -> int64_tensor` would be undesirable.
551
+ if (from != ScalarType::Bool && to == ScalarType::Bool) {
552
+ return false;
553
+ }
554
+ return true;
555
+ }
556
+
557
+ C10_API ScalarType promoteTypes(ScalarType a, ScalarType b);
558
+
559
+ inline std::ostream& operator<<(
560
+ std::ostream& stream,
561
+ at::ScalarType scalar_type) {
562
+ return stream << toString(scalar_type);
563
+ }
564
+
565
+ // Returns a pair of strings representing the names for each dtype.
566
+ // The returned pair is (name, legacy_name_if_applicable)
567
+ C10_API std::pair<std::string, std::string> getDtypeNames(
568
+ c10::ScalarType scalarType);
569
+
570
+ // Returns a map of string name to dtype.
571
+ C10_API const std::unordered_map<std::string, ScalarType>& getStringToDtypeMap();
572
+
573
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/core/SymBool.h>
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/DimVector.h>
7
+
8
+ #include <atomic>
9
+ #include <cstdint>
10
+ #include <mutex>
11
+ #include <utility>
12
+
13
+ namespace c10 {
14
+
15
+ class C10_API SymbolicShapeMeta {
16
+ public:
17
+ // Basic metadata from which other quantities are derived
18
+ SymDimVector sizes_ = {0};
19
+ SymDimVector strides_ = {1};
20
+ SymInt storage_offset_ = 0;
21
+
22
+ bool strides_valid_ = true; // e.g. for sparse where there are no strides
23
+
24
+ SymbolicShapeMeta() = default;
25
+ SymbolicShapeMeta(const SymbolicShapeMeta& other);
26
+ SymbolicShapeMeta& operator=(const SymbolicShapeMeta& other) = delete;
27
+ SymbolicShapeMeta& operator=(SymbolicShapeMeta&& other) = delete;
28
+
29
+ void refresh_numel() {
30
+ // Non-const, don't need to hold mutables_ lock
31
+ available_.fetch_and(~numel_avail);
32
+ numel_ = 1;
33
+ }
34
+
35
+ void refresh_contiguous() {
36
+ // Non-const, don't need to hold mutables_ lock
37
+ available_.fetch_and(numel_avail);
38
+ is_contiguous_ = false;
39
+ is_channels_last_contiguous_ = false;
40
+ is_channels_last_3d_contiguous_ = false;
41
+ is_channels_last_ = false;
42
+ is_channels_last_3d_ = false;
43
+ is_non_overlapping_and_dense_ = false;
44
+ }
45
+
46
+ int64_t dim() const {
47
+ return static_cast<int64_t>(sizes_.size());
48
+ }
49
+
50
+ // Accessors for derived quantities, computed lazily on first access
51
+
52
+ bool has_numel() const {
53
+ return available_.load() & numel_avail;
54
+ }
55
+ bool has_is_contiguous() const {
56
+ return available_.load() & is_contiguous_avail;
57
+ }
58
+ bool has_is_channels_last_contiguous() const {
59
+ return available_.load() & is_channels_last_contiguous_avail;
60
+ }
61
+ bool has_is_channels_last_3d_contiguous() const {
62
+ return available_.load() & is_channels_last_3d_contiguous_avail;
63
+ }
64
+ bool has_is_channels_last() const {
65
+ return available_.load() & is_channels_last_avail;
66
+ }
67
+ bool has_is_channels_last_3d() const {
68
+ return available_.load() & is_channels_last_3d_avail;
69
+ }
70
+ bool has_is_non_overlapping_and_dense() const {
71
+ return available_.load() & is_non_overlapping_and_dense_avail;
72
+ }
73
+
74
+ // Accessors to cached derived properties
75
+ // DO NOT call with mutables_ lock held
76
+ const SymInt& numel() const {
77
+ if (C10_UNLIKELY(!has_numel())) {
78
+ init_numel();
79
+ }
80
+ return numel_;
81
+ }
82
+
83
+ const SymBool& is_contiguous() const {
84
+ if (C10_UNLIKELY(!has_is_contiguous())) {
85
+ init_is_contiguous();
86
+ }
87
+ return is_contiguous_;
88
+ }
89
+
90
+ const SymBool& is_channels_last_contiguous() const {
91
+ if (C10_UNLIKELY(!has_is_channels_last_contiguous())) {
92
+ init_is_channels_last_contiguous();
93
+ }
94
+ return is_channels_last_contiguous_;
95
+ }
96
+
97
+ const SymBool& is_channels_last_3d_contiguous() const {
98
+ if (C10_UNLIKELY(!has_is_channels_last_3d_contiguous())) {
99
+ init_is_channels_last_3d_contiguous();
100
+ }
101
+ return is_channels_last_3d_contiguous_;
102
+ }
103
+
104
+ const SymBool& is_channels_last() const {
105
+ if (C10_UNLIKELY(!has_is_channels_last())) {
106
+ init_is_channels_last();
107
+ }
108
+ return is_channels_last_;
109
+ }
110
+
111
+ const SymBool& is_channels_last_3d() const {
112
+ if (C10_UNLIKELY(!has_is_channels_last_3d())) {
113
+ init_is_channels_last_3d();
114
+ }
115
+ return is_channels_last_3d_;
116
+ }
117
+
118
+ const SymBool& is_non_overlapping_and_dense() const {
119
+ if (C10_UNLIKELY(!has_is_non_overlapping_and_dense())) {
120
+ init_is_non_overlapping_and_dense();
121
+ }
122
+ return is_non_overlapping_and_dense_;
123
+ }
124
+
125
+ // Assumptions so we can short-circuit computation
126
+ // NOTE: Don't need to lock mutables_ since these aren't const
127
+ void assume_contiguous(SymBool val = true) {
128
+ is_contiguous_ = std::move(val);
129
+ available_.fetch_or(is_contiguous_avail);
130
+ }
131
+ void assume_channels_last_contiguous(SymBool val = true) {
132
+ is_contiguous_ = std::move(val);
133
+ available_.fetch_or(is_channels_last_contiguous_avail);
134
+ }
135
+ void assume_channels_last_3d_contiguous(SymBool val = true) {
136
+ is_channels_last_3d_contiguous_ = std::move(val);
137
+ available_.fetch_or(is_channels_last_3d_contiguous_avail);
138
+ }
139
+ void assume_channels_last(SymBool val = true) {
140
+ is_channels_last_ = std::move(val);
141
+ available_.fetch_or(is_channels_last_avail);
142
+ }
143
+ void assume_channels_last_3d(SymBool val = true) {
144
+ is_channels_last_3d_ = std::move(val);
145
+ available_.fetch_or(is_channels_last_3d_avail);
146
+ }
147
+ void assume_non_overlapping_and_dense(SymBool val = true) {
148
+ is_non_overlapping_and_dense_ = std::move(val);
149
+ available_.fetch_or(is_non_overlapping_and_dense_avail);
150
+ }
151
+
152
+ private:
153
+ SymBool compute_contiguous() const;
154
+ SymBool compute_channels_last_contiguous_2d() const;
155
+ SymBool compute_channels_last_contiguous_3d() const;
156
+ SymBool compute_strides_like_channels_last_2d() const;
157
+ SymBool compute_strides_like_channels_last_3d() const;
158
+ SymBool compute_non_overlapping_and_dense() const;
159
+
160
+ // These are little wrappers over the real compute_ functions that
161
+ // can make use of other contiguity fields to short circuit.
162
+ // They need to be implemented separately for SymBool, as SymBool does
163
+ // not short circuit.
164
+ // TODO: should the SymBool cases avoid the short circuit? Need to reason
165
+ // if its correct, and reason if the simpler expressions are better for
166
+ // analysis (maybe not!)
167
+
168
+ SymBool compute_channels_last_contiguous_3d_dim5() const;
169
+ SymBool compute_channels_last_2d_dim5() const;
170
+ SymBool compute_channels_last_3d_dim5() const;
171
+ SymBool compute_is_non_overlapping_and_dense_dim4() const;
172
+ SymBool compute_is_non_overlapping_and_dense_dim5() const;
173
+ SymBool compute_is_non_overlapping_and_dense_anydim() const;
174
+
175
+ void init_numel() const;
176
+ void init_is_contiguous() const;
177
+ void init_is_channels_last_contiguous() const;
178
+ void init_is_channels_last_3d_contiguous() const;
179
+ void init_is_channels_last() const;
180
+ void init_is_channels_last_3d() const;
181
+ void init_is_non_overlapping_and_dense() const;
182
+
183
+ // NOTE: These only set if !has_foo()
184
+ void set_numel(SymInt val) const;
185
+ void set_is_contiguous(SymBool val) const;
186
+ void set_is_channels_last_contiguous(SymBool val) const;
187
+ void set_is_channels_last_3d_contiguous(SymBool val) const;
188
+ void set_is_channels_last(SymBool val) const;
189
+ void set_is_channels_last_3d(SymBool val) const;
190
+ void set_is_non_overlapping_and_dense(SymBool val) const;
191
+
192
+ // Lazily initialized variables, with the corresponding available_ flag
193
+ // indicating whether the value has been initialized
194
+ mutable std::atomic<int> available_{0};
195
+ enum avail {
196
+ numel_avail = 1 << 0,
197
+ is_contiguous_avail = 1 << 1,
198
+ is_channels_last_contiguous_avail = 1 << 2,
199
+ is_channels_last_3d_contiguous_avail = 1 << 3,
200
+ is_channels_last_avail = 1 << 4,
201
+ is_channels_last_3d_avail = 1 << 5,
202
+ is_non_overlapping_and_dense_avail = 1 << 6,
203
+ };
204
+
205
+ // Mutex to prevent races when initializing the variable from const accessors
206
+ mutable std::mutex mutables_;
207
+ mutable SymInt numel_ = 1;
208
+ mutable SymBool is_contiguous_{true};
209
+ mutable SymBool is_channels_last_contiguous_{false};
210
+ mutable SymBool is_channels_last_3d_contiguous_{false};
211
+ mutable SymBool is_channels_last_{false};
212
+ mutable SymBool is_channels_last_3d_{false};
213
+ mutable SymBool is_non_overlapping_and_dense_{true};
214
+ };
215
+
216
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <atomic>
5
+
6
+ namespace c10::impl {
7
+
8
// This TLS controls whether or not we permanently associate PyObject
// with Tensor the first time it is allocated. When hermetic PyObject
// TLS is enabled (state is true), we DO NOT save PyObjects to Tensor,
// meaning you get a distinct PyObject whenever you execute the code in
// question.
//
// All members are static: this is a process-wide facility keyed off a
// thread-local flag (see get_tls_state), not a per-instance object.
struct C10_API HermeticPyObjectTLS {
  // Sets the current thread's hermetic flag.
  static void set_state(bool state);
  // Returns the current thread's hermetic flag.
  static bool get_state() {
    // Hypothetical fastpath if torchdeploy/multipy isn't used. Per
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
    // this qualifies relaxed access because it is a single-location data
    // structure (only the boolean here).
    //
    // Forgetting about data races for a moment, is there a logical race?
    //
    // - Boolean only ever transitions from false to true. So the
    //   critical situation is when one interpreter is already running
    //   when a second interpreter switches haveState from false to true.
    //
    // - The first interpreter is indifferent whether or not it sees
    //   hasState true/false; obviously false works (this is what the
    //   interpreter was previously using; more directly, the interpreter
    //   calls into itself as the handler, so being hermetic is not
    //   required), and true simply means serviced python operator calls will
    //   be hermetic; in these cases it is expected to be functionally
    //   equivalent.
    //
    // - The second interpreter MUST see hasState true (as its requests will
    //   be forwarded to the first interpreter), but it is assumed that there
    //   is a synchronization between the interpreter initialization, and
    //   when we actually perform operations, so it is guaranteed to see
    //   hasState true.
    //
    // QED.
    //
    // This fastpath is currently disabled so that we can more easily test that
    // hermetic mode works correctly even on stock build of PyTorch.
    // (The `false &&` below is intentional: it keeps the fastpath compiled
    // but dead; every call currently falls through to get_tls_state().)
    if (false && !haveState_.load(std::memory_order_relaxed))
      return false;
    return get_tls_state();
  }
  // Call this from the multipy/torchdeploy top level
  static void init_state();

 private:
  // This only flipped once from false to true during torchdeploy/multipy
  // initialization, and never again.
  static std::atomic<bool> haveState_;
  // Reads the actual thread-local flag (defined in the .cpp).
  static bool get_tls_state();
};
58
+
59
+ } // namespace c10::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/InlineDeviceGuard.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ namespace c10::impl {
8
+
9
/**
 * A StreamGuard is an RAII class that changes the current device
 * to the device corresponding to some stream, and changes the
 * default stream on that device to be this stream.
 *
 * InlineStreamGuard is a helper class for implementing StreamGuards.
 * See InlineDeviceGuard for guidance on how to use this class.
 */
template <typename T>
class InlineStreamGuard : private InlineDeviceGuard<T> {
 public:
  /// No default constructor, see Note [Omitted default constructor from RAII]
  explicit InlineStreamGuard() = delete;

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  ///
  /// NB: members are initialized in declaration order. The base-class
  /// constructor switches the current device first; then
  /// original_stream_of_original_device_ captures the old device's stream
  /// BEFORE exchangeStream() below replaces the current device's stream —
  /// the ordering matters when the original and new device coincide.
  explicit InlineStreamGuard(Stream stream)
      : InlineDeviceGuard<T>(stream.device()),
        original_stream_of_original_device_(
            this->impl_.getStream(original_device())),
        original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
        current_stream_(stream) {}

  /// This constructor exists purely for testing
  template <
      typename U = T,
      typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
  explicit InlineStreamGuard(
      Stream stream,
      const DeviceGuardImplInterface* impl)
      : InlineDeviceGuard<T>(
            stream.device(),
            impl ? impl : getDeviceGuardImpl(stream.device_type())),
        original_stream_of_original_device_(
            this->impl_.getStream(original_device())),
        original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
        current_stream_(stream) {}

  /// Copy is disallowed
  InlineStreamGuard(const InlineStreamGuard<T>&) = delete;
  InlineStreamGuard<T>& operator=(const InlineStreamGuard<T>&) = delete;

  /// Move is disallowed, as StreamGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  InlineStreamGuard(InlineStreamGuard<T>&& other) = delete;
  InlineStreamGuard& operator=(InlineStreamGuard<T>&& other) = delete;

  /// Restores the current device's original stream; the base-class
  /// destructor then restores the original device.
  ~InlineStreamGuard() {
    this->impl_.exchangeStream(original_stream_of_current_device_);
  }

  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device.  Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  ///
  /// NOTE: this implementation may skip some stream/device setting if
  /// it can prove that it is unnecessary.
  ///
  /// WARNING: reset_stream does NOT preserve previously set streams on
  /// different devices.  If you need to set streams on multiple devices
  /// use MultiStreamGuard instead.
  void reset_stream(Stream stream) {
    // TODO: make a version that takes an impl argument.  Unfortunately,
    // that will require SFINAE because impl is only valid for the
    // VirtualGuardImpl specialization.
    if (stream.device() == this->current_device()) {
      // Same device: only the stream needs swapping; the stream to restore
      // on destruction is unchanged.
      this->impl_.exchangeStream(stream);
      current_stream_ = stream;
    } else {
      // Destruct and reconstruct the StreamGuard in-place
      this->impl_.exchangeStream(original_stream_of_current_device_);
      this->reset_device(stream.device());
      original_stream_of_current_device_ = this->impl_.exchangeStream(stream);
      current_stream_ = stream;
    }
  }

  // It's not clear if set_device should also reset the current stream
  // if the device is unchanged; therefore, we don't provide it.
  // The situation is somewhat clearer with reset_device, but it's still
  // a pretty weird thing to do, so haven't added this either.

  /// Returns the stream of the original device prior to this guard.  Subtly,
  /// the stream returned here is the original stream of the *original*
  /// device; i.e., it's the stream that your computation *would* have
  /// been put on, if it hadn't been for this meddling stream guard.
  /// This is usually what you want.
  Stream original_stream() const {
    return original_stream_of_original_device_;
  }

  /// Returns the most recent stream that was set using this device guard,
  /// either from construction, or via set_stream.
  Stream current_stream() const {
    return current_stream_;
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return InlineDeviceGuard<T>::current_device();
  }

  /// Returns the device that was set at the most recent reset_stream(),
  /// or otherwise the device at construction time.
  Device original_device() const {
    return InlineDeviceGuard<T>::original_device();
  }

 private:
  Stream
      original_stream_of_original_device_; // what the user probably cares about
  Stream original_stream_of_current_device_; // what we need to restore
  Stream current_stream_;
};
125
+
126
+ /**
127
+ * An OptionalStreamGuard is an RAII class that sets a device to some value on
128
+ * initialization, and resets the device to its original value on destruction.
129
+ * See InlineOptionalDeviceGuard for more guidance on how to use this class.
130
+ */
131
+ template <typename T>
132
+ class InlineOptionalStreamGuard {
133
+ public:
134
+ /// Creates an uninitialized stream guard.
135
+ explicit InlineOptionalStreamGuard()
136
+ : guard_() // See Note [Explicit initialization of optional fields]
137
+ {}
138
+
139
+ /// Set the current device to the device associated with the passed stream,
140
+ /// and set the current stream on that device to the passed stream,
141
+ /// if the passed stream is not nullopt.
142
+ explicit InlineOptionalStreamGuard(std::optional<Stream> stream_opt)
143
+ : guard_() {
144
+ if (stream_opt.has_value()) {
145
+ guard_.emplace(stream_opt.value());
146
+ }
147
+ }
148
+
149
+ /// All constructors of StreamGuard are valid for OptionalStreamGuard
150
+ template <typename... Args>
151
+ explicit InlineOptionalStreamGuard(Args&&... args)
152
+ : guard_(std::in_place, std::forward<Args>(args)...) {}
153
+
154
+ // See Note [Move construction for RAII guards is tricky]
155
+ InlineOptionalStreamGuard(InlineOptionalStreamGuard<T>&& other) = delete;
156
+
157
+ // See Note [Move assignment for RAII guards is tricky]
158
+ InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) =
159
+ delete;
160
+
161
+ /// Resets the currently set stream to the original stream and
162
+ /// the currently set device to the original device. Then,
163
+ /// set the current device to the device associated with the passed stream,
164
+ /// and set the current stream on that device to the passed stream.
165
+ /// Initializes the OptionalStreamGuard if it was not previously initialized.
166
+ void reset_stream(Stream stream) {
167
+ if (guard_.has_value()) {
168
+ guard_->reset_stream(stream);
169
+ } else {
170
+ guard_.emplace(stream);
171
+ }
172
+ }
173
+
174
+ /// Returns the stream that was set at the time the guard was most recently
175
+ /// initialized, or nullopt if the guard is uninitialized.
176
+ std::optional<Stream> original_stream() const {
177
+ return guard_.has_value() ? std::make_optional(guard_->original_stream())
178
+ : std::nullopt;
179
+ }
180
+
181
+ /// Returns the most recent stream that was set using this stream guard,
182
+ /// either from construction, or via reset_stream, if the guard is
183
+ /// initialized, or nullopt if the guard is uninitialized.
184
+ std::optional<Stream> current_stream() const {
185
+ return guard_.has_value() ? std::make_optional(guard_->current_stream())
186
+ : std::nullopt;
187
+ }
188
+
189
+ /// Restore the original device and stream, resetting this guard to
190
+ /// uninitialized state.
191
+ void reset() {
192
+ guard_.reset();
193
+ }
194
+
195
+ private:
196
+ std::optional<InlineStreamGuard<T>> guard_;
197
+ };
198
+
199
+ template <typename T>
200
+ class InlineMultiStreamGuard {
201
+ public:
202
+ /// Calls `set_stream` on each of the streams in the list.
203
+ /// This may be useful if you need to set different streams
204
+ /// for different devices.
205
+ explicit InlineMultiStreamGuard(ArrayRef<Stream> streams) {
206
+ if (!streams.empty()) {
207
+ impl_.emplace(getDeviceTypeOfStreams(streams));
208
+ original_streams_.reserve(streams.size());
209
+ for (const Stream& s : streams) {
210
+ original_streams_.emplace_back(this->impl_->exchangeStream(s));
211
+ }
212
+ }
213
+ }
214
+
215
+ /// Copy is disallowed
216
+ InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete;
217
+ InlineMultiStreamGuard<T>& operator=(const InlineMultiStreamGuard&) = delete;
218
+
219
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
220
+ /// which is required for moves on types with nontrivial destructors.
221
+ InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete;
222
+ InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete;
223
+
224
+ ~InlineMultiStreamGuard() noexcept {
225
+ if (this->impl_.has_value()) {
226
+ for (const Stream& s : original_streams_) {
227
+ this->impl_->exchangeStream(s);
228
+ }
229
+ }
230
+ }
231
+
232
+ protected:
233
+ std::optional<T> impl_;
234
+
235
+ private:
236
+ /// The original streams that were active on all devices.
237
+ std::vector<Stream> original_streams_;
238
+
239
+ static DeviceType getDeviceTypeOfStreams(ArrayRef<Stream> streams) {
240
+ TORCH_INTERNAL_ASSERT(!streams.empty());
241
+ DeviceType type = streams[0].device_type();
242
+ for (const auto idx : c10::irange(1, streams.size())) {
243
+ TORCH_CHECK_VALUE(
244
+ streams[idx].device_type() == type,
245
+ "Streams have a mix of device types: stream 0 is on ",
246
+ streams[0].device(),
247
+ " while stream ",
248
+ idx,
249
+ " is on device ",
250
+ streams[idx].device());
251
+ }
252
+ return type;
253
+ }
254
+ };
255
+
256
+ } // namespace c10::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/DispatchKeySet.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ // TLS management for DispatchKeySet (the "local" DispatchKeySet(s))
7
+ //
8
+ // This manages two thread-local DispatchKeySets:
9
+ //
10
+ // - The included type set, which adds a tensor type for consideration
11
+ // in dispatch. (For example, you might add Profiling to
12
+ // the included type set to turn on profiling on all tensor operations.)
13
+ //
14
+ // - The excluded type set, which disqualifies a tensor type from dispatch.
15
+ // (For example, after redispatching on variable, we disqualify
16
+ // Autograd so we don't attempt to handle variable again.)
17
+ // (Exclusion wins over inclusion.)
18
+ //
19
+ // NB: Originally, I implemented the excluded type set as storing the inverted
20
+ // set, but TLS is defined to be zero-initialized, so this doesn't actually work
21
+ // (if it's inverted, you want the set to be -1 initialized).
22
+
23
+ namespace c10::impl {
24
+
25
+ // POD version of LocalDispatchKeySet. Declared here just so that
26
+ // we can put it in the guards.
27
+ // This struct encapsulates special handling for TLS initialization
28
+ // in set_included()/included() API so that they reflect the truth.
29
+ // If you want to create PODLocalDispatchKeySet with non-zero state,
30
+ // use set_included() instead of default constructor.
31
+ struct C10_API PODLocalDispatchKeySet {
32
+ uint64_t included_;
33
+ uint64_t excluded_;
34
+
35
+ // See Note [TLS Initialization]
36
+ DispatchKeySet included() const {
37
+ return DispatchKeySet(DispatchKeySet::RAW, included_) ^
38
+ c10::default_included_set;
39
+ }
40
+ DispatchKeySet excluded() const {
41
+ return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^
42
+ c10::default_excluded_set;
43
+ }
44
+
45
+ void set_included(DispatchKeySet x) {
46
+ included_ = (x ^ c10::default_included_set).raw_repr();
47
+ }
48
+ void set_excluded(DispatchKeySet x) {
49
+ excluded_ = (x ^ c10::default_excluded_set).raw_repr();
50
+ }
51
+ };
52
+ static_assert(
53
+ std::is_trivial_v<PODLocalDispatchKeySet>,
54
+ "PODLocalDispatchKeySet must be a POD type.");
55
+
56
+ struct C10_API LocalDispatchKeySet {
57
+ /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x)
58
+ : included_(x.included()), excluded_(x.excluded()) {}
59
+ DispatchKeySet included_;
60
+ DispatchKeySet excluded_;
61
+ };
62
+
63
+ // thread_local variables cannot be C10_API on Windows.
64
+ // Inlining this seems to break AutoDispatchBelowAutograd on Android.
65
+ #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
66
+ C10_API LocalDispatchKeySet tls_local_dispatch_key_set();
67
+ #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
68
+ extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set;
69
+
70
+ inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() {
71
+ // Don't let people fiddle with the thread_local directly just
72
+ // because they include this header.
73
+ return raw_local_dispatch_key_set;
74
+ }
75
+ #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
76
+
77
+ // Internal, use ThreadLocalStateGuard
78
+ C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);
79
+
80
+ // RAII API for manipulating the thread-local dispatch state.
81
+
82
+ class C10_API IncludeDispatchKeyGuard {
83
+ public:
84
+ IncludeDispatchKeyGuard(DispatchKeySet);
85
+ IncludeDispatchKeyGuard(DispatchKey k)
86
+ : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
87
+ IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
88
+ IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete;
89
+ IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete;
90
+ IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete;
91
+ ~IncludeDispatchKeyGuard();
92
+
93
+ private:
94
+ // A little micro-optimization to save us from tls_get_addr call
95
+ // on destruction
96
+ PODLocalDispatchKeySet* tls_;
97
+ DispatchKeySet include_;
98
+ };
99
+
100
+ class C10_API ExcludeDispatchKeyGuard {
101
+ public:
102
+ ExcludeDispatchKeyGuard(DispatchKeySet);
103
+ ExcludeDispatchKeyGuard(DispatchKey k)
104
+ : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
105
+ ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
106
+ ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete;
107
+ ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete;
108
+ ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete;
109
+ ~ExcludeDispatchKeyGuard();
110
+
111
+ private:
112
+ // A little micro-optimization to save us from tls_get_addr call
113
+ // on destruction
114
+ PODLocalDispatchKeySet* tls_;
115
+ DispatchKeySet exclude_;
116
+ };
117
+
118
+ struct C10_API ForceDispatchKeyGuard {
119
+ public:
120
+ ForceDispatchKeyGuard()
121
+ : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {}
122
+ ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set)
123
+ : ForceDispatchKeyGuard() {
124
+ c10::impl::_force_tls_local_dispatch_key_set(key_set);
125
+ }
126
+ ForceDispatchKeyGuard(
127
+ c10::DispatchKeySet include,
128
+ c10::DispatchKeySet exclude)
129
+ : ForceDispatchKeyGuard() {
130
+ auto updated_set = saved_keyset_;
131
+ updated_set.included_ = include;
132
+ updated_set.excluded_ = exclude;
133
+ c10::impl::_force_tls_local_dispatch_key_set(updated_set);
134
+ }
135
+ ~ForceDispatchKeyGuard() {
136
+ c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_);
137
+ }
138
+
139
+ private:
140
+ c10::impl::LocalDispatchKeySet saved_keyset_;
141
+ };
142
+
143
+ // Non-RAII API for manipulating the thread-local dispatch state.
144
+ // Please prefer the RAII API. The non-RAII API may be useful when
145
+ // the included/excluded state of a given DispatchKey must span
146
+ // many calls from the Python to the C++, so you cannot conveniently
147
+ // use an RAII guard.
148
+ //
149
+ // Example use case: a Python context manager that includes a certain
150
+ // DispatchKey, to ensure ops running under the context manager dispatch
151
+ // through that DispatchKey's registered overrides.
152
+ //
153
+ // The non-RAII API is less efficient than the RAII guards because both the
154
+ // getter and setter will do a tls_getaddr lookup (the RAII struct only needs
155
+ // one!)
156
+
157
+ C10_API bool tls_is_dispatch_key_excluded(DispatchKey x);
158
+ C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state);
159
+ C10_API bool tls_is_dispatch_key_included(DispatchKey x);
160
+ C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state);
161
+ C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks);
162
+ C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks);
163
+
164
+ } // namespace c10::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/HermeticPyObjectTLS.h>
4
+ #include <c10/core/impl/PyInterpreter.h>
5
+ #include <c10/util/python_stub.h>
6
+ #include <optional>
7
+
8
+ #include <atomic>
9
+
10
+ namespace c10::impl {
11
+
12
+ struct C10_API PyObjectSlot {
13
+ public:
14
+ PyObjectSlot();
15
+
16
+ ~PyObjectSlot();
17
+
18
+ void maybe_destroy_pyobj();
19
+
20
+ // Associate the TensorImpl with the specified PyObject, and, if necessary,
21
+ // also tag the interpreter.
22
+ //
23
+ // NB: This lives in a header so that we can inline away the switch on status
24
+ //
25
+ // NB: THIS FUNCTION CAN RAISE AN EXCEPTION. Make sure to clean up after
26
+ // PyObject if necessary!
27
+ void init_pyobj(
28
+ PyInterpreter* self_interpreter,
29
+ PyObject* pyobj,
30
+ PyInterpreterStatus status) {
31
+ impl::PyInterpreter* expected = nullptr;
32
+ switch (status) {
33
+ case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED:
34
+ // caller guarantees there is no multithreaded access; if there is
35
+ // no data race OK to do a relaxed store
36
+ pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed);
37
+ break;
38
+ case impl::PyInterpreterStatus::TAGGED_BY_US:
39
+ // no tagging is necessary, the tag is already correct
40
+ break;
41
+ case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED:
42
+ // attempt to claim this TensorImpl with the specified interpreter
43
+ // tag
44
+ if (pyobj_interpreter_.compare_exchange_strong(
45
+ expected, self_interpreter, std::memory_order_acq_rel)) {
46
+ break;
47
+ }
48
+ // test if, actually, it was already tagged by us! this situation can't
49
+ // be caused by a race, but it could be caused by a situation
50
+ // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED
51
+ // (because they didn't pre-check the tag) when actually it was
52
+ // owned by the interpreter
53
+ if (expected == self_interpreter) {
54
+ break;
55
+ }
56
+ // fallthrough, we lost the race. We are guaranteed not to lose the
57
+ // race with ourself, as calls to init_pyobj with the same interpreter
58
+ // ID must be sequentialized by the GIL
59
+ [[fallthrough]];
60
+ case impl::PyInterpreterStatus::TAGGED_BY_OTHER:
61
+ TORCH_CHECK(
62
+ false,
63
+ "cannot allocate PyObject for Tensor on interpreter ",
64
+ self_interpreter,
65
+ " that has already been used by another torch deploy interpreter ",
66
+ pyobj_interpreter_.load());
67
+ }
68
+
69
+ // we are the ONLY thread that can have gotten to this point. It is not
70
+ // possible to conflict with another zero interpreter as access is protected
71
+ // by GIL
72
+ // NB: owns_pyobj tag is initially false
73
+ pyobj_ = pyobj;
74
+ }
75
+
76
+ // Query the PyObject interpreter. This may return null if there is no
77
+ // interpreter. This is racy!
78
+ PyInterpreter* pyobj_interpreter();
79
+
80
+ PyObject* _unchecked_untagged_pyobj() const;
81
+
82
+ // Test the interpreter tag. If tagged for the current interpreter, return
83
+ // a non-nullopt (but possibly null) PyObject. If (possibly) untagged,
84
+ // returns a nullopt. If it is definitely invalid, raises an error.
85
+ //
86
+ // If `ignore_hermetic_tls` is false and this function is called from a
87
+ // hermetic context (ie, `HermeticPyObjectTLS::get_state()` is true), then
88
+ // nullopt is returned. If `ignore_hermetic_tls` is true, then the hermetic
89
+ // context is ignored, allowing you to check the interpreter tag of a
90
+ // nonhermetic PyObject from within a hermetic context. This is necessary
91
+ // because there are some cases where the deallocator function of a
92
+ // nonhermetic PyObject is called from within a hermetic context, so it must
93
+ // be properly treated as a nonhermetic PyObject.
94
+ //
95
+ // NB: this lives in header so that we can avoid actually creating the
96
+ // std::optional
97
+ std::optional<PyObject*> check_pyobj(
98
+ PyInterpreter* self_interpreter,
99
+ bool ignore_hermetic_tls = false) const {
100
+ // Note [Memory ordering on Python interpreter tag]
101
+ impl::PyInterpreter* interpreter =
102
+ pyobj_interpreter_.load(std::memory_order_acquire);
103
+ if (interpreter == nullptr) {
104
+ // NB: This never returns DEFINITELY_UNINITIALIZED because there is
105
+ // always the possibility that another thread races to initialize
106
+ // after we query here. The only time when we can conclude a tensor
107
+ // is definitely uninitialized is when we have just allocated it and
108
+ // it cannot have escaped to other threads yet
109
+ return std::nullopt;
110
+ } else if (interpreter == self_interpreter) {
111
+ // NB: pyobj_ could still be null!
112
+ if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) {
113
+ return std::nullopt;
114
+ } else {
115
+ return std::make_optional(_unchecked_untagged_pyobj());
116
+ }
117
+ } else {
118
+ TORCH_CHECK(
119
+ false,
120
+ "cannot access PyObject for Tensor on interpreter ",
121
+ (*self_interpreter)->name(),
122
+ " that has already been used by another torch deploy interpreter ",
123
+ (*pyobj_interpreter_.load())->name());
124
+ }
125
+ }
126
+
127
+ // Clear the PyObject field for an interpreter, in situations where we
128
+ // statically know the tensor is tagged with our interpreter.
129
+ void unchecked_clear_pyobj(PyInterpreter* interpreter);
130
+
131
+ PyInterpreter& load_pyobj_interpreter() const;
132
+
133
+ // Check if the PyObjectSlot's interpreter is the same as the specified
134
+ // interpreter
135
+ bool check_interpreter(PyInterpreter* interpreter);
136
+
137
+ // Check if the PyObjectSlot is holding a PyObject, owned or non-owned
138
+ bool has_pyobj_nonhermetic();
139
+
140
+ bool owns_pyobj();
141
+
142
+ void set_owns_pyobj(bool b);
143
+
144
+ private:
145
+ // This field contains the interpreter tag for this object. See
146
+ // Note [Python interpreter tag] for general context
147
+ //
148
+ // Note [Memory ordering on Python interpreter tag]
149
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
150
+ // What memory_order do we need when accessing this atomic? We don't
151
+ // need a single total modification order (as provided by
152
+ // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only
153
+ // transition from -1 to some positive integer and never changes afterwards.
154
+ // Because there is only one modification, it trivially already has a total
155
+ // modification order (e.g., we don't need fences or locked instructions on
156
+ // x86)
157
+ //
158
+ // In fact, one could make a reasonable argument that relaxed reads are OK,
159
+ // due to the presence of external locking (GIL) to ensure that interactions
160
+ // with other data structures are still correctly synchronized, so that
161
+ // we fall in the "Single-Location Data Structures" case as described in
162
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
163
+ // However, on x86, it doesn't matter if I use acquire or relaxed on the load
164
+ // as I get the same assembly in both cases. So I just use the more
165
+ // conservative acquire (which will impede compiler optimizations but I don't
166
+ // care)
167
+ std::atomic<PyInterpreter*> pyobj_interpreter_;
168
+
169
+ // This field contains a reference to a PyObject representing this Tensor.
170
+ // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new
171
+ // PyObject for it and set this field. This field does not have to be
172
+ // protected by an atomic as it is only allowed to be accessed when you hold
173
+ // the GIL, or during destruction of the tensor.
174
+ //
175
+ // When a PyObject dies, you are obligated to clear this field
176
+ // (otherwise, you will try to use-after-free the pyobj); this currently
177
+ // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp
178
+ //
179
+ // NB: Ordinarily, this should not be a strong reference, as if the
180
+ // PyObject owns the Tensor, this would create a reference cycle.
181
+ // However, sometimes this ownership flips. To track who owns
182
+ // who, this has a single pointer tag indicating whether or not the
183
+ // C++ object owns the PyObject (the common case, zero, means PyObject
184
+ // owns the C++ object); see _unchecked_untagged_pyobj for raw access
185
+ // or check_pyobj for checked access. See references to PyObject
186
+ // resurrection in torch/csrc/autograd/python_variable.cpp
187
+ PyObject* pyobj_;
188
+ };
189
+
190
+ } // namespace c10::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace c10::impl {
7
+
8
+ struct C10_API PythonDispatcherTLS {
9
+ static void set_state(PyInterpreter* state);
10
+ static PyInterpreter* get_state();
11
+ static void reset_state();
12
+ };
13
+
14
+ struct C10_API DisablePythonDispatcher {
15
+ DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) {
16
+ PythonDispatcherTLS::set_state({});
17
+ }
18
+ ~DisablePythonDispatcher() {
19
+ PythonDispatcherTLS::set_state(old_);
20
+ }
21
+ PyInterpreter* old_;
22
+ };
23
+
24
+ } // namespace c10::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
2
+ #include <thrust/binary_search.h>
3
+ #include <thrust/device_vector.h>
4
+ #include <thrust/execution_policy.h>
5
+ #include <thrust/functional.h>
6
+ #endif
7
+ namespace c10::cuda {
8
+ #ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
9
+ template <typename Iter, typename Scalar>
10
+ __forceinline__ __device__ Iter
11
+ lower_bound(Iter start, Iter end, Scalar value) {
12
+ return thrust::lower_bound(thrust::device, start, end, value);
13
+ }
14
+ #else
15
+ // thrust::lower_bound is broken on device, see
16
+ // https://github.com/NVIDIA/thrust/issues/1734 Implementation inspired by
17
+ // https://github.com/pytorch/pytorch/blob/805120ab572efef66425c9f595d9c6c464383336/aten/src/ATen/native/cuda/Bucketization.cu#L28
18
+ template <typename Iter, typename Scalar>
19
+ __device__ Iter lower_bound(Iter start, Iter end, Scalar value) {
20
+ while (start < end) {
21
+ auto mid = start + ((end - start) >> 1);
22
+ if (*mid < value) {
23
+ start = mid + 1;
24
+ } else {
25
+ end = mid;
26
+ }
27
+ }
28
+ return end;
29
+ }
30
+ #endif // THRUST_DEVICE_LOWER_BOUND_WORKS
31
+ } // namespace c10::cuda
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h ADDED
@@ -0,0 +1,499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/CachingDeviceAllocator.h>
4
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
5
+ #include <c10/cuda/CUDAMacros.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <c10/util/ApproximateClock.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <c10/util/Registry.h>
10
+
11
+ #include <array>
12
+ #include <atomic>
13
+ #include <cstddef>
14
+ #include <cstdint>
15
+ #include <functional>
16
+ #include <memory>
17
+ #include <string>
18
+ #include <unordered_set>
19
+ #include <utility>
20
+
21
+ namespace c10 {
22
+
23
+ // Caching allocator will execute every registered callback if it unable to find
24
+ // block inside of already allocated area.
25
+ class C10_CUDA_API FreeMemoryCallback {
26
+ public:
27
+ virtual ~FreeMemoryCallback() = default;
28
+ virtual bool Execute() = 0;
29
+ };
30
+
31
+ C10_DECLARE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback);
32
+ #define REGISTER_FREE_MEMORY_CALLBACK(name, ...) \
33
+ C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__);
34
+ } // namespace c10
35
+ //
36
+ // TODO: Turn this into an honest to goodness class. I briefly attempted to do
37
+ // this, but it was a bit irritating to figure out how to also correctly
38
+ // apply pimpl pattern so I didn't have to leak any internal implementation
39
+ // details in the header (CUDACachingAllocator could be made a pimpl, but
40
+ // you also need to appropriately define a class which is a subclass
41
+ // of Allocator. Not impossible, but required a bit more surgery than
42
+ // I wanted to do at the time.)
43
+ //
44
+ // Why is this using a namespace rather than old-style THCCachingAllocator_
45
+ // prefix? Mostly because it made the HIPify rules easier to write; _ is
46
+ // not counted as a word boundary, so you would otherwise have to list each
47
+ // of these functions.
48
+
49
+ namespace c10::cuda::CUDACachingAllocator {
50
+
51
+ // Preserved only for BC reasons
52
+ // NOLINTNEXTLINE(misc-unused-using-decls)
53
+ using c10::CachingDeviceAllocator::DeviceStats;
54
+
55
+ extern const size_t kLargeBuffer;
56
+
57
+ typedef std::shared_ptr<GatheredContext> (*CreateContextFn)();
58
+
59
+ // Struct containing info of an allocation block (i.e. a fractional part of a
60
+ // cudaMalloc)..
61
+ struct BlockInfo {
62
+ size_t size = 0;
63
+ size_t requested_size = 0;
64
+ int32_t gc_counter = 0;
65
+ bool allocated = false;
66
+ bool active = false;
67
+ std::shared_ptr<GatheredContext>
68
+ context_when_allocated; // per-watcher context
69
+ };
70
+
71
+ // Struct containing info of a memory segment (i.e. one contiguous cudaMalloc).
72
+ struct SegmentInfo {
73
+ c10::DeviceIndex device = 0;
74
+ size_t address = 0;
75
+ size_t total_size = 0;
76
+ size_t requested_size = 0; // unrounded, actually requested size
77
+ size_t allocated_size = 0;
78
+ size_t active_size = 0;
79
+ cudaStream_t stream = nullptr;
80
+ bool is_large = false;
81
+ bool is_expandable = false;
82
+ MempoolId_t owner_private_pool_id = {0, 0};
83
+ std::vector<BlockInfo> blocks;
84
+ std::shared_ptr<GatheredContext> context_when_allocated;
85
+ };
86
+
87
+ struct AllocatorState {
88
+ virtual ~AllocatorState() = default;
89
+ };
90
+
91
+ union trace_time_ {
92
+ time_t t_;
93
+ approx_time_t approx_t_;
94
+ };
95
+
96
+ struct TraceEntry {
97
+ enum Action {
98
+ ALLOC, // API made to the caching allocator for new memory
99
+ FREE_REQUESTED, // API call made to the caching allocator to free memory
100
+ FREE_COMPLETED, // The allocator might have to delay a free because
101
+ // it is still in use on another stream via record_stream
102
+ // This event is generated when a free actually completes.
103
+ SEGMENT_ALLOC, // a call to cudaMalloc to get more memory from the OS
104
+ SEGMENT_FREE, // a call to cudaFree to return memory to the OS (e.g. to
105
+ // defragment or empty_caches)
106
+ SEGMENT_MAP, // a call to cuMemMap (used with expandable_segments)
107
+ SEGMENT_UNMAP, // unmap part of a segment (used with expandable segments)
108
+ SNAPSHOT, // a call to snapshot, used to correlate memory snapshots to trace
109
+ // events
110
+ OOM // the allocator threw an OutOfMemoryError (addr_ is the amount of free
111
+ // bytes reported by cuda)
112
+ };
113
+ TraceEntry(
114
+ Action action,
115
+ c10::DeviceIndex device,
116
+ size_t addr,
117
+ size_t size,
118
+ cudaStream_t stream,
119
+ approx_time_t time,
120
+ std::shared_ptr<GatheredContext> context = nullptr)
121
+ : action_(action),
122
+ device_(device),
123
+ addr_(addr),
124
+ context_(std::move(context)),
125
+ stream_(stream),
126
+ size_(size) {
127
+ time_.approx_t_ = time;
128
+ }
129
+ Action action_;
130
+ c10::DeviceIndex device_;
131
+ size_t addr_; // for OOM, this is the amount of free bytes reported by cuda
132
+ std::shared_ptr<GatheredContext> context_;
133
+ cudaStream_t stream_{};
134
+ size_t size_;
135
+ trace_time_ time_{};
136
+ };
137
+
138
+ // Calls made by record_function will save annotations
139
+ struct AnnotationEntry {
140
+ AnnotationEntry(c10::DeviceIndex device, approx_time_t time)
141
+ : device_(device) {
142
+ time_.approx_t_ = time;
143
+ }
144
+
145
+ void recordUserMetadata(const std::string& name, std::string value) {
146
+ metadata_[name] = std::move(value);
147
+ }
148
+
149
+ c10::DeviceIndex device_;
150
+ trace_time_ time_{};
151
+ std::unordered_map<std::string, std::string> metadata_;
152
+ };
153
+
154
+ struct AllocatorConfigInfo {
155
+ double garbage_collection_threshold;
156
+ size_t max_split_size;
157
+ size_t pinned_num_register_threads;
158
+ bool expandable_segments;
159
+ bool release_lock_on_malloc;
160
+ bool pinned_use_host_register;
161
+ std::string last_allocator_settings;
162
+ std::vector<size_t> roundup_power2_divisions;
163
+ };
164
+
165
+ struct SnapshotInfo {
166
+ std::vector<SegmentInfo> segments;
167
+ std::vector<std::vector<TraceEntry>> device_traces;
168
+ std::vector<AnnotationEntry> external_annotations;
169
+ AllocatorConfigInfo config_metadata;
170
+ };
171
+
172
+ // returns the pointers freed in the pool
173
+ // and the pointers allocated. Note: a pointer
174
+ // may appear in both freed and allocated
175
+ struct CheckpointDelta {
176
+ std::vector<void*> ptrs_freed;
177
+ std::vector<at::DataPtr> dataptrs_allocd;
178
+ };
179
+
180
+ enum struct RecordContext {
181
+ NEVER = 0,
182
+ STATE = 1, // only keep stacks for active allocations
183
+ ALLOC = 2, // additionally keep stacks for allocations in the trace history
184
+ ALL = 3, // additionally record stacks for when something is freed
185
+ };
186
+
187
+ using OutOfMemoryObserver = std::function<void(
188
+ int64_t device,
189
+ size_t allocated,
190
+ size_t device_total,
191
+ size_t device_free)>;
192
+
193
+ using AllocatorTraceTracker = std::function<void(const TraceEntry&)>;
194
+
195
+ struct ShareableHandle {
196
+ ptrdiff_t offset;
197
+ std::string handle;
198
+ };
199
+
200
+ class CUDAAllocator : public Allocator {
201
+ public:
202
+ virtual void* raw_alloc(size_t nbytes) = 0;
203
+ virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) = 0;
204
+ virtual void raw_delete(void* ptr) = 0;
205
+ virtual void init(int device_count) = 0;
206
+ virtual bool initialized() = 0;
207
+ virtual void setMemoryFraction(double fraction, c10::DeviceIndex device) = 0;
208
+ virtual void emptyCache() = 0;
209
+ virtual void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) = 0;
210
+ virtual void* getBaseAllocation(void* ptr, size_t* size) = 0;
211
+ virtual void recordStream(const DataPtr&, CUDAStream stream) = 0;
212
+ virtual c10::CachingDeviceAllocator::DeviceStats getDeviceStats(
213
+ c10::DeviceIndex device) = 0;
214
+ virtual void resetAccumulatedStats(c10::DeviceIndex device) = 0;
215
+ virtual void resetPeakStats(c10::DeviceIndex device) = 0;
216
+ virtual SnapshotInfo snapshot() = 0;
217
+ virtual void beginAllocateToPool(
218
+ c10::DeviceIndex device,
219
+ MempoolId_t mempool_id,
220
+ std::function<bool(cudaStream_t)> filter) = 0;
221
+ virtual void endAllocateToPool(
222
+ c10::DeviceIndex device,
223
+ MempoolId_t mempool_id) = 0;
224
+ virtual void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) = 0;
225
+ // returns true if the allocated blocks are equal to expected live allocations
226
+ virtual bool checkPoolLiveAllocations(
227
+ c10::DeviceIndex device,
228
+ MempoolId_t mempool_id,
229
+ const std::unordered_set<void*>& expected_live_allocations) {
230
+ TORCH_CHECK(
231
+ false,
232
+ name(),
233
+ " does not yet support checkPoolLiveAllocations. "
234
+ "If you need it, please file an issue describing your use case.");
235
+ }
236
+ virtual ShareableHandle shareIpcHandle(void* ptr) = 0;
237
+ virtual std::shared_ptr<void> getIpcDevPtr(std::string handle) = 0;
238
+ virtual bool isHistoryEnabled() {
239
+ TORCH_CHECK(
240
+ false,
241
+ name(),
242
+ " does not yet support recordHistory. "
243
+ "If you need it, please file an issue describing your use case.");
244
+ }
245
+ virtual void recordHistory(
246
+ bool enabled,
247
+ CreateContextFn context_recorder,
248
+ size_t alloc_trace_max_entries,
249
+ RecordContext when) = 0;
250
+ virtual void recordAnnotation(
251
+ const std::vector<std::pair<std::string, std::string>>& md){};
252
+ virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
253
+
254
+ // Attached AllocatorTraceTracker callbacks will be called while the
255
+ // per-device allocator lock is held. Any additional locks taken from within
256
+ // the callback must be proven to always have the lock order that never
257
+ // triggers a deadlock. In particular, Python's GIL may be held when
258
+ // calling the allocator so it is unsafe to try to acquire the GIL in this
259
+ // callback.
260
+ virtual void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) = 0;
261
+
262
+ virtual void enablePeerAccess(
263
+ c10::DeviceIndex dev,
264
+ c10::DeviceIndex dev_to_access) = 0;
265
+
266
+ // memory not allocated from cudaMalloc cannot be copied
267
+ // across devices using cudaMemcpyAsync if peer to peer access is disabled.
268
+ // instead it requires cudaMemcpyAsyncPeer
269
+ // with P2P Enabled, all combinations work
270
+ // with P2P Disabled:
271
+ // cudaMalloc cudaMallocAsync/cuMemMap
272
+ // cudaMemcpyAsyncPeer works works
273
+ // cudaMemcpyAsync works error
274
+
275
+ // This function performs chooses to use the Peer version of
276
+ // memcpy if required based on where the allocated put dst/src.
277
+ virtual cudaError_t memcpyAsync(
278
+ void* dst,
279
+ int dstDevice,
280
+ const void* src,
281
+ int srcDevice,
282
+ size_t count,
283
+ cudaStream_t stream,
284
+ bool p2p_enabled) = 0;
285
+ virtual std::shared_ptr<AllocatorState> getCheckpointState(
286
+ c10::DeviceIndex device,
287
+ MempoolId_t id) = 0;
288
+ virtual CheckpointDelta setCheckpointPoolState(
289
+ c10::DeviceIndex device,
290
+ std::shared_ptr<AllocatorState> pps) = 0;
291
+ virtual std::string name() = 0;
292
+ };
293
+
294
+ // Allocator object, statically initialized
295
+ // See BackendInitializer in CUDACachingAllocator.cpp.
296
+ // Atomic loads on x86 are just normal loads,
297
+ // (atomic stores are different), so reading this value
298
+ // is no different than loading a pointer.
299
+ C10_CUDA_API extern std::atomic<CUDAAllocator*> allocator;
300
+
301
+ inline CUDAAllocator* get() {
302
+ return allocator.load();
303
+ }
304
+
305
+ // Called directly by clients.
306
+ inline void* raw_alloc(size_t nbytes) {
307
+ return get()->raw_alloc(nbytes);
308
+ }
309
+
310
+ inline void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) {
311
+ return get()->raw_alloc_with_stream(nbytes, stream);
312
+ }
313
+
314
+ inline void raw_delete(void* ptr) {
315
+ return get()->raw_delete(ptr);
316
+ }
317
+
318
+ inline void init(int device_count) {
319
+ return get()->init(device_count);
320
+ }
321
+
322
+ inline void setMemoryFraction(double fraction, c10::DeviceIndex device) {
323
+ return get()->setMemoryFraction(fraction, device);
324
+ }
325
+
326
+ inline void emptyCache() {
327
+ return get()->emptyCache();
328
+ }
329
+
330
+ inline void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) {
331
+ return get()->cacheInfo(device, largestBlock);
332
+ }
333
+
334
+ inline void* getBaseAllocation(void* ptr, size_t* size) {
335
+ return get()->getBaseAllocation(ptr, size);
336
+ }
337
+
338
+ inline void recordStream(const DataPtr& dataPtr, CUDAStream stream) {
339
+ return get()->recordStream(dataPtr, stream);
340
+ }
341
+
342
+ inline c10::CachingDeviceAllocator::DeviceStats getDeviceStats(
343
+ c10::DeviceIndex device) {
344
+ return get()->getDeviceStats(device);
345
+ }
346
+
347
+ inline void resetAccumulatedStats(c10::DeviceIndex device) {
348
+ return get()->resetAccumulatedStats(device);
349
+ }
350
+
351
+ inline void resetPeakStats(c10::DeviceIndex device) {
352
+ return get()->resetPeakStats(device);
353
+ }
354
+
355
+ inline SnapshotInfo snapshot() {
356
+ return get()->snapshot();
357
+ }
358
+
359
+ inline std::shared_ptr<AllocatorState> getCheckpointState(
360
+ c10::DeviceIndex device,
361
+ MempoolId_t id) {
362
+ return get()->getCheckpointState(device, id);
363
+ }
364
+
365
+ inline CheckpointDelta setCheckpointPoolState(
366
+ c10::DeviceIndex device,
367
+ std::shared_ptr<AllocatorState> pps) {
368
+ return get()->setCheckpointPoolState(device, std::move(pps));
369
+ }
370
+
371
+ // CUDAGraph interactions
372
+ inline void beginAllocateToPool(
373
+ c10::DeviceIndex device,
374
+ MempoolId_t mempool_id,
375
+ std::function<bool(cudaStream_t)> filter) {
376
+ get()->beginAllocateToPool(device, mempool_id, std::move(filter));
377
+ }
378
+
379
+ inline void endAllocateToPool(c10::DeviceIndex device, MempoolId_t mempool_id) {
380
+ get()->endAllocateToPool(device, mempool_id);
381
+ }
382
+
383
+ inline void recordHistory(
384
+ bool enabled,
385
+ CreateContextFn context_recorder,
386
+ size_t alloc_trace_max_entries,
387
+ RecordContext when) {
388
+ return get()->recordHistory(
389
+ enabled, context_recorder, alloc_trace_max_entries, when);
390
+ }
391
+
392
+ inline void recordAnnotation(
393
+ const std::vector<std::pair<std::string, std::string>>& md) {
394
+ return get()->recordAnnotation(md);
395
+ }
396
+
397
+ inline bool isHistoryEnabled() {
398
+ return get()->isHistoryEnabled();
399
+ }
400
+
401
+ inline bool checkPoolLiveAllocations(
402
+ c10::DeviceIndex device,
403
+ MempoolId_t mempool_id,
404
+ const std::unordered_set<void*>& expected_live_allocations) {
405
+ return get()->checkPoolLiveAllocations(
406
+ device, mempool_id, expected_live_allocations);
407
+ }
408
+
409
+ inline void attachOutOfMemoryObserver(OutOfMemoryObserver observer) {
410
+ return get()->attachOutOfMemoryObserver(std::move(observer));
411
+ }
412
+
413
+ inline void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) {
414
+ return get()->attachAllocatorTraceTracker(std::move(tracker));
415
+ }
416
+
417
+ inline void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) {
418
+ return get()->releasePool(device, mempool_id);
419
+ }
420
+ // Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE
421
+ inline std::shared_ptr<void> getIpcDevPtr(std::string handle) {
422
+ return get()->getIpcDevPtr(std::move(handle));
423
+ }
424
+
425
+ inline ShareableHandle shareIpcHandle(void* ptr) {
426
+ return get()->shareIpcHandle(ptr);
427
+ }
428
+
429
+ inline std::string name() {
430
+ return get()->name();
431
+ }
432
+
433
+ inline cudaError_t memcpyAsync(
434
+ void* dst,
435
+ int dstDevice,
436
+ const void* src,
437
+ int srcDevice,
438
+ size_t count,
439
+ cudaStream_t stream,
440
+ bool p2p_enabled) {
441
+ return get()->memcpyAsync(
442
+ dst, dstDevice, src, srcDevice, count, stream, p2p_enabled);
443
+ }
444
+
445
+ inline void enablePeerAccess(
446
+ c10::DeviceIndex dev,
447
+ c10::DeviceIndex dev_to_access) {
448
+ return get()->enablePeerAccess(dev, dev_to_access);
449
+ }
450
+
451
+ } // namespace c10::cuda::CUDACachingAllocator
452
+
453
+ namespace c10::cuda {
454
+
455
+ // MemPool represents a pool of memory in a caching allocator. Currently,
456
+ // it's just the ID of the pool object maintained in the CUDACachingAllocator.
457
+ //
458
+ // An allocator pointer can be passed to the MemPool to define how the
459
+ // allocations should be done in the pool. For example: using a different
460
+ // system allocator such as ncclMemAlloc.
461
+ struct C10_CUDA_API MemPool {
462
+ MemPool(
463
+ CUDACachingAllocator::CUDAAllocator* allocator = nullptr,
464
+ bool is_user_created = true);
465
+
466
+ MempoolId_t id();
467
+ CUDACachingAllocator::CUDAAllocator* allocator();
468
+
469
+ private:
470
+ static std::atomic<CaptureId_t> uid_;
471
+ static std::atomic<CaptureId_t> uuid_;
472
+ CUDACachingAllocator::CUDAAllocator* allocator_;
473
+ bool is_user_created_;
474
+ MempoolId_t id_;
475
+ };
476
+
477
+ // MemPoolContext holds the currently active pool and stashes the previous
478
+ // pool. On deletion it makes the previous pool active.
479
+ struct C10_CUDA_API MemPoolContext {
480
+ MemPoolContext(MemPool* mempool);
481
+
482
+ ~MemPoolContext();
483
+
484
+ // getActiveMemPool() can be used to get the currently active pool.
485
+ // For instance: in CUDACachingAllocator, we can route allocations
486
+ // to a user provided allocator, by doing:
487
+ //
488
+ // auto active_pool = MemPoolContext::getActiveMemPool();
489
+ // if (active_pool && active_pool->allocator()) {
490
+ // ptr = active_pool->allocator()->raw_alloc(size);
491
+ // }
492
+ //
493
+ static MemPool* getActiveMemPool();
494
+
495
+ private:
496
+ MemPool* prev_mempool_;
497
+ };
498
+
499
+ } // namespace c10::cuda
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAException.h>
4
+ #include <c10/macros/Macros.h>
5
+
6
+ namespace c10::cuda {
7
+
8
+ #ifdef TORCH_USE_CUDA_DSA
9
+ // Copy string from `src` to `dst`
10
+ static __device__ void dstrcpy(char* dst, const char* src) {
11
+ int i = 0;
12
+ // Copy string from source to destination, ensuring that it
13
+ // isn't longer than `C10_CUDA_DSA_MAX_STR_LEN-1`
14
+ while (*src != '\0' && i++ < C10_CUDA_DSA_MAX_STR_LEN - 1) {
15
+ *dst++ = *src++;
16
+ }
17
+ *dst = '\0';
18
+ }
19
+
20
+ static __device__ void dsa_add_new_assertion_failure(
21
+ DeviceAssertionsData* assertions_data,
22
+ const char* assertion_msg,
23
+ const char* filename,
24
+ const char* function_name,
25
+ const int line_number,
26
+ const uint32_t caller,
27
+ const dim3 block_id,
28
+ const dim3 thread_id) {
29
+ // `assertions_data` may be nullptr if device-side assertion checking
30
+ // is disabled at run-time. If it is disabled at compile time this
31
+ // function will never be called
32
+ if (!assertions_data) {
33
+ return;
34
+ }
35
+
36
+ // Atomically increment so other threads can fail at the same time
37
+ // Note that incrementing this means that the CPU can observe that
38
+ // a failure has happened and can begin to respond before we've
39
+ // written information about that failure out to the buffer.
40
+ const auto nid = atomicAdd(&(assertions_data->assertion_count), 1);
41
+
42
+ if (nid >= C10_CUDA_DSA_ASSERTION_COUNT) {
43
+ // At this point we're ran out of assertion buffer space.
44
+ // We could print a message about this, but that'd get
45
+ // spammy if a lot of threads did it, so we just silently
46
+ // ignore any other assertion failures. In most cases the
47
+ // failures will all probably be analogous anyway.
48
+ return;
49
+ }
50
+
51
+ // Write information about the assertion failure to memory.
52
+ // Note that this occurs only after the `assertion_count`
53
+ // increment broadcasts that there's been a problem.
54
+ auto& self = assertions_data->assertions[nid];
55
+ dstrcpy(self.assertion_msg, assertion_msg);
56
+ dstrcpy(self.filename, filename);
57
+ dstrcpy(self.function_name, function_name);
58
+ self.line_number = line_number;
59
+ self.caller = caller;
60
+ self.block_id[0] = block_id.x;
61
+ self.block_id[1] = block_id.y;
62
+ self.block_id[2] = block_id.z;
63
+ self.thread_id[0] = thread_id.x;
64
+ self.thread_id[1] = thread_id.y;
65
+ self.thread_id[2] = thread_id.z;
66
+ }
67
+
68
+ // Emulates a kernel assertion. The assertion won't stop the kernel's progress,
69
+ // so you should assume everything the kernel produces is garbage if there's an
70
+ // assertion failure.
71
+ // NOTE: This assumes that `assertions_data` and `assertion_caller_id` are
72
+ // arguments of the kernel and therefore accessible.
73
+ #define CUDA_KERNEL_ASSERT2(condition) \
74
+ do { \
75
+ if (C10_UNLIKELY(!(condition))) { \
76
+ /* Has an atomic element so threads can fail at the same time */ \
77
+ c10::cuda::dsa_add_new_assertion_failure( \
78
+ assertions_data, \
79
+ C10_STRINGIZE(condition), \
80
+ __FILE__, \
81
+ __FUNCTION__, \
82
+ __LINE__, \
83
+ assertion_caller_id, \
84
+ blockIdx, \
85
+ threadIdx); \
86
+ /* Now that the kernel has failed we early exit the kernel, but */ \
87
+ /* otherwise keep going and rely on the host to check UVM and */ \
88
+ /* determine we've had a problem */ \
89
+ return; \
90
+ } \
91
+ } while (false)
92
+ #else
93
+ #define CUDA_KERNEL_ASSERT2(condition) assert(condition)
94
+ #endif
95
+
96
+ } // namespace c10::cuda
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAMacros.h>
4
+
5
+ #include <cstdint>
6
+ #include <memory>
7
+ #include <mutex>
8
+ #include <string>
9
+ #include <utility>
10
+ #include <vector>
11
+
12
+ #ifdef USE_CUDA
13
+ #define TORCH_USE_CUDA_DSA
14
+ #endif
15
+
16
+ /// Number of assertion failure messages we can store. If this is too small
17
+ /// threads will fail silently.
18
+ constexpr int C10_CUDA_DSA_ASSERTION_COUNT = 10;
19
+ constexpr int C10_CUDA_DSA_MAX_STR_LEN = 512;
20
+
21
+ namespace c10::cuda {
22
+
23
/// Holds information about a single device-side assertion failure.
/// Held in managed memory and accessed by both the CPU and the GPU.
struct DeviceAssertionData {
  /// Stringification of the assertion that failed
  // NOLINTNEXTLINE(*-c-arrays)
  char assertion_msg[C10_CUDA_DSA_MAX_STR_LEN]{};
  /// File the assertion was in
  // NOLINTNEXTLINE(*-c-arrays)
  char filename[C10_CUDA_DSA_MAX_STR_LEN]{};
  /// Name of the function the assertion was in
  // NOLINTNEXTLINE(*-c-arrays)
  char function_name[C10_CUDA_DSA_MAX_STR_LEN]{};
  /// Line number the assertion was at
  int line_number{};
  /// Number uniquely identifying the kernel launch that triggered the assertion
  uint32_t caller{};
  /// block_id of the thread that failed the assertion
  // NOLINTNEXTLINE(*-c-arrays)
  int32_t block_id[3]{};
  /// thread_id of the thread that failed the assertion
  // NOLINTNEXTLINE(*-c-arrays)
  int32_t thread_id[3]{};
};
46
+
47
/// Used to hold assertions generated by the device.
/// Held in managed memory and accessed by both the CPU and the GPU.
struct DeviceAssertionsData {
  /// Total number of assertions found; a subset of these will be recorded
  /// in `assertions` (at most C10_CUDA_DSA_ASSERTION_COUNT entries)
  int32_t assertion_count{};
  /// An array of assertions that will be written to in a race-free manner
  // NOLINTNEXTLINE(*-c-arrays)
  DeviceAssertionData assertions[C10_CUDA_DSA_ASSERTION_COUNT]{};
};
57
+
58
/// Used to hold info about kernel launches so that we can run kernels
/// asynchronously and still associate launches with device-side
/// assertion failures.
struct CUDAKernelLaunchInfo {
  /// Filename of the code where the kernel was launched from
  const char* launch_filename;
  /// Function from which the kernel was launched
  const char* launch_function;
  /// Line number of where the code was launched from
  uint32_t launch_linenum;
  /// Backtrace of where the kernel was launched from, only populated if
  /// CUDAKernelLaunchRegistry::gather_launch_stacktrace is True
  std::string launch_stacktrace;
  /// Kernel that was launched
  const char* kernel_name;
  /// Device the kernel was launched on
  int device;
  /// Stream the kernel was launched on
  int32_t stream;
  /// A number that uniquely identifies the kernel launch
  uint64_t generation_number;
};
80
+
81
/// Circular buffer used to hold information about kernel launches.
/// This is later used to reconstruct how a device-side kernel assertion
/// failure occurred. CUDAKernelLaunchRegistry is used as a singleton.
class C10_CUDA_API CUDAKernelLaunchRegistry {
 private:
  /// Assume that this is the max number of kernel launches that might ever be
  /// enqueued across all streams on a single device
  static constexpr int max_kernel_launches = 1024;
  /// How many kernel launch infos we've inserted. Used to ensure that circular
  /// queue doesn't provide false information by always increasing, but also to
  /// mark where we are inserting into the queue
#ifdef TORCH_USE_CUDA_DSA
  uint64_t generation_number = 0;
#endif
  /// Shared mutex between writer and accessor to ensure multi-threaded safety.
  mutable std::mutex read_write_mutex;
  /// Used to prevent race conditions in GPU memory allocation
  mutable std::mutex gpu_alloc_mutex;
  /// Pointer to managed memory keeping track of device-side assertions. There
  /// is one entry for each possible device the process might work with. Unused
  /// entries are nullptrs. We could also use an unordered_set here, but this
  /// vector design will be faster and the wasted memory is small since we
  /// expect the number of GPUs per node will always be small
  std::vector<
      std::unique_ptr<DeviceAssertionsData, void (*)(DeviceAssertionsData*)>>
      uvm_assertions;
  /// A single circular buffer holds information about every kernel launch the
  /// process makes across all devices.
  std::vector<CUDAKernelLaunchInfo> kernel_launches;
  /// Reads the environment to decide whether launch stacktraces are gathered.
  bool check_env_for_enable_launch_stacktracing() const;
  /// Reads the environment to decide whether host-side DSA is enabled.
  bool check_env_for_dsa_enabled() const;

 public:
  CUDAKernelLaunchRegistry();
  /// Register a new kernel launch and obtain a generation number back to be
  /// passed to the kernel.
  /// NOTE(review): returns uint32_t while `generation_number` is uint64_t, so
  /// the returned id presumably wraps after 2^32 launches — confirm callers
  /// tolerate this.
  uint32_t insert(
      const char* launch_filename,
      const char* launch_function,
      const uint32_t launch_linenum,
      const char* kernel_name,
      const int32_t stream_id);
  /// Get copies of the kernel launch registry and each device's assertion
  /// failure buffer so they can be inspected without raising race conditions
  std::
      pair<std::vector<DeviceAssertionsData>, std::vector<CUDAKernelLaunchInfo>>
      snapshot() const;
  /// Get a pointer to the current device's assertion failure buffer. If no such
  /// buffer exists then one is created. This means that the first kernel launch
  /// made on each device will be slightly slower because memory allocations are
  /// required
  DeviceAssertionsData* get_uvm_assertions_ptr_for_current_device();
  /// Gets the global singleton of the registry
  static CUDAKernelLaunchRegistry& get_singleton_ref();
  /// If not all devices support DSA, we disable it
  const bool do_all_devices_support_managed_memory = false;
  /// Whether or not to gather stack traces when launching kernels
  bool gather_launch_stacktrace = false;
  /// Whether or not host-side DSA is enabled or disabled at run-time
  /// Note: Device-side code cannot be enabled/disabled at run-time
  bool enabled_at_runtime = false;
  /// Whether or not a device has indicated a failure
  bool has_failed() const;
#ifdef TORCH_USE_CUDA_DSA
  const bool enabled_at_compile_time = true;
#else
  const bool enabled_at_compile_time = false;
#endif
};
150
+
151
/// Retrieves a textual summary of device-side assertion state (defined
/// out-of-line; see CUDAKernelLaunchRegistry for the data it reports on).
std::string c10_retrieve_device_side_assertion_info();

} // namespace c10::cuda

// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH
// requires the same input arguments. We introduce the following macro to
// standardize these.
#define TORCH_DSA_KERNEL_ARGS                                              \
  [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, \
      [[maybe_unused]] uint32_t assertion_caller_id

// This macro can be used to pass the DSA arguments onward to another
// function
#define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAException.h ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDADeviceAssertionHost.h>
4
+ #include <c10/cuda/CUDAMacros.h>
5
+ #include <c10/cuda/CUDAMiscFunctions.h>
6
+ #include <c10/macros/Macros.h>
7
+ #include <c10/util/Exception.h>
8
+ #include <c10/util/irange.h>
9
+ #include <cuda.h>
10
+
11
+ // Note [CHECK macro]
12
+ // ~~~~~~~~~~~~~~~~~~
13
+ // This is a macro so that AT_ERROR can get accurate __LINE__
14
+ // and __FILE__ information. We could split this into a short
15
+ // macro and a function implementation if we pass along __LINE__
16
+ // and __FILE__, but no one has found this worth doing.
17
+
18
// Used to denote errors from CUDA framework.
// This needs to be declared here instead of in util/Exception.h for proper
// conversion during hipify.
namespace c10 {
// Exception type thrown for failures reported by the CUDA runtime; inherits
// all constructors and behavior from c10::Error.
class C10_CUDA_API CUDAError : public c10::Error {
  using Error::Error;
};
} // namespace c10
26
+
27
// Evaluates a CUDA runtime expression and, on failure, delegates to
// c10::cuda::c10_cuda_check_implementation with file/function/line context;
// the trailing `true` requests that device-side assertion state also be
// inspected (see c10_cuda_check_implementation's declaration).
#define C10_CUDA_CHECK(EXPR)                                        \
  do {                                                              \
    const cudaError_t __err = EXPR;                                 \
    c10::cuda::c10_cuda_check_implementation(                       \
        static_cast<int32_t>(__err),                                \
        __FILE__,                                                   \
        __func__, /* Line number data type not well-defined between \
                     compilers, so we perform an explicit cast */   \
        static_cast<uint32_t>(__LINE__),                            \
        true);                                                      \
  } while (0)

// Like C10_CUDA_CHECK but only emits a TORCH_WARN on failure instead of
// erroring; clears the sticky CUDA error state via cudaGetLastError() first.
#define C10_CUDA_CHECK_WARN(EXPR)                              \
  do {                                                         \
    const cudaError_t __err = EXPR;                            \
    if (C10_UNLIKELY(__err != cudaSuccess)) {                  \
      auto error_unused C10_UNUSED = cudaGetLastError();       \
      (void)error_unused;                                      \
      TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); \
    }                                                          \
  } while (0)

// Indicates that a CUDA error is handled in a non-standard way
#define C10_CUDA_ERROR_HANDLED(EXPR) EXPR

// Intentionally ignore a CUDA error, clearing the sticky error state so it
// does not leak into the next CUDA call.
#define C10_CUDA_IGNORE_ERROR(EXPR)                             \
  do {                                                          \
    const cudaError_t __err = EXPR;                             \
    if (C10_UNLIKELY(__err != cudaSuccess)) {                   \
      cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
      (void)error_unused;                                       \
    }                                                           \
  } while (0)

// Clear the last CUDA error
#define C10_CUDA_CLEAR_ERROR()                                \
  do {                                                        \
    cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
    (void)error_unused;                                       \
  } while (0)
68
+
69
// This should be used directly after every kernel launch to ensure
// the launch happened correctly and provide an early, close-to-source
// diagnostic if it didn't.
#define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError())

/// Launches a CUDA kernel appending to it all the information needed to handle
/// device-side assertion failures (the assertion buffer pointer and a launch
/// id from the launch registry). Checks that the launch was successful.
#define TORCH_DSA_KERNEL_LAUNCH(                                      \
    kernel, blocks, threads, shared_mem, stream, ...)                 \
  do {                                                                \
    auto& launch_registry =                                           \
        c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref();     \
    kernel<<<blocks, threads, shared_mem, stream>>>(                  \
        __VA_ARGS__,                                                  \
        launch_registry.get_uvm_assertions_ptr_for_current_device(),  \
        launch_registry.insert(                                       \
            __FILE__, __FUNCTION__, __LINE__, #kernel, stream.id())); \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                   \
  } while (0)
88
+
89
namespace c10::cuda {

/// In the event of a CUDA failure, formats a nice error message about that
/// failure and also checks for device-side assertion failures when
/// `include_device_assertions` is true. Shared implementation behind the
/// C10_CUDA_CHECK macro.
C10_CUDA_API void c10_cuda_check_implementation(
    const int32_t err,
    const char* filename,
    const char* function_name,
    const int line_number,
    const bool include_device_assertions);

} // namespace c10::cuda
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMacros.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#ifndef C10_USING_CUSTOM_GENERATED_MACROS

// We have not yet modified the AMD HIP build to generate this file so
// we add an extra option to specifically ignore it.
#ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE
#include <c10/cuda/impl/cuda_cmake_macros.h>
#endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE

#endif

// See c10/macros/Export.h for a detailed explanation of what the function
// of these macros are. We need one set of macros for every separate library
// we build.

#ifdef _WIN32
#if defined(C10_CUDA_BUILD_SHARED_LIBS)
#define C10_CUDA_EXPORT __declspec(dllexport)
#define C10_CUDA_IMPORT __declspec(dllimport)
#else
#define C10_CUDA_EXPORT
#define C10_CUDA_IMPORT
#endif
#else // _WIN32
#if defined(__GNUC__)
#define C10_CUDA_EXPORT __attribute__((__visibility__("default")))
#else // defined(__GNUC__)
#define C10_CUDA_EXPORT
#endif // defined(__GNUC__)
#define C10_CUDA_IMPORT C10_CUDA_EXPORT
#endif // _WIN32

// This one is being used by libc10_cuda.so
#ifdef C10_CUDA_BUILD_MAIN_LIB
#define C10_CUDA_API C10_CUDA_EXPORT
#else
#define C10_CUDA_API C10_CUDA_IMPORT
#endif

/**
 * The maximum number of GPUs that we recognize. Increasing this beyond the
 * initial limit of 16 broke Caffe2 testing, hence the ifdef guards.
 * This value cannot be more than 128 because our DeviceIndex is a uint8_t.
 */
#ifdef FBCODE_CAFFE2
// fbcode depends on this value being 16
#define C10_COMPILE_TIME_MAX_GPUS 16
#else
#define C10_COMPILE_TIME_MAX_GPUS 120
#endif
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAMiscFunctions.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
// this file is to avoid circular dependency between CUDAFunctions.h and
// CUDAException.h

#include <c10/cuda/CUDAMacros.h>

#include <mutex>

namespace c10::cuda {
// Returns an extra suffix string appended to CUDA error messages (e.g. by the
// check machinery in CUDAException.h).
C10_CUDA_API const char* get_cuda_check_suffix() noexcept;
// Returns a process-wide mutex guarding CUDA free operations.
C10_CUDA_API std::mutex* getFreeMutex();
} // namespace c10::cuda
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
4
+ #include <c10/core/impl/GPUTrace.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+ #include <c10/cuda/CUDACachingAllocator.h>
9
+ #include <c10/cuda/CUDAException.h>
10
+ #include <c10/cuda/CUDAFunctions.h>
11
+ #include <c10/cuda/CUDAStream.h>
12
+
13
+ #include <c10/core/Device.h>
14
+ #include <c10/core/DeviceType.h>
15
+ #include <c10/core/Stream.h>
16
+ #include <c10/core/impl/PyInterpreter.h>
17
+ #include <cuda_runtime_api.h>
18
+ #include <cstdint>
19
+ #include <optional>
20
+
21
+ namespace c10::cuda::impl {
22
+
23
// CUDA backend implementation of the generic DeviceGuardImplInterface:
// device get/set/exchange, stream management, and event create/record/
// block/query/synchronize, with Python GPU-trace hooks where installed.
struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
  static constexpr DeviceType static_type = DeviceType::CUDA;

  CUDAGuardImpl() = default;
  explicit CUDAGuardImpl(DeviceType t) {
    TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA);
  }
  DeviceType type() const override {
    return DeviceType::CUDA;
  }
  /// Sets the current device to `d` and returns the previously-current device.
  Device exchangeDevice(Device d) const override {
    TORCH_INTERNAL_ASSERT(d.is_cuda());
    auto old_device_index = c10::cuda::ExchangeDevice(d.index());
    return Device(DeviceType::CUDA, old_device_index);
  }
  Device getDevice() const override {
    DeviceIndex device = 0;
    C10_CUDA_CHECK(c10::cuda::GetDevice(&device));
    return Device(DeviceType::CUDA, device);
  }
  /// Like getDevice(), but reports failure via std::nullopt (after a warning)
  /// instead of raising, so it can be noexcept.
  std::optional<Device> uncheckedGetDevice() const noexcept {
    DeviceIndex device{-1};
    const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device));
    C10_CUDA_CHECK_WARN(err);
    if (err != cudaSuccess) {
      return std::nullopt;
    }
    return Device(DeviceType::CUDA, device);
  }
  void setDevice(Device d) const override {
    TORCH_INTERNAL_ASSERT(d.is_cuda());
    C10_CUDA_CHECK(c10::cuda::SetDevice(d.index()));
  }
  /// Warns rather than throws on failure (noexcept), suitable for use in
  /// destructors/guard teardown.
  void uncheckedSetDevice(Device d) const noexcept override {
    C10_CUDA_CHECK_WARN(c10::cuda::MaybeSetDevice(d.index()));
  }
  Stream getStream(Device d) const noexcept override {
    return getCurrentCUDAStream(d.index()).unwrap();
  }
  Stream getDefaultStream(Device d) const override {
    return getDefaultCUDAStream(d.index());
  }
  Stream getNewStream(Device d, int priority = 0) const override {
    return getStreamFromPool(priority, d.index());
  }
  Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
      const override {
    return getStreamFromPool(isHighPriority, d.index());
  }
  // NB: These do NOT set the current device
  Stream exchangeStream(Stream s) const noexcept override {
    CUDAStream cs(s);
    auto old_stream = getCurrentCUDAStream(s.device().index());
    setCurrentCUDAStream(cs);
    return old_stream.unwrap();
  }
  DeviceIndex deviceCount() const noexcept override {
    return device_count();
  }

  // Event-related functions
  /// Creates a CUDA event, mapping PyTorch's EventFlag to the CUDA flag
  /// (PYTORCH_DEFAULT disables timing); notifies the GPU trace hook if set.
  void createEvent(cudaEvent_t* cuda_event, const EventFlag flag) const {
    // Maps PyTorch's Event::Flag to CUDA flag
    auto cuda_flag = cudaEventDefault;
    switch (flag) {
      case EventFlag::PYTORCH_DEFAULT:
        cuda_flag = cudaEventDisableTiming;
        break;
      case EventFlag::BACKEND_DEFAULT:
        cuda_flag = cudaEventDefault;
        break;
      default:
        TORCH_CHECK(false, "CUDA event received unknown flag");
    }

    C10_CUDA_CHECK(cudaEventCreateWithFlags(cuda_event, cuda_flag));
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_creation(
          c10::kCUDA, reinterpret_cast<uintptr_t>(cuda_event));
    }
  }

  /// Destroys the event on its own device, warning (not throwing) on errors
  /// since this is noexcept; restores the original device afterwards.
  void destroyEvent(void* event, const DeviceIndex device_index)
      const noexcept override {
    if (!event)
      return;
    auto cuda_event = static_cast<cudaEvent_t>(event);
    DeviceIndex orig_device{-1};
    C10_CUDA_CHECK_WARN(c10::cuda::GetDevice(&orig_device));
    C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(device_index));
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_deletion(
          c10::kCUDA, reinterpret_cast<uintptr_t>(cuda_event));
    }
    C10_CUDA_CHECK_WARN(cudaEventDestroy(cuda_event));
    C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(orig_device));
  }

  /// Records `*event` on `stream`, lazily creating the event on first use and
  /// writing the (possibly new) event handle back through `event`.
  void record(
      void** event,
      const Stream& stream,
      const DeviceIndex device_index,
      const EventFlag flag) const override {
    TORCH_CHECK(
        device_index == -1 || device_index == stream.device_index(),
        "Event device index ",
        device_index,
        " does not match recording stream's device index ",
        stream.device_index(),
        ".");

    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(*event);
    CUDAStream cuda_stream{stream};

    // Moves to stream's device to record
    const auto orig_device = getDevice();
    setDevice(stream.device());

    // Creates the event (lazily)
    if (!cuda_event)
      createEvent(&cuda_event, flag);
    C10_CUDA_CHECK(cudaEventRecord(cuda_event, cuda_stream));
    // Makes the void* point to the (possibly just allocated) CUDA event
    *event = cuda_event;
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_record(
          c10::kCUDA,
          reinterpret_cast<uintptr_t>(cuda_event),
          reinterpret_cast<uintptr_t>(cuda_stream.stream()));
    }

    // Resets device
    setDevice(orig_device);
  }

  /// Makes `stream` wait (asynchronously, on-device) for `event`.
  void block(void* event, const Stream& stream) const override {
    if (!event)
      return;
    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
    CUDAStream cuda_stream{stream};
    const auto orig_device = getDevice();
    setDevice(stream.device());
    C10_CUDA_CHECK(cudaStreamWaitEvent(
        cuda_stream,
        cuda_event,
        /*flags (must be zero)=*/0));
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_wait(
          c10::kCUDA,
          reinterpret_cast<uintptr_t>(cuda_event),
          reinterpret_cast<uintptr_t>(cuda_stream.stream()));
    }
    setDevice(orig_device);
  }

  // May be called from any device
  /// Returns true iff the event has completed; a null (never recorded) event
  /// counts as complete. cudaErrorNotReady is swallowed and cleared.
  bool queryEvent(void* event) const override {
    if (!event)
      return true;
    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
    // Note: cudaEventQuery can be safely called from any device
    const cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaEventQuery(cuda_event));
    if (err != cudaErrorNotReady) {
      C10_CUDA_CHECK(err);
    } else {
      // ignore and clear the error if not ready
      (void)cudaGetLastError();
    }
    return (err == cudaSuccess);
  }

  // Stream-related functions
  bool queryStream(const Stream& stream) const override {
    CUDAStream cuda_stream{stream};
    return cuda_stream.query();
  }

  void synchronizeStream(const Stream& stream) const override {
    CUDAStream cuda_stream{stream};
    cuda_stream.synchronize();
  }

  /// Blocks the host until the event completes; no-op for a null event.
  void synchronizeEvent(void* event) const override {
    if (!event)
      return;
    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_synchronization(
          c10::kCUDA, reinterpret_cast<uintptr_t>(cuda_event));
    }
    // Note: cudaEventSynchronize can be safely called from any device
    C10_CUDA_CHECK(cudaEventSynchronize(cuda_event));
  }

  /// Tells the caching allocator that `data_ptr` is in use on `stream`.
  void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
      const override {
    CUDAStream cuda_stream{stream};
    CUDACachingAllocator::recordStream(data_ptr, cuda_stream);
  }

  /// Returns elapsed milliseconds between two recorded events, switching to
  /// `device_index` first to avoid creating a fresh context on the current
  /// device (and its memory cost).
  double elapsedTime(void* event1, void* event2, const DeviceIndex device_index)
      const override {
    TORCH_CHECK(
        event1 && event2,
        "Both events must be recorded before calculating elapsed time.");
    // Even though cudaEventElapsedTime can be safely called from any device, if
    // the current device is not initialized, it will create a new cuda context,
    // which will consume a lot of memory.
    DeviceIndex orig_device{-1};
    C10_CUDA_CHECK(c10::cuda::GetDevice(&orig_device));
    C10_CUDA_CHECK(c10::cuda::SetDevice(device_index));
    cudaEvent_t cuda_event1 = static_cast<cudaEvent_t>(event1);
    cudaEvent_t cuda_event2 = static_cast<cudaEvent_t>(event2);
    float time_ms = 0;
    // raise cudaErrorNotReady if either event is recorded but not yet completed
    C10_CUDA_CHECK(cudaEventElapsedTime(&time_ms, cuda_event1, cuda_event2));
    C10_CUDA_CHECK(c10::cuda::SetDevice(orig_device));
    return static_cast<double>(time_ms);
  }
};
248
+
249
+ } // namespace c10::cuda::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/cuda/CUDAMacros.h>

namespace c10::cuda::impl {

// Test entry point exported from libc10_cuda (defined out-of-line); used to
// verify the library links and runs — see the implementation for what the
// returned int encodes.
C10_CUDA_API int c10_cuda_test();

}
videochat2/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
2
+ //
3
+ // The LLVM Compiler Infrastructure
4
+ //
5
+ // This file is distributed under the University of Illinois Open Source
6
+ // License. See LICENSE.TXT for details.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+ //
10
+ // This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
11
+ //
12
+ //===----------------------------------------------------------------------===//
13
+
14
+ // ATen: modified from llvm::AlignOf
15
+ // replaced LLVM_ALIGNAS with alignas
16
+
17
+ #pragma once
18
+
19
+ #include <cstddef>
20
+
21
+ namespace c10 {
22
+
23
+ /// \struct AlignedCharArray
24
+ /// \brief Helper for building an aligned character array type.
25
+ ///
26
+ /// This template is used to explicitly build up a collection of aligned
27
+ /// character array types. We have to build these up using a macro and explicit
28
+ /// specialization to cope with MSVC (at least till 2015) where only an
29
+ /// integer literal can be used to specify an alignment constraint. Once built
30
+ /// up here, we can then begin to indirect between these using normal C++
31
+ /// template parameters.
32
+
33
// MSVC requires special handling here.
#ifndef _MSC_VER

// On conforming compilers a plain alignas() on the buffer is sufficient.
template <size_t Alignment, size_t Size>
struct AlignedCharArray {
  // NOLINTNEXTLINE(*c-arrays)
  alignas(Alignment) char buffer[Size];
};

#else // _MSC_VER

/// \brief Create a type with an aligned char buffer.
template <size_t Alignment, size_t Size>
struct AlignedCharArray;

// We provide special variations of this template for the most common
// alignments because __declspec(align(...)) doesn't actually work when it is
// a member of a by-value function argument in MSVC, even if the alignment
// request is something reasonably like 8-byte or 16-byte. Note that we can't
// even include the declspec with the union that forces the alignment because
// MSVC warns on the existence of the declspec despite the union member forcing
// proper alignment.

template <size_t Size>
struct AlignedCharArray<1, Size> {
  union {
    char aligned;
    char buffer[Size];
  };
};

template <size_t Size>
struct AlignedCharArray<2, Size> {
  union {
    short aligned;
    char buffer[Size];
  };
};

template <size_t Size>
struct AlignedCharArray<4, Size> {
  union {
    int aligned;
    char buffer[Size];
  };
};

template <size_t Size>
struct AlignedCharArray<8, Size> {
  union {
    double aligned;
    char buffer[Size];
  };
};

// The rest of these are provided with a __declspec(align(...)) and we simply
// can't pass them by-value as function arguments on MSVC.

#define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
  template <size_t Size>                          \
  struct AlignedCharArray<x, Size> {              \
    __declspec(align(x)) char buffer[Size];       \
  };

AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)

#undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT

#endif // _MSC_VER
105
+
106
namespace detail {
// Holds one member of each candidate type, so alignof(AlignerImpl<...>) is
// the strictest alignment required by any of T1..T10. Never instantiated
// (constructor deleted); used only inside alignof().
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
class AlignerImpl {
  T1 t1;
  T2 t2;
  T3 t3;
  T4 t4;
  T5 t5;
  T6 t6;
  T7 t7;
  T8 t8;
  T9 t9;
  T10 t10;

 public:
  AlignerImpl() = delete;
};

// Union of char arrays sized like each candidate type, so
// sizeof(SizerImpl<...>) is the size of the largest of T1..T10.
// Used only inside sizeof().
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
union SizerImpl {
  // NOLINTNEXTLINE(*c-arrays)
  char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
      arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
      arr9[sizeof(T9)], arr10[sizeof(T10)];
};
} // end namespace detail
152
+
153
/// \brief This union template exposes a suitably aligned and sized character
/// array member which can hold elements of any of up to ten types.
///
/// These types may be arrays, structs, or any other types. The goal is to
/// expose a char array buffer member which can be used as suitable storage for
/// a placement new of any of these types. Support for more than ten types can
/// be added at the cost of more boilerplate.
///
/// The inherited `buffer` member has the maximum size (via detail::SizerImpl)
/// and strictest alignment (via detail::AlignerImpl) over T1..T10.
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
struct AlignedCharArrayUnion
    : AlignedCharArray<
          alignof(detail::AlignerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>),
          sizeof(::c10::detail::
                     SizerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>)> {};
176
+ } // end namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Array.h ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <utility>
5
+
6
+ namespace c10 {
7
+
8
+ // This helper function creates a constexpr std::array
9
+ // From a compile time list of values, without requiring you to explicitly
10
+ // write out the length.
11
+ //
12
+ // See also https://stackoverflow.com/a/26351760/23845
13
+ template <typename V, typename... T>
14
+ inline constexpr auto array_of(T&&... t) -> std::array<V, sizeof...(T)> {
15
+ return {{std::forward<T>(t)...}};
16
+ }
17
+
18
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ArrayRef.h ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
2
+ //
3
+ // The LLVM Compiler Infrastructure
4
+ //
5
+ // This file is distributed under the University of Illinois Open Source
6
+ // License. See LICENSE.TXT for details.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ // ATen: modified from llvm::ArrayRef.
11
+ // removed llvm-specific functionality
12
+ // removed some implicit const -> non-const conversions that rely on
13
+ // complicated std::enable_if meta-programming
14
+ // removed a bunch of slice variants for simplicity...
15
+
16
+ #pragma once
17
+
18
+ #include <c10/macros/Macros.h>
19
+ #include <c10/util/Deprecated.h>
20
+ #include <c10/util/Exception.h>
21
+ #include <c10/util/SmallVector.h>
22
+
23
+ #include <array>
24
+ #include <cstddef>
25
+ #include <cstdint>
26
+ #include <initializer_list>
27
+ #include <iterator>
28
+ #include <ostream>
29
+ #include <type_traits>
30
+ #include <vector>
31
+
32
+ namespace c10 {
33
+ /// ArrayRef - Represent a constant reference to an array (0 or more elements
34
+ /// consecutively in memory), i.e. a start pointer and a length. It allows
35
+ /// various APIs to take consecutive elements easily and conveniently.
36
+ ///
37
+ /// This class does not own the underlying data, it is expected to be used in
38
+ /// situations where the data resides in some other buffer, whose lifetime
39
+ /// extends past that of the ArrayRef. For this reason, it is not in general
40
+ /// safe to store an ArrayRef.
41
+ ///
42
+ /// This is intended to be trivially copyable, so it should be passed by
43
+ /// value.
44
template <typename T>
class ArrayRef final {
 public:
  using iterator = const T*;
  using const_iterator = const T*;
  using size_type = size_t;
  using value_type = T;

  using reverse_iterator = std::reverse_iterator<iterator>;

 private:
  /// The start of the array, in an external buffer.
  const T* Data;

  /// The number of elements.
  size_type Length;

  // Debug-only invariant check: a null Data pointer is only legal when the
  // view is empty. std::optional<ArrayRef> relies on this being illegal.
  void debugCheckNullptrInvariant() {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        Data != nullptr || Length == 0,
        "created ArrayRef with nullptr and non-zero length! std::optional relies on this being illegal");
  }

 public:
  /// @name Constructors
  /// @{

  /// Construct an empty ArrayRef.
  /* implicit */ constexpr ArrayRef() : Data(nullptr), Length(0) {}

  /// Construct an ArrayRef from a single element.
  /// The view aliases OneElt; the caller must keep it alive.
  // TODO Make this explicit
  constexpr ArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {}

  /// Construct an ArrayRef from a pointer and length.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* data, size_t length)
      : Data(data), Length(length) {
    debugCheckNullptrInvariant();
  }

  /// Construct an ArrayRef from a half-open range [begin, end).
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* begin, const T* end)
      : Data(begin), Length(end - begin) {
    debugCheckNullptrInvariant();
  }

  /// Construct an ArrayRef from a SmallVector. This is templated in order to
  /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
  /// copy-construct an ArrayRef.
  template <typename U>
  /* implicit */ ArrayRef(const SmallVectorTemplateCommon<T, U>& Vec)
      : Data(Vec.data()), Length(Vec.size()) {
    debugCheckNullptrInvariant();
  }

  /// Construct an ArrayRef from any container whose data() member returns a
  /// (possibly const-qualified) T*, e.g. contiguous standard containers.
  template <
      typename Container,
      typename = std::enable_if_t<std::is_same_v<
          std::remove_const_t<decltype(std::declval<Container>().data())>,
          T*>>>
  /* implicit */ ArrayRef(const Container& container)
      : Data(container.data()), Length(container.size()) {
    debugCheckNullptrInvariant();
  }

  /// Construct an ArrayRef from a std::vector.
  // The enable_if stuff here makes sure that this isn't used for
  // std::vector<bool>, because ArrayRef can't work on a std::vector<bool>
  // bitfield.
  template <typename A>
  /* implicit */ ArrayRef(const std::vector<T, A>& Vec)
      : Data(Vec.data()), Length(Vec.size()) {
    static_assert(
        !std::is_same<T, bool>::value,
        "ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.");
  }

  /// Construct an ArrayRef from a std::array
  template <size_t N>
  /* implicit */ constexpr ArrayRef(const std::array<T, N>& Arr)
      : Data(Arr.data()), Length(N) {}

  /// Construct an ArrayRef from a C array.
  template <size_t N>
  // NOLINTNEXTLINE(*c-arrays*)
  /* implicit */ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}

  /// Construct an ArrayRef from a std::initializer_list.
  // An empty list must produce a null Data pointer to preserve the
  // debugCheckNullptrInvariant() contract above.
  /* implicit */ constexpr ArrayRef(const std::initializer_list<T>& Vec)
      : Data(
            std::begin(Vec) == std::end(Vec) ? static_cast<T*>(nullptr)
                                             : std::begin(Vec)),
        Length(Vec.size()) {}

  /// @}
  /// @name Simple Operations
  /// @{

  constexpr iterator begin() const {
    return Data;
  }
  constexpr iterator end() const {
    return Data + Length;
  }

  // These are actually the same as iterator, since ArrayRef only
  // gives you const iterators.
  constexpr const_iterator cbegin() const {
    return Data;
  }
  constexpr const_iterator cend() const {
    return Data + Length;
  }

  constexpr reverse_iterator rbegin() const {
    return reverse_iterator(end());
  }
  constexpr reverse_iterator rend() const {
    return reverse_iterator(begin());
  }

  /// empty - Check if the array is empty.
  constexpr bool empty() const {
    return Length == 0;
  }

  constexpr const T* data() const {
    return Data;
  }

  /// size - Get the array size.
  constexpr size_t size() const {
    return Length;
  }

  /// front - Get the first element. Raises via TORCH_CHECK when empty.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& front() const {
    TORCH_CHECK(
        !empty(), "ArrayRef: attempted to access front() of empty list");
    return Data[0];
  }

  /// back - Get the last element. Raises via TORCH_CHECK when empty.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& back() const {
    TORCH_CHECK(!empty(), "ArrayRef: attempted to access back() of empty list");
    return Data[Length - 1];
  }

  /// equals - Check for element-wise equality (O(n)).
  constexpr bool equals(ArrayRef RHS) const {
    return Length == RHS.Length && std::equal(begin(), end(), RHS.begin());
  }

  /// slice(n, m) - Take M elements of the array starting at element N
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N, size_t M)
      const {
    TORCH_CHECK(
        N + M <= size(),
        "ArrayRef: invalid slice, N = ",
        N,
        "; M = ",
        M,
        "; size = ",
        size());
    return ArrayRef<T>(data() + N, M);
  }

  /// slice(n) - Chop off the first N elements of the array.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N) const {
    TORCH_CHECK(
        N <= size(), "ArrayRef: invalid slice, N = ", N, "; size = ", size())
;
    return slice(N, size() - N);
  }

  /// @}
  /// @name Operator Overloads
  /// @{

  /// Unchecked element access; see at() for the bounds-checked variant.
  constexpr const T& operator[](size_t Index) const {
    return Data[Index];
  }

  /// Vector compatibility (bounds-checked element access).
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& at(size_t Index) const {
    TORCH_CHECK(
        Index < Length,
        "ArrayRef: invalid index Index = ",
        Index,
        "; Length = ",
        Length);
    return Data[Index];
  }

  /// Disallow accidental assignment from a temporary.
  ///
  /// The declaration here is extra complicated so that "arrayRef = {}"
  /// continues to select the move assignment operator.
  template <typename U>
  std::enable_if_t<std::is_same_v<U, T>, ArrayRef<T>>& operator=(
      // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
      U&& Temporary) = delete;

  /// Disallow accidental assignment from a temporary.
  ///
  /// The declaration here is extra complicated so that "arrayRef = {}"
  /// continues to select the move assignment operator.
  template <typename U>
  std::enable_if_t<std::is_same_v<U, T>, ArrayRef<T>>& operator=(
      std::initializer_list<U>) = delete;

  /// @}
  /// @name Expensive Operations
  /// @{

  /// Materialize the view into an owning std::vector (copies all elements).
  std::vector<T> vec() const {
    return std::vector<T>(Data, Data + Length);
  }

  /// @}
};
262
+
263
+ template <typename T>
264
+ std::ostream& operator<<(std::ostream& out, ArrayRef<T> list) {
265
+ int i = 0;
266
+ out << "[";
267
+ for (const auto& e : list) {
268
+ if (i++ > 0)
269
+ out << ", ";
270
+ out << e;
271
+ }
272
+ out << "]";
273
+ return out;
274
+ }
275
+
276
+ /// @name ArrayRef Convenience constructors
277
+ /// @{
278
+
279
+ /// Construct an ArrayRef from a single element.
280
template <typename T>
ArrayRef<T> makeArrayRef(const T& OneElt) {
  // The returned view aliases OneElt; the caller must keep the referenced
  // object alive for as long as the ArrayRef is in use.
  return OneElt;
}
284
+
285
+ /// Construct an ArrayRef from a pointer and length.
286
+ template <typename T>
287
+ ArrayRef<T> makeArrayRef(const T* data, size_t length) {
288
+ return ArrayRef<T>(data, length);
289
+ }
290
+
291
+ /// Construct an ArrayRef from a range.
292
+ template <typename T>
293
+ ArrayRef<T> makeArrayRef(const T* begin, const T* end) {
294
+ return ArrayRef<T>(begin, end);
295
+ }
296
+
297
+ /// Construct an ArrayRef from a SmallVector.
298
+ template <typename T>
299
+ ArrayRef<T> makeArrayRef(const SmallVectorImpl<T>& Vec) {
300
+ return Vec;
301
+ }
302
+
303
+ /// Construct an ArrayRef from a SmallVector.
304
+ template <typename T, unsigned N>
305
+ ArrayRef<T> makeArrayRef(const SmallVector<T, N>& Vec) {
306
+ return Vec;
307
+ }
308
+
309
+ /// Construct an ArrayRef from a std::vector.
310
+ template <typename T>
311
+ ArrayRef<T> makeArrayRef(const std::vector<T>& Vec) {
312
+ return Vec;
313
+ }
314
+
315
+ /// Construct an ArrayRef from a std::array.
316
+ template <typename T, std::size_t N>
317
+ ArrayRef<T> makeArrayRef(const std::array<T, N>& Arr) {
318
+ return Arr;
319
+ }
320
+
321
+ /// Construct an ArrayRef from an ArrayRef (no-op) (const)
322
+ template <typename T>
323
+ ArrayRef<T> makeArrayRef(const ArrayRef<T>& Vec) {
324
+ return Vec;
325
+ }
326
+
327
+ /// Construct an ArrayRef from an ArrayRef (no-op)
328
+ template <typename T>
329
+ ArrayRef<T>& makeArrayRef(ArrayRef<T>& Vec) {
330
+ return Vec;
331
+ }
332
+
333
+ /// Construct an ArrayRef from a C array.
334
template <typename T, size_t N>
// NOLINTNEXTLINE(*c-arrays*)
ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
  // The view aliases the caller's array; no copy is made.
  return ArrayRef<T>(Arr);
}
339
+
340
+ // WARNING: Template instantiation will NOT be willing to do an implicit
341
+ // conversions to get you to an c10::ArrayRef, which is why we need so
342
+ // many overloads.
343
+
344
+ template <typename T>
345
+ bool operator==(c10::ArrayRef<T> a1, c10::ArrayRef<T> a2) {
346
+ return a1.equals(a2);
347
+ }
348
+
349
+ template <typename T>
350
+ bool operator!=(c10::ArrayRef<T> a1, c10::ArrayRef<T> a2) {
351
+ return !a1.equals(a2);
352
+ }
353
+
354
+ template <typename T>
355
+ bool operator==(const std::vector<T>& a1, c10::ArrayRef<T> a2) {
356
+ return c10::ArrayRef<T>(a1).equals(a2);
357
+ }
358
+
359
+ template <typename T>
360
+ bool operator!=(const std::vector<T>& a1, c10::ArrayRef<T> a2) {
361
+ return !c10::ArrayRef<T>(a1).equals(a2);
362
+ }
363
+
364
+ template <typename T>
365
+ bool operator==(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
366
+ return a1.equals(c10::ArrayRef<T>(a2));
367
+ }
368
+
369
+ template <typename T>
370
+ bool operator!=(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
371
+ return !a1.equals(c10::ArrayRef<T>(a2));
372
+ }
373
+
374
+ using IntArrayRef = ArrayRef<int64_t>;
375
+
376
+ // This alias is deprecated because it doesn't make ownership
377
+ // semantics obvious. Use IntArrayRef instead!
378
+ C10_DEFINE_DEPRECATED_USING(IntList, ArrayRef<int64_t>)
379
+
380
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/bit_cast.h>
5
+
6
+ #include <limits>
7
+
8
+ C10_CLANG_DIAGNOSTIC_PUSH()
9
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
10
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
11
+ #endif
12
+
13
+ #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
14
+ #if defined(CL_SYCL_LANGUAGE_VERSION)
15
+ #include <CL/sycl.hpp> // for SYCL 1.2.1
16
+ #else
17
+ #include <sycl/sycl.hpp> // for SYCL 2020
18
+ #endif
19
+ #include <ext/oneapi/bfloat16.hpp>
20
+ #endif
21
+
22
+ namespace c10 {
23
+
24
+ /// Constructors
25
/// Construct a BFloat16 from a float. On CUDA devices with sm_80+ and on
/// SYCL devices with native bfloat16 support, the hardware conversion
/// intrinsic is used; otherwise the software path rounds to nearest even.
inline C10_HOST_DEVICE BFloat16::BFloat16(float value)
    :
#if defined(__CUDACC__) && !defined(USE_ROCM) && defined(__CUDA_ARCH__) && \
    __CUDA_ARCH__ >= 800
      x(__bfloat16_as_ushort(__float2bfloat16(value)))
#elif defined(__SYCL_DEVICE_ONLY__) && \
    defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
      x(c10::bit_cast<uint16_t>(sycl::ext::oneapi::bfloat16(value)))
#else
      // RNE by default
      x(detail::round_to_nearest_even(value))
#endif
{
}
39
+
40
+ /// Implicit conversions
41
/// Convert the stored 16-bit pattern to float, using the device-native
/// conversion on CUDA/SYCL and a software bit expansion otherwise.
inline C10_HOST_DEVICE BFloat16::operator float() const {
#if defined(__CUDACC__) && !defined(USE_ROCM)
  return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
#elif defined(__SYCL_DEVICE_ONLY__) && \
    defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
  return float(*reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x));
#else
  return detail::f32_from_bits(x);
#endif
}
51
+
52
+ #if defined(__CUDACC__) && !defined(USE_ROCM)
53
+ inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) {
54
+ x = *reinterpret_cast<const unsigned short*>(&value);
55
+ }
56
+ inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const {
57
+ return *reinterpret_cast<const __nv_bfloat16*>(&x);
58
+ }
59
+ #endif
60
+ #if defined(__HIPCC__) && defined(USE_ROCM)
61
+ // 6.2.0 introduced __hip_bfloat16_raw
62
+ #if defined(__BF16_HOST_DEVICE__)
63
+ inline C10_HOST_DEVICE BFloat16::BFloat16(const __hip_bfloat16& value) {
64
+ x = __hip_bfloat16_raw(value).x;
65
+ }
66
+ inline C10_HOST_DEVICE BFloat16::operator __hip_bfloat16() const {
67
+ return __hip_bfloat16(__hip_bfloat16_raw{x});
68
+ }
69
+ #else // !defined(__BF16_HOST_DEVICE__)
70
+ inline C10_HOST_DEVICE BFloat16::BFloat16(const __hip_bfloat16& value) {
71
+ x = value.data;
72
+ }
73
+ inline C10_HOST_DEVICE BFloat16::operator __hip_bfloat16() const {
74
+ return __hip_bfloat16{x};
75
+ }
76
+ #endif // !defined(__BF16_HOST_DEVICE__)
77
+ #endif // defined(__HIPCC__) && defined(USE_ROCM)
78
+
79
+ #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
80
+ inline C10_HOST_DEVICE BFloat16::BFloat16(
81
+ const sycl::ext::oneapi::bfloat16& value) {
82
+ x = *reinterpret_cast<const unsigned short*>(&value);
83
+ }
84
+ inline C10_HOST_DEVICE BFloat16::operator sycl::ext::oneapi::bfloat16() const {
85
+ return *reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x);
86
+ }
87
+ #endif
88
+
89
+ // CUDA intrinsics
90
+
91
+ #if defined(__CUDACC__) || defined(__HIPCC__)
92
// Read-only cached load (__ldg) overload for BFloat16. Dispatches to the
// native __nv_bfloat16 overload on CUDA sm_80+; elsewhere it falls back to a
// plain dereference.
inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) {
#if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
  return __ldg(reinterpret_cast<const __nv_bfloat16*>(ptr));
#else
  return *ptr;
#endif
}
99
+ #endif
100
+
101
+ /// Arithmetic
102
+
103
+ inline C10_HOST_DEVICE BFloat16
104
+ operator+(const BFloat16& a, const BFloat16& b) {
105
+ return static_cast<float>(a) + static_cast<float>(b);
106
+ }
107
+
108
+ inline C10_HOST_DEVICE BFloat16
109
+ operator-(const BFloat16& a, const BFloat16& b) {
110
+ return static_cast<float>(a) - static_cast<float>(b);
111
+ }
112
+
113
+ inline C10_HOST_DEVICE BFloat16
114
+ operator*(const BFloat16& a, const BFloat16& b) {
115
+ return static_cast<float>(a) * static_cast<float>(b);
116
+ }
117
+
118
+ inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b)
119
+ __ubsan_ignore_float_divide_by_zero__ {
120
+ return static_cast<float>(a) / static_cast<float>(b);
121
+ }
122
+
123
+ inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) {
124
+ return -static_cast<float>(a);
125
+ }
126
+
127
+ inline C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) {
128
+ a = a + b;
129
+ return a;
130
+ }
131
+
132
+ inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) {
133
+ a = a - b;
134
+ return a;
135
+ }
136
+
137
+ inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) {
138
+ a = a * b;
139
+ return a;
140
+ }
141
+
142
+ inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) {
143
+ a = a / b;
144
+ return a;
145
+ }
146
+
147
// Bitwise OR of the raw bit patterns.
// NOTE: unlike a conventional binary operator|, this mutates the left-hand
// operand in place and returns a reference to it.
inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) {
  a.x = a.x | b.x;
  return a;
}
151
+
152
// Bitwise XOR of the raw bit patterns.
// NOTE: mutates the left-hand operand in place and returns a reference to it.
inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) {
  a.x = a.x ^ b.x;
  return a;
}
156
+
157
// Bitwise AND of the raw bit patterns.
// NOTE: mutates the left-hand operand in place and returns a reference to it.
inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) {
  a.x = a.x & b.x;
  return a;
}
161
+
162
+ /// Arithmetic with floats
163
+
164
+ inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) {
165
+ return static_cast<float>(a) + b;
166
+ }
167
+ inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) {
168
+ return static_cast<float>(a) - b;
169
+ }
170
+ inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) {
171
+ return static_cast<float>(a) * b;
172
+ }
173
+ inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) {
174
+ return static_cast<float>(a) / b;
175
+ }
176
+
177
+ inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) {
178
+ return a + static_cast<float>(b);
179
+ }
180
+ inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) {
181
+ return a - static_cast<float>(b);
182
+ }
183
+ inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) {
184
+ return a * static_cast<float>(b);
185
+ }
186
+ inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) {
187
+ return a / static_cast<float>(b);
188
+ }
189
+
190
+ inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) {
191
+ return a += static_cast<float>(b);
192
+ }
193
+ inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) {
194
+ return a -= static_cast<float>(b);
195
+ }
196
+ inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) {
197
+ return a *= static_cast<float>(b);
198
+ }
199
+ inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) {
200
+ return a /= static_cast<float>(b);
201
+ }
202
+
203
+ /// Arithmetic with doubles
204
+
205
+ inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) {
206
+ return static_cast<double>(a) + b;
207
+ }
208
+ inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) {
209
+ return static_cast<double>(a) - b;
210
+ }
211
+ inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) {
212
+ return static_cast<double>(a) * b;
213
+ }
214
+ inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) {
215
+ return static_cast<double>(a) / b;
216
+ }
217
+
218
+ inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) {
219
+ return a + static_cast<double>(b);
220
+ }
221
+ inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) {
222
+ return a - static_cast<double>(b);
223
+ }
224
+ inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) {
225
+ return a * static_cast<double>(b);
226
+ }
227
+ inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) {
228
+ return a / static_cast<double>(b);
229
+ }
230
+
231
+ /// Arithmetic with ints
232
+
233
+ inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) {
234
+ return a + static_cast<BFloat16>(b);
235
+ }
236
+ inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) {
237
+ return a - static_cast<BFloat16>(b);
238
+ }
239
+ inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) {
240
+ return a * static_cast<BFloat16>(b);
241
+ }
242
+ inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) {
243
+ return a / static_cast<BFloat16>(b);
244
+ }
245
+
246
+ inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) {
247
+ return static_cast<BFloat16>(a) + b;
248
+ }
249
+ inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) {
250
+ return static_cast<BFloat16>(a) - b;
251
+ }
252
+ inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) {
253
+ return static_cast<BFloat16>(a) * b;
254
+ }
255
+ inline C10_HOST_DEVICE BFloat16 operator/(int a, BFloat16 b) {
256
+ return static_cast<BFloat16>(a) / b;
257
+ }
258
+
259
+ //// Arithmetic with int64_t
260
+
261
+ inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) {
262
+ return a + static_cast<BFloat16>(b);
263
+ }
264
+ inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) {
265
+ return a - static_cast<BFloat16>(b);
266
+ }
267
+ inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) {
268
+ return a * static_cast<BFloat16>(b);
269
+ }
270
+ inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) {
271
+ return a / static_cast<BFloat16>(b);
272
+ }
273
+
274
+ inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) {
275
+ return static_cast<BFloat16>(a) + b;
276
+ }
277
+ inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) {
278
+ return static_cast<BFloat16>(a) - b;
279
+ }
280
+ inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) {
281
+ return static_cast<BFloat16>(a) * b;
282
+ }
283
+ inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) {
284
+ return static_cast<BFloat16>(a) / b;
285
+ }
286
+
287
+ // Overloading < and > operators, because std::max and std::min use them.
288
+
289
// Compares by converting both operands to float.
// NOTE: the non-const reference parameters mean this overload does not accept
// const or temporary operands.
inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) {
  return float(lhs) > float(rhs);
}
292
+
293
// Compares by converting both operands to float.
// NOTE: the non-const reference parameters mean this overload does not accept
// const or temporary operands.
inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) {
  return float(lhs) < float(rhs);
}
296
+
297
+ } // namespace c10
298
+
299
+ namespace std {
300
+
301
/// std::numeric_limits specialization for c10::BFloat16.
///
/// bfloat16 carries 1 sign bit, 8 exponent bits and 7 explicit mantissa bits,
/// sharing float32's exponent range (hence the float-derived traits below)
/// but with far lower precision (digits == 8 including the implicit bit).
template <>
class numeric_limits<c10::BFloat16> {
 public:
  static constexpr bool is_signed = true;
  static constexpr bool is_specialized = true;
  static constexpr bool is_integer = false;
  static constexpr bool is_exact = false;
  static constexpr bool has_infinity = true;
  static constexpr bool has_quiet_NaN = true;
  static constexpr bool has_signaling_NaN = true;
  static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
  static constexpr auto has_denorm_loss =
      numeric_limits<float>::has_denorm_loss;
  static constexpr auto round_style = numeric_limits<float>::round_style;
  static constexpr bool is_iec559 = false;
  static constexpr bool is_bounded = true;
  static constexpr bool is_modulo = false;
  static constexpr int digits = 8;
  static constexpr int digits10 = 2;
  static constexpr int max_digits10 = 4;
  static constexpr int radix = 2;
  static constexpr int min_exponent = -125;
  static constexpr int min_exponent10 = -37;
  static constexpr int max_exponent = 128;
  static constexpr int max_exponent10 = 38;
  static constexpr auto traps = numeric_limits<float>::traps;
  static constexpr auto tinyness_before =
      numeric_limits<float>::tinyness_before;

  static constexpr c10::BFloat16 min() {
    // 0x0080 -> 2^-126, the smallest positive normal value.
    return c10::BFloat16(0x0080, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 lowest() {
    // 0xFF7F -> most negative finite value (-max()).
    return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 max() {
    // 0x7F7F -> largest finite value (approx 3.39e38).
    return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 epsilon() {
    // 0x3C00 -> 2^-7, the gap between 1.0 and the next representable value.
    return c10::BFloat16(0x3C00, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 round_error() {
    // 0x3F00 -> 0.5.
    return c10::BFloat16(0x3F00, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 infinity() {
    // 0x7F80 -> positive infinity (max exponent, zero mantissa).
    return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 quiet_NaN() {
    // 0x7FC0 -> canonical quiet NaN (top mantissa bit set).
    return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 signaling_NaN() {
    // NOTE(review): 0x7F80 is the same bit pattern returned by infinity(),
    // not a NaN (a NaN requires a nonzero mantissa). Kept byte-identical to
    // the vendored source; confirm against upstream before relying on it.
    return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
  }
  static constexpr c10::BFloat16 denorm_min() {
    // 0x0001 -> 2^-133, the smallest positive subnormal value.
    return c10::BFloat16(0x0001, c10::BFloat16::from_bits());
  }
};
358
+
359
+ } // namespace std
360
+
361
+ C10_CLANG_DIAGNOSTIC_POP()
videochat2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Half.h>
5
+
6
+ C10_CLANG_DIAGNOSTIC_PUSH()
7
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
8
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
9
+ #endif
10
+
11
+ namespace std {
12
+
13
+ template <typename T>
14
+ struct is_reduced_floating_point
15
+ : std::integral_constant<
16
+ bool,
17
+ std::is_same_v<T, c10::Half> || std::is_same_v<T, c10::BFloat16>> {};
18
+
19
+ template <typename T>
20
+ constexpr bool is_reduced_floating_point_v =
21
+ is_reduced_floating_point<T>::value;
22
+
23
+ template <
24
+ typename T,
25
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
26
+ inline T acos(T a) {
27
+ return std::acos(float(a));
28
+ }
29
+ template <
30
+ typename T,
31
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
32
+ inline T asin(T a) {
33
+ return std::asin(float(a));
34
+ }
35
+ template <
36
+ typename T,
37
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
38
+ inline T atan(T a) {
39
+ return std::atan(float(a));
40
+ }
41
+ template <
42
+ typename T,
43
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
44
+ inline T atanh(T a) {
45
+ return std::atanh(float(a));
46
+ }
47
+ template <
48
+ typename T,
49
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
50
+ inline T erf(T a) {
51
+ return std::erf(float(a));
52
+ }
53
+ template <
54
+ typename T,
55
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
56
+ inline T erfc(T a) {
57
+ return std::erfc(float(a));
58
+ }
59
+ template <
60
+ typename T,
61
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
62
+ inline T exp(T a) {
63
+ return std::exp(float(a));
64
+ }
65
+ template <
66
+ typename T,
67
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
68
+ inline T expm1(T a) {
69
+ return std::expm1(float(a));
70
+ }
71
+ template <
72
+ typename T,
73
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
74
+ inline bool isfinite(T a) {
75
+ return std::isfinite(float(a));
76
+ }
77
+ template <
78
+ typename T,
79
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
80
+ inline T log(T a) {
81
+ return std::log(float(a));
82
+ }
83
+ template <
84
+ typename T,
85
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
86
+ inline T log10(T a) {
87
+ return std::log10(float(a));
88
+ }
89
+ template <
90
+ typename T,
91
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
92
+ inline T log1p(T a) {
93
+ return std::log1p(float(a));
94
+ }
95
+ template <
96
+ typename T,
97
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
98
+ inline T log2(T a) {
99
+ return std::log2(float(a));
100
+ }
101
+ template <
102
+ typename T,
103
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
104
+ inline T ceil(T a) {
105
+ return std::ceil(float(a));
106
+ }
107
+ template <
108
+ typename T,
109
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
110
+ inline T cos(T a) {
111
+ return std::cos(float(a));
112
+ }
113
+ template <
114
+ typename T,
115
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
116
+ inline T floor(T a) {
117
+ return std::floor(float(a));
118
+ }
119
+ template <
120
+ typename T,
121
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
122
+ inline T nearbyint(T a) {
123
+ return std::nearbyint(float(a));
124
+ }
125
+ template <
126
+ typename T,
127
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
128
+ inline T sin(T a) {
129
+ return std::sin(float(a));
130
+ }
131
+ template <
132
+ typename T,
133
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
134
+ inline T tan(T a) {
135
+ return std::tan(float(a));
136
+ }
137
+ template <
138
+ typename T,
139
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
140
+ inline T sinh(T a) {
141
+ return std::sinh(float(a));
142
+ }
143
+ template <
144
+ typename T,
145
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
146
+ inline T cosh(T a) {
147
+ return std::cosh(float(a));
148
+ }
149
+ template <
150
+ typename T,
151
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
152
+ inline T tanh(T a) {
153
+ return std::tanh(float(a));
154
+ }
155
+ template <
156
+ typename T,
157
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
158
+ inline T trunc(T a) {
159
+ return std::trunc(float(a));
160
+ }
161
+ template <
162
+ typename T,
163
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
164
+ inline T lgamma(T a) {
165
+ return std::lgamma(float(a));
166
+ }
167
+ template <
168
+ typename T,
169
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
170
+ inline T sqrt(T a) {
171
+ return std::sqrt(float(a));
172
+ }
173
+ template <
174
+ typename T,
175
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
176
+ inline T rsqrt(T a) {
177
+ return 1.0 / std::sqrt(float(a));
178
+ }
179
+ template <
180
+ typename T,
181
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
182
+ inline T abs(T a) {
183
+ return std::abs(float(a));
184
+ }
185
+ #if defined(_MSC_VER) && defined(__CUDACC__)
186
+ template <
187
+ typename T,
188
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
189
+ inline T pow(T a, double b) {
190
+ return std::pow(float(a), float(b));
191
+ }
192
+ #else
193
+ template <
194
+ typename T,
195
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
196
+ inline T pow(T a, double b) {
197
+ return std::pow(float(a), b);
198
+ }
199
+ #endif
200
+ template <
201
+ typename T,
202
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
203
+ inline T pow(T a, T b) {
204
+ return std::pow(float(a), float(b));
205
+ }
206
+ template <
207
+ typename T,
208
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
209
+ inline T fmod(T a, T b) {
210
+ return std::fmod(float(a), float(b));
211
+ }
212
+
213
+ /*
214
+ The following function is inspired from the implementation in `musl`
215
+ Link to License: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
216
+ ----------------------------------------------------------------------
217
+ Copyright © 2005-2020 Rich Felker, et al.
218
+
219
+ Permission is hereby granted, free of charge, to any person obtaining
220
+ a copy of this software and associated documentation files (the
221
+ "Software"), to deal in the Software without restriction, including
222
+ without limitation the rights to use, copy, modify, merge, publish,
223
+ distribute, sublicense, and/or sell copies of the Software, and to
224
+ permit persons to whom the Software is furnished to do so, subject to
225
+ the following conditions:
226
+
227
+ The above copyright notice and this permission notice shall be
228
+ included in all copies or substantial portions of the Software.
229
+
230
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
231
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
232
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
233
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
234
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
235
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
236
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
237
+ ----------------------------------------------------------------------
238
+ */
239
+ template <
240
+ typename T,
241
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
242
+ C10_HOST_DEVICE inline T nextafter(T from, T to) {
243
+ // Reference:
244
+ // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c
245
+ using int_repr_t = uint16_t;
246
+ constexpr uint8_t bits = 16;
247
+ union {
248
+ T f;
249
+ int_repr_t i;
250
+ } ufrom = {from}, uto = {to};
251
+
252
+ // get a mask to get the sign bit i.e. MSB
253
+ int_repr_t sign_mask = int_repr_t{1} << (bits - 1);
254
+
255
+ // short-circuit: if either is NaN, return NaN
256
+ if (from != from || to != to) {
257
+ return from + to;
258
+ }
259
+
260
+ // short-circuit: if they are exactly the same.
261
+ if (ufrom.i == uto.i) {
262
+ return from;
263
+ }
264
+
265
+ // mask the sign-bit to zero i.e. positive
266
+ // equivalent to abs(x)
267
+ int_repr_t abs_from = ufrom.i & ~sign_mask;
268
+ int_repr_t abs_to = uto.i & ~sign_mask;
269
+ if (abs_from == 0) {
270
+ // if both are zero but with different sign,
271
+ // preserve the sign of `to`.
272
+ if (abs_to == 0) {
273
+ return to;
274
+ }
275
+ // smallest subnormal with sign of `to`.
276
+ ufrom.i = (uto.i & sign_mask) | int_repr_t{1};
277
+ return ufrom.f;
278
+ }
279
+
280
+ // if abs(from) > abs(to) or sign(from) != sign(to)
281
+ if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) {
282
+ ufrom.i--;
283
+ } else {
284
+ ufrom.i++;
285
+ }
286
+
287
+ return ufrom.f;
288
+ }
289
+
290
+ } // namespace std
291
+
292
+ C10_CLANG_DIAGNOSTIC_POP()
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef C10_UTIL_BACKTRACE_H_
2
+ #define C10_UTIL_BACKTRACE_H_
3
+
4
+ #include <cstddef>
5
+ #include <memory>
6
+ #include <string>
7
+ #include <typeinfo>
8
+
9
+ #include <c10/macros/Macros.h>
10
+ #include <c10/util/Lazy.h>
11
+
12
+ namespace c10 {
13
+
14
+ // Symbolizing the backtrace can be expensive; pass it around as a lazy string
15
+ // so it is symbolized only if actually needed.
16
+ using Backtrace = std::shared_ptr<const LazyValue<std::string>>;
17
+
18
+ // DEPRECATED: Prefer get_lazy_backtrace().
19
+ C10_API std::string get_backtrace(
20
+ size_t frames_to_skip = 0,
21
+ size_t maximum_number_of_frames = 64,
22
+ bool skip_python_frames = true);
23
+
24
+ C10_API Backtrace get_lazy_backtrace(
25
+ size_t frames_to_skip = 0,
26
+ size_t maximum_number_of_frames = 64,
27
+ bool skip_python_frames = true);
28
+
29
+ } // namespace c10
30
+
31
+ #endif // C10_UTIL_BACKTRACE_H_
videochat2/lib/python3.10/site-packages/torch/include/c10/util/C++17.h ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #ifndef C10_UTIL_CPP17_H_
3
+ #define C10_UTIL_CPP17_H_
4
+
5
+ #include <c10/macros/Macros.h>
6
+ #include <functional>
7
+ #include <memory>
8
+ #include <type_traits>
9
+ #include <utility>
10
+
11
+ #if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
12
+ __GNUC__ < 9
13
+ #error \
14
+ "You're trying to build PyTorch with a too old version of GCC. We need GCC 9 or later."
15
+ #endif
16
+
17
+ #if defined(__clang__) && __clang_major__ < 9
18
+ #error \
19
+ "You're trying to build PyTorch with a too old version of Clang. We need Clang 9 or later."
20
+ #endif
21
+
22
+ #if (defined(_MSC_VER) && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L)) || \
23
+ (!defined(_MSC_VER) && __cplusplus < 201703L)
24
+ #error You need C++17 to compile PyTorch
25
+ #endif
26
+
27
+ #if defined(_WIN32) && (defined(min) || defined(max))
28
+ #error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows
29
+ #endif
30
+
31
+ /*
32
+ * This header adds some polyfills with C++17 functionality
33
+ */
34
+
35
+ namespace c10 {
36
+
37
+ // std::is_pod is deprecated in C++20, std::is_standard_layout and
38
+ // std::is_trivial are introduced in C++11, std::conjunction has been introduced
39
+ // in C++17.
40
+ template <typename T>
41
+ using is_pod = std::conjunction<std::is_standard_layout<T>, std::is_trivial<T>>;
42
+
43
+ template <typename T>
44
+ constexpr bool is_pod_v = is_pod<T>::value;
45
+
46
+ namespace guts {
47
+
48
+ template <typename Base, typename Child, typename... Args>
49
+ std::enable_if_t<
50
+ !std::is_array_v<Base> && !std::is_array_v<Child> &&
51
+ std::is_base_of_v<Base, Child>,
52
+ std::unique_ptr<Base>>
53
+ make_unique_base(Args&&... args) {
54
+ return std::unique_ptr<Base>(new Child(std::forward<Args>(args)...));
55
+ }
56
+
57
+ #if defined(__cpp_lib_apply) && !defined(__CUDA_ARCH__) && !defined(__HIP__)
58
+
59
+ template <class F, class Tuple>
60
+ C10_HOST_DEVICE inline constexpr decltype(auto) apply(F&& f, Tuple&& t) {
61
+ return std::apply(std::forward<F>(f), std::forward<Tuple>(t));
62
+ }
63
+
64
+ #else
65
+
66
+ // Implementation from http://en.cppreference.com/w/cpp/utility/apply (but
67
+ // modified)
68
+ // TODO This is an incomplete implementation of std::apply, not working for
69
+ // member functions.
70
+ namespace detail {
71
+ template <class F, class Tuple, std::size_t... INDEX>
72
+ #if defined(_MSC_VER)
73
+ // MSVC has a problem with the decltype() return type, but it also doesn't need
74
+ // it
75
+ C10_HOST_DEVICE constexpr auto apply_impl(
76
+ F&& f,
77
+ Tuple&& t,
78
+ std::index_sequence<INDEX...>)
79
+ #else
80
+ // GCC/Clang need the decltype() return type
81
+ C10_HOST_DEVICE constexpr decltype(auto) apply_impl(
82
+ F&& f,
83
+ Tuple&& t,
84
+ std::index_sequence<INDEX...>)
85
+ #endif
86
+ {
87
+ return std::forward<F>(f)(std::get<INDEX>(std::forward<Tuple>(t))...);
88
+ }
89
+ } // namespace detail
90
+
91
+ template <class F, class Tuple>
92
+ C10_HOST_DEVICE constexpr decltype(auto) apply(F&& f, Tuple&& t) {
93
+ return detail::apply_impl(
94
+ std::forward<F>(f),
95
+ std::forward<Tuple>(t),
96
+ std::make_index_sequence<
97
+ std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
98
+ }
99
+
100
+ #endif
101
+
102
+ template <typename Functor, typename... Args>
103
+ std::enable_if_t<
104
+ std::is_member_pointer_v<std::decay_t<Functor>>,
105
+ typename std::invoke_result_t<Functor, Args...>>
106
+ invoke(Functor&& f, Args&&... args) {
107
+ return std::mem_fn(std::forward<Functor>(f))(std::forward<Args>(args)...);
108
+ }
109
+
110
+ template <typename Functor, typename... Args>
111
+ std::enable_if_t<
112
+ !std::is_member_pointer_v<std::decay_t<Functor>>,
113
+ typename std::invoke_result_t<Functor, Args...>>
114
+ invoke(Functor&& f, Args&&... args) {
115
+ return std::forward<Functor>(f)(std::forward<Args>(args)...);
116
+ }
117
+
118
+ namespace detail {
119
+ struct _identity final {
120
+ template <class T>
121
+ using type_identity = T;
122
+
123
+ template <class T>
124
+ decltype(auto) operator()(T&& arg) {
125
+ return std::forward<T>(arg);
126
+ }
127
+ };
128
+
129
+ template <class Func, class Enable = void>
130
+ struct function_takes_identity_argument : std::false_type {};
131
+
132
+ template <class Func>
133
+ struct function_takes_identity_argument<
134
+ Func,
135
+ std::void_t<decltype(std::declval<Func>()(_identity()))>> : std::true_type {
136
+ };
137
+ } // namespace detail
138
+
139
+ } // namespace guts
140
+ } // namespace c10
141
+
142
+ #endif // C10_UTIL_CPP17_H_
videochat2/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <mutex>
5
+ #include <utility>
6
+
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/C++17.h>
9
+
10
+ namespace c10 {
11
+
12
+ // custom c10 call_once implementation to avoid the deadlock in std::call_once.
13
+ // The implementation here is a simplified version from folly and likely much
14
+ // much higher memory footprint.
15
+ template <typename Flag, typename F, typename... Args>
16
+ inline void call_once(Flag& flag, F&& f, Args&&... args) {
17
+ if (C10_LIKELY(flag.test_once())) {
18
+ return;
19
+ }
20
+ flag.call_once_slow(std::forward<F>(f), std::forward<Args>(args)...);
21
+ }
22
+
23
+ class once_flag {
24
+ public:
25
+ #ifndef _WIN32
26
+ // running into build error on MSVC. Can't seem to get a repro locally so I'm
27
+ // just avoiding constexpr
28
+ //
29
+ // C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error:
30
+ // defaulted default constructor cannot be constexpr because the
31
+ // corresponding implicitly declared default constructor would not be
32
+ // constexpr 1 error detected in the compilation of
33
+ // "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu".
34
+ constexpr
35
+ #endif
36
+ once_flag() noexcept = default;
37
+ once_flag(const once_flag&) = delete;
38
+ once_flag& operator=(const once_flag&) = delete;
39
+
40
+ private:
41
+ template <typename Flag, typename F, typename... Args>
42
+ friend void call_once(Flag& flag, F&& f, Args&&... args);
43
+
44
+ template <typename F, typename... Args>
45
+ void call_once_slow(F&& f, Args&&... args) {
46
+ std::lock_guard<std::mutex> guard(mutex_);
47
+ if (init_.load(std::memory_order_relaxed)) {
48
+ return;
49
+ }
50
+ c10::guts::invoke(std::forward<F>(f), std::forward<Args>(args)...);
51
+ init_.store(true, std::memory_order_release);
52
+ }
53
+
54
+ bool test_once() {
55
+ return init_.load(std::memory_order_acquire);
56
+ }
57
+
58
+ void reset_once() {
59
+ init_.store(false, std::memory_order_release);
60
+ }
61
+
62
+ private:
63
+ std::mutex mutex_;
64
+ std::atomic<bool> init_{false};
65
+ };
66
+
67
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ /// This file provides some simple utilities for detecting common deadlocks in
7
+ /// PyTorch. For now, we focus exclusively on detecting Python GIL deadlocks,
8
+ /// as the GIL is a wide ranging lock that is taken out in many situations.
9
+ /// The basic strategy is before performing an operation that may block, you
10
+ /// can use TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() to assert that the GIL is
11
+ /// not held. This macro is to be used in contexts where no static dependency
12
+ /// on Python is available (we will handle indirecting a virtual call for you).
13
+ ///
14
+ /// If the GIL is held by a torchdeploy interpreter, we always report false.
15
+ /// If you are in a context where Python bindings are available, it's better
16
+ /// to directly assert on PyGILState_Check (as it avoids a vcall and also
17
+ /// works correctly with torchdeploy.)
18
+
19
+ #define TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() \
20
+ TORCH_INTERNAL_ASSERT( \
21
+ !c10::impl::check_python_gil(), \
22
+ "Holding GIL before a blocking operation! Please release the GIL before blocking, or see https://github.com/pytorch/pytorch/issues/56297 for how to release the GIL for destructors of objects")
23
+
24
+ namespace c10::impl {
25
+
26
+ C10_API bool check_python_gil();
27
+
28
+ struct C10_API PythonGILHooks {
29
+ virtual ~PythonGILHooks() = default;
30
+ // Returns true if we hold the GIL. If not linked against Python we
31
+ // always return false.
32
+ virtual bool check_python_gil() const = 0;
33
+ };
34
+
35
+ C10_API void SetPythonGILHooks(PythonGILHooks* factory);
36
+
37
+ // DO NOT call this registerer from a torch deploy instance! You will clobber
38
+ // other registrations
39
+ struct C10_API PythonGILHooksRegisterer {
40
+ explicit PythonGILHooksRegisterer(PythonGILHooks* factory) {
41
+ SetPythonGILHooks(factory);
42
+ }
43
+ ~PythonGILHooksRegisterer() {
44
+ SetPythonGILHooks(nullptr);
45
+ }
46
+ };
47
+
48
+ } // namespace c10::impl
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /**
4
+ * This file provides portable macros for marking declarations
5
+ * as deprecated. You should generally use C10_DEPRECATED,
6
+ * except when marking 'using' declarations as deprecated,
7
+ * in which case you should use C10_DEFINE_DEPRECATED_USING
8
+ * (due to portability concerns).
9
+ */
10
+
11
+ // Sample usage:
12
+ //
13
+ // C10_DEPRECATED void bad_func();
14
+ // struct C10_DEPRECATED BadStruct {
15
+ // ...
16
+ // };
17
+
18
+ // NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses
19
+ // the "__declspec(deprecated)" implementation and not the C++14
20
+ // "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on
21
+ // MSVC, but ran into issues with some older MSVC versions.
22
+ #if (defined(__cplusplus) && __cplusplus >= 201402L)
23
+ #define C10_DEPRECATED [[deprecated]]
24
+ #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
25
+ #elif defined(__GNUC__)
26
+ #define C10_DEPRECATED __attribute__((deprecated))
27
+ // TODO Is there some way to implement this?
28
+ #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated))
29
+
30
+ #elif defined(_MSC_VER)
31
+ #define C10_DEPRECATED __declspec(deprecated)
32
+ #define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message))
33
+ #else
34
+ #warning "You need to implement C10_DEPRECATED for this compiler"
35
+ #define C10_DEPRECATED
36
+ #endif
37
+
38
+ // Sample usage:
39
+ //
40
+ // C10_DEFINE_DEPRECATED_USING(BadType, int)
41
+ //
42
+ // which is the portable version of
43
+ //
44
+ // using BadType [[deprecated]] = int;
45
+
46
+ // technically [[deprecated]] syntax is from c++14 standard, but it works in
47
+ // many compilers.
48
+ #if defined(__has_cpp_attribute)
49
+ #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__)
50
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
51
+ using TypeName [[deprecated]] = TypeThingy;
52
+ #endif
53
+ #endif
54
+
55
+ #if defined(_MSC_VER)
56
+ #if defined(__CUDACC__)
57
+ // neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows;
58
+ // you get the error:
59
+ //
60
+ // error: attribute does not apply to any entity
61
+ //
62
+ // So we just turn the macro off in this case.
63
+ #if defined(C10_DEFINE_DEPRECATED_USING)
64
+ #undef C10_DEFINE_DEPRECATED_USING
65
+ #endif
66
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
67
+ using TypeName = TypeThingy;
68
+ #else
69
+ // [[deprecated]] does work in windows without nvcc, though msc doesn't support
70
+ // `__has_cpp_attribute` when c++14 is supported, otherwise
71
+ // __declspec(deprecated) is used as the alternative.
72
+ #ifndef C10_DEFINE_DEPRECATED_USING
73
+ #if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L
74
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
75
+ using TypeName [[deprecated]] = TypeThingy;
76
+ #else
77
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
78
+ using TypeName = __declspec(deprecated) TypeThingy;
79
+ #endif
80
+ #endif
81
+ #endif
82
+ #endif
83
+
84
+ #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__)
85
+ // nvcc has a bug where it doesn't understand __attribute__((deprecated))
86
+ // declarations even when the host compiler supports it. We'll only use this gcc
87
+ // attribute when not cuda, and when using a GCC compiler that doesn't support
88
+ // the c++14 syntax we checked for above (available in __GNUC__ >= 5)
89
+ #if !defined(__CUDACC__)
90
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
91
+ using TypeName __attribute__((deprecated)) = TypeThingy;
92
+ #else
93
+ // using cuda + gcc < 5, neither deprecated syntax is available so turning off.
94
+ #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
95
+ using TypeName = TypeThingy;
96
+ #endif
97
+ #endif
98
+
99
+ #if !defined(C10_DEFINE_DEPRECATED_USING)
100
+ #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler"
101
+ #define C10_DEFINE_DEPRECATED_USING
102
+ #endif
videochat2/lib/python3.10/site-packages/torch/include/c10/util/DimVector.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/core/impl/SizesAndStrides.h>
5
+ #include <c10/util/SmallVector.h>
6
+ #include <cstddef>
7
+ #include <cstdint>
8
+
9
+ namespace c10 {
10
+
11
+ constexpr size_t kDimVectorStaticSize = C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;
12
+
13
+ /// A container for sizes or strides
14
+ using DimVector = SmallVector<int64_t, kDimVectorStaticSize>;
15
+ using SymDimVector = SmallVector<c10::SymInt, kDimVectorStaticSize>;
16
+
17
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/DynamicCounter.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <string_view>
6
+
7
+ #include <c10/macros/Macros.h>
8
+
9
+ namespace c10::monitor {
10
+
11
+ class C10_API DynamicCounter {
12
+ public:
13
+ using Callback = std::function<int64_t()>;
14
+
15
+ // Creates a dynamic counter that can be queried at any point in time by
16
+ // multiple backends. Only one counter with a given key can exist at any point
17
+ // in time.
18
+ //
19
+ // The callback is invoked every time the counter is queried.
20
+ // The callback must be thread-safe.
21
+ // The callback must not throw.
22
+ // The callback must not block.
23
+ DynamicCounter(std::string_view key, Callback getCounterCallback);
24
+
25
+ // Unregisters the callback.
26
+ // Waits for all ongoing callback invocations to finish.
27
+ ~DynamicCounter();
28
+
29
+ private:
30
+ struct Guard;
31
+ std::unique_ptr<Guard> guard_;
32
+ };
33
+
34
+ namespace detail {
35
+ class DynamicCounterBackendIf {
36
+ public:
37
+ virtual ~DynamicCounterBackendIf() = default;
38
+
39
+ virtual void registerCounter(
40
+ std::string_view key,
41
+ DynamicCounter::Callback getCounterCallback) = 0;
42
+ // MUST wait for all ongoing callback invocations to finish
43
+ virtual void unregisterCounter(std::string_view key) = 0;
44
+ };
45
+
46
+ void C10_API
47
+ registerDynamicCounterBackend(std::unique_ptr<DynamicCounterBackendIf>);
48
+ } // namespace detail
49
+ } // namespace c10::monitor
videochat2/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef C10_UTIL_FBCODEMAPS_H_
2
+ #define C10_UTIL_FBCODEMAPS_H_
3
+
4
+ // Map typedefs so that we can use folly's F14 maps in fbcode without
5
+ // taking a folly dependency.
6
+
7
+ #ifdef FBCODE_CAFFE2
8
+ #include <folly/container/F14Map.h>
9
+ #include <folly/container/F14Set.h>
10
+ #else
11
+ #include <unordered_map>
12
+ #include <unordered_set>
13
+ #endif
14
+
15
+ namespace c10 {
16
+ #ifdef FBCODE_CAFFE2
17
+ template <typename Key, typename Value>
18
+ using FastMap = folly::F14FastMap<Key, Value>;
19
+ template <typename Key>
20
+ using FastSet = folly::F14FastSet<Key>;
21
+ #else
22
+ template <typename Key, typename Value>
23
+ using FastMap = std::unordered_map<Key, Value>;
24
+ template <typename Key>
25
+ using FastSet = std::unordered_set<Key>;
26
+ #endif
27
+ } // namespace c10
28
+
29
+ #endif // C10_UTIL_FBCODEMAPS_H_
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Flags.h ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef C10_UTIL_FLAGS_H_
2
+ #define C10_UTIL_FLAGS_H_
3
+
4
+ /* Commandline flags support for C10.
5
+ *
6
+ * This is a portable commandline flags tool for c10, so we can optionally
7
+ * choose to use gflags or a lightweight custom implementation if gflags is
8
+ * not possible on a certain platform. If you have gflags installed, set the
9
+ * macro C10_USE_GFLAGS will seamlessly route everything to gflags.
10
+ *
11
+ * To define a flag foo of type bool default to true, do the following in the
12
+ * *global* namespace:
13
+ * C10_DEFINE_bool(foo, true, "An example.");
14
+ *
15
+ * To use it in another .cc file, you can use C10_DECLARE_* as follows:
16
+ * C10_DECLARE_bool(foo);
17
+ *
18
+ * In both cases, you can then access the flag via FLAGS_foo.
19
+ *
20
+ * It is recommended that you build with gflags. To learn more about the flags
21
+ * usage, refer to the gflags page here:
22
+ *
23
+ * https://gflags.github.io/gflags/
24
+ *
25
+ * Note about Python users / devs: gflags is initiated from a C++ function
26
+ * ParseCommandLineFlags, and is usually done in native binaries in the main
27
+ * function. As Python does not have a modifiable main function, it is usually
28
+ * difficult to change the flags after Python starts. Hence, it is recommended
29
+ * that one sets the default value of the flags to one that's acceptable in
30
+ * general - that will allow Python to run without wrong flags.
31
+ */
32
+
33
+ #include <c10/macros/Export.h>
34
+ #include <string>
35
+
36
+ #include <c10/util/Registry.h>
37
+
38
+ namespace c10 {
39
+ /**
40
+ * Sets the usage message when a commandline tool is called with "--help".
41
+ */
42
+ C10_API void SetUsageMessage(const std::string& str);
43
+
44
+ /**
45
+ * Returns the usage message for the commandline tool set by SetUsageMessage.
46
+ */
47
+ C10_API const char* UsageMessage();
48
+
49
+ /**
50
+ * Parses the commandline flags.
51
+ *
52
+ * This command parses all the commandline arguments passed in via pargc
53
+ * and argv. Once it is finished, partc and argv will contain the remaining
54
+ * commandline args that c10 does not deal with. Note that following
55
+ * convention, argv[0] contains the binary name and is not parsed.
56
+ */
57
+ C10_API bool ParseCommandLineFlags(int* pargc, char*** pargv);
58
+
59
+ /**
60
+ * Checks if the commandline flags has already been passed.
61
+ */
62
+ C10_API bool CommandLineFlagsHasBeenParsed();
63
+
64
+ } // namespace c10
65
+
66
+ ////////////////////////////////////////////////////////////////////////////////
67
+ // Below are gflags and non-gflags specific implementations.
68
+ // In general, they define the following macros for one to declare (use
69
+ // C10_DECLARE) or define (use C10_DEFINE) flags:
70
+ // C10_{DECLARE,DEFINE}_{int,int64,double,bool,string}
71
+ ////////////////////////////////////////////////////////////////////////////////
72
+
73
+ #ifdef C10_USE_GFLAGS
74
+
75
+ ////////////////////////////////////////////////////////////////////////////////
76
+ // Begin gflags section: most functions are basically rerouted to gflags.
77
+ ////////////////////////////////////////////////////////////////////////////////
78
+ #include <gflags/gflags.h>
79
+
80
+ // C10 uses hidden visibility by default. However, in gflags, it only uses
81
+ // export on Windows platform (with dllexport) but not on linux/mac (with
82
+ // default visibility). As a result, to ensure that we are always exporting
83
+ // global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we
84
+ // are building C10 as a shared library.
85
+ // This has to be done after the inclusion of gflags, because some early
86
+ // versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the
87
+ // macros, so we need to do definition after gflags is done.
88
+ #ifdef GFLAGS_DLL_DEFINE_FLAG
89
+ #undef GFLAGS_DLL_DEFINE_FLAG
90
+ #endif // GFLAGS_DLL_DEFINE_FLAG
91
+ #ifdef GFLAGS_DLL_DECLARE_FLAG
92
+ #undef GFLAGS_DLL_DECLARE_FLAG
93
+ #endif // GFLAGS_DLL_DECLARE_FLAG
94
+ #define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT
95
+ #define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT
96
+
97
+ // gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags.
98
+ // Using GFLAGS_GFLAGS_H_ to capture this change.
99
+ #ifndef GFLAGS_GFLAGS_H_
100
+ namespace gflags = google;
101
+ #endif // GFLAGS_GFLAGS_H_
102
+
103
+ // Motivation about the gflags wrapper:
104
+ // (1) We would need to make sure that the gflags version and the non-gflags
105
+ // version of C10 are going to expose the same flags abstraction. One should
106
+ // explicitly use FLAGS_flag_name to access the flags.
107
+ // (2) For flag names, it is recommended to start with c10_ to distinguish it
108
+ // from regular gflags flags. For example, do
109
+ // C10_DEFINE_BOOL(c10_my_flag, true, "An example");
110
+ // to allow one to use FLAGS_c10_my_flag.
111
+ // (3) Gflags has a design issue that does not properly expose the global flags,
112
+ // if one builds the library with -fvisibility=hidden. The current gflags (as of
113
+ // Aug 2018) only deals with the Windows case using dllexport, and not the Linux
114
+ // counterparts. As a result, we will explicitly use C10_EXPORT to export the
115
+ // flags defined in C10. This is done via a global reference, so the flag
116
+ // itself is not duplicated - under the hood it is the same global gflags flag.
117
+ #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) \
118
+ DEFINE_##type(name, default_value, help_str);
119
+
120
+ #define C10_DEFINE_int(name, default_value, help_str) \
121
+ C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str)
122
+ #define C10_DEFINE_int32(name, default_value, help_str) \
123
+ C10_DEFINE_int(name, default_value, help_str)
124
+ #define C10_DEFINE_int64(name, default_value, help_str) \
125
+ C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str)
126
+ #define C10_DEFINE_double(name, default_value, help_str) \
127
+ C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str)
128
+ #define C10_DEFINE_bool(name, default_value, help_str) \
129
+ C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str)
130
+ #define C10_DEFINE_string(name, default_value, help_str) \
131
+ C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str)
132
+
133
+ // DECLARE_typed_var should be used in header files and in the global namespace.
134
+ #define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name);
135
+
136
+ #define C10_DECLARE_int(name) \
137
+ C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name)
138
+ #define C10_DECLARE_int32(name) C10_DECLARE_int(name)
139
+ #define C10_DECLARE_int64(name) \
140
+ C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name)
141
+ #define C10_DECLARE_double(name) \
142
+ C10_GFLAGS_DECLARE_WRAPPER(double, double, name)
143
+ #define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name)
144
+ #define C10_DECLARE_string(name) \
145
+ C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name)
146
+
147
+ ////////////////////////////////////////////////////////////////////////////////
148
+ // End gflags section.
149
+ ////////////////////////////////////////////////////////////////////////////////
150
+
151
+ #else // C10_USE_GFLAGS
152
+
153
+ ////////////////////////////////////////////////////////////////////////////////
154
+ // Begin non-gflags section: providing equivalent functionality.
155
+ ////////////////////////////////////////////////////////////////////////////////
156
+
157
+ namespace c10 {
158
+
159
+ class C10_API C10FlagParser {
160
+ public:
161
+ bool success() {
162
+ return success_;
163
+ }
164
+
165
+ protected:
166
+ template <typename T>
167
+ bool Parse(const std::string& content, T* value);
168
+ bool success_{false};
169
+ };
170
+
171
+ C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&);
172
+
173
+ } // namespace c10
174
+
175
+ // The macros are defined outside the c10 namespace. In your code, you should
176
+ // write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace
177
+ // as well.
178
+
179
+ #define C10_DEFINE_typed_var(type, name, default_value, help_str) \
180
+ C10_EXPORT type FLAGS_##name = default_value; \
181
+ namespace c10 { \
182
+ namespace { \
183
+ class C10FlagParser_##name : public C10FlagParser { \
184
+ public: \
185
+ explicit C10FlagParser_##name(const std::string& content) { \
186
+ success_ = C10FlagParser::Parse<type>(content, &FLAGS_##name); \
187
+ } \
188
+ }; \
189
+ } \
190
+ RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( \
191
+ #name, \
192
+ C10FlagsRegistry(), \
193
+ RegistererC10FlagsRegistry::DefaultCreator<C10FlagParser_##name>, \
194
+ "(" #type ", default " #default_value ") " help_str); \
195
+ }
196
+
197
+ #define C10_DEFINE_int(name, default_value, help_str) \
198
+ C10_DEFINE_typed_var(int, name, default_value, help_str)
199
+ #define C10_DEFINE_int32(name, default_value, help_str) \
200
+ C10_DEFINE_int(name, default_value, help_str)
201
+ #define C10_DEFINE_int64(name, default_value, help_str) \
202
+ C10_DEFINE_typed_var(int64_t, name, default_value, help_str)
203
+ #define C10_DEFINE_double(name, default_value, help_str) \
204
+ C10_DEFINE_typed_var(double, name, default_value, help_str)
205
+ #define C10_DEFINE_bool(name, default_value, help_str) \
206
+ C10_DEFINE_typed_var(bool, name, default_value, help_str)
207
+ #define C10_DEFINE_string(name, default_value, help_str) \
208
+ C10_DEFINE_typed_var(std::string, name, default_value, help_str)
209
+
210
+ // DECLARE_typed_var should be used in header files and in the global namespace.
211
+ #define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name
212
+
213
+ #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name)
214
+ #define C10_DECLARE_int32(name) C10_DECLARE_int(name)
215
+ #define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name)
216
+ #define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name)
217
+ #define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name)
218
+ #define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name)
219
+
220
+ ////////////////////////////////////////////////////////////////////////////////
221
+ // End non-gflags section.
222
+ ////////////////////////////////////////////////////////////////////////////////
223
+
224
+ #endif // C10_USE_GFLAGS
225
+
226
+ #endif // C10_UTIL_FLAGS_H_
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e4m3fn type (8-bit floating-point) including conversions
4
+ /// to standard C types and basic arithmetic operations. Note that arithmetic
5
+ /// operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration:
8
+ /// s eeee mmm
9
+ /// 1 sign bit
10
+ /// 4 exponent bits
11
+ /// 3 mantissa bits
12
+ /// bias = 7
13
+ ///
14
+ /// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
15
+ /// and inspired by Half implementation from pytorch/c10/util/Half.h
16
+
17
+ #include <c10/macros/Macros.h>
18
+ #include <c10/util/floating_point_utils.h>
19
+
20
+ #if defined(__cplusplus)
21
+ #include <cmath>
22
+ #include <cstdint>
23
+ #elif !defined(__OPENCL_VERSION__)
24
+ #include <math.h>
25
+ #include <stdint.h>
26
+ #endif
27
+
28
+ #ifdef _MSC_VER
29
+ #include <intrin.h>
30
+ #endif
31
+
32
+ #include <climits>
33
+ #include <iostream>
34
+
35
+ namespace c10 {
36
+
37
+ namespace detail {
38
+
39
/*
 * Convert a 8-bit floating-point number in fp8 E4M3FN format, in bit
 * representation, to a 32-bit floating-point number in IEEE single-precision
 * format, in bit representation.
 *
 * @param input the fp8 byte: 1 sign, 4 exponent (bias 7), 3 mantissa bits.
 * @return the numerically equal float32 value (NaN inputs map to float NaN).
 *
 * @note The implementation doesn't use any floating-point operations.
 */
inline C10_HOST_DEVICE float fp8e4m3fn_to_fp32_value(uint8_t input) {
  /*
   * Extend the fp8 E4M3FN number to 32 bits and shift to the
   * upper part of the 32-bit word:
   *      +---+----+---+-----------------------------+
   *      | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
   *      +---+----+---+-----------------------------+
   * Bits  31 27-30 24-26          0-23
   *
   * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
   * - zero bits.
   */
  const uint32_t w = (uint32_t)input << 24;
  /*
   * Extract the sign of the input number into the high bit of the 32-bit word:
   *
   *      +---+----------------------------------+
   *      | S |0000000 00000000 00000000 00000000|
   *      +---+----------------------------------+
   * Bits  31                 0-31
   */
  const uint32_t sign = w & UINT32_C(0x80000000);
  /*
   * Extract mantissa and biased exponent of the input number into the bits 0-30
   * of the 32-bit word:
   *
   *      +---+----+---+-----------------------------+
   *      | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
   *      +---+----+---+-----------------------------+
   * Bits  31 27-30 24-26          0-23
   */
  const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
  /*
   * Renorm shift is the number of bits to shift mantissa left to make the
   * half-precision number normalized. If the initial number is normalized, some
   * of its high 5 bits (sign == 0 and 4-bit exponent) equals one. In this case
   * renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note
   * that if we shift denormalized nonsign by renorm_shift, the unit bit of
   * mantissa will shift into exponent, turning the biased exponent into 1, and
   * making mantissa normalized (i.e. without leading 1).
   */
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
  uint32_t renorm_shift = __clz(nonsign);
#elif defined(__SYCL_DEVICE_ONLY__)
  // Note: zero is not a supported input into `__builtin_clz`
  uint32_t renorm_shift =
      nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
#elif defined(_MSC_VER)
  unsigned long nonsign_bsr;
  _BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
  uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
#else
  // Note: zero is not a supported input into `__builtin_clz`
  uint32_t renorm_shift =
      nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
#endif
  // A normal input has exactly 4 leading zeros here (cleared sign bit above a
  // nonzero 4-bit exponent); only the excess beyond 4 needs renormalization.
  renorm_shift = renorm_shift > 4 ? renorm_shift - 4 : 0;
  /*
   * Iff fp8e4m3fn number has all exponent and mantissa bits set to 1,
   * the addition overflows it into bit 31, and the subsequent shift turns the
   * high 9 bits into 1. Thus inf_nan_mask == 0x7F800000 if the fp8e4m3fn number
   * is Nan, 0x00000000 otherwise
   */
  const int32_t inf_nan_mask =
      ((int32_t)(nonsign + 0x01000000) >> 8) & INT32_C(0x7F800000);
  /*
   * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
   * into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
   * broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
   * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h)
   * 0x00000000 otherwise
   */
  const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
  /*
   * 1. Shift nonsign left by renorm_shift to normalize it (if the input
   * was denormal)
   * 2. Shift nonsign right by 4 so the exponent (4 bits originally)
   * becomes an 8-bit field and 3-bit mantissa shifts into the 3 high
   * bits of the 23-bit mantissa of IEEE single-precision number.
   * 3. Add 0x78 to the exponent (starting at bit 23) to compensate the
   * different in exponent bias (0x7F for single-precision number less 0x07
   * for fp8e4m3fn number).
   * 4. Subtract renorm_shift from the exponent (starting at bit 23) to
   * account for renormalization. As renorm_shift is less than 0x78, this
   * can be combined with step 3.
   * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
   * input was NaN or infinity.
   * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
   * into zero if the input was zero.
   * 7. Combine with the sign of the input number.
   */
  uint32_t result = sign |
      ((((nonsign << renorm_shift >> 4) + ((0x78 - renorm_shift) << 23)) |
        inf_nan_mask) &
       ~zero_mask);
  return fp32_from_bits(result);
}
143
+
144
/*
 * Convert a 32-bit floating-point number in IEEE single-precision format to a
 * 8-bit floating-point number in fp8 E4M3FN format, in bit representation.
 *
 * @param f the float32 value to convert.
 * @return the nearest fp8 E4M3FN bit pattern; magnitudes >= 480.0 and NaN
 *         collapse to the NaN encoding (0x7F) carrying the input's sign.
 */
inline C10_HOST_DEVICE uint8_t fp8e4m3fn_from_fp32_value(float f) {
  /*
   * Binary representation of 480.0f, which is the first value
   * not representable in fp8e4m3fn range:
   * 0 1111 111 - fp8e4m3fn
   * 0 10000111 11100000000000000000000 - fp32
   */
  constexpr uint32_t fp8_max = UINT32_C(1087) << 20;

  /*
   * A mask for converting fp32 numbers lower than fp8e4m3fn normal range
   * into denorm representation
   * magic number: ((127 - 7) + (23 - 3) + 1)
   */
  constexpr uint32_t denorm_mask = UINT32_C(141) << 23;

  uint32_t f_bits = fp32_to_bits(f);

  uint8_t result = 0u;

  /*
   * Extract the sign of the input number into the high bit of the 32-bit word:
   *
   *      +---+----------------------------------+
   *      | S |0000000 00000000 00000000 00000000|
   *      +---+----------------------------------+
   * Bits  31                 0-31
   */
  const uint32_t sign = f_bits & UINT32_C(0x80000000);

  /*
   * Set sign bit to 0
   */
  f_bits ^= sign;

  if (f_bits >= fp8_max) {
    // NaN - all exponent and mantissa bits set to 1
    result = 0x7f;
  } else {
    if (f_bits < (UINT32_C(121) << 23)) {
      // Input number is smaller than 2^(-6), which is the smallest
      // fp8e4m3fn normal number
      // The float addition of the magic constant rounds the significand so
      // that the low bits of the sum hold the subnormal fp8 result.
      f_bits =
          fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
      result = static_cast<uint8_t>(f_bits - denorm_mask);
    } else {
      // resulting mantissa is odd
      uint8_t mant_odd = (f_bits >> 20) & 1;

      // update exponent, rounding bias part 1
      // (rebias exponent from 127 to 7; 0x7FFFF biases toward nearest)
      f_bits += ((uint32_t)(7 - 127) << 23) + 0x7FFFF;

      // rounding bias part 2: adds 1 only for odd mantissas, so exact ties
      // round toward an even result
      f_bits += mant_odd;

      // take the bits!
      result = static_cast<uint8_t>(f_bits >> 20);
    }
  }

  result |= static_cast<uint8_t>(sign >> 24);
  return result;
}
211
+
212
+ } // namespace detail
213
+
214
/// 8-bit floating-point number in E4M3FN format: 1 sign bit, 4 exponent bits,
/// 3 mantissa bits, bias 7. "FN" = finite: no infinities, and NaN is encoded
/// with all exponent and mantissa bits set.
struct alignas(1) Float8_e4m3fn {
  uint8_t x; // raw bit pattern

  /// Tag type that selects the raw-bits constructor below.
  struct from_bits_t {};
  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
    return from_bits_t();
  }

  /// Default constructor leaves `x` uninitialized.
  Float8_e4m3fn() = default;

  /// Constructs directly from a raw bit pattern, with no conversion.
  constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t)
      : x(bits) {}
  /// Converting constructor from float; defined in Float8_e4m3fn-inl.h.
  inline C10_HOST_DEVICE Float8_e4m3fn(float value);
  /// Implicit conversion back to float; defined in Float8_e4m3fn-inl.h.
  inline C10_HOST_DEVICE operator float() const;
  /// True iff the stored bits encode NaN; defined in Float8_e4m3fn-inl.h.
  inline C10_HOST_DEVICE bool isnan() const;
};
230
+
231
+ C10_API inline std::ostream& operator<<(
232
+ std::ostream& out,
233
+ const Float8_e4m3fn& value) {
234
+ out << (float)value;
235
+ return out;
236
+ }
237
+
238
+ } // namespace c10
239
+
240
+ #include <c10/util/Float8_e4m3fn-inl.h> // IWYU pragma: keep
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e4m3fnuz type (8-bit floating-point) including
4
+ /// conversions to standard C types and basic arithmetic operations. Note that
5
+ /// arithmetic operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration remains the same as Float8_e4m3fn:
8
+ /// s eeee mmm
9
+ /// 1 sign bit
10
+ /// 4 exponent bits
11
+ /// 3 mantissa bits
12
+ /// The key differences versus Float8_e4m3fn are:
13
+ /// bias = 8
14
+ /// no infinities or negative zero
15
+ /// NaN only when sign bit is 1, rest all 0s
16
+ ///
17
+ /// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
18
+ /// the existing Float8_e4m3fn implementation.
19
+
20
+ #include <c10/macros/Export.h>
21
+ #include <c10/macros/Macros.h>
22
+ #include <c10/util/floating_point_utils.h>
23
+ #include <type_traits>
24
+
25
+ #if defined(__cplusplus)
26
+ #include <cstdint>
27
+ #elif !defined(__OPENCL_VERSION__)
28
+ #include <math.h>
29
+ #include <stdint.h>
30
+ #endif
31
+
32
+ #include <iosfwd>
33
+ #include <ostream>
34
+
35
+ namespace c10 {
36
+
37
+ namespace detail {
38
+
39
/*
 * Convert a 32-bit floating-point number in IEEE single-precision format to a
 * 8-bit floating-point number in fp8 E4M3FNUZ format, in bit representation.
 *
 * @param f the float32 value to convert.
 * @return the nearest fp8 E4M3FNUZ bit pattern; magnitudes >= 256.0 and NaN
 *         collapse to 0x80, the format's unique NaN encoding.
 */
inline C10_HOST_DEVICE uint8_t fp8e4m3fnuz_from_fp32_value(float f) {
  /*
   * Binary representation of 256.0f, which is the first value not representable
   * (i.e. the first value which would overflow in to the sign bit, resulting in
   * a NaN) in fp8e4m3fnuz range:
   * 1 0000 000 - fp8e4m3fnuz
   * 0 10000111 00000000000000000000000 - fp32
   */
  constexpr uint32_t fnuz_max = UINT32_C(0x87) << 23;

  /*
   * A mask for converting fp32 numbers lower than fp8e4m3fnuz normal range
   * into denorm representation
   * magic number: ((127 - 8) + (23 - 3) + 1)
   */
  constexpr uint32_t denorm_mask = UINT32_C(0x8C) << 23;

  uint32_t f_bits = fp32_to_bits(f);

  // Only the low 8 bits are ever set; implicitly narrowed on return.
  uint32_t result = 0u;

  /*
   * Extract the sign of the input number into the high bit of the 32-bit word:
   *
   *      +---+----------------------------------+
   *      | S |0000000 00000000 00000000 00000000|
   *      +---+----------------------------------+
   * Bits  31                 0-31
   */
  const uint32_t sign = f_bits & UINT32_C(0x80000000);

  /*
   * Set sign bit to 0
   */
  f_bits ^= sign;

  if (f_bits >= fnuz_max) {
    // NaN -- sign bit set to 1, rest 0s.
    return 0x80;
  }

  if (f_bits < (UINT32_C(0x78) << 23) /* 2^-7 in float32 */) {
    // Input exponent is less than -7, the smallest e4m3fnuz exponent, so the
    // number will become subnormal.
    // The float addition of the magic constant performs the rounding; the low
    // bits of the sum hold the subnormal fp8 result.
    f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
    result = static_cast<uint8_t>(f_bits - denorm_mask);
    if (result == 0) {
      // fnuz types don't have negative zero.
      return 0;
    }
  } else {
    // resulting mantissa is odd
    uint8_t mant_odd = (f_bits >> 20) & 1;

    // update exponent, rounding bias part 1 (rebias exponent from 127 to 8)
    f_bits += ((uint32_t)(8 - 127) << 23) + 0x7FFFF;

    // rounding bias part 2: exact ties round toward an even mantissa
    f_bits += mant_odd;

    // take the bits!
    result = static_cast<uint8_t>(f_bits >> 20);
  }

  result |= sign >> 24;
  return result;
}
110
+
111
+ } // namespace detail
112
+
113
/// 8-bit floating-point number in E4M3FNUZ format: 1 sign bit, 4 exponent
/// bits, 3 mantissa bits, bias 8. "FNUZ" = finite, no negative zero: there
/// are no infinities, and the only NaN is the pattern with just the sign bit
/// set (0x80).
struct alignas(1) Float8_e4m3fnuz {
  uint8_t x; // raw bit pattern

  /// Tag type that selects the raw-bits constructor below.
  struct from_bits_t {};
  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
    return from_bits_t();
  }

  /// Default constructor leaves `x` uninitialized.
  Float8_e4m3fnuz() = default;

  /// Constructs directly from a raw bit pattern, with no conversion.
  constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t)
      : x(bits) {}
  /// Converting constructor from float; defined in Float8_e4m3fnuz-inl.h.
  inline C10_HOST_DEVICE Float8_e4m3fnuz(float value);
  /// Implicit conversion back to float; defined in Float8_e4m3fnuz-inl.h.
  inline C10_HOST_DEVICE operator float() const;
  /// True iff the stored bits encode NaN; defined in Float8_e4m3fnuz-inl.h.
  inline C10_HOST_DEVICE bool isnan() const;
};
129
+
130
+ C10_API inline std::ostream& operator<<(
131
+ std::ostream& out,
132
+ const Float8_e4m3fnuz& value) {
133
+ out << (float)value;
134
+ return out;
135
+ }
136
+
137
+ } // namespace c10
138
+
139
+ #include <c10/util/Float8_e4m3fnuz-inl.h> // IWYU pragma: keep
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <cstring>
5
+ #include <limits>
6
+
7
+ C10_CLANG_DIAGNOSTIC_PUSH()
8
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
9
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
10
+ #endif
11
+
12
+ #define EXP_WIDTH_FP8 5
13
+ #define MAN_WIDTH_FP8 2
14
+ #define EXP_BIAS_FP8 15
15
+
16
+ namespace c10 {
17
+
18
/// Constructors

/// Converts a float32 value to fp8 e5m2 bits.
inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value)
    : x(detail::fp8e5m2_from_fp32_value(value)) {}

/// Implicit conversions

/// Decodes the stored fp8 e5m2 bits back to float32.
inline C10_HOST_DEVICE Float8_e5m2::operator float() const {
  return detail::fp8e5m2_to_fp32_value(x);
}

/// Special values helpers

/// NaN: exponent field all ones (0b11111) with a nonzero mantissa, any sign.
inline C10_HOST_DEVICE bool Float8_e5m2::isnan() const {
  return (x & 0b01111111) > 0b01111100;
}

/// Infinity: exponent field all ones with a zero mantissa, any sign.
inline C10_HOST_DEVICE bool Float8_e5m2::isinf() const {
  return (x & 0b01111111) == 0b01111100;
}
38
+
39
+ /// Arithmetic
40
+
41
+ inline C10_HOST_DEVICE Float8_e5m2
42
+ operator+(const Float8_e5m2& a, const Float8_e5m2& b) {
43
+ return static_cast<float>(a) + static_cast<float>(b);
44
+ }
45
+
46
+ inline C10_HOST_DEVICE Float8_e5m2
47
+ operator-(const Float8_e5m2& a, const Float8_e5m2& b) {
48
+ return static_cast<float>(a) - static_cast<float>(b);
49
+ }
50
+
51
+ inline C10_HOST_DEVICE Float8_e5m2
52
+ operator*(const Float8_e5m2& a, const Float8_e5m2& b) {
53
+ return static_cast<float>(a) * static_cast<float>(b);
54
+ }
55
+
56
+ inline C10_HOST_DEVICE Float8_e5m2 operator/(
57
+ const Float8_e5m2& a,
58
+ const Float8_e5m2& b) __ubsan_ignore_float_divide_by_zero__ {
59
+ return static_cast<float>(a) / static_cast<float>(b);
60
+ }
61
+
62
+ inline C10_HOST_DEVICE Float8_e5m2 operator-(const Float8_e5m2& a) {
63
+ return -static_cast<float>(a);
64
+ }
65
+
66
+ inline C10_HOST_DEVICE Float8_e5m2& operator+=(
67
+ Float8_e5m2& a,
68
+ const Float8_e5m2& b) {
69
+ a = a + b;
70
+ return a;
71
+ }
72
+
73
+ inline C10_HOST_DEVICE Float8_e5m2& operator-=(
74
+ Float8_e5m2& a,
75
+ const Float8_e5m2& b) {
76
+ a = a - b;
77
+ return a;
78
+ }
79
+
80
+ inline C10_HOST_DEVICE Float8_e5m2& operator*=(
81
+ Float8_e5m2& a,
82
+ const Float8_e5m2& b) {
83
+ a = a * b;
84
+ return a;
85
+ }
86
+
87
+ inline C10_HOST_DEVICE Float8_e5m2& operator/=(
88
+ Float8_e5m2& a,
89
+ const Float8_e5m2& b) {
90
+ a = a / b;
91
+ return a;
92
+ }
93
+
94
/// Arithmetic with floats
/// The fp8 operand is widened to float32; results stay float (they are not
/// rounded back to fp8).

inline C10_HOST_DEVICE float operator+(Float8_e5m2 a, float b) {
  return static_cast<float>(a) + b;
}
inline C10_HOST_DEVICE float operator-(Float8_e5m2 a, float b) {
  return static_cast<float>(a) - b;
}
inline C10_HOST_DEVICE float operator*(Float8_e5m2 a, float b) {
  return static_cast<float>(a) * b;
}
// Division by zero is deliberately tolerated; UBSan is silenced here as for
// the other division operators in this header.
inline C10_HOST_DEVICE float operator/(Float8_e5m2 a, float b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<float>(a) / b;
}

inline C10_HOST_DEVICE float operator+(float a, Float8_e5m2 b) {
  return a + static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator-(float a, Float8_e5m2 b) {
  return a - static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator*(float a, Float8_e5m2 b) {
  return a * static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator/(float a, Float8_e5m2 b)
    __ubsan_ignore_float_divide_by_zero__ {
  return a / static_cast<float>(b);
}

// Compound assignment into a float accumulator (the accumulator keeps full
// float precision).
inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e5m2& b) {
  return a += static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e5m2& b) {
  return a -= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e5m2& b) {
  return a *= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e5m2& b) {
  return a /= static_cast<float>(b);
}
136
+
137
/// Arithmetic with doubles
/// The fp8 operand is widened to double; results stay double.

inline C10_HOST_DEVICE double operator+(Float8_e5m2 a, double b) {
  return static_cast<double>(a) + b;
}
inline C10_HOST_DEVICE double operator-(Float8_e5m2 a, double b) {
  return static_cast<double>(a) - b;
}
inline C10_HOST_DEVICE double operator*(Float8_e5m2 a, double b) {
  return static_cast<double>(a) * b;
}
inline C10_HOST_DEVICE double operator/(Float8_e5m2 a, double b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<double>(a) / b;
}

inline C10_HOST_DEVICE double operator+(double a, Float8_e5m2 b) {
  return a + static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator-(double a, Float8_e5m2 b) {
  return a - static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator*(double a, Float8_e5m2 b) {
  return a * static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator/(double a, Float8_e5m2 b)
    __ubsan_ignore_float_divide_by_zero__ {
  return a / static_cast<double>(b);
}
166
+
167
/// Arithmetic with ints
/// The integer operand is first converted (rounded) to Float8_e5m2, then the
/// fp8+fp8 operators above apply, so the result is an fp8 value.

inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int b) {
  return a + static_cast<Float8_e5m2>(b);
}
inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int b) {
  return a - static_cast<Float8_e5m2>(b);
}
inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int b) {
  return a * static_cast<Float8_e5m2>(b);
}
inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int b) {
  return a / static_cast<Float8_e5m2>(b);
}

inline C10_HOST_DEVICE Float8_e5m2 operator+(int a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) + b;
}
inline C10_HOST_DEVICE Float8_e5m2 operator-(int a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) - b;
}
inline C10_HOST_DEVICE Float8_e5m2 operator*(int a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) * b;
}
inline C10_HOST_DEVICE Float8_e5m2 operator/(int a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) / b;
}

//// Arithmetic with int64_t
//// Same convention: the integer is rounded into fp8 before the operation.

inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int64_t b) {
  return a + static_cast<Float8_e5m2>(b);
}
inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int64_t b) {
  return a - static_cast<Float8_e5m2>(b);
}
inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int64_t b) {
  return a * static_cast<Float8_e5m2>(b);
}
inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int64_t b) {
  return a / static_cast<Float8_e5m2>(b);
}

inline C10_HOST_DEVICE Float8_e5m2 operator+(int64_t a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) + b;
}
inline C10_HOST_DEVICE Float8_e5m2 operator-(int64_t a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) - b;
}
inline C10_HOST_DEVICE Float8_e5m2 operator*(int64_t a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) * b;
}
inline C10_HOST_DEVICE Float8_e5m2 operator/(int64_t a, Float8_e5m2 b) {
  return static_cast<Float8_e5m2>(a) / b;
}
222
+
223
+ /// NOTE: we do not define comparisons directly and instead rely on the implicit
224
+ /// conversion from c10::Float8_e5m2 to float.
225
+
226
+ } // namespace c10
227
+
228
+ namespace std {
229
+
230
// Traits for fp8 e5m2: 1 sign, 5 exponent (bias 15), 2 mantissa bits, with
// IEEE-style infinities and NaNs.
template <>
class numeric_limits<c10::Float8_e5m2> {
 public:
  static constexpr bool is_signed = true;
  static constexpr bool is_integer = false;
  static constexpr bool is_specialized = true;
  static constexpr bool is_exact = false;
  static constexpr bool has_infinity = true;
  static constexpr bool has_quiet_NaN = true;
  static constexpr bool has_signaling_NaN = false;
  static constexpr auto has_denorm = true;
  static constexpr auto has_denorm_loss = true;
  static constexpr auto round_style = numeric_limits<float>::round_style;
  static constexpr bool is_iec559 = false;
  static constexpr bool is_bounded = true;
  static constexpr bool is_modulo = false;
  static constexpr int digits = 3; // 2 stored mantissa bits + 1 implicit bit
  static constexpr int digits10 = 0;
  static constexpr int max_digits10 = 2;
  static constexpr int radix = 2;
  static constexpr int min_exponent = -13;
  static constexpr int min_exponent10 = -4;
  static constexpr int max_exponent = 16;
  static constexpr int max_exponent10 = 4;
  static constexpr auto traps = numeric_limits<float>::traps;
  static constexpr auto tinyness_before =
      numeric_limits<float>::tinyness_before;

  // Smallest positive normal value: 0x04 = 2^-14.
  static constexpr c10::Float8_e5m2 min() {
    return c10::Float8_e5m2(0x4, c10::Float8_e5m2::from_bits());
  }
  // Largest finite value: 0x7B = 1.75 * 2^15 = 57344.
  static constexpr c10::Float8_e5m2 max() {
    return c10::Float8_e5m2(0x7B, c10::Float8_e5m2::from_bits());
  }
  // Most negative finite value: 0xFB = -57344.
  static constexpr c10::Float8_e5m2 lowest() {
    return c10::Float8_e5m2(0xFB, c10::Float8_e5m2::from_bits());
  }
  // Machine epsilon: 0x34 = 0.25 (= 2^(1 - digits)).
  static constexpr c10::Float8_e5m2 epsilon() {
    return c10::Float8_e5m2(0x34, c10::Float8_e5m2::from_bits());
  }
  // Largest rounding error: 0x38 = 0.5.
  static constexpr c10::Float8_e5m2 round_error() {
    return c10::Float8_e5m2(0x38, c10::Float8_e5m2::from_bits());
  }
  // Positive infinity: exponent field all ones, mantissa zero.
  static constexpr c10::Float8_e5m2 infinity() {
    return c10::Float8_e5m2(0x7C, c10::Float8_e5m2::from_bits());
  }
  // Quiet NaN: exponent field all ones, mantissa nonzero.
  static constexpr c10::Float8_e5m2 quiet_NaN() {
    return c10::Float8_e5m2(0x7F, c10::Float8_e5m2::from_bits());
  }
  // Smallest positive subnormal value: 0x01 = 2^-16.
  static constexpr c10::Float8_e5m2 denorm_min() {
    return c10::Float8_e5m2(0x01, c10::Float8_e5m2::from_bits());
  }
};
283
+
284
+ } // namespace std
285
+
286
+ C10_CLANG_DIAGNOSTIC_POP()
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz-inl.h ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/Float8_fnuz_cvt.h>
5
+ #include <cstring>
6
+ #include <limits>
7
+
8
+ C10_CLANG_DIAGNOSTIC_PUSH()
9
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
10
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
11
+ #endif
12
+
13
+ namespace c10 {
14
+
15
/// Constructors

/// Converts a float32 value to fp8 e5m2fnuz bits.
inline C10_HOST_DEVICE Float8_e5m2fnuz::Float8_e5m2fnuz(float value)
    : x(detail::fp8e5m2fnuz_from_fp32_value(value)) {}

/// Implicit conversions

/// Decodes the stored bits back to float32 via the shared fnuz decoder
/// (5 exponent bits, 2 mantissa bits).
inline C10_HOST_DEVICE Float8_e5m2fnuz::operator float() const {
  return detail::fp8_fnuz_to_fp32_value<5, 2>(x);
}

/// Special values helpers

/// NaN is the single pattern 0x80 (sign bit set, all other bits zero).
inline C10_HOST_DEVICE bool Float8_e5m2fnuz::isnan() const {
  return x == 0b10000000;
}

/// fnuz formats have no infinity encoding.
inline C10_HOST_DEVICE bool Float8_e5m2fnuz::isinf() const {
  return false;
}
35
+
36
+ /// Arithmetic
37
+
38
+ inline C10_HOST_DEVICE Float8_e5m2fnuz
39
+ operator+(const Float8_e5m2fnuz& a, const Float8_e5m2fnuz& b) {
40
+ return static_cast<float>(a) + static_cast<float>(b);
41
+ }
42
+
43
+ inline C10_HOST_DEVICE Float8_e5m2fnuz
44
+ operator-(const Float8_e5m2fnuz& a, const Float8_e5m2fnuz& b) {
45
+ return static_cast<float>(a) - static_cast<float>(b);
46
+ }
47
+
48
+ inline C10_HOST_DEVICE Float8_e5m2fnuz
49
+ operator*(const Float8_e5m2fnuz& a, const Float8_e5m2fnuz& b) {
50
+ return static_cast<float>(a) * static_cast<float>(b);
51
+ }
52
+
53
+ inline C10_HOST_DEVICE Float8_e5m2fnuz operator/(
54
+ const Float8_e5m2fnuz& a,
55
+ const Float8_e5m2fnuz& b) __ubsan_ignore_float_divide_by_zero__ {
56
+ return static_cast<float>(a) / static_cast<float>(b);
57
+ }
58
+
59
+ inline C10_HOST_DEVICE Float8_e5m2fnuz operator-(const Float8_e5m2fnuz& a) {
60
+ return -static_cast<float>(a);
61
+ }
62
+
63
+ inline C10_HOST_DEVICE Float8_e5m2fnuz& operator+=(
64
+ Float8_e5m2fnuz& a,
65
+ const Float8_e5m2fnuz& b) {
66
+ a = a + b;
67
+ return a;
68
+ }
69
+
70
+ inline C10_HOST_DEVICE Float8_e5m2fnuz& operator-=(
71
+ Float8_e5m2fnuz& a,
72
+ const Float8_e5m2fnuz& b) {
73
+ a = a - b;
74
+ return a;
75
+ }
76
+
77
+ inline C10_HOST_DEVICE Float8_e5m2fnuz& operator*=(
78
+ Float8_e5m2fnuz& a,
79
+ const Float8_e5m2fnuz& b) {
80
+ a = a * b;
81
+ return a;
82
+ }
83
+
84
+ inline C10_HOST_DEVICE Float8_e5m2fnuz& operator/=(
85
+ Float8_e5m2fnuz& a,
86
+ const Float8_e5m2fnuz& b) {
87
+ a = a / b;
88
+ return a;
89
+ }
90
+
91
/// Arithmetic with floats
/// The fp8 operand is widened to float32; results stay float (they are not
/// rounded back to fp8).

inline C10_HOST_DEVICE float operator+(Float8_e5m2fnuz a, float b) {
  return static_cast<float>(a) + b;
}
inline C10_HOST_DEVICE float operator-(Float8_e5m2fnuz a, float b) {
  return static_cast<float>(a) - b;
}
inline C10_HOST_DEVICE float operator*(Float8_e5m2fnuz a, float b) {
  return static_cast<float>(a) * b;
}
// Division by zero is deliberately tolerated; UBSan is silenced here as for
// the other division operators in this header.
inline C10_HOST_DEVICE float operator/(Float8_e5m2fnuz a, float b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<float>(a) / b;
}

inline C10_HOST_DEVICE float operator+(float a, Float8_e5m2fnuz b) {
  return a + static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator-(float a, Float8_e5m2fnuz b) {
  return a - static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator*(float a, Float8_e5m2fnuz b) {
  return a * static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator/(float a, Float8_e5m2fnuz b)
    __ubsan_ignore_float_divide_by_zero__ {
  return a / static_cast<float>(b);
}

// Compound assignment into a float accumulator (the accumulator keeps full
// float precision).
inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e5m2fnuz& b) {
  return a += static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e5m2fnuz& b) {
  return a -= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e5m2fnuz& b) {
  return a *= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e5m2fnuz& b) {
  return a /= static_cast<float>(b);
}
133
+
134
/// Arithmetic with doubles
/// The fp8 operand is widened to double; results stay double.

inline C10_HOST_DEVICE double operator+(Float8_e5m2fnuz a, double b) {
  return static_cast<double>(a) + b;
}
inline C10_HOST_DEVICE double operator-(Float8_e5m2fnuz a, double b) {
  return static_cast<double>(a) - b;
}
inline C10_HOST_DEVICE double operator*(Float8_e5m2fnuz a, double b) {
  return static_cast<double>(a) * b;
}
inline C10_HOST_DEVICE double operator/(Float8_e5m2fnuz a, double b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<double>(a) / b;
}

inline C10_HOST_DEVICE double operator+(double a, Float8_e5m2fnuz b) {
  return a + static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator-(double a, Float8_e5m2fnuz b) {
  return a - static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator*(double a, Float8_e5m2fnuz b) {
  return a * static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator/(double a, Float8_e5m2fnuz b)
    __ubsan_ignore_float_divide_by_zero__ {
  return a / static_cast<double>(b);
}
163
+
164
/// Arithmetic with ints
/// The integer operand is first converted (rounded) to Float8_e5m2fnuz, then
/// the fp8+fp8 operators above apply, so the result is an fp8 value.

inline C10_HOST_DEVICE Float8_e5m2fnuz operator+(Float8_e5m2fnuz a, int b) {
  return a + static_cast<Float8_e5m2fnuz>(b);
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator-(Float8_e5m2fnuz a, int b) {
  return a - static_cast<Float8_e5m2fnuz>(b);
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator*(Float8_e5m2fnuz a, int b) {
  return a * static_cast<Float8_e5m2fnuz>(b);
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator/(Float8_e5m2fnuz a, int b) {
  return a / static_cast<Float8_e5m2fnuz>(b);
}

inline C10_HOST_DEVICE Float8_e5m2fnuz operator+(int a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) + b;
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator-(int a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) - b;
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator*(int a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) * b;
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator/(int a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) / b;
}

//// Arithmetic with int64_t
//// Same convention: the integer is rounded into fp8 before the operation.

inline C10_HOST_DEVICE Float8_e5m2fnuz operator+(Float8_e5m2fnuz a, int64_t b) {
  return a + static_cast<Float8_e5m2fnuz>(b);
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator-(Float8_e5m2fnuz a, int64_t b) {
  return a - static_cast<Float8_e5m2fnuz>(b);
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator*(Float8_e5m2fnuz a, int64_t b) {
  return a * static_cast<Float8_e5m2fnuz>(b);
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator/(Float8_e5m2fnuz a, int64_t b) {
  return a / static_cast<Float8_e5m2fnuz>(b);
}

inline C10_HOST_DEVICE Float8_e5m2fnuz operator+(int64_t a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) + b;
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator-(int64_t a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) - b;
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator*(int64_t a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) * b;
}
inline C10_HOST_DEVICE Float8_e5m2fnuz operator/(int64_t a, Float8_e5m2fnuz b) {
  return static_cast<Float8_e5m2fnuz>(a) / b;
}
219
+
220
+ /// NOTE: we do not define comparisons directly and instead rely on the implicit
221
+ /// conversion from c10::Float8_e5m2fnuz to float.
222
+
223
+ } // namespace c10
224
+
225
+ namespace std {
226
+
227
// Traits for fp8 e5m2fnuz: 1 sign, 5 exponent, 2 mantissa bits; no
// infinities, no negative zero, single NaN at 0x80 (see the struct's isnan).
template <>
class numeric_limits<c10::Float8_e5m2fnuz> {
 public:
  static constexpr bool is_signed = true;
  static constexpr bool is_integer = false;
  static constexpr bool is_specialized = true;
  static constexpr bool is_exact = false;
  static constexpr bool has_infinity = false;
  static constexpr bool has_quiet_NaN = true;
  static constexpr bool has_signaling_NaN = false;
  static constexpr auto has_denorm = true;
  static constexpr auto has_denorm_loss = true;
  static constexpr auto round_style = numeric_limits<float>::round_style;
  static constexpr bool is_iec559 = false;
  static constexpr bool is_bounded = true;
  static constexpr bool is_modulo = false;
  static constexpr int digits = 3; // 2 stored mantissa bits + 1 implicit bit
  static constexpr int digits10 = 0;
  static constexpr int max_digits10 = 2;
  static constexpr int radix = 2;
  static constexpr int min_exponent = -14;
  static constexpr int min_exponent10 = -4;
  static constexpr int max_exponent = 16;
  static constexpr int max_exponent10 = 4;
  static constexpr auto traps = numeric_limits<float>::traps;
  static constexpr auto tinyness_before =
      numeric_limits<float>::tinyness_before;

  // 0x04: smallest positive normal value.
  static constexpr c10::Float8_e5m2fnuz min() {
    return c10::Float8_e5m2fnuz(0x04, c10::Float8_e5m2fnuz::from_bits());
  }
  // 0x7F: largest finite value (all magnitude bits set).
  static constexpr c10::Float8_e5m2fnuz max() {
    return c10::Float8_e5m2fnuz(0x7F, c10::Float8_e5m2fnuz::from_bits());
  }
  // 0xFF: most negative finite value.
  static constexpr c10::Float8_e5m2fnuz lowest() {
    return c10::Float8_e5m2fnuz(0xFF, c10::Float8_e5m2fnuz::from_bits());
  }
  // 0x34: machine epsilon, 2^(1 - digits) = 0.25.
  static constexpr c10::Float8_e5m2fnuz epsilon() {
    return c10::Float8_e5m2fnuz(0x34, c10::Float8_e5m2fnuz::from_bits());
  }
  // 0x38: largest rounding error, 0.5.
  static constexpr c10::Float8_e5m2fnuz round_error() {
    return c10::Float8_e5m2fnuz(0x38, c10::Float8_e5m2fnuz::from_bits());
  }
  // No infinity exists in this format; this returns the NaN pattern 0x80
  // (note isinf() on the type is always false).
  static constexpr c10::Float8_e5m2fnuz infinity() {
    return c10::Float8_e5m2fnuz(0x80, c10::Float8_e5m2fnuz::from_bits());
  }
  // TODO(future): we are mapping neg_zero to both inf and NaN, this is
  // surprising and we should figure out what to do about it.
  static constexpr c10::Float8_e5m2fnuz quiet_NaN() {
    return c10::Float8_e5m2fnuz(0x80, c10::Float8_e5m2fnuz::from_bits());
  }
  // 0x01: smallest positive subnormal value.
  static constexpr c10::Float8_e5m2fnuz denorm_min() {
    return c10::Float8_e5m2fnuz(0x01, c10::Float8_e5m2fnuz::from_bits());
  }
};
282
+
283
+ } // namespace std
284
+
285
+ C10_CLANG_DIAGNOSTIC_POP()
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_fnuz_cvt.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/util/floating_point_utils.h>

#include <cstdint>

#if defined(SYCL_LANGUAGE_VERSION)
#include <sycl/sycl.hpp>
#endif

namespace c10::detail {

/*
 * Convert a 8-bit floating-point number in either f8 E4M3FNUZ or bf8 E5M2FNUZ
 * format, in bit representation, to a 32-bit floating-point number.
 *
 * Template parameters:
 *   we - exponent width of the input format (4 or 5 bits)
 *   wm - mantissa width of the input format (3 or 2 bits)
 * Only the pairs (4, 3) and (5, 2) are supported, enforced by the
 * static_assert below.
 */
template <uint32_t we, uint32_t wm>
inline C10_HOST_DEVICE float fp8_fnuz_to_fp32_value(uint8_t x) {
  static_assert((we == 4 && wm == 3) || (we == 5 && wm == 2));
  // Exponent / mantissa widths of the fp32 output.
  constexpr uint32_t weo = 8;
  constexpr uint32_t wmo = 23;

  // All bits zero is exactly +0.0.
  if (x == 0) {
    return 0;
  }

  // The "negative zero" pattern (sign bit set, everything else zero) is the
  // single NaN encoding of the fnuz formats; this function never produces an
  // infinity for any input.
  if (x == 0x80) {
    constexpr uint32_t ifNaN = 0x7F800001;
    return fp32_from_bits(ifNaN);
  }

  // Split the non-sign bits into the stored mantissa and biased exponent.
  uint32_t mantissa = x & ((1 << wm) - 1);
  uint32_t exponent = (x & 0x7F) >> wm;

  // subnormal input
  if (exponent == 0) {
    // guaranteed mantissa!=0 since cases 0x0 and 0x80 are handled above
    // Count leading zeros of the mantissa with whichever intrinsic the
    // target provides (CUDA/HIP, SYCL, MSVC, or GCC/Clang builtin).
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
    uint32_t renorm_shift = __clz(mantissa);
#elif defined(__SYCL_DEVICE_ONLY__)
    uint32_t renorm_shift = sycl::clz(mantissa);
#elif defined(_MSC_VER)
    unsigned long nonsign_bsr;
    _BitScanReverse(&nonsign_bsr, (unsigned long)mantissa);
    // _BitScanReverse yields the index of the highest set bit; XOR with 31
    // converts that to a leading-zero count.
    uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
#else
    uint32_t renorm_shift = __builtin_clz(mantissa);
#endif
    // Normalize: shift the leading 1 up out of the stored mantissa field and
    // compensate in the exponent, then drop the (now implicit) leading 1.
    uint32_t sh = 1 + renorm_shift - (32 - wm);
    mantissa <<= sh;
    exponent += 1 - sh;
    mantissa &= ((1 << wm) - 1);
  }

  // Rebias from the fnuz exponent bias to the fp32 bias. The fnuz bias is
  // one larger than the usual IEEE-style 2^(we-1)-1, hence the extra -1.
  const uint32_t exp_low_cutoff = (1 << (weo - 1)) - (1 << (we - 1));
  exponent += exp_low_cutoff - 1;
  // Left-align the mantissa into fp32's 23-bit field.
  mantissa <<= wmo - wm;

  // Reassemble sign | exponent | mantissa as an fp32 bit pattern.
  uint32_t sign = x >> 7;
  uint32_t retval = (sign << 31) | (exponent << 23) | mantissa;
  return fp32_from_bits(retval);
}

} // namespace c10::detail
videochat2/lib/python3.10/site-packages/torch/include/c10/util/FunctionRef.h ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some templates that are useful if you are working with the
// STL at all.
//
// No library is required when using these functions.
//
//===----------------------------------------------------------------------===//

// c10: modified from llvm::function_ref
// c10: added more SFINAE to enable use in overloaded functions

#pragma once

#include <cstdint>
#include <type_traits>
#include <utility>

namespace c10 {

/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
///
/// This class does not own the callable, so it is not in general safe to store
/// a function_ref.
template <typename Fn>
class function_ref;

template <typename Ret, typename... Params>
class function_ref<Ret(Params...)> {
  // Type-erased trampoline. nullptr means "empty" (see operator bool below).
  Ret (*callback)(intptr_t callable, Params... params) = nullptr;
  // Address of the referenced callable, stored as an integer; the trampoline
  // casts it back to the concrete callable type.
  intptr_t callable{};

  // Trampoline instantiated once per concrete Callable type: recovers the
  // object from its stored address and forwards the arguments to it.
  template <typename Callable>
  static Ret callback_fn(intptr_t callable, Params... params) {
    return (*reinterpret_cast<Callable*>(callable))(
        std::forward<Params>(params)...);
  }

 public:
  function_ref() = default;
  function_ref(std::nullptr_t) {}

  /// Bind to an arbitrary callable. The caller must keep the callable alive
  /// for as long as this function_ref is used. The two enable_if constraints:
  ///  1) keep this constructor from hijacking the copy constructor
  ///     (Callable must not be function_ref itself);
  ///  2) participate only when invoking Callable with Params... produces a
  ///     result convertible to Ret, which is what makes function_ref usable
  ///     in overload sets (see the c10 note at the top of this file).
  template <typename Callable>
  function_ref(
      // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
      Callable&& callable,
      std::enable_if_t<
          !std::is_same_v<std::remove_reference_t<Callable>, function_ref>>* =
          nullptr,
      std::enable_if_t<std::is_convertible_v<
          typename std::invoke_result_t<Callable, Params...>,
          Ret>>* = nullptr)
      : callback(callback_fn<std::remove_reference_t<Callable>>),
        callable(reinterpret_cast<intptr_t>(&callable)) {}

  Ret operator()(Params... params) const {
    return callback(callable, std::forward<Params>(params)...);
  }

  /// True iff this refers to a callable, i.e. it was not default- or
  /// nullptr-constructed.
  operator bool() const {
    return callback;
  }
};

} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/macros/Macros.h>
#include <c10/util/bit_cast.h>

#include <cstring>
#include <limits>

#ifdef __CUDACC__
#include <cuda_fp16.h>
#endif

#ifdef __HIPCC__
#include <hip/hip_fp16.h>
#endif

#if defined(CL_SYCL_LANGUAGE_VERSION)
#include <CL/sycl.hpp> // for SYCL 1.2.1
#elif defined(SYCL_LANGUAGE_VERSION)
#include <sycl/sycl.hpp> // for SYCL 2020
#endif

#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
    !defined(__APPLE__)
#include <ATen/cpu/vec/vec_half.h>
#endif

C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif

namespace c10 {

// Inline implementation of c10::Half: float<->half conversions dispatched to
// the best available backend (native ARM fp16, CUDA/HIP, SYCL, AVX2/AVX512
// vector helpers, or the portable bit-twiddling fallback), plus arithmetic
// operators and a std::numeric_limits specialization.

#if defined(__aarch64__) && !defined(__CUDACC__)
/// Constructors
inline Half::Half(float16_t value) : x(detail::fp16_to_bits(value)) {}
inline Half::operator float16_t() const {
  return detail::fp16_from_bits(x);
}
#else

inline C10_HOST_DEVICE Half::Half(float value)
    :
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
      x(__half_as_short(__float2half(value)))
#elif defined(__SYCL_DEVICE_ONLY__)
      x(c10::bit_cast<uint16_t>(sycl::half(value)))
#elif (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
    !defined(__APPLE__)
      x(at::vec::float2half_scalar(value))
#else
      x(detail::fp16_ieee_from_fp32_value(value))
#endif
{
}

/// Implicit conversions

inline C10_HOST_DEVICE Half::operator float() const {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
  return __half2float(*reinterpret_cast<const __half*>(&x));
#elif defined(__SYCL_DEVICE_ONLY__)
  return float(c10::bit_cast<sycl::half>(x));
#elif (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
    !defined(__APPLE__)
  return at::vec::half2float_scalar(x);
#elif defined(__aarch64__) && !defined(__CUDACC__)
  return detail::native_fp16_to_fp32_value(x);
#else
  return detail::fp16_ieee_to_fp32_value(x);
#endif
}

#endif /* !defined(__aarch64__) || defined(__CUDACC__) \
        */

// Interop with the CUDA/HIP __half type: both are 16-bit bit patterns, so the
// conversions are pure reinterpretations.
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_HOST_DEVICE Half::Half(const __half& value) {
  x = *reinterpret_cast<const unsigned short*>(&value);
}
inline C10_HOST_DEVICE Half::operator __half() const {
  return *reinterpret_cast<const __half*>(&x);
}
#endif

// Interop with sycl::half, likewise a bit-pattern reinterpretation.
#ifdef SYCL_LANGUAGE_VERSION
inline C10_HOST_DEVICE Half::Half(const sycl::half& value) {
  x = *reinterpret_cast<const unsigned short*>(&value);
}
inline C10_HOST_DEVICE Half::operator sycl::half() const {
  return *reinterpret_cast<const sycl::half*>(&x);
}
#endif

// CUDA intrinsics

#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || \
    (defined(__clang__) && defined(__CUDA__))
inline __device__ Half __ldg(const Half* ptr) {
  return __ldg(reinterpret_cast<const __half*>(ptr));
}
#endif

/// Arithmetic
///
/// Operands are promoted to float, the operation is done in float, and the
/// result converts back to Half through the implicit conversions above.

inline C10_HOST_DEVICE Half operator+(const Half& a, const Half& b) {
  return static_cast<float>(a) + static_cast<float>(b);
}

inline C10_HOST_DEVICE Half operator-(const Half& a, const Half& b) {
  return static_cast<float>(a) - static_cast<float>(b);
}

inline C10_HOST_DEVICE Half operator*(const Half& a, const Half& b) {
  return static_cast<float>(a) * static_cast<float>(b);
}

inline C10_HOST_DEVICE Half operator/(const Half& a, const Half& b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<float>(a) / static_cast<float>(b);
}

inline C10_HOST_DEVICE Half operator-(const Half& a) {
  // Negation can be done natively where hardware fp16 is available.
#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530) || \
    defined(__HIP_DEVICE_COMPILE__)
  return __hneg(a);
#elif defined(__SYCL_DEVICE_ONLY__)
  return -c10::bit_cast<sycl::half>(a);
#else
  return -static_cast<float>(a);
#endif
}

inline C10_HOST_DEVICE Half& operator+=(Half& a, const Half& b) {
  a = a + b;
  return a;
}

inline C10_HOST_DEVICE Half& operator-=(Half& a, const Half& b) {
  a = a - b;
  return a;
}

inline C10_HOST_DEVICE Half& operator*=(Half& a, const Half& b) {
  a = a * b;
  return a;
}

inline C10_HOST_DEVICE Half& operator/=(Half& a, const Half& b) {
  a = a / b;
  return a;
}

/// Arithmetic with floats
///
/// Note: these return float (not Half), so mixed Half/float expressions stay
/// in float precision.

inline C10_HOST_DEVICE float operator+(Half a, float b) {
  return static_cast<float>(a) + b;
}
inline C10_HOST_DEVICE float operator-(Half a, float b) {
  return static_cast<float>(a) - b;
}
inline C10_HOST_DEVICE float operator*(Half a, float b) {
  return static_cast<float>(a) * b;
}
inline C10_HOST_DEVICE float operator/(Half a, float b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<float>(a) / b;
}

inline C10_HOST_DEVICE float operator+(float a, Half b) {
  return a + static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator-(float a, Half b) {
  return a - static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator*(float a, Half b) {
  return a * static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator/(float a, Half b)
    __ubsan_ignore_float_divide_by_zero__ {
  return a / static_cast<float>(b);
}

inline C10_HOST_DEVICE float& operator+=(float& a, const Half& b) {
  return a += static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator-=(float& a, const Half& b) {
  return a -= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator*=(float& a, const Half& b) {
  return a *= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator/=(float& a, const Half& b) {
  return a /= static_cast<float>(b);
}

/// Arithmetic with doubles
///
/// Mixed Half/double expressions are computed in and return double.

inline C10_HOST_DEVICE double operator+(Half a, double b) {
  return static_cast<double>(a) + b;
}
inline C10_HOST_DEVICE double operator-(Half a, double b) {
  return static_cast<double>(a) - b;
}
inline C10_HOST_DEVICE double operator*(Half a, double b) {
  return static_cast<double>(a) * b;
}
inline C10_HOST_DEVICE double operator/(Half a, double b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<double>(a) / b;
}

inline C10_HOST_DEVICE double operator+(double a, Half b) {
  return a + static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator-(double a, Half b) {
  return a - static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator*(double a, Half b) {
  return a * static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator/(double a, Half b)
    __ubsan_ignore_float_divide_by_zero__ {
  return a / static_cast<double>(b);
}

/// Arithmetic with ints
///
/// The int operand is converted to Half first, so these return Half.

inline C10_HOST_DEVICE Half operator+(Half a, int b) {
  return a + static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator-(Half a, int b) {
  return a - static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator*(Half a, int b) {
  return a * static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator/(Half a, int b) {
  return a / static_cast<Half>(b);
}

inline C10_HOST_DEVICE Half operator+(int a, Half b) {
  return static_cast<Half>(a) + b;
}
inline C10_HOST_DEVICE Half operator-(int a, Half b) {
  return static_cast<Half>(a) - b;
}
inline C10_HOST_DEVICE Half operator*(int a, Half b) {
  return static_cast<Half>(a) * b;
}
inline C10_HOST_DEVICE Half operator/(int a, Half b) {
  return static_cast<Half>(a) / b;
}

//// Arithmetic with int64_t

inline C10_HOST_DEVICE Half operator+(Half a, int64_t b) {
  return a + static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator-(Half a, int64_t b) {
  return a - static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator*(Half a, int64_t b) {
  return a * static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator/(Half a, int64_t b) {
  return a / static_cast<Half>(b);
}

inline C10_HOST_DEVICE Half operator+(int64_t a, Half b) {
  return static_cast<Half>(a) + b;
}
inline C10_HOST_DEVICE Half operator-(int64_t a, Half b) {
  return static_cast<Half>(a) - b;
}
inline C10_HOST_DEVICE Half operator*(int64_t a, Half b) {
  return static_cast<Half>(a) * b;
}
inline C10_HOST_DEVICE Half operator/(int64_t a, Half b) {
  return static_cast<Half>(a) / b;
}

/// NOTE: we do not define comparisons directly and instead rely on the implicit
/// conversion from c10::Half to float.

} // namespace c10

namespace std {

// numeric_limits for the IEEE 754 binary16 layout used by c10::Half
// (1 sign bit, 5 exponent bits, 10 mantissa bits). The bit patterns below
// are constructed via the from_bits tag constructor.
template <>
class numeric_limits<c10::Half> {
 public:
  static constexpr bool is_specialized = true;
  static constexpr bool is_signed = true;
  static constexpr bool is_integer = false;
  static constexpr bool is_exact = false;
  static constexpr bool has_infinity = true;
  static constexpr bool has_quiet_NaN = true;
  static constexpr bool has_signaling_NaN = true;
  static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
  static constexpr auto has_denorm_loss =
      numeric_limits<float>::has_denorm_loss;
  static constexpr auto round_style = numeric_limits<float>::round_style;
  static constexpr bool is_iec559 = true;
  static constexpr bool is_bounded = true;
  static constexpr bool is_modulo = false;
  static constexpr int digits = 11;
  static constexpr int digits10 = 3;
  static constexpr int max_digits10 = 5;
  static constexpr int radix = 2;
  static constexpr int min_exponent = -13;
  static constexpr int min_exponent10 = -4;
  static constexpr int max_exponent = 16;
  static constexpr int max_exponent10 = 4;
  static constexpr auto traps = numeric_limits<float>::traps;
  static constexpr auto tinyness_before =
      numeric_limits<float>::tinyness_before;
  // 0x0400: smallest positive normal value (2^-14).
  static constexpr c10::Half min() {
    return c10::Half(0x0400, c10::Half::from_bits());
  }
  // 0xFBFF: most negative finite value (-max()).
  static constexpr c10::Half lowest() {
    return c10::Half(0xFBFF, c10::Half::from_bits());
  }
  // 0x7BFF: largest finite value (65504).
  static constexpr c10::Half max() {
    return c10::Half(0x7BFF, c10::Half::from_bits());
  }
  // 0x1400: difference between 1.0 and the next representable value (2^-10).
  static constexpr c10::Half epsilon() {
    return c10::Half(0x1400, c10::Half::from_bits());
  }
  // 0x3800: 0.5, the maximum rounding error for round-to-nearest.
  static constexpr c10::Half round_error() {
    return c10::Half(0x3800, c10::Half::from_bits());
  }
  // 0x7C00: exponent all ones, mantissa zero -> +infinity.
  static constexpr c10::Half infinity() {
    return c10::Half(0x7C00, c10::Half::from_bits());
  }
  // 0x7E00: exponent all ones, top mantissa bit set -> quiet NaN.
  static constexpr c10::Half quiet_NaN() {
    return c10::Half(0x7E00, c10::Half::from_bits());
  }
  // 0x7D00: exponent all ones, nonzero mantissa without the quiet bit.
  static constexpr c10::Half signaling_NaN() {
    return c10::Half(0x7D00, c10::Half::from_bits());
  }
  // 0x0001: smallest positive subnormal value (2^-24).
  static constexpr c10::Half denorm_min() {
    return c10::Half(0x0001, c10::Half::from_bits());
  }
};

} // namespace std

C10_CLANG_DIAGNOSTIC_POP()
videochat2/lib/python3.10/site-packages/torch/include/c10/util/MathConstants.h ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/macros/Macros.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>

C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif

namespace c10 {
// Mathematical constants, exposed both as constexpr functions
// (c10::detail::pi<T>() etc.) and as variable templates (c10::pi<T> etc.)
// that forward to them. The functions exist so they can be specialized per
// type; see the BFloat16 and Half specializations of pi below, which supply
// exact bit patterns instead of relying on a float conversion.
namespace detail {
// e: base of the natural logarithm.
template <typename T>
C10_HOST_DEVICE inline constexpr T e() {
  return static_cast<T>(2.718281828459045235360287471352662);
}

// euler: the Euler-Mascheroni constant (gamma) -- not e.
template <typename T>
C10_HOST_DEVICE inline constexpr T euler() {
  return static_cast<T>(0.577215664901532860606512090082402);
}

// 1/pi
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_1_pi() {
  return static_cast<T>(0.318309886183790671537767526745028);
}

// 1/sqrt(pi)
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_1_sqrt_pi() {
  return static_cast<T>(0.564189583547756286948079451560772);
}

// sqrt(2)/2, i.e. 1/sqrt(2)
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_sqrt_2() {
  return static_cast<T>(0.707106781186547524400844362104849);
}

// sqrt(3)/3, i.e. 1/sqrt(3)
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_sqrt_3() {
  return static_cast<T>(0.577350269189625764509148780501957);
}

// phi = (1 + sqrt(5)) / 2
template <typename T>
C10_HOST_DEVICE inline constexpr T golden_ratio() {
  return static_cast<T>(1.618033988749894848204586834365638);
}

// ln(10)
template <typename T>
C10_HOST_DEVICE inline constexpr T ln_10() {
  return static_cast<T>(2.302585092994045684017991454684364);
}

// ln(2)
template <typename T>
C10_HOST_DEVICE inline constexpr T ln_2() {
  return static_cast<T>(0.693147180559945309417232121458176);
}

// log10(e)
template <typename T>
C10_HOST_DEVICE inline constexpr T log_10_e() {
  return static_cast<T>(0.434294481903251827651128918916605);
}

// log2(e)
template <typename T>
C10_HOST_DEVICE inline constexpr T log_2_e() {
  return static_cast<T>(1.442695040888963407359924681001892);
}

// pi
template <typename T>
C10_HOST_DEVICE inline constexpr T pi() {
  return static_cast<T>(3.141592653589793238462643383279502);
}

// sqrt(2)
template <typename T>
C10_HOST_DEVICE inline constexpr T sqrt_2() {
  return static_cast<T>(1.414213562373095048801688724209698);
}

// sqrt(3)
template <typename T>
C10_HOST_DEVICE inline constexpr T sqrt_3() {
  return static_cast<T>(1.732050807568877293527446341505872);
}

template <>
C10_HOST_DEVICE inline constexpr BFloat16 pi<BFloat16>() {
  // According to
  // https://en.wikipedia.org/wiki/Bfloat16_floating-point_format#Special_values
  // pi is encoded as 4049
  return BFloat16(0x4049, BFloat16::from_bits());
}

template <>
C10_HOST_DEVICE inline constexpr Half pi<Half>() {
  // 0x4248 is the binary16 encoding of pi rounded to nearest.
  return Half(0x4248, Half::from_bits());
}
} // namespace detail

// Variable-template spellings of the constants above, e.g. c10::pi<double>.

template <typename T>
constexpr T e = c10::detail::e<T>();

template <typename T>
constexpr T euler = c10::detail::euler<T>();

template <typename T>
constexpr T frac_1_pi = c10::detail::frac_1_pi<T>();

template <typename T>
constexpr T frac_1_sqrt_pi = c10::detail::frac_1_sqrt_pi<T>();

template <typename T>
constexpr T frac_sqrt_2 = c10::detail::frac_sqrt_2<T>();

template <typename T>
constexpr T frac_sqrt_3 = c10::detail::frac_sqrt_3<T>();

template <typename T>
constexpr T golden_ratio = c10::detail::golden_ratio<T>();

template <typename T>
constexpr T ln_10 = c10::detail::ln_10<T>();

template <typename T>
constexpr T ln_2 = c10::detail::ln_2<T>();

template <typename T>
constexpr T log_10_e = c10::detail::log_10_e<T>();

template <typename T>
constexpr T log_2_e = c10::detail::log_2_e<T>();

template <typename T>
constexpr T pi = c10::detail::pi<T>();

template <typename T>
constexpr T sqrt_2 = c10::detail::sqrt_2<T>();

template <typename T>
constexpr T sqrt_3 = c10::detail::sqrt_3<T>();
} // namespace c10

C10_CLANG_DIAGNOSTIC_POP()
videochat2/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>

#include <memory>
#include <type_traits>
#include <utility>

namespace c10 {

/// MaybeOwnedTraits<T> describes how to borrow from T. Here is how we
/// can implement borrowing from an arbitrary type T using a raw
/// pointer to const:
template <typename T>
struct MaybeOwnedTraitsGenericImpl {
  using owned_type = T;
  using borrow_type = const T*;

  static borrow_type createBorrow(const owned_type& from) {
    return &from;
  }

  static void assignBorrow(borrow_type& lhs, borrow_type rhs) {
    lhs = rhs;
  }

  // Raw-pointer borrows own nothing, so destruction is a no-op.
  static void destroyBorrow(borrow_type& /*toDestroy*/) {}

  static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
    return *borrow;
  }

  static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
    return borrow;
  }

  static bool debugBorrowIsValid(const borrow_type& borrow) {
    return borrow != nullptr;
  }
};

/// It is possible to eliminate the extra layer of indirection for
/// borrows for some types that we control. For examples, see
/// intrusive_ptr.h and TensorBody.h.

template <typename T>
struct MaybeOwnedTraits;

// Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing
// MaybeOwned to be used for any type right away.
template <typename T>
struct MaybeOwnedTraits<std::shared_ptr<T>>
    : public MaybeOwnedTraitsGenericImpl<std::shared_ptr<T>> {};

/// A smart pointer around either a borrowed or owned T. When
/// constructed with borrowed(), the caller MUST ensure that the
/// borrowed-from argument outlives this MaybeOwned<T>. Compare to
/// Rust's std::borrow::Cow
/// (https://doc.rust-lang.org/std/borrow/enum.Cow.html), but note
/// that it is probably not suitable for general use because C++ has
/// no borrow checking. Included here to support
/// Tensor::expect_contiguous.
template <typename T>
class MaybeOwned final {
  using borrow_type = typename MaybeOwnedTraits<T>::borrow_type;
  using owned_type = typename MaybeOwnedTraits<T>::owned_type;

  // isBorrowed_ discriminates the union below: exactly one of borrow_ /
  // own_ is alive at any time, and every member function must keep the flag
  // and the active union member in sync (via placement new and explicit
  // destructor calls).
  bool isBorrowed_;
  union {
    borrow_type borrow_;
    owned_type own_;
  };

  /// Don't use this; use borrowed() instead.
  explicit MaybeOwned(const owned_type& t)
      : isBorrowed_(true), borrow_(MaybeOwnedTraits<T>::createBorrow(t)) {}

  /// Don't use this; use owned() instead.
  explicit MaybeOwned(T&& t) noexcept(std::is_nothrow_move_constructible_v<T>)
      : isBorrowed_(false), own_(std::move(t)) {}

  /// Don't use this; use owned() instead.
  template <class... Args>
  explicit MaybeOwned(std::in_place_t, Args&&... args)
      : isBorrowed_(false), own_(std::forward<Args>(args)...) {}

 public:
  // Default state is an empty borrow.
  explicit MaybeOwned() : isBorrowed_(true), borrow_() {}

  // Copying a borrow yields another borrow of the original, as with a
  // T*. Copying an owned T yields another owned T for safety: no
  // chains of borrowing by default! (Note you could get that behavior
  // with MaybeOwned<T>::borrowed(*rhs) if you wanted it.)
  MaybeOwned(const MaybeOwned& rhs) : isBorrowed_(rhs.isBorrowed_) {
    if (C10_LIKELY(rhs.isBorrowed_)) {
      MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
    } else {
      // Activate the own_ member of the union with placement new.
      new (&own_) T(rhs.own_);
    }
  }

  MaybeOwned& operator=(const MaybeOwned& rhs) {
    if (this == &rhs) {
      return *this;
    }
    // Four cases, keyed on (our state, rhs state); whenever the active union
    // member changes, the old one is destroyed before the new one is created.
    if (C10_UNLIKELY(!isBorrowed_)) {
      if (rhs.isBorrowed_) {
        own_.~T();
        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
        isBorrowed_ = true;
      } else {
        own_ = rhs.own_;
      }
    } else {
      if (C10_LIKELY(rhs.isBorrowed_)) {
        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
      } else {
        MaybeOwnedTraits<T>::destroyBorrow(borrow_);
        new (&own_) T(rhs.own_);
        isBorrowed_ = false;
      }
    }
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_);
    return *this;
  }

  // Moving copies the borrow (a borrow is as cheap to copy as to move) or
  // moves the owned T; rhs keeps its borrowed/owned state.
  MaybeOwned(MaybeOwned&& rhs) noexcept(
      // NOLINTNEXTLINE(*-noexcept-move-*)
      std::is_nothrow_move_constructible_v<T> &&
      std::is_nothrow_move_assignable_v<borrow_type>)
      : isBorrowed_(rhs.isBorrowed_) {
    if (C10_LIKELY(rhs.isBorrowed_)) {
      MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
    } else {
      new (&own_) T(std::move(rhs.own_));
    }
  }

  MaybeOwned& operator=(MaybeOwned&& rhs) noexcept(
      std::is_nothrow_move_assignable_v<T> &&
      std::is_nothrow_move_assignable_v<borrow_type> &&
      std::is_nothrow_move_constructible_v<T> &&
      // NOLINTNEXTLINE(*-noexcept-move-*)
      std::is_nothrow_destructible_v<T> &&
      std::is_nothrow_destructible_v<borrow_type>) {
    if (this == &rhs) {
      return *this;
    }
    // Same four-case structure as copy assignment, moving instead of copying
    // when rhs owns its value.
    if (C10_UNLIKELY(!isBorrowed_)) {
      if (rhs.isBorrowed_) {
        own_.~T();
        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
        isBorrowed_ = true;
      } else {
        own_ = std::move(rhs.own_);
      }
    } else {
      if (C10_LIKELY(rhs.isBorrowed_)) {
        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
      } else {
        MaybeOwnedTraits<T>::destroyBorrow(borrow_);
        new (&own_) T(std::move(rhs.own_));
        isBorrowed_ = false;
      }
    }
    return *this;
  }

  static MaybeOwned borrowed(const T& t) {
    return MaybeOwned(t);
  }

  static MaybeOwned owned(T&& t) noexcept(
      std::is_nothrow_move_constructible_v<T>) {
    return MaybeOwned(std::move(t));
  }

  template <class... Args>
  static MaybeOwned owned(std::in_place_t, Args&&... args) {
    return MaybeOwned(std::in_place, std::forward<Args>(args)...);
  }

  // Destroy whichever union member is active.
  ~MaybeOwned() noexcept(
      // NOLINTNEXTLINE(*-noexcept-destructor)
      std::is_nothrow_destructible_v<T> &&
      std::is_nothrow_destructible_v<borrow_type>) {
    if (C10_UNLIKELY(!isBorrowed_)) {
      own_.~T();
    } else {
      MaybeOwnedTraits<T>::destroyBorrow(borrow_);
    }
  }

  // This is an implementation detail! You should know what you're doing
  // if you are testing this. If you just want to guarantee ownership move
  // this into a T
  bool unsafeIsBorrowed() const {
    return isBorrowed_;
  }

  const T& operator*() const& {
    if (isBorrowed_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
          MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
    }
    return C10_LIKELY(isBorrowed_)
        ? MaybeOwnedTraits<T>::referenceFromBorrow(borrow_)
        : own_;
  }

  const T* operator->() const {
    if (isBorrowed_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
          MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
    }
    return C10_LIKELY(isBorrowed_)
        ? MaybeOwnedTraits<T>::pointerFromBorrow(borrow_)
        : &own_;
  }

  // If borrowed, copy the underlying T. If owned, move from
  // it. borrowed/owned state remains the same, and either we
  // reference the same borrow as before or we are an owned moved-from
  // T.
  T operator*() && {
    if (isBorrowed_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
          MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
      return MaybeOwnedTraits<T>::referenceFromBorrow(borrow_);
    } else {
      return std::move(own_);
    }
  }
};

} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Metaprogramming.h ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/TypeList.h>
4
+ #include <type_traits>
5
+
6
+ namespace c10::guts {
7
+
8
+ /**
9
+ * Access information about result type or arguments from a function type.
10
+ * Example:
11
+ * using A = function_traits<int (float, double)>::return_type // A == int
12
+ * using A = function_traits<int (float, double)>::parameter_types::tuple_type
13
+ * // A == tuple<float, double>
14
+ */
15
+ template <class Func>
16
+ struct function_traits {
17
+ static_assert(
18
+ !std::is_same_v<Func, Func>,
19
+ "In function_traits<Func>, Func must be a plain function type.");
20
+ };
21
+ template <class Result, class... Args>
22
+ struct function_traits<Result(Args...)> {
23
+ using func_type = Result(Args...);
24
+ using return_type = Result;
25
+ using parameter_types = typelist::typelist<Args...>;
26
+ static constexpr auto number_of_parameters = sizeof...(Args);
27
+ };
28
+
29
+ /**
30
+ * infer_function_traits: creates a `function_traits` type for a simple
31
+ * function (pointer) or functor (lambda/struct). Currently does not support
32
+ * class methods.
33
+ */
34
+
35
+ template <typename Functor>
36
+ struct infer_function_traits {
37
+ using type = function_traits<
38
+ c10::guts::detail::strip_class_t<decltype(&Functor::operator())>>;
39
+ };
40
+
41
+ template <typename Result, typename... Args>
42
+ struct infer_function_traits<Result (*)(Args...)> {
43
+ using type = function_traits<Result(Args...)>;
44
+ };
45
+
46
+ template <typename Result, typename... Args>
47
+ struct infer_function_traits<Result(Args...)> {
48
+ using type = function_traits<Result(Args...)>;
49
+ };
50
+
51
+ template <typename T>
52
+ using infer_function_traits_t = typename infer_function_traits<T>::type;
53
+
54
+ /**
55
+ * make_function_traits: creates a `function_traits` type given a Return type
56
+ * and a typelist of Argument types
57
+ *
58
+ * Example:
59
+ * bool f(int, int);
60
+ *
61
+ * infer_function_traits_t<f> == make_function_traits_t<bool,
62
+ * typelist::typelist<int, int>>
63
+ */
64
+ template <typename Result, typename ArgList>
65
+ struct make_function_traits {
66
+ static_assert(
67
+ false_t<ArgList>::value,
68
+ "In guts::make_function_traits<Result, TypeList>, the ArgList argument must be typelist<...>.");
69
+ };
70
+
71
+ template <typename Result, typename... Args>
72
+ struct make_function_traits<Result, typelist::typelist<Args...>> {
73
+ using type = function_traits<Result(Args...)>;
74
+ };
75
+
76
+ template <typename Result, typename ArgList>
77
+ using make_function_traits_t =
78
+ typename make_function_traits<Result, ArgList>::type;
79
+
80
+ /**
81
+ * make_offset_index_sequence<Start, N>
82
+ * Like make_index_sequence<N>, but starting from Start instead of 0.
83
+ *
84
+ * Example:
85
+ * make_offset_index_sequence<10, 3> == std::index_sequence<10, 11, 12>
86
+ */
87
+ template <size_t Start, size_t N, size_t... Is>
88
+ struct make_offset_index_sequence_impl
89
+ : make_offset_index_sequence_impl<Start, N - 1, Start + N - 1, Is...> {
90
+ static_assert(
91
+ static_cast<int>(Start) >= 0,
92
+ "make_offset_index_sequence: Start < 0");
93
+ static_assert(static_cast<int>(N) >= 0, "make_offset_index_sequence: N < 0");
94
+ };
95
+
96
+ template <size_t Start, size_t... Is>
97
+ struct make_offset_index_sequence_impl<Start, 0, Is...> {
98
+ typedef std::index_sequence<Is...> type;
99
+ };
100
+
101
+ template <size_t Start, size_t N>
102
+ using make_offset_index_sequence =
103
+ typename make_offset_index_sequence_impl<Start, N>::type;
104
+
105
+ /**
106
+ * Use tuple_elements to extract a position-indexed subset of elements
107
+ * from the argument tuple into a result tuple.
108
+ *
109
+ * Example:
110
+ * std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
111
+ * std::tuple<int, double> result = tuple_elements(t, std::index_sequence<0,
112
+ * 2>());
113
+ */
114
+ template <class Tuple, size_t... Is>
115
+ constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...>) {
116
+ return std::tuple<std::tuple_element_t<Is, Tuple>...>(std::get<Is>(t)...);
117
+ }
118
+
119
+ /**
120
+ * Use tuple_take to extract the first or last n elements from the argument
121
+ * tuple into a result tuple.
122
+ *
123
+ * Example:
124
+ * std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
125
+ * std::tuple<int, const char*> first_two = tuple_take<decltype(t), 2>(t);
126
+ * std::tuple<const char*, double> last_two = tuple_take<decltype(t), -2>(t);
127
+ */
128
+ template <class Tuple, int N, class Enable = void>
129
+ struct TupleTake {};
130
+
131
+ template <class Tuple, int N>
132
+ struct TupleTake<Tuple, N, std::enable_if_t<N >= 0, void>> {
133
+ static auto call(Tuple t) {
134
+ constexpr size_t size = std::tuple_size<Tuple>();
135
+ static_assert(N <= size, "tuple_take: N > size");
136
+ return tuple_elements(t, std::make_index_sequence<N>{});
137
+ }
138
+ };
139
+
140
+ template <class Tuple, int N>
141
+ struct TupleTake < Tuple,
142
+ N, std::enable_if_t<N<0, void>> {
143
+ static auto call(Tuple t) {
144
+ constexpr size_t size = std::tuple_size<Tuple>();
145
+ static_assert(-N <= size, "tuple_take: -N > size");
146
+ return tuple_elements(t, make_offset_index_sequence<size + N, -N>{});
147
+ }
148
+ };
149
+
150
+ template <class Tuple, int N>
151
+ auto tuple_take(Tuple t) {
152
+ return TupleTake<Tuple, N>::call(t);
153
+ }
154
+
155
+ /**
156
+ * Use tuple_slice to extract a contiguous subtuple from the argument.
157
+ *
158
+ * Example:
159
+ * std::tuple<int, const char*, double, bool> t = std::make_tuple(0,
160
+ * "HEY", 2.0, false); std::tuple<int, const char*> middle_two =
161
+ * tuple_slice<decltype(t), 1, 2>(t);
162
+ */
163
+ template <class Tuple, size_t Start, size_t N>
164
+ constexpr auto tuple_slice(Tuple t) {
165
+ constexpr size_t size = std::tuple_size<Tuple>();
166
+ static_assert(Start + N <= size, "tuple_slice: Start + N > size");
167
+ return tuple_elements(t, make_offset_index_sequence<Start, N>{});
168
+ }
169
+
170
+ /**
171
+ * Use tuple_map to run a mapping function over a tuple to get a new tuple.
172
+ *
173
+ * Example 1:
174
+ * auto result = tuple_map(std::tuple<int32_t, int32_t, int32_t>(3, 4, 5), []
175
+ * (int32_t a) -> int16_t {return a+1;});
176
+ * // result == std::tuple<int16_t, int16_t, int16_t>(4, 5, 6)
177
+ *
178
+ * Example 2:
179
+ * struct Mapper {
180
+ * std::string operator()(int32_t a) const {
181
+ * return std::to_string(a);
182
+ * }
183
+ * int64_t operator()(const std::string& a) const {
184
+ * return atoi(a.c_str());
185
+ * }
186
+ * };
187
+ * auto result = tuple_map(std::tuple<int32_t, std::string>(3, "4"),
188
+ * Mapper());
189
+ * // result == std::tuple<std::string, int64_t>("3", 4)
190
+ *
191
+ * Example 3:
192
+ * struct A final {
193
+ * int32_t func() {
194
+ * return 5;
195
+ * }
196
+ * };
197
+ * struct B final {
198
+ * std::string func() {
199
+ * return "5";
200
+ * }
201
+ * };
202
+ * auto result = tuple_map(std::make_tuple(A(), B()), [] (auto a) { return
203
+ * a.func(); });
204
+ * // result == std::tuple<int32_t, std::string>(5, "5");
205
+ */
206
+ namespace detail {
207
+ template <class Mapper, class... Args, size_t... Indices>
208
+ auto tuple_map(
209
+ // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
210
+ std::tuple<Args...>&& tuple,
211
+ const Mapper& mapper,
212
+ std::index_sequence<Indices...>) {
213
+ return std::tuple<decltype(mapper(std::forward<Args>(std::get<Indices>(
214
+ tuple))))...>(mapper(std::forward<Args>(std::get<Indices>(tuple)))...);
215
+ }
216
+ } // namespace detail
217
+
218
+ template <class Mapper, class... Args>
219
+ auto tuple_map(std::tuple<Args...>&& tuple, const Mapper& mapper) {
220
+ return detail::tuple_map(
221
+ std::move(tuple), mapper, std::index_sequence_for<Args...>());
222
+ }
223
+
224
+ } // namespace c10::guts
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Optional.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef C10_UTIL_OPTIONAL_H_
2
+ #define C10_UTIL_OPTIONAL_H_
3
+
4
+ #include <optional>
5
+ #include <type_traits>
6
+
7
+ // Macros.h is not needed, but it does namespace shenanigans that lots
8
+ // of downstream code seems to rely on. Feel free to remove it and fix
9
+ // up builds.
10
+
11
+ namespace c10 {
12
+ // NOLINTNEXTLINE(misc-unused-using-decls)
13
+ using std::bad_optional_access;
14
+ // NOLINTNEXTLINE(misc-unused-using-decls)
15
+ using std::make_optional;
16
+ // NOLINTNEXTLINE(misc-unused-using-decls)
17
+ using std::nullopt;
18
+ // NOLINTNEXTLINE(misc-unused-using-decls)
19
+ using std::nullopt_t;
20
+ // NOLINTNEXTLINE(misc-unused-using-decls)
21
+ using std::optional;
22
+
23
+ namespace detail_ {
24
+ // the call to convert<A>(b) has return type A and converts b to type A iff b
25
+ // decltype(b) is implicitly convertible to A
26
+ template <class U>
27
+ constexpr U convert(U v) {
28
+ return v;
29
+ }
30
+ } // namespace detail_
31
+ template <class T, class F>
32
+ constexpr T value_or_else(const std::optional<T>& v, F&& func) {
33
+ static_assert(
34
+ std::is_convertible_v<typename std::invoke_result_t<F>, T>,
35
+ "func parameters must be a callable that returns a type convertible to the value stored in the optional");
36
+ return v.has_value() ? *v : detail_::convert<T>(std::forward<F>(func)());
37
+ }
38
+
39
+ template <class T, class F>
40
+ constexpr T value_or_else(std::optional<T>&& v, F&& func) {
41
+ static_assert(
42
+ std::is_convertible_v<typename std::invoke_result_t<F>, T>,
43
+ "func parameters must be a callable that returns a type convertible to the value stored in the optional");
44
+ return v.has_value() ? constexpr_move(std::move(v).contained_val())
45
+ : detail_::convert<T>(std::forward<F>(func)());
46
+ }
47
+ } // namespace c10
48
+ #endif // C10_UTIL_OPTIONAL_H_
videochat2/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file defines OptionalArrayRef<T>, a class that has almost the same
2
+ // exact functionality as std::optional<ArrayRef<T>>, except that its
3
+ // converting constructor fixes a dangling pointer issue.
4
+ //
5
+ // The implicit converting constructor of both std::optional<ArrayRef<T>> and
6
+ // std::optional<ArrayRef<T>> can cause the underlying ArrayRef<T> to store
7
+ // a dangling pointer. OptionalArrayRef<T> prevents this by wrapping
8
+ // a std::optional<ArrayRef<T>> and fixing the constructor implementation.
9
+ //
10
+ // See https://github.com/pytorch/pytorch/issues/63645 for more on this.
11
+
12
+ #pragma once
13
+
14
+ #include <c10/util/ArrayRef.h>
15
+ #include <cstdint>
16
+ #include <initializer_list>
17
+ #include <optional>
18
+ #include <type_traits>
19
+ #include <utility>
20
+
21
+ namespace c10 {
22
+
23
+ template <typename T>
24
+ class OptionalArrayRef final {
25
+ public:
26
+ // Constructors
27
+
28
+ constexpr OptionalArrayRef() noexcept = default;
29
+
30
+ constexpr OptionalArrayRef(std::nullopt_t) noexcept {}
31
+
32
+ OptionalArrayRef(const OptionalArrayRef& other) = default;
33
+
34
+ OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;
35
+
36
+ constexpr OptionalArrayRef(const std::optional<ArrayRef<T>>& other) noexcept
37
+ : wrapped_opt_array_ref(other) {}
38
+
39
+ constexpr OptionalArrayRef(std::optional<ArrayRef<T>>&& other) noexcept
40
+ : wrapped_opt_array_ref(std::move(other)) {}
41
+
42
+ constexpr OptionalArrayRef(const T& value) noexcept
43
+ : wrapped_opt_array_ref(value) {}
44
+
45
+ template <
46
+ typename U = ArrayRef<T>,
47
+ std::enable_if_t<
48
+ !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
49
+ !std::is_same_v<std::decay_t<U>, std::in_place_t> &&
50
+ std::is_constructible_v<ArrayRef<T>, U&&> &&
51
+ std::is_convertible_v<U&&, ArrayRef<T>> &&
52
+ !std::is_convertible_v<U&&, T>,
53
+ bool> = false>
54
+ constexpr OptionalArrayRef(U&& value) noexcept(
55
+ std::is_nothrow_constructible_v<ArrayRef<T>, U&&>)
56
+ : wrapped_opt_array_ref(std::forward<U>(value)) {}
57
+
58
+ template <
59
+ typename U = ArrayRef<T>,
60
+ std::enable_if_t<
61
+ !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
62
+ !std::is_same_v<std::decay_t<U>, std::in_place_t> &&
63
+ std::is_constructible_v<ArrayRef<T>, U&&> &&
64
+ !std::is_convertible_v<U&&, ArrayRef<T>>,
65
+ bool> = false>
66
+ constexpr explicit OptionalArrayRef(U&& value) noexcept(
67
+ std::is_nothrow_constructible_v<ArrayRef<T>, U&&>)
68
+ : wrapped_opt_array_ref(std::forward<U>(value)) {}
69
+
70
+ template <typename... Args>
71
+ constexpr explicit OptionalArrayRef(
72
+ std::in_place_t ip,
73
+ Args&&... args) noexcept
74
+ : wrapped_opt_array_ref(ip, std::forward<Args>(args)...) {}
75
+
76
+ template <typename U, typename... Args>
77
+ constexpr explicit OptionalArrayRef(
78
+ std::in_place_t ip,
79
+ std::initializer_list<U> il,
80
+ Args&&... args)
81
+ : wrapped_opt_array_ref(ip, il, std::forward<Args>(args)...) {}
82
+
83
+ constexpr OptionalArrayRef(const std::initializer_list<T>& Vec)
84
+ : wrapped_opt_array_ref(ArrayRef<T>(Vec)) {}
85
+
86
+ // Destructor
87
+
88
+ ~OptionalArrayRef() = default;
89
+
90
+ // Assignment
91
+
92
+ constexpr OptionalArrayRef& operator=(std::nullopt_t) noexcept {
93
+ wrapped_opt_array_ref = std::nullopt;
94
+ return *this;
95
+ }
96
+
97
+ OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
98
+
99
+ OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
100
+
101
+ constexpr OptionalArrayRef& operator=(
102
+ const std::optional<ArrayRef<T>>& other) noexcept {
103
+ wrapped_opt_array_ref = other;
104
+ return *this;
105
+ }
106
+
107
+ constexpr OptionalArrayRef& operator=(
108
+ std::optional<ArrayRef<T>>&& other) noexcept {
109
+ wrapped_opt_array_ref = std::move(other);
110
+ return *this;
111
+ }
112
+
113
+ template <
114
+ typename U = ArrayRef<T>,
115
+ typename = std::enable_if_t<
116
+ !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
117
+ std::is_constructible_v<ArrayRef<T>, U&&> &&
118
+ std::is_assignable_v<ArrayRef<T>&, U&&>>>
119
+ constexpr OptionalArrayRef& operator=(U&& value) noexcept(
120
+ std::is_nothrow_constructible_v<ArrayRef<T>, U&&> &&
121
+ std::is_nothrow_assignable_v<ArrayRef<T>&, U&&>) {
122
+ wrapped_opt_array_ref = std::forward<U>(value);
123
+ return *this;
124
+ }
125
+
126
+ // Observers
127
+
128
+ constexpr ArrayRef<T>* operator->() noexcept {
129
+ return &wrapped_opt_array_ref.value();
130
+ }
131
+
132
+ constexpr const ArrayRef<T>* operator->() const noexcept {
133
+ return &wrapped_opt_array_ref.value();
134
+ }
135
+
136
+ constexpr ArrayRef<T>& operator*() & noexcept {
137
+ return wrapped_opt_array_ref.value();
138
+ }
139
+
140
+ constexpr const ArrayRef<T>& operator*() const& noexcept {
141
+ return wrapped_opt_array_ref.value();
142
+ }
143
+
144
+ constexpr ArrayRef<T>&& operator*() && noexcept {
145
+ return std::move(wrapped_opt_array_ref.value());
146
+ }
147
+
148
+ constexpr const ArrayRef<T>&& operator*() const&& noexcept {
149
+ return std::move(wrapped_opt_array_ref.value());
150
+ }
151
+
152
+ constexpr explicit operator bool() const noexcept {
153
+ return wrapped_opt_array_ref.has_value();
154
+ }
155
+
156
+ constexpr bool has_value() const noexcept {
157
+ return wrapped_opt_array_ref.has_value();
158
+ }
159
+
160
+ constexpr ArrayRef<T>& value() & {
161
+ return wrapped_opt_array_ref.value();
162
+ }
163
+
164
+ constexpr const ArrayRef<T>& value() const& {
165
+ return wrapped_opt_array_ref.value();
166
+ }
167
+
168
+ constexpr ArrayRef<T>&& value() && {
169
+ return std::move(wrapped_opt_array_ref.value());
170
+ }
171
+
172
+ constexpr const ArrayRef<T>&& value() const&& {
173
+ return std::move(wrapped_opt_array_ref.value());
174
+ }
175
+
176
+ template <typename U>
177
+ constexpr std::
178
+ enable_if_t<std::is_convertible_v<U&&, ArrayRef<T>>, ArrayRef<T>>
179
+ value_or(U&& default_value) const& {
180
+ return wrapped_opt_array_ref.value_or(std::forward<U>(default_value));
181
+ }
182
+
183
+ template <typename U>
184
+ constexpr std::
185
+ enable_if_t<std::is_convertible_v<U&&, ArrayRef<T>>, ArrayRef<T>>
186
+ value_or(U&& default_value) && {
187
+ return wrapped_opt_array_ref.value_or(std::forward<U>(default_value));
188
+ }
189
+
190
+ // Modifiers
191
+
192
+ constexpr void swap(OptionalArrayRef& other) noexcept {
193
+ std::swap(wrapped_opt_array_ref, other.wrapped_opt_array_ref);
194
+ }
195
+
196
+ constexpr void reset() noexcept {
197
+ wrapped_opt_array_ref.reset();
198
+ }
199
+
200
+ template <typename... Args>
201
+ constexpr std::
202
+ enable_if_t<std::is_constructible_v<ArrayRef<T>, Args&&...>, ArrayRef<T>&>
203
+ emplace(Args&&... args) noexcept(
204
+ std::is_nothrow_constructible_v<ArrayRef<T>, Args&&...>) {
205
+ return wrapped_opt_array_ref.emplace(std::forward<Args>(args)...);
206
+ }
207
+
208
+ template <typename U, typename... Args>
209
+ constexpr ArrayRef<T>& emplace(
210
+ std::initializer_list<U> il,
211
+ Args&&... args) noexcept {
212
+ return wrapped_opt_array_ref.emplace(il, std::forward<Args>(args)...);
213
+ }
214
+
215
+ private:
216
+ std::optional<ArrayRef<T>> wrapped_opt_array_ref;
217
+ };
218
+
219
+ using OptionalIntArrayRef = OptionalArrayRef<int64_t>;
220
+
221
+ inline bool operator==(
222
+ const OptionalIntArrayRef& a1,
223
+ const IntArrayRef& other) {
224
+ if (!a1.has_value()) {
225
+ return false;
226
+ }
227
+ return a1.value() == other;
228
+ }
229
+
230
+ inline bool operator==(
231
+ const c10::IntArrayRef& a1,
232
+ const c10::OptionalIntArrayRef& a2) {
233
+ return a2 == a1;
234
+ }
235
+
236
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/util/SmallVector.h ADDED
@@ -0,0 +1,1467 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ //
7
+ //===----------------------------------------------------------------------===//
8
+ //
9
+ // This file defines the SmallVector class.
10
+ //
11
+ //===----------------------------------------------------------------------===//
12
+
13
+ // ATen: modified from llvm::SmallVector.
14
+ // used std::is_trivially_{copy,move}_constructible
15
+ // replaced iterator_range constructor with inline Container&& constructor
16
+ // replaced LLVM_NODISCARD, LLVM_LIKELY, and LLVM_UNLIKELY with c10 equivalents
17
+ // removed LLVM_GSL_OWNER
18
+ // added SmallVector::at
19
+ // added operator<< for std::ostream
20
+ // added C10_API to export SmallVectorBase
21
+
22
+ #pragma once
23
+
24
+ #include <c10/macros/Macros.h>
25
+ #include <c10/util/AlignOf.h>
26
+
27
+ #include <algorithm>
28
+ #include <cassert>
29
+ #include <cstddef>
30
+ #include <cstdlib>
31
+ #include <cstring>
32
+ #include <functional>
33
+ #include <initializer_list>
34
+ #include <iterator>
35
+ #include <limits>
36
+ #include <memory>
37
+ #include <ostream>
38
+ #include <type_traits>
39
+ #include <utility>
40
+
41
+ namespace c10 {
42
+
43
+ /// This is all the stuff common to all SmallVectors.
44
+ ///
45
+ /// The template parameter specifies the type which should be used to hold the
46
+ /// Size and Capacity of the SmallVector, so it can be adjusted.
47
+ /// Using 32 bit size is desirable to shrink the size of the SmallVector.
48
+ /// Using 64 bit size is desirable for cases like SmallVector<char>, where a
49
+ /// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
50
+ /// buffering bitcode output - which can exceed 4GB.
51
+ template <class Size_T>
52
+ class C10_API SmallVectorBase {
53
+ protected:
54
+ void* BeginX;
55
+ Size_T Size = 0, Capacity;
56
+
57
+ /// The maximum value of the Size_T used.
58
+ static constexpr size_t SizeTypeMax() {
59
+ return std::numeric_limits<Size_T>::max();
60
+ }
61
+
62
+ SmallVectorBase(void* FirstEl, size_t TotalCapacity)
63
+ : BeginX(FirstEl), Capacity(TotalCapacity) {}
64
+
65
+ /// This is a helper for \a grow() that's out of line to reduce code
66
+ /// duplication. This function will report a fatal error if it can't grow at
67
+ /// least to \p MinSize.
68
+ void* mallocForGrow(size_t MinSize, size_t TSize, size_t& NewCapacity);
69
+
70
+ /// This is an implementation of the grow() method which only works
71
+ /// on POD-like data types and is out of line to reduce code duplication.
72
+ /// This function will report a fatal error if it cannot increase capacity.
73
+ void grow_pod(const void* FirstEl, size_t MinSize, size_t TSize);
74
+
75
+ public:
76
+ SmallVectorBase() = delete;
77
+ size_t size() const {
78
+ return Size;
79
+ }
80
+ size_t capacity() const {
81
+ return Capacity;
82
+ }
83
+
84
+ C10_NODISCARD bool empty() const {
85
+ return !Size;
86
+ }
87
+
88
+ /// Set the array size to \p N, which the current array must have enough
89
+ /// capacity for.
90
+ ///
91
+ /// This does not construct or destroy any elements in the vector.
92
+ ///
93
+ /// Clients can use this in conjunction with capacity() to write past the end
94
+ /// of the buffer when they know that more elements are available, and only
95
+ /// update the size later. This avoids the cost of value initializing elements
96
+ /// which will only be overwritten.
97
+ void set_size(size_t N) {
98
+ assert(N <= capacity());
99
+ Size = N;
100
+ }
101
+ };
102
+
103
+ template <class T>
104
+ using SmallVectorSizeType =
105
+ std::conditional_t<sizeof(T) < 4 && sizeof(void*) >= 8, uint64_t, uint32_t>;
106
+
107
+ /// Figure out the offset of the first element.
108
+ template <class T, typename = void>
109
+ struct SmallVectorAlignmentAndSize {
110
+ // NOLINTNEXTLINE(*c-arrays*)
111
+ alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
112
+ SmallVectorBase<SmallVectorSizeType<T>>)];
113
+ // NOLINTNEXTLINE(*c-arrays*)
114
+ alignas(T) char FirstEl[sizeof(T)];
115
+ };
116
+
117
+ /// This is the part of SmallVectorTemplateBase which does not depend on whether
118
+ /// the type T is a POD. The extra dummy template argument is used by ArrayRef
119
+ /// to avoid unnecessarily requiring T to be complete.
120
+ template <typename T, typename = void>
121
+ class SmallVectorTemplateCommon
122
+ : public SmallVectorBase<SmallVectorSizeType<T>> {
123
+ using Base = SmallVectorBase<SmallVectorSizeType<T>>;
124
+
125
+ /// Find the address of the first element. For this pointer math to be valid
126
+ /// with small-size of 0 for T with lots of alignment, it's important that
127
+ /// SmallVectorStorage is properly-aligned even for small-size of 0.
128
+ void* getFirstEl() const {
129
+ return const_cast<void*>(reinterpret_cast<const void*>(
130
+ reinterpret_cast<const char*>(this) +
131
+ offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
132
+ }
133
+ // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
134
+
135
+ protected:
136
+ SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
137
+
138
+ void grow_pod(size_t MinSize, size_t TSize) {
139
+ Base::grow_pod(getFirstEl(), MinSize, TSize);
140
+ }
141
+
142
+ /// Return true if this is a smallvector which has not had dynamic
143
+ /// memory allocated for it.
144
+ bool isSmall() const {
145
+ return this->BeginX == getFirstEl();
146
+ }
147
+
148
+ /// Put this vector in a state of being small.
149
+ void resetToSmall() {
150
+ this->BeginX = getFirstEl();
151
+ this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
152
+ }
153
+
154
+ /// Return true if V is an internal reference to the given range.
155
+ bool isReferenceToRange(const void* V, const void* First, const void* Last)
156
+ const {
157
+ // Use std::less to avoid UB.
158
+ std::less<> LessThan;
159
+ return !LessThan(V, First) && LessThan(V, Last);
160
+ }
161
+
162
+ /// Return true if V is an internal reference to this vector.
163
+ bool isReferenceToStorage(const void* V) const {
164
+ return isReferenceToRange(V, this->begin(), this->end());
165
+ }
166
+
167
+ /// Return true if First and Last form a valid (possibly empty) range in this
168
+ /// vector's storage.
169
+ bool isRangeInStorage(const void* First, const void* Last) const {
170
+ // Use std::less to avoid UB.
171
+ std::less<> LessThan;
172
+ return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
173
+ !LessThan(this->end(), Last);
174
+ }
175
+
176
+ /// Return true unless Elt will be invalidated by resizing the vector to
177
+ /// NewSize.
178
+ bool isSafeToReferenceAfterResize(const void* Elt, size_t NewSize) {
179
+ // Past the end.
180
+ if (C10_LIKELY(!isReferenceToStorage(Elt)))
181
+ return true;
182
+
183
+ // Return false if Elt will be destroyed by shrinking.
184
+ if (NewSize <= this->size())
185
+ return Elt < this->begin() + NewSize;
186
+
187
+ // Return false if we need to grow.
188
+ return NewSize <= this->capacity();
189
+ }
190
+
191
+ /// Check whether Elt will be invalidated by resizing the vector to NewSize.
192
+ void assertSafeToReferenceAfterResize(const void* Elt, size_t NewSize) {
193
+ (void)Elt; // Suppress unused variable warning
194
+ (void)NewSize; // Suppress unused variable warning
195
+ assert(
196
+ isSafeToReferenceAfterResize(Elt, NewSize) &&
197
+ "Attempting to reference an element of the vector in an operation "
198
+ "that invalidates it");
199
+ }
200
+
201
+ /// Check whether Elt will be invalidated by increasing the size of the
202
+ /// vector by N.
203
+ void assertSafeToAdd(const void* Elt, size_t N = 1) {
204
+ this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
205
+ }
206
+
207
+ /// Check whether any part of the range will be invalidated by clearing.
208
+ void assertSafeToReferenceAfterClear(const T* From, const T* To) {
209
+ if (From == To)
210
+ return;
211
+ this->assertSafeToReferenceAfterResize(From, 0);
212
+ this->assertSafeToReferenceAfterResize(To - 1, 0);
213
+ }
214
+ template <
215
+ class ItTy,
216
+ std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
217
+ false>
218
+ void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
219
+
220
+ /// Check whether any part of the range will be invalidated by growing.
221
+ void assertSafeToAddRange(const T* From, const T* To) {
222
+ if (From == To)
223
+ return;
224
+ this->assertSafeToAdd(From, To - From);
225
+ this->assertSafeToAdd(To - 1, To - From);
226
+ }
227
+ template <
228
+ class ItTy,
229
+ std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
230
+ false>
231
+ void assertSafeToAddRange(ItTy, ItTy) {}
232
+
233
+ /// Reserve enough space to add one element, and return the updated element
234
+ /// pointer in case it was a reference to the storage.
235
+ template <class U>
236
+ static const T* reserveForParamAndGetAddressImpl(
237
+ U* This,
238
+ const T& Elt,
239
+ size_t N) {
240
+ size_t NewSize = This->size() + N;
241
+ if (C10_LIKELY(NewSize <= This->capacity()))
242
+ return &Elt;
243
+
244
+ bool ReferencesStorage = false;
245
+ int64_t Index = -1;
246
+ if constexpr (!U::TakesParamByValue) {
247
+ if (C10_UNLIKELY(This->isReferenceToStorage(&Elt))) {
248
+ ReferencesStorage = true;
249
+ Index = &Elt - This->begin();
250
+ }
251
+ }
252
+ This->grow(NewSize);
253
+ return ReferencesStorage ? This->begin() + Index : &Elt;
254
+ }
255
+
256
+ public:
257
+ using size_type = size_t;
258
+ using difference_type = ptrdiff_t;
259
+ using value_type = T;
260
+ using iterator = T*;
261
+ using const_iterator = const T*;
262
+
263
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
264
+ using reverse_iterator = std::reverse_iterator<iterator>;
265
+
266
+ using reference = T&;
267
+ using const_reference = const T&;
268
+ using pointer = T*;
269
+ using const_pointer = const T*;
270
+
271
+ using Base::capacity;
272
+ using Base::empty;
273
+ using Base::size;
274
+
275
+ // forward iterator creation methods.
276
+ iterator begin() {
277
+ return (iterator)this->BeginX;
278
+ }
279
+ const_iterator begin() const {
280
+ return (const_iterator)this->BeginX;
281
+ }
282
+ iterator end() {
283
+ return begin() + size();
284
+ }
285
+ const_iterator end() const {
286
+ return begin() + size();
287
+ }
288
+
289
+ // reverse iterator creation methods.
290
+ reverse_iterator rbegin() {
291
+ return reverse_iterator(end());
292
+ }
293
+ const_reverse_iterator rbegin() const {
294
+ return const_reverse_iterator(end());
295
+ }
296
+ reverse_iterator rend() {
297
+ return reverse_iterator(begin());
298
+ }
299
+ const_reverse_iterator rend() const {
300
+ return const_reverse_iterator(begin());
301
+ }
302
+
303
+ size_type size_in_bytes() const {
304
+ return size() * sizeof(T);
305
+ }
306
+ constexpr size_type max_size() const {
307
+ return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
308
+ }
309
+
310
+ size_t capacity_in_bytes() const {
311
+ return capacity() * sizeof(T);
312
+ }
313
+
314
+ /// Return a pointer to the vector's buffer, even if empty().
315
+ pointer data() {
316
+ return pointer(begin());
317
+ }
318
+ /// Return a pointer to the vector's buffer, even if empty().
319
+ const_pointer data() const {
320
+ return const_pointer(begin());
321
+ }
322
+
323
+ // SmallVector::at is NOT from LLVM.
324
+ reference at(size_type idx) {
325
+ assert(idx < size());
326
+ return begin()[idx];
327
+ }
328
+ const_reference at(size_type idx) const {
329
+ assert(idx < size());
330
+ return begin()[idx];
331
+ }
332
+ reference operator[](size_type idx) {
333
+ assert(idx < size());
334
+ return begin()[idx];
335
+ }
336
+ const_reference operator[](size_type idx) const {
337
+ assert(idx < size());
338
+ return begin()[idx];
339
+ }
340
+
341
+ reference front() {
342
+ assert(!empty());
343
+ return begin()[0];
344
+ }
345
+ const_reference front() const {
346
+ assert(!empty());
347
+ return begin()[0];
348
+ }
349
+
350
+ reference back() {
351
+ assert(!empty());
352
+ return end()[-1];
353
+ }
354
+ const_reference back() const {
355
+ assert(!empty());
356
+ return end()[-1];
357
+ }
358
+ };
359
+
360
+ /// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
361
+ /// method implementations that are designed to work with non-trivial T's.
362
+ ///
363
+ /// We approximate is_trivially_copyable with trivial move/copy construction and
364
+ /// trivial destruction. While the standard doesn't specify that you're allowed
365
+ /// copy these types with memcpy, there is no way for the type to observe this.
366
+ /// This catches the important case of std::pair<POD, POD>, which is not
367
+ /// trivially assignable.
368
+ ///
369
+ /// XXX: if build fails here fall back to C10_IS_TRIVIALLY_COPYABLE and make a
370
+ /// note
371
+ template <
372
+ typename T,
373
+ bool = (std::is_trivially_copy_constructible_v<T>)&&(
374
+ std::is_trivially_move_constructible_v<
375
+ T>)&&std::is_trivially_destructible_v<T>>
376
+ class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
377
+ friend class SmallVectorTemplateCommon<T>;
378
+
379
+ protected:
380
+ static constexpr bool TakesParamByValue = false;
381
+ using ValueParamT = const T&;
382
+
383
+ SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
384
+
385
+ static void destroy_range(T* S, T* E) {
386
+ while (S != E) {
387
+ --E;
388
+ E->~T();
389
+ }
390
+ }
391
+
392
+ /// Move the range [I, E) into the uninitialized memory starting with "Dest",
393
+ /// constructing elements as needed.
394
+ template <typename It1, typename It2>
395
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
396
+ std::uninitialized_copy(
397
+ std::make_move_iterator(I), std::make_move_iterator(E), Dest);
398
+ }
399
+
400
+ /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
401
+ /// constructing elements as needed.
402
+ template <typename It1, typename It2>
403
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
404
+ std::uninitialized_copy(I, E, Dest);
405
+ }
406
+
407
+ /// Grow the allocated memory (without initializing new elements), doubling
408
+ /// the size of the allocated memory. Guarantees space for at least one more
409
+ /// element, or MinSize more elements if specified.
410
+ void grow(size_t MinSize = 0);
411
+
412
+ /// Create a new allocation big enough for \p MinSize and pass back its size
413
+ /// in \p NewCapacity. This is the first section of \a grow().
414
+ T* mallocForGrow(size_t MinSize, size_t& NewCapacity) {
415
+ return static_cast<T*>(
416
+ SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
417
+ MinSize, sizeof(T), NewCapacity));
418
+ }
419
+
420
+ /// Move existing elements over to the new allocation \p NewElts, the middle
421
+ /// section of \a grow().
422
+ void moveElementsForGrow(T* NewElts);
423
+
424
+ /// Transfer ownership of the allocation, finishing up \a grow().
425
+ void takeAllocationForGrow(T* NewElts, size_t NewCapacity);
426
+
427
+ /// Reserve enough space to add one element, and return the updated element
428
+ /// pointer in case it was a reference to the storage.
429
+ const T* reserveForParamAndGetAddress(const T& Elt, size_t N = 1) {
430
+ return this->reserveForParamAndGetAddressImpl(this, Elt, N);
431
+ }
432
+
433
+ /// Reserve enough space to add one element, and return the updated element
434
+ /// pointer in case it was a reference to the storage.
435
+ T* reserveForParamAndGetAddress(T& Elt, size_t N = 1) {
436
+ return const_cast<T*>(this->reserveForParamAndGetAddressImpl(this, Elt, N));
437
+ }
438
+
439
+ static T&& forward_value_param(T&& V) {
440
+ return std::move(V);
441
+ }
442
+ static const T& forward_value_param(const T& V) {
443
+ return V;
444
+ }
445
+
446
+ void growAndAssign(size_t NumElts, const T& Elt) {
447
+ // Grow manually in case Elt is an internal reference.
448
+ size_t NewCapacity = 0;
449
+ T* NewElts = mallocForGrow(NumElts, NewCapacity);
450
+ std::uninitialized_fill_n(NewElts, NumElts, Elt);
451
+ this->destroy_range(this->begin(), this->end());
452
+ takeAllocationForGrow(NewElts, NewCapacity);
453
+ this->set_size(NumElts);
454
+ }
455
+
456
+ template <typename... ArgTypes>
457
+ T& growAndEmplaceBack(ArgTypes&&... Args) {
458
+ // Grow manually in case one of Args is an internal reference.
459
+ size_t NewCapacity = 0;
460
+ T* NewElts = mallocForGrow(0, NewCapacity);
461
+ ::new ((void*)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
462
+ moveElementsForGrow(NewElts);
463
+ takeAllocationForGrow(NewElts, NewCapacity);
464
+ this->set_size(this->size() + 1);
465
+ return this->back();
466
+ }
467
+
468
+ public:
469
+ void push_back(const T& Elt) {
470
+ const T* EltPtr = reserveForParamAndGetAddress(Elt);
471
+ ::new ((void*)this->end()) T(*EltPtr);
472
+ this->set_size(this->size() + 1);
473
+ }
474
+
475
+ // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
476
+ void push_back(T&& Elt) {
477
+ T* EltPtr = reserveForParamAndGetAddress(Elt);
478
+ ::new ((void*)this->end()) T(::std::move(*EltPtr));
479
+ this->set_size(this->size() + 1);
480
+ }
481
+
482
+ void pop_back() {
483
+ this->set_size(this->size() - 1);
484
+ this->end()->~T();
485
+ }
486
+ };
487
+
488
+ // Define this out-of-line to dissuade the C++ compiler from inlining it.
489
+ template <typename T, bool TriviallyCopyable>
490
+ void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
491
+ size_t NewCapacity = 0;
492
+ T* NewElts = mallocForGrow(MinSize, NewCapacity);
493
+ moveElementsForGrow(NewElts);
494
+ takeAllocationForGrow(NewElts, NewCapacity);
495
+ }
496
+
497
+ // Define this out-of-line to dissuade the C++ compiler from inlining it.
498
+ template <typename T, bool TriviallyCopyable>
499
+ void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
500
+ T* NewElts) {
501
+ // Move the elements over.
502
+ this->uninitialized_move(this->begin(), this->end(), NewElts);
503
+
504
+ // Destroy the original elements.
505
+ destroy_range(this->begin(), this->end());
506
+ }
507
+
508
+ // Define this out-of-line to dissuade the C++ compiler from inlining it.
509
+ template <typename T, bool TriviallyCopyable>
510
+ void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
511
+ T* NewElts,
512
+ size_t NewCapacity) {
513
+ // If this wasn't grown from the inline copy, deallocate the old space.
514
+ if (!this->isSmall())
515
+ free(this->begin());
516
+
517
+ this->BeginX = NewElts;
518
+ this->Capacity = NewCapacity;
519
+ }
520
+
521
+ /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
522
+ /// method implementations that are designed to work with trivially copyable
523
+ /// T's. This allows using memcpy in place of copy/move construction and
524
+ /// skipping destruction.
525
+ template <typename T>
526
+ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
527
+ friend class SmallVectorTemplateCommon<T>;
528
+
529
+ protected:
530
+ /// True if it's cheap enough to take parameters by value. Doing so avoids
531
+ /// overhead related to mitigations for reference invalidation.
532
+ static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void*);
533
+
534
+ /// Either const T& or T, depending on whether it's cheap enough to take
535
+ /// parameters by value.
536
+ using ValueParamT = std::conditional_t<TakesParamByValue, T, const T&>;
537
+
538
+ SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
539
+
540
+ // No need to do a destroy loop for POD's.
541
+ static void destroy_range(T*, T*) {}
542
+
543
+ /// Move the range [I, E) onto the uninitialized memory
544
+ /// starting with "Dest", constructing elements into it as needed.
545
+ template <typename It1, typename It2>
546
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
547
+ // Just do a copy.
548
+ uninitialized_copy(I, E, Dest);
549
+ }
550
+
551
+ /// Copy the range [I, E) onto the uninitialized memory
552
+ /// starting with "Dest", constructing elements into it as needed.
553
+ template <typename It1, typename It2>
554
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
555
+ // Arbitrary iterator types; just use the basic implementation.
556
+ std::uninitialized_copy(I, E, Dest);
557
+ }
558
+
559
+ /// Copy the range [I, E) onto the uninitialized memory
560
+ /// starting with "Dest", constructing elements into it as needed.
561
+ template <typename T1, typename T2>
562
+ static void uninitialized_copy(
563
+ T1* I,
564
+ T1* E,
565
+ T2* Dest,
566
+ std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* =
567
+ nullptr) {
568
+ // Use memcpy for PODs iterated by pointers (which includes SmallVector
569
+ // iterators): std::uninitialized_copy optimizes to memmove, but we can
570
+ // use memcpy here. Note that I and E are iterators and thus might be
571
+ // invalid for memcpy if they are equal.
572
+ if (I != E)
573
+ memcpy(reinterpret_cast<void*>(Dest), I, (E - I) * sizeof(T));
574
+ }
575
+
576
+ /// Double the size of the allocated memory, guaranteeing space for at
577
+ /// least one more element or MinSize if specified.
578
+ void grow(size_t MinSize = 0) {
579
+ this->grow_pod(MinSize, sizeof(T));
580
+ }
581
+
582
+ /// Reserve enough space to add one element, and return the updated element
583
+ /// pointer in case it was a reference to the storage.
584
+ const T* reserveForParamAndGetAddress(const T& Elt, size_t N = 1) {
585
+ return this->reserveForParamAndGetAddressImpl(this, Elt, N);
586
+ }
587
+
588
+ /// Reserve enough space to add one element, and return the updated element
589
+ /// pointer in case it was a reference to the storage.
590
+ T* reserveForParamAndGetAddress(T& Elt, size_t N = 1) {
591
+ return const_cast<T*>(this->reserveForParamAndGetAddressImpl(this, Elt, N));
592
+ }
593
+
594
+ /// Copy \p V or return a reference, depending on \a ValueParamT.
595
+ static ValueParamT forward_value_param(ValueParamT V) {
596
+ return V;
597
+ }
598
+
599
+ void growAndAssign(size_t NumElts, T Elt) {
600
+ // Elt has been copied in case it's an internal reference, side-stepping
601
+ // reference invalidation problems without losing the realloc optimization.
602
+ this->set_size(0);
603
+ this->grow(NumElts);
604
+ std::uninitialized_fill_n(this->begin(), NumElts, Elt);
605
+ this->set_size(NumElts);
606
+ }
607
+
608
+ template <typename... ArgTypes>
609
+ T& growAndEmplaceBack(ArgTypes&&... Args) {
610
+ // Use push_back with a copy in case Args has an internal reference,
611
+ // side-stepping reference invalidation problems without losing the realloc
612
+ // optimization.
613
+ push_back(T(std::forward<ArgTypes>(Args)...));
614
+ return this->back();
615
+ }
616
+
617
+ public:
618
+ void push_back(ValueParamT Elt) {
619
+ const T* EltPtr = reserveForParamAndGetAddress(Elt);
620
+ memcpy(reinterpret_cast<void*>(this->end()), EltPtr, sizeof(T));
621
+ this->set_size(this->size() + 1);
622
+ }
623
+
624
+ void pop_back() {
625
+ this->set_size(this->size() - 1);
626
+ }
627
+ };
628
+
629
+ /// This class consists of common code factored out of the SmallVector class to
630
+ /// reduce code duplication based on the SmallVector 'N' template parameter.
631
+ template <typename T>
632
+ class SmallVectorImpl : public SmallVectorTemplateBase<T> {
633
+ using SuperClass = SmallVectorTemplateBase<T>;
634
+
635
+ public:
636
+ using iterator = typename SuperClass::iterator;
637
+ using const_iterator = typename SuperClass::const_iterator;
638
+ using reference = typename SuperClass::reference;
639
+ using size_type = typename SuperClass::size_type;
640
+
641
+ protected:
642
+ using SmallVectorTemplateBase<T>::TakesParamByValue;
643
+ using ValueParamT = typename SuperClass::ValueParamT;
644
+
645
+ // Default ctor - Initialize to empty.
646
+ explicit SmallVectorImpl(unsigned N) : SmallVectorTemplateBase<T>(N) {}
647
+
648
+ public:
649
+ SmallVectorImpl(const SmallVectorImpl&) = delete;
650
+
651
+ ~SmallVectorImpl() {
652
+ // Subclass has already destructed this vector's elements.
653
+ // If this wasn't grown from the inline copy, deallocate the old space.
654
+ if (!this->isSmall())
655
+ free(this->begin());
656
+ }
657
+
658
+ void clear() {
659
+ this->destroy_range(this->begin(), this->end());
660
+ this->Size = 0;
661
+ }
662
+
663
+ private:
664
+ template <bool ForOverwrite>
665
+ void resizeImpl(size_type N) {
666
+ if (N < this->size()) {
667
+ this->pop_back_n(this->size() - N);
668
+ } else if (N > this->size()) {
669
+ this->reserve(N);
670
+ for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
671
+ if (ForOverwrite)
672
+ new (&*I) T;
673
+ else
674
+ new (&*I) T();
675
+ this->set_size(N);
676
+ }
677
+ }
678
+
679
+ public:
680
+ void resize(size_type N) {
681
+ resizeImpl<false>(N);
682
+ }
683
+
684
+ /// Like resize, but \ref T is POD, the new values won't be initialized.
685
+ void resize_for_overwrite(size_type N) {
686
+ resizeImpl<true>(N);
687
+ }
688
+
689
+ void resize(size_type N, ValueParamT NV) {
690
+ if (N == this->size())
691
+ return;
692
+
693
+ if (N < this->size()) {
694
+ this->pop_back_n(this->size() - N);
695
+ return;
696
+ }
697
+
698
+ // N > this->size(). Defer to append.
699
+ this->append(N - this->size(), NV);
700
+ }
701
+
702
+ void reserve(size_type N) {
703
+ if (this->capacity() < N)
704
+ this->grow(N);
705
+ }
706
+
707
+ void pop_back_n(size_type NumItems) {
708
+ assert(this->size() >= NumItems);
709
+ this->destroy_range(this->end() - NumItems, this->end());
710
+ this->set_size(this->size() - NumItems);
711
+ }
712
+
713
+ C10_NODISCARD T pop_back_val() {
714
+ T Result = ::std::move(this->back());
715
+ this->pop_back();
716
+ return Result;
717
+ }
718
+
719
+ void swap(SmallVectorImpl& RHS) noexcept;
720
+
721
+ /// Add the specified range to the end of the SmallVector.
722
+ template <
723
+ typename in_iter,
724
+ typename = std::enable_if_t<std::is_convertible_v<
725
+ typename std::iterator_traits<in_iter>::iterator_category,
726
+ std::input_iterator_tag>>>
727
+ void append(in_iter in_start, in_iter in_end) {
728
+ this->assertSafeToAddRange(in_start, in_end);
729
+ size_type NumInputs = std::distance(in_start, in_end);
730
+ this->reserve(this->size() + NumInputs);
731
+ this->uninitialized_copy(in_start, in_end, this->end());
732
+ this->set_size(this->size() + NumInputs);
733
+ }
734
+
735
+ /// Append \p NumInputs copies of \p Elt to the end.
736
+ void append(size_type NumInputs, ValueParamT Elt) {
737
+ const T* EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
738
+ std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
739
+ this->set_size(this->size() + NumInputs);
740
+ }
741
+
742
+ void append(std::initializer_list<T> IL) {
743
+ append(IL.begin(), IL.end());
744
+ }
745
+
746
+ void append(const SmallVectorImpl& RHS) {
747
+ append(RHS.begin(), RHS.end());
748
+ }
749
+
750
+ void assign(size_type NumElts, ValueParamT Elt) {
751
+ // Note that Elt could be an internal reference.
752
+ if (NumElts > this->capacity()) {
753
+ this->growAndAssign(NumElts, Elt);
754
+ return;
755
+ }
756
+
757
+ // Assign over existing elements.
758
+ std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
759
+ if (NumElts > this->size())
760
+ std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
761
+ else if (NumElts < this->size())
762
+ this->destroy_range(this->begin() + NumElts, this->end());
763
+ this->set_size(NumElts);
764
+ }
765
+
766
+ // FIXME: Consider assigning over existing elements, rather than clearing &
767
+ // re-initializing them - for all assign(...) variants.
768
+
769
+ template <
770
+ typename in_iter,
771
+ typename = std::enable_if_t<std::is_convertible_v<
772
+ typename std::iterator_traits<in_iter>::iterator_category,
773
+ std::input_iterator_tag>>>
774
+ void assign(in_iter in_start, in_iter in_end) {
775
+ this->assertSafeToReferenceAfterClear(in_start, in_end);
776
+ clear();
777
+ append(in_start, in_end);
778
+ }
779
+
780
+ void assign(std::initializer_list<T> IL) {
781
+ clear();
782
+ append(IL);
783
+ }
784
+
785
+ void assign(const SmallVectorImpl& RHS) {
786
+ assign(RHS.begin(), RHS.end());
787
+ }
788
+
789
+ iterator erase(iterator I) {
790
+ assert(
791
+ this->isReferenceToStorage(I) && "Iterator to erase is out of bounds.");
792
+
793
+ iterator N = I;
794
+ // Shift all elts down one.
795
+ std::move(I + 1, this->end(), I);
796
+ // Drop the last elt.
797
+ this->pop_back();
798
+ return (N);
799
+ }
800
+
801
+ iterator erase(iterator S, iterator E) {
802
+ assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
803
+
804
+ iterator N = S;
805
+ // Shift all elts down.
806
+ iterator I = std::move(E, this->end(), S);
807
+ // Drop the last elts.
808
+ this->destroy_range(I, this->end());
809
+ this->set_size(I - this->begin());
810
+ return (N);
811
+ }
812
+
813
+ private:
814
+ template <class ArgType>
815
+ iterator insert_one_impl(iterator I, ArgType&& Elt) {
816
+ // Callers ensure that ArgType is derived from T.
817
+ static_assert(
818
+ std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>, T>::
819
+ value,
820
+ "ArgType must be derived from T!");
821
+
822
+ if (I == this->end()) { // Important special case for empty vector.
823
+ this->push_back(::std::forward<ArgType>(Elt));
824
+ return this->end() - 1;
825
+ }
826
+
827
+ assert(
828
+ this->isReferenceToStorage(I) &&
829
+ "Insertion iterator is out of bounds.");
830
+
831
+ // Grow if necessary.
832
+ size_t Index = I - this->begin();
833
+ std::remove_reference_t<ArgType>* EltPtr =
834
+ this->reserveForParamAndGetAddress(Elt);
835
+ I = this->begin() + Index;
836
+
837
+ ::new ((void*)this->end()) T(::std::move(this->back()));
838
+ // Push everything else over.
839
+ std::move_backward(I, this->end() - 1, this->end());
840
+ this->set_size(this->size() + 1);
841
+
842
+ // If we just moved the element we're inserting, be sure to update
843
+ // the reference (never happens if TakesParamByValue).
844
+ static_assert(
845
+ !TakesParamByValue || std::is_same<ArgType, T>::value,
846
+ "ArgType must be 'T' when taking by value!");
847
+ if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
848
+ ++EltPtr;
849
+
850
+ *I = ::std::forward<ArgType>(*EltPtr);
851
+ return I;
852
+ }
853
+
854
+ public:
855
+ iterator insert(iterator I, T&& Elt) {
856
+ return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
857
+ }
858
+
859
+ iterator insert(iterator I, const T& Elt) {
860
+ return insert_one_impl(I, this->forward_value_param(Elt));
861
+ }
862
+
863
+ iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
864
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
865
+ size_t InsertElt = I - this->begin();
866
+
867
+ if (I == this->end()) { // Important special case for empty vector.
868
+ append(NumToInsert, Elt);
869
+ return this->begin() + InsertElt;
870
+ }
871
+
872
+ assert(
873
+ this->isReferenceToStorage(I) &&
874
+ "Insertion iterator is out of bounds.");
875
+
876
+ // Ensure there is enough space, and get the (maybe updated) address of
877
+ // Elt.
878
+ const T* EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
879
+
880
+ // Uninvalidate the iterator.
881
+ I = this->begin() + InsertElt;
882
+
883
+ // If there are more elements between the insertion point and the end of the
884
+ // range than there are being inserted, we can use a simple approach to
885
+ // insertion. Since we already reserved space, we know that this won't
886
+ // reallocate the vector.
887
+ if (size_t(this->end() - I) >= NumToInsert) {
888
+ T* OldEnd = this->end();
889
+ append(
890
+ std::move_iterator<iterator>(this->end() - NumToInsert),
891
+ std::move_iterator<iterator>(this->end()));
892
+
893
+ // Copy the existing elements that get replaced.
894
+ std::move_backward(I, OldEnd - NumToInsert, OldEnd);
895
+
896
+ // If we just moved the element we're inserting, be sure to update
897
+ // the reference (never happens if TakesParamByValue).
898
+ if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
899
+ EltPtr += NumToInsert;
900
+
901
+ std::fill_n(I, NumToInsert, *EltPtr);
902
+ return I;
903
+ }
904
+
905
+ // Otherwise, we're inserting more elements than exist already, and we're
906
+ // not inserting at the end.
907
+
908
+ // Move over the elements that we're about to overwrite.
909
+ T* OldEnd = this->end();
910
+ this->set_size(this->size() + NumToInsert);
911
+ size_t NumOverwritten = OldEnd - I;
912
+ this->uninitialized_move(I, OldEnd, this->end() - NumOverwritten);
913
+
914
+ // If we just moved the element we're inserting, be sure to update
915
+ // the reference (never happens if TakesParamByValue).
916
+ if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
917
+ EltPtr += NumToInsert;
918
+
919
+ // Replace the overwritten part.
920
+ std::fill_n(I, NumOverwritten, *EltPtr);
921
+
922
+ // Insert the non-overwritten middle part.
923
+ std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
924
+ return I;
925
+ }
926
+
927
+ template <
928
+ typename ItTy,
929
+ typename = std::enable_if_t<std::is_convertible_v<
930
+ typename std::iterator_traits<ItTy>::iterator_category,
931
+ std::input_iterator_tag>>>
932
+ iterator insert(iterator I, ItTy From, ItTy To) {
933
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
934
+ size_t InsertElt = I - this->begin();
935
+
936
+ if (I == this->end()) { // Important special case for empty vector.
937
+ append(From, To);
938
+ return this->begin() + InsertElt;
939
+ }
940
+
941
+ assert(
942
+ this->isReferenceToStorage(I) &&
943
+ "Insertion iterator is out of bounds.");
944
+
945
+ // Check that the reserve that follows doesn't invalidate the iterators.
946
+ this->assertSafeToAddRange(From, To);
947
+
948
+ size_t NumToInsert = std::distance(From, To);
949
+
950
+ // Ensure there is enough space.
951
+ reserve(this->size() + NumToInsert);
952
+
953
+ // Uninvalidate the iterator.
954
+ I = this->begin() + InsertElt;
955
+
956
+ // If there are more elements between the insertion point and the end of the
957
+ // range than there are being inserted, we can use a simple approach to
958
+ // insertion. Since we already reserved space, we know that this won't
959
+ // reallocate the vector.
960
+ if (size_t(this->end() - I) >= NumToInsert) {
961
+ T* OldEnd = this->end();
962
+ append(
963
+ std::move_iterator<iterator>(this->end() - NumToInsert),
964
+ std::move_iterator<iterator>(this->end()));
965
+
966
+ // Copy the existing elements that get replaced.
967
+ std::move_backward(I, OldEnd - NumToInsert, OldEnd);
968
+
969
+ std::copy(From, To, I);
970
+ return I;
971
+ }
972
+
973
+ // Otherwise, we're inserting more elements than exist already, and we're
974
+ // not inserting at the end.
975
+
976
+ // Move over the elements that we're about to overwrite.
977
+ T* OldEnd = this->end();
978
+ this->set_size(this->size() + NumToInsert);
979
+ size_t NumOverwritten = OldEnd - I;
980
+ this->uninitialized_move(I, OldEnd, this->end() - NumOverwritten);
981
+
982
+ // Replace the overwritten part.
983
+ for (T* J = I; NumOverwritten > 0; --NumOverwritten) {
984
+ *J = *From;
985
+ ++J;
986
+ ++From;
987
+ }
988
+
989
+ // Insert the non-overwritten middle part.
990
+ this->uninitialized_copy(From, To, OldEnd);
991
+ return I;
992
+ }
993
+
994
+ void insert(iterator I, std::initializer_list<T> IL) {
995
+ insert(I, IL.begin(), IL.end());
996
+ }
997
+
998
+ template <typename... ArgTypes>
999
+ reference emplace_back(ArgTypes&&... Args) {
1000
+ if (C10_UNLIKELY(this->size() >= this->capacity()))
1001
+ return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
1002
+
1003
+ ::new ((void*)this->end()) T(std::forward<ArgTypes>(Args)...);
1004
+ this->set_size(this->size() + 1);
1005
+ return this->back();
1006
+ }
1007
+
1008
+ SmallVectorImpl& operator=(const SmallVectorImpl& RHS);
1009
+
1010
+ SmallVectorImpl& operator=(SmallVectorImpl&& RHS) noexcept(
1011
+ std::is_nothrow_move_constructible_v<T> &&
1012
+ std::is_nothrow_destructible_v<T>);
1013
+
1014
+ bool operator==(const SmallVectorImpl& RHS) const {
1015
+ if (this->size() != RHS.size())
1016
+ return false;
1017
+ return std::equal(this->begin(), this->end(), RHS.begin());
1018
+ }
1019
+ bool operator!=(const SmallVectorImpl& RHS) const {
1020
+ return !(*this == RHS);
1021
+ }
1022
+
1023
+ bool operator<(const SmallVectorImpl& RHS) const {
1024
+ return std::lexicographical_compare(
1025
+ this->begin(), this->end(), RHS.begin(), RHS.end());
1026
+ }
1027
+ };
1028
+
1029
+ template <typename T>
1030
+ void SmallVectorImpl<T>::swap(SmallVectorImpl<T>& RHS) noexcept {
1031
+ if (this == &RHS)
1032
+ return;
1033
+
1034
+ // We can only avoid copying elements if neither vector is small.
1035
+ if (!this->isSmall() && !RHS.isSmall()) {
1036
+ std::swap(this->BeginX, RHS.BeginX);
1037
+ std::swap(this->Size, RHS.Size);
1038
+ std::swap(this->Capacity, RHS.Capacity);
1039
+ return;
1040
+ }
1041
+ this->reserve(RHS.size());
1042
+ RHS.reserve(this->size());
1043
+
1044
+ // Swap the shared elements.
1045
+ size_t NumShared = this->size();
1046
+ if (NumShared > RHS.size())
1047
+ NumShared = RHS.size();
1048
+ for (size_type i = 0; i != NumShared; ++i)
1049
+ std::swap((*this)[i], RHS[i]);
1050
+
1051
+ // Copy over the extra elts.
1052
+ if (this->size() > RHS.size()) {
1053
+ size_t EltDiff = this->size() - RHS.size();
1054
+ this->uninitialized_copy(this->begin() + NumShared, this->end(), RHS.end());
1055
+ RHS.set_size(RHS.size() + EltDiff);
1056
+ this->destroy_range(this->begin() + NumShared, this->end());
1057
+ this->set_size(NumShared);
1058
+ } else if (RHS.size() > this->size()) {
1059
+ size_t EltDiff = RHS.size() - this->size();
1060
+ this->uninitialized_copy(RHS.begin() + NumShared, RHS.end(), this->end());
1061
+ this->set_size(this->size() + EltDiff);
1062
+ this->destroy_range(RHS.begin() + NumShared, RHS.end());
1063
+ RHS.set_size(NumShared);
1064
+ }
1065
+ }
1066
+
1067
+ template <typename T>
1068
+ SmallVectorImpl<T>& SmallVectorImpl<T>::operator=(
1069
+ const SmallVectorImpl<T>& RHS) {
1070
+ // Avoid self-assignment.
1071
+ if (this == &RHS)
1072
+ return *this;
1073
+
1074
+ // If we already have sufficient space, assign the common elements, then
1075
+ // destroy any excess.
1076
+ size_t RHSSize = RHS.size();
1077
+ size_t CurSize = this->size();
1078
+ if (CurSize >= RHSSize) {
1079
+ // Assign common elements.
1080
+ iterator NewEnd;
1081
+ if (RHSSize)
1082
+ NewEnd = std::copy(RHS.begin(), RHS.begin() + RHSSize, this->begin());
1083
+ else
1084
+ NewEnd = this->begin();
1085
+
1086
+ // Destroy excess elements.
1087
+ this->destroy_range(NewEnd, this->end());
1088
+
1089
+ // Trim.
1090
+ this->set_size(RHSSize);
1091
+ return *this;
1092
+ }
1093
+
1094
+ // If we have to grow to have enough elements, destroy the current elements.
1095
+ // This allows us to avoid copying them during the grow.
1096
+ // FIXME: don't do this if they're efficiently moveable.
1097
+ if (this->capacity() < RHSSize) {
1098
+ // Destroy current elements.
1099
+ this->clear();
1100
+ CurSize = 0;
1101
+ this->grow(RHSSize);
1102
+ } else if (CurSize) {
1103
+ // Otherwise, use assignment for the already-constructed elements.
1104
+ std::copy(RHS.begin(), RHS.begin() + CurSize, this->begin());
1105
+ }
1106
+
1107
+ // Copy construct the new elements in place.
1108
+ this->uninitialized_copy(
1109
+ RHS.begin() + CurSize, RHS.end(), this->begin() + CurSize);
1110
+
1111
+ // Set end.
1112
+ this->set_size(RHSSize);
1113
+ return *this;
1114
+ }
1115
+
1116
+ template <typename T>
1117
+ SmallVectorImpl<T>& SmallVectorImpl<T>::
1118
+ operator=(SmallVectorImpl<T>&& RHS) noexcept(
1119
+ std::is_nothrow_move_constructible_v<T> &&
1120
+ std::is_nothrow_destructible_v<T>) {
1121
+ // Avoid self-assignment.
1122
+ if (this == &RHS)
1123
+ return *this;
1124
+
1125
+ // If the RHS isn't small, clear this vector and then steal its buffer.
1126
+ if (!RHS.isSmall()) {
1127
+ this->destroy_range(this->begin(), this->end());
1128
+ if (!this->isSmall())
1129
+ free(this->begin());
1130
+ this->BeginX = RHS.BeginX;
1131
+ this->Size = RHS.Size;
1132
+ this->Capacity = RHS.Capacity;
1133
+ RHS.resetToSmall();
1134
+ return *this;
1135
+ }
1136
+
1137
+ // If we already have sufficient space, assign the common elements, then
1138
+ // destroy any excess.
1139
+ size_t RHSSize = RHS.size();
1140
+ size_t CurSize = this->size();
1141
+ if (CurSize >= RHSSize) {
1142
+ // Assign common elements.
1143
+ iterator NewEnd = this->begin();
1144
+ if (RHSSize)
1145
+ NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1146
+
1147
+ // Destroy excess elements and trim the bounds.
1148
+ this->destroy_range(NewEnd, this->end());
1149
+ this->set_size(RHSSize);
1150
+
1151
+ // Clear the RHS.
1152
+ RHS.clear();
1153
+
1154
+ return *this;
1155
+ }
1156
+
1157
+ // If we have to grow to have enough elements, destroy the current elements.
1158
+ // This allows us to avoid copying them during the grow.
1159
+ // FIXME: this may not actually make any sense if we can efficiently move
1160
+ // elements.
1161
+ if (this->capacity() < RHSSize) {
1162
+ // Destroy current elements.
1163
+ this->clear();
1164
+ CurSize = 0;
1165
+ this->grow(RHSSize);
1166
+ } else if (CurSize) {
1167
+ // Otherwise, use assignment for the already-constructed elements.
1168
+ std::move(RHS.begin(), RHS.begin() + CurSize, this->begin());
1169
+ }
1170
+
1171
+ // Move-construct the new elements in place.
1172
+ this->uninitialized_move(
1173
+ RHS.begin() + CurSize, RHS.end(), this->begin() + CurSize);
1174
+
1175
+ // Set end.
1176
+ this->set_size(RHSSize);
1177
+
1178
+ RHS.clear();
1179
+ return *this;
1180
+ }
1181
+
1182
/// Storage for the SmallVector elements. This is specialized for the N=0 case
/// to avoid allocating unnecessary storage.
template <typename T, unsigned N>
struct SmallVectorStorage {
  // Raw, suitably aligned bytes: elements are constructed and destroyed
  // manually by SmallVectorImpl, so no T objects live here by default.
  alignas(T) char InlineElts[N * sizeof(T)];
};

/// We need the storage to be properly aligned even for small-size of 0 so that
/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
/// well-defined.
template <typename T>
struct alignas(T) SmallVectorStorage<T, 0> {};

/// Forward declaration of SmallVector so that
/// calculateSmallVectorDefaultInlinedElements can reference
/// `sizeof(SmallVector<T, 0>)`.
template <typename T, unsigned N>
class /* LLVM_GSL_OWNER */ SmallVector;
1200
+
1201
/// Helper class for calculating the default number of inline elements for
/// `SmallVector<T>`.
///
/// This should be migrated to a constexpr function when our minimum
/// compiler support is enough for multi-statement constexpr functions.
template <typename T>
struct CalculateSmallVectorDefaultInlinedElements {
  // Parameter controlling the default number of inlined elements
  // for `SmallVector<T>`.
  //
  // The default number of inlined elements ensures that
  // 1. There is at least one inlined element.
  // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
  // it contradicts 1.
  static constexpr size_t kPreferredSmallVectorSizeof = 64;

  // static_assert that sizeof(T) is not "too big".
  //
  // Because our policy guarantees at least one inlined element, it is possible
  // for an arbitrarily large inlined element to allocate an arbitrarily large
  // amount of inline storage. We generally consider it an antipattern for a
  // SmallVector to allocate an excessive amount of inline storage, so we want
  // to call attention to these cases and make sure that users are making an
  // intentional decision if they request a lot of inline storage.
  //
  // We want this assertion to trigger in pathological cases, but otherwise
  // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
  // larger than kPreferredSmallVectorSizeof (otherwise,
  // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
  // pattern seems useful in practice).
  //
  // One wrinkle is that this assertion is in theory non-portable, since
  // sizeof(T) is in general platform-dependent. However, we don't expect this
  // to be much of an issue, because most LLVM development happens on 64-bit
  // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
  // 32-bit hosts, dodging the issue. The reverse situation, where development
  // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
  // 64-bit host, is expected to be very rare.
  static_assert(
      sizeof(T) <= 256,
      "You are trying to use a default number of inlined elements for "
      "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
      "explicit number of inlined elements with `SmallVector<T, N>` to make "
      "sure you really want that much inline storage.");

  // Discount the size of the header itself when calculating the maximum inline
  // bytes.
  static constexpr size_t PreferredInlineBytes =
      kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
  static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
  // The computed default: as many elements as fit in the preferred footprint,
  // but never fewer than one (guarantee 1 above wins over guarantee 2).
  static constexpr size_t value =
      NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
};
1254
+
1255
/// This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
/// in-place, which allows it to avoid heap allocation when the actual number of
/// elements is below that threshold. This allows normal "small" cases to be
/// fast without losing generality for large inputs.
///
/// \note
/// In the absence of a well-motivated choice for the number of inlined
/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
/// omitting the \p N). This will choose a default number of inlined elements
/// reasonable for allocation on the stack (for example, trying to keep \c
/// sizeof(SmallVector<T>) around 64 bytes).
///
/// \warning This does not attempt to be exception safe.
///
/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
template <
    typename T,
    unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
class /* LLVM_GSL_OWNER */ SmallVector : public SmallVectorImpl<T>,
                                         SmallVectorStorage<T, N> {
 public:
  // The impl base is told only the inline capacity N; the actual inline
  // bytes come from the SmallVectorStorage<T, N> base.
  SmallVector() : SmallVectorImpl<T>(N) {}

  ~SmallVector() {
    // Destroy the constructed elements in the vector.
    this->destroy_range(this->begin(), this->end());
  }

  // Constructs `Size` copies of `Value`.
  explicit SmallVector(size_t Size, const T& Value = T())
      : SmallVectorImpl<T>(N) {
    this->assign(Size, Value);
  }

  // Constructs from an iterator range [S, E).
  template <
      typename ItTy,
      typename = std::enable_if_t<std::is_convertible_v<
          typename std::iterator_traits<ItTy>::iterator_category,
          std::input_iterator_tag>>>
  SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
    this->append(S, E);
  }

  // note: The enable_if restricts Container to types that have a .begin() and
  // .end() that return valid input iterators.
  template <
      typename Container,
      std::enable_if_t<
          std::is_convertible_v<
              typename std::iterator_traits<
                  decltype(std::declval<Container>()
                               .begin())>::iterator_category,
              std::input_iterator_tag> &&
              std::is_convertible_v<
                  typename std::iterator_traits<
                      decltype(std::declval<Container>()
                                   .end())>::iterator_category,
                  std::input_iterator_tag>,
          int> = 0>
  explicit SmallVector(Container&& c) : SmallVectorImpl<T>(N) {
    this->append(c.begin(), c.end());
  }

  SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
    this->assign(IL);
  }

  // Copy construction delegates to SmallVectorImpl's copy assignment;
  // the empty() check skips that work for a default-like source.
  SmallVector(const SmallVector& RHS) : SmallVectorImpl<T>(N) {
    if (!RHS.empty())
      SmallVectorImpl<T>::operator=(RHS);
  }

  SmallVector& operator=(const SmallVector& RHS) {
    SmallVectorImpl<T>::operator=(RHS);
    return *this;
  }

  // Move construction may steal RHS's heap buffer (see
  // SmallVectorImpl::operator=(SmallVectorImpl&&)); RHS is left empty.
  SmallVector(SmallVector&& RHS) noexcept(
      std::is_nothrow_move_assignable_v<SmallVectorImpl<T>>)
      : SmallVectorImpl<T>(N) {
    if (!RHS.empty())
      SmallVectorImpl<T>::operator=(::std::move(RHS));
  }

  // note: The enable_if restricts Container to types that have a .begin() and
  // .end() that return valid input iterators.
  template <
      typename Container,
      std::enable_if_t<
          std::is_convertible_v<
              typename std::iterator_traits<
                  decltype(std::declval<Container>()
                               .begin())>::iterator_category,
              std::input_iterator_tag> &&
              std::is_convertible_v<
                  typename std::iterator_traits<
                      decltype(std::declval<Container>()
                                   .end())>::iterator_category,
                  std::input_iterator_tag>,
          int> = 0>
  SmallVector& operator=(const Container& RHS) {
    this->assign(RHS.begin(), RHS.end());
    return *this;
  }

  // Allows moving from any SmallVector<T, M> regardless of its inline size,
  // since all of them share the SmallVectorImpl<T> base.
  SmallVector(SmallVectorImpl<T>&& RHS) noexcept(
      std::is_nothrow_move_assignable_v<SmallVectorImpl<T>>)
      : SmallVectorImpl<T>(N) {
    if (!RHS.empty())
      SmallVectorImpl<T>::operator=(::std::move(RHS));
  }

  SmallVector& operator=(SmallVector&& RHS) noexcept(
      std::is_nothrow_move_assignable_v<SmallVectorImpl<T>>) {
    SmallVectorImpl<T>::operator=(::std::move(RHS));
    return *this;
  }

  SmallVector& operator=(SmallVectorImpl<T>&& RHS) noexcept(
      std::is_nothrow_move_constructible_v<SmallVectorImpl<T>>) {
    SmallVectorImpl<T>::operator=(::std::move(RHS));
    return *this;
  }

  // note: The enable_if restricts Container to types that have a .begin() and
  // .end() that return valid input iterators.
  template <
      typename Container,
      std::enable_if_t<
          std::is_convertible_v<
              typename std::iterator_traits<
                  decltype(std::declval<Container>()
                               .begin())>::iterator_category,
              std::input_iterator_tag> &&
              std::is_convertible_v<
                  typename std::iterator_traits<
                      decltype(std::declval<Container>()
                                   .end())>::iterator_category,
                  std::input_iterator_tag>,
          int> = 0>
  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
  SmallVector& operator=(Container&& C) {
    this->assign(C.begin(), C.end());
    return *this;
  }

  SmallVector& operator=(std::initializer_list<T> IL) {
    this->assign(IL);
    return *this;
  }
};
1406
+
1407
+ template <typename T, unsigned N>
1408
+ inline size_t capacity_in_bytes(const SmallVector<T, N>& X) {
1409
+ return X.capacity_in_bytes();
1410
+ }
1411
+
1412
+ template <typename T, unsigned N>
1413
+ std::ostream& operator<<(std::ostream& out, const SmallVector<T, N>& list) {
1414
+ int i = 0;
1415
+ out << "[";
1416
+ for (auto e : list) {
1417
+ if (i++ > 0)
1418
+ out << ", ";
1419
+ out << e;
1420
+ }
1421
+ out << "]";
1422
+ return out;
1423
+ }
1424
+
1425
// The element type a range yields when dereferencing its begin() iterator,
// stripped of reference and const qualifiers.
template <typename RangeType>
using ValueTypeFromRangeType = std::remove_const_t<
    std::remove_reference_t<decltype(*std::begin(std::declval<RangeType&>()))>>;

/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R&& Range) {
  return {std::begin(Range), std::end(Range)};
}
/// Overload of to_vector that chooses the default number of inline elements
/// for the range's value type instead of taking an explicit Size.
template <typename R>
SmallVector<
    ValueTypeFromRangeType<R>,
    CalculateSmallVectorDefaultInlinedElements<
        ValueTypeFromRangeType<R>>::value>
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
to_vector(R&& Range) {
  return {std::begin(Range), std::end(Range)};
}
1446
+
1447
+ } // end namespace c10
1448
+
1449
namespace std {

/// Implement std::swap in terms of SmallVector swap.
// NOTE(review): adding overloads to namespace std is formally not allowed by
// the standard; this mirrors what LLVM's SmallVector header does upstream.
template <typename T>
inline void swap(
    c10::SmallVectorImpl<T>& LHS,
    c10::SmallVectorImpl<T>& RHS) noexcept {
  LHS.swap(RHS);
}

/// Implement std::swap in terms of SmallVector swap.
template <typename T, unsigned N>
inline void swap(
    c10::SmallVector<T, N>& LHS,
    c10::SmallVector<T, N>& RHS) noexcept {
  LHS.swap(RHS);
}

} // end namespace std
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+
5
+ /**
6
+ * Android versions with libgnustl incorrectly handle thread_local C++
7
+ * qualifier with composite types. NDK up to r17 version is affected.
8
+ *
9
+ * (A fix landed on Jun 4 2018:
10
+ * https://android-review.googlesource.com/c/toolchain/gcc/+/683601)
11
+ *
12
+ * In such cases, use c10::ThreadLocal<T> wrapper
13
+ * which is `pthread_*` based with smart pointer semantics.
14
+ *
15
+ * In addition, convenient macro C10_DEFINE_TLS_static is available.
16
+ * To define static TLS variable of type std::string, do the following
17
+ * ```
18
+ * C10_DEFINE_TLS_static(std::string, str_tls_);
19
+ * ///////
20
+ * {
21
+ * *str_tls_ = "abc";
22
+ * assert(str_tls_->length(), 3);
23
+ * }
24
+ * ```
25
+ *
26
+ * (see c10/test/util/ThreadLocal_test.cpp for more examples)
27
+ */
28
+ #if !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
29
+
30
+ #if defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
31
+ #define C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE
32
+ #endif // defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
33
+
34
+ #endif // !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
35
+
36
+ #if defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
37
+ #include <c10/util/Exception.h>
38
+ #include <errno.h>
39
+ #include <pthread.h>
40
+ #include <memory>
41
+ namespace c10 {
42
+
43
+ /**
44
+ * @brief Temporary thread_local C++ qualifier replacement for Android
45
+ * based on `pthread_*`.
46
+ * To be used with composite types that provide default ctor.
47
+ */
48
+ template <typename Type>
49
+ class ThreadLocal {
50
+ public:
51
+ ThreadLocal() {
52
+ pthread_key_create(
53
+ &key_, [](void* buf) { delete static_cast<Type*>(buf); });
54
+ }
55
+
56
+ ~ThreadLocal() {
57
+ if (void* current = pthread_getspecific(key_)) {
58
+ delete static_cast<Type*>(current);
59
+ }
60
+
61
+ pthread_key_delete(key_);
62
+ }
63
+
64
+ ThreadLocal(const ThreadLocal&) = delete;
65
+ ThreadLocal& operator=(const ThreadLocal&) = delete;
66
+
67
+ Type& get() {
68
+ if (void* current = pthread_getspecific(key_)) {
69
+ return *static_cast<Type*>(current);
70
+ }
71
+
72
+ std::unique_ptr<Type> ptr = std::make_unique<Type>();
73
+ if (0 == pthread_setspecific(key_, ptr.get())) {
74
+ return *ptr.release();
75
+ }
76
+
77
+ int err = errno;
78
+ TORCH_INTERNAL_ASSERT(false, "pthread_setspecific() failed, errno = ", err);
79
+ }
80
+
81
+ Type& operator*() {
82
+ return get();
83
+ }
84
+
85
+ Type* operator->() {
86
+ return &get();
87
+ }
88
+
89
+ private:
90
+ pthread_key_t key_;
91
+ };
92
+
93
+ } // namespace c10
94
+
95
// Defines a file-local TLS variable `Name` of type `Type` backed by the
// pthread-based wrapper above.
#define C10_DEFINE_TLS_static(Type, Name) static ::c10::ThreadLocal<Type> Name

// Declares a class-static TLS member (put in the class body)...
#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
  static ::c10::ThreadLocal<Type> Name

// ...and defines it (put in the corresponding .cpp file).
#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
  ::c10::ThreadLocal<Type> Class::Name
102
+
103
+ #else // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
104
+
105
+ namespace c10 {
106
+
107
/**
 * @brief Default thread_local implementation for non-Android cases.
 * To be used with composite types that provide default ctor.
 *
 * The wrapper holds only a function pointer; the accessor is expected to
 * return the address of the calling thread's instance (see
 * C10_DEFINE_TLS_static, which binds a function-local `thread_local`).
 */
template <typename Type>
class ThreadLocal {
 public:
  using Accessor = Type* (*)();

  explicit ThreadLocal(Accessor accessor) : fetch_(accessor) {}

  // Non-copyable: each wrapper identifies one logical TLS slot.
  ThreadLocal(const ThreadLocal&) = delete;
  ThreadLocal& operator=(const ThreadLocal&) = delete;

  /// Returns the calling thread's instance.
  Type& get() {
    return *fetch_();
  }

  /// Smart-pointer-style access, equivalent to get().
  Type& operator*() {
    return *fetch_();
  }

  /// Smart-pointer-style member access.
  Type* operator->() {
    return fetch_();
  }

 private:
  Accessor fetch_;
};
135
+
136
+ } // namespace c10
137
+
138
// Each macro binds the wrapper to a function-local `thread_local` variable;
// the capture-less lambda decays to the Accessor function pointer.
#define C10_DEFINE_TLS_static(Type, Name) \
  static ::c10::ThreadLocal<Type> Name([]() { \
    static thread_local Type var; \
    return &var; \
  })

// Declares a class-static TLS member (put in the class body)...
#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
  static ::c10::ThreadLocal<Type> Name

// ...and defines it (put in the corresponding .cpp file).
#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
  ::c10::ThreadLocal<Type> Class::Name([]() { \
    static thread_local Type var; \
    return &var; \
  })
152
+
153
+ #endif // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
videochat2/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Float8_e4m3fn.h>
5
+ #include <c10/util/Float8_e4m3fnuz.h>
6
+ #include <c10/util/Float8_e5m2.h>
7
+ #include <c10/util/Float8_e5m2fnuz.h>
8
+ #include <c10/util/Half.h>
9
+ #include <c10/util/complex.h>
10
+
11
+ #include <type_traits>
12
+
13
+ C10_CLANG_DIAGNOSTIC_PUSH()
14
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
15
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
16
+ #endif
17
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
18
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
19
+ #endif
20
+
21
+ namespace c10 {
22
+
23
// True exactly when converting from a complex type to a non-complex type,
// i.e. when the source must be reduced to its real part before the cast.
template <typename dest_t, typename src_t>
struct needs_real {
  constexpr static bool value =
      (is_complex<src_t>::value && !is_complex<dest_t>::value);
};

// maybe_real<true, src_t>::apply extracts the real part of a complex value;
// the primary (false) template is an identity pass-through.
template <bool, typename src_t>
struct maybe_real {
  C10_HOST_DEVICE static inline src_t apply(src_t src) {
    return src;
  }
};

template <typename src_t>
struct maybe_real<true, src_t> {
  C10_HOST_DEVICE static inline decltype(auto) apply(src_t src) {
    return src.real();
  }
};

// maybe_bool<true, src_t>::apply collapses a complex value to a truth value
// (nonzero real OR nonzero imaginary part); the primary (false) template is
// an identity pass-through.
template <bool, typename src_t>
struct maybe_bool {
  C10_HOST_DEVICE static inline src_t apply(src_t src) {
    return src;
  }
};

template <typename src_t>
struct maybe_bool<true, src_t> {
  C10_HOST_DEVICE static inline decltype(auto) apply(src_t src) {
    // Don't use bool operator so as to also compile for ComplexHalf.
    return src.real() || src.imag();
  }
};
57
+
58
// Note: deliberately ignores undefined behavior, consistent with NumPy.
// PyTorch's type conversions can cause a variety of undefined behavior,
// including float to integral overflow and signed to unsigned integer overflow.
// Some of this undefined behavior is addressed below.
//
// General case: drop the imaginary part if needed (complex -> real), then
// plain static_cast.
template <typename dest_t, typename src_t>
struct static_cast_with_inter_type {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline dest_t apply(
      src_t src) {
    constexpr bool real = needs_real<dest_t, src_t>::value;
    auto r = maybe_real<real, src_t>::apply(src);
    return static_cast<dest_t>(r);
  }
};

// Partial template specialization for casting to bool.
// Need to handle complex types separately, as we don't
// simply want to cast the real part to bool.
template <typename src_t>
struct static_cast_with_inter_type<bool, src_t> {
  C10_HOST_DEVICE static inline bool apply(src_t src) {
    constexpr bool complex = needs_real<bool, src_t>::value;
    return static_cast<bool>(maybe_bool<complex, src_t>::apply(src));
  }
};

// Partial template instantiation for casting to uint8.
// Note: Converting from negative float values to unsigned integer types is
// undefined behavior in C++, and current CPU and GPU compilers exhibit
// divergent behavior. Casting from negative float values to signed
// integer types and then to unsigned integer types is not undefined,
// however, so this cast improves the consistency of type conversions
// to uint8 across compilers.
// Further note: Type conversions across compilers still have other undefined
// and divergent behavior.
template <typename src_t>
struct static_cast_with_inter_type<uint8_t, src_t> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline uint8_t apply(
      src_t src) {
    constexpr bool real = needs_real<uint8_t, src_t>::value;
    // Two-step cast: float -> int64_t is well-defined for in-range negatives,
    // and int64_t -> uint8_t wraps deterministically.
    return static_cast<uint8_t>(
        static_cast<int64_t>(maybe_real<real, src_t>::apply(src)));
  }
};
101
+
102
// Specializations for conversions into c10::complex<c10::Half>: each routes
// through c10::complex<float> as an intermediate — presumably because
// ComplexHalf has limited direct conversion/arithmetic support (see the
// ComplexHalf note in maybe_bool above); TODO confirm.
template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::BFloat16> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::BFloat16 src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::Float8_e5m2> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e5m2 src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::Float8_e5m2fnuz> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e5m2fnuz src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::Float8_e4m3fn> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e4m3fn src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::Float8_e4m3fnuz> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e4m3fnuz src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::Half> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Half src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

// complex<double> -> complex<Half>: narrow through complex<float> first.
template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::complex<double>> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::complex<double> src) {
    return static_cast<c10::complex<c10::Half>>(
        static_cast<c10::complex<float>>(src));
  }
};
173
+
174
// Public entry point: convert a value of type From to To using the
// static_cast_with_inter_type machinery above (NumPy-like semantics).
template <typename To, typename From>
C10_HOST_DEVICE To convert(From f) {
  return static_cast_with_inter_type<To, From>::apply(f);
}

// Define separately to avoid being inlined and prevent code-size bloat
[[noreturn]] C10_API void report_overflow(const char* name);

// Like convert(), but calls report_overflow (which throws/aborts) when `f`
// does not fit in To. `name` identifies the destination type in the error.
template <typename To, typename From>
To checked_convert(From f, const char* name) {
  // Converting to bool can't overflow so we exclude this case from checking.
  if (!std::is_same_v<To, bool> && overflows<To, From>(f)) {
    report_overflow(name);
  }
  return convert<To, From>(f);
}
190
+
191
+ } // namespace c10
192
+
193
+ C10_CLANG_DIAGNOSTIC_POP()
194
+
195
+ // Trigger tests for D25440771. TODO: Remove this line any time you want.
videochat2/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/ConstexprCrc.h>
4
+ #include <c10/util/IdWrapper.h>
5
+ #include <c10/util/string_view.h>
6
+ #include <cstdint>
7
+ #include <ostream>
8
+ #include <stdexcept>
9
+ #include <string>
10
+ #include <type_traits>
11
+
12
+ namespace c10::util {
13
+
14
+ // TODO Make it work for more compilers
15
+
16
+ // Intel compiler works
17
+ #if defined(__INTEL_COMPILER)
18
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
19
+ #define C10_TYPENAME_CONSTEXPR
20
+
21
+ // Clang works
22
+ #elif defined(__clang__)
23
+
24
+ // except for NVCC
25
+ #if defined(__CUDACC__)
26
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
27
+ #define C10_TYPENAME_CONSTEXPR
28
+ #else
29
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
30
+ #define C10_TYPENAME_CONSTEXPR constexpr
31
+ #endif
32
+
33
+ // Windows works
34
+ #elif defined(_MSC_VER)
35
+
36
+ // except for NVCC
37
+ #if defined(__CUDACC__)
38
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
39
+ #define C10_TYPENAME_CONSTEXPR
40
+ #else
41
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
42
+ #define C10_TYPENAME_CONSTEXPR constexpr
43
+ #endif
44
+
45
+ // GCC works
46
+ #elif defined(__GNUC__)
47
+
48
+ // except when gcc < 9
49
+ #if (__GNUC__ < 9) || defined(__CUDACC__)
50
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
51
+ #define C10_TYPENAME_CONSTEXPR
52
+ #else
53
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
54
+ #define C10_TYPENAME_CONSTEXPR constexpr
55
+ #endif
56
+
57
+ // some other compiler we don't know about
58
+ #else
59
+ #define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
60
+ #define C10_TYPENAME_CONSTEXPR constexpr
61
+ #endif
62
+
63
// A 64-bit type id; the value is a compile-time crc64 checksum of the type's
// fully qualified name (see type_index_impl below).
struct type_index final : IdWrapper<type_index, uint64_t> {
  constexpr explicit type_index(uint64_t checksum) : IdWrapper(checksum) {}

  // Allow usage in std::map / std::set
  // TODO Disallow this and rather use std::unordered_map/set everywhere
  friend constexpr bool operator<(type_index lhs, type_index rhs) noexcept {
    return lhs.underlyingId() < rhs.underlyingId();
  }

  friend std::ostream& operator<<(std::ostream& stream, type_index typeId) {
    return stream << typeId.underlyingId();
  }
};
76
+
77
+ namespace detail {
78
+
79
+ #if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
80
+ __GNUC__ < 5
81
+ // Getting __PRETTY_FUNCTION__ at compile time only works with GCC >= 5
82
+ #error "You're running a too old version of GCC. We need GCC 5 or later."
83
+ #endif
84
+
85
+ #if defined(__clang__) && __clang_major__ < 4
86
+ // Getting __PRETTY_FUNCTION__ at compile time only works with Clang >= 4
87
+ #error "You're running a too old version of Clang. We need Clang 4 or later."
88
+ #endif
89
+
90
// Strips `prefix` and `suffix` from `str`; used to carve the bare type name
// out of __PRETTY_FUNCTION__ / __FUNCSIG__. A non-matching pattern throws
// std::logic_error (in a constant-expression context this makes compilation
// fail instead).
inline constexpr string_view extract(
    string_view prefix,
    string_view suffix,
    string_view str) {
#if !defined(__CUDA_ARCH__) // CUDA doesn't like std::logic_error in device code
  return (!str.starts_with(prefix) || !str.ends_with(suffix))
      ? (throw std::logic_error("Invalid pattern"), string_view())
      : str.substr(prefix.size(), str.size() - prefix.size() - suffix.size());
#else
  // Device code: no validation, just slice.
  return str.substr(prefix.size(), str.size() - prefix.size() - suffix.size());
#endif
}
102
+
103
// Returns T's fully qualified name by slicing it out of this function's own
// compiler-generated signature string. The prefix/suffix patterns below must
// match each compiler's exact __FUNCSIG__ / __PRETTY_FUNCTION__ spelling.
template <typename T>
inline C10_TYPENAME_CONSTEXPR c10::string_view fully_qualified_type_name_impl() {
#if defined(_MSC_VER) && !defined(__clang__)
#if defined(__NVCC__)
  return extract(
      "c10::basic_string_view<char> c10::util::detail::fully_qualified_type_name_impl<",
      ">()",
      __FUNCSIG__);
#else
  return extract(
      "class c10::basic_string_view<char> __cdecl c10::util::detail::fully_qualified_type_name_impl<",
      ">(void)",
      __FUNCSIG__);
#endif
#elif defined(__clang__)
  return extract(
      "c10::string_view c10::util::detail::fully_qualified_type_name_impl() [T = ",
      "]",
      __PRETTY_FUNCTION__);
#elif defined(__GNUC__)
  return extract(
#if C10_TYPENAME_SUPPORTS_CONSTEXPR
      // GCC prepends "constexpr " to the signature when the function is
      // actually constexpr, so the pattern differs by build mode.
      "constexpr c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ",
#else
      "c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ",
#endif
      "; c10::string_view = c10::basic_string_view<char>]",
      __PRETTY_FUNCTION__);
#endif
}
133
+
134
#if !defined(__CUDA_ARCH__)
// Computes the raw 64-bit id for the enclosing template argument.
template <typename T>
inline constexpr uint64_t type_index_impl() {
  // Idea: __PRETTY_FUNCTION__ (or __FUNCSIG__ on msvc) contains a qualified name
  // of this function, including its template parameter, i.e. including the
  // type we want an id for. We use this name and run crc64 on it to get a type
  // id.
#if defined(_MSC_VER) && !defined(__clang__)
  return crc64(__FUNCSIG__, sizeof(__FUNCSIG__)).checksum();
#elif defined(__clang__)
  return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum();
#elif defined(__GNUC__)
  return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum();
#endif
}
#endif
150
+
151
+ } // namespace detail
152
+
153
// Returns the compile-time type id for T (decayed, so cv/ref-qualified
// variants of the same type share one id).
template <typename T>
inline constexpr type_index get_type_index() {
#if !defined(__CUDA_ARCH__)
  // To enforce that this is really computed at compile time, we pass the
  // type index through std::integral_constant.
  return type_index{std::integral_constant<
      uint64_t,
      detail::type_index_impl<std::decay_t<T>>()>::value};
#else
  // There's nothing in theory preventing us from running this on device code
  // except for nvcc throwing a compiler error if we enable it.
  return (abort(), type_index(0));
#endif
}
167
+
168
#if !defined(TORCH_PEDANTIC)
// Use precomputed hashsum for std::string
// Needed to workaround ambiguity in class name resolution
// into __PRETTY_FUNCTION__ when abovementioned class is defined in inlined
// namespace. In multi-ABI C++ library, `std::string` is an alias to
// `std::__cxx11::basic_string<char>` which depending on compiler flags can be
// resolved to `basic_string<char>` either in `std` namespace or in
// `std::__cxx11` one (`__cxx11` is an inline namespace)
template <>
inline constexpr type_index get_type_index<std::string>() {
  // hashsum for std::basic_string<char>
  // (hard-coded so both ABI spellings hash identically)
  return type_index{4193213214807308375ULL};
}
#endif
182
+
183
// Returns T's fully qualified name. When the compiler supports constexpr
// __PRETTY_FUNCTION__ parsing the name is computed at compile time; otherwise
// it is computed once at runtime and cached in a function-local static.
template <typename T>
inline C10_TYPENAME_CONSTEXPR string_view
get_fully_qualified_type_name() noexcept {
#if C10_TYPENAME_SUPPORTS_CONSTEXPR
  constexpr
#else
  static
#endif
      string_view name = detail::fully_qualified_type_name_impl<T>();
  return name;
}
194
+ } // namespace c10::util
195
+
196
+ C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::type_index);