ZTWHHH committed
Commit aa23b2e · verified · 1 Parent(s): e4dcac0

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +5 -0
  2. videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc +3 -0
  3. videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc +3 -0
  4. videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc +3 -0
  5. videochat2/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc +0 -0
  6. videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc +0 -0
  7. videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h +319 -0
  12. videochat2/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h +72 -0
  13. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Backend.h +387 -0
  14. videochat2/lib/python3.10/site-packages/torch/include/c10/core/CachingDeviceAllocator.h +131 -0
  15. videochat2/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h +57 -0
  16. videochat2/lib/python3.10/site-packages/torch/include/c10/core/ConstantSymNodeImpl.h +110 -0
  17. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h +15 -0
  18. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h +45 -0
  19. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Device.h +216 -0
  20. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h +28 -0
  21. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h +199 -0
  22. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h +747 -0
  23. videochat2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h +949 -0
  24. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Event.h +137 -0
  25. videochat2/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h +110 -0
  26. videochat2/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h +44 -0
  27. videochat2/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h +86 -0
  28. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Layout.h +78 -0
  29. videochat2/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h +76 -0
  30. videochat2/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h +46 -0
  31. videochat2/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h +52 -0
  32. videochat2/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h +118 -0
  33. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h +467 -0
  34. videochat2/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h +57 -0
  35. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Storage.h +272 -0
  36. videochat2/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h +330 -0
  37. videochat2/lib/python3.10/site-packages/torch/include/c10/core/Stream.h +176 -0
  38. videochat2/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h +170 -0
  39. videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h +89 -0
  40. videochat2/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h +0 -0
  41. videochat2/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h +787 -0
  42. videochat2/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h +49 -0
  43. videochat2/lib/python3.10/site-packages/torch/include/c10/core/alignment.h +21 -0
  44. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h +66 -0
  45. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h +365 -0
  46. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h +102 -0
  47. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h +429 -0
  48. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h +139 -0
  49. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h +263 -0
  50. videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h +67 -0
.gitattributes CHANGED
@@ -908,3 +908,8 @@ videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/_xla_ops.
 videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model.so filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/sympy/polys/matrices/__pycache__/domainmatrix.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videollama2/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/torch/lib/libc10_cuda.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:404d657d14cc41db84a64c8e1a9775d9fc1fce5ad361359cf5b90e9894300321
+size 194105
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f3d8514f8dd09fd4ea81308f55c08a0f65cbbeae5c9836b711e53bf2a46bd07
+size 154739
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f4214888f17bbb3efa35c8ea6a3c622057330155c06dc63b40130d2caa5bb76
+size 112954
videochat2/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (470 Bytes).
videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc ADDED
Binary file (12 kB).
videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc ADDED
Binary file (9.97 kB).
videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc ADDED
Binary file (4.05 kB).
videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.6 kB).
videochat2/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc ADDED
Binary file (6.84 kB).
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h ADDED
@@ -0,0 +1,319 @@
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <utility>
+
+#include <c10/core/Device.h>
+#include <c10/core/DeviceType.h>
+#include <c10/macros/Export.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+#include <c10/util/ThreadLocalDebugInfo.h>
+#include <c10/util/UniqueVoidPtr.h>
+
+namespace c10 {
+
+// A DataPtr is a unique pointer (with an attached deleter and some
+// context for the deleter) to some memory, which also records what
+// device is for its data.
+//
+// nullptr DataPtrs can still have a nontrivial device; this allows
+// us to treat zero-size allocations uniformly with non-zero allocations.
+//
+class C10_API DataPtr {
+ private:
+  c10::detail::UniqueVoidPtr ptr_;
+  Device device_;
+
+ public:
+  // Choice of CPU here is arbitrary; if there's an "undefined" device
+  // we could use that too
+  DataPtr() : ptr_(), device_(DeviceType::CPU) {}
+  DataPtr(void* data, Device device) : ptr_(data), device_(device) {}
+  DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device)
+      : ptr_(data, ctx, ctx_deleter), device_(device) {}
+  void* operator->() const {
+    return ptr_.get();
+  }
+  void clear() {
+    ptr_.clear();
+  }
+  void* get() const {
+    return ptr_.get();
+  }
+  void* mutable_get() {
+    return ptr_.get();
+  }
+  void* get_context() const {
+    return ptr_.get_context();
+  }
+  void* release_context() {
+    return ptr_.release_context();
+  }
+  std::unique_ptr<void, DeleterFnPtr>&& move_context() {
+    return ptr_.move_context();
+  }
+  operator bool() const {
+    return static_cast<bool>(ptr_);
+  }
+  template <typename T>
+  T* cast_context(DeleterFnPtr expected_deleter) const {
+    return ptr_.cast_context<T>(expected_deleter);
+  }
+  DeleterFnPtr get_deleter() const {
+    return ptr_.get_deleter();
+  }
+  /**
+   * Compare the deleter in a DataPtr to expected_deleter.
+   * If it matches, replace the deleter with new_deleter
+   * and return true; otherwise, does nothing and returns
+   * false.
+   *
+   * In general, it is not safe to unconditionally set the
+   * deleter on a DataPtr, because you don't know what
+   * the deleter is, and thus will have a hard time properly
+   * disposing of the deleter without storing the original
+   * deleter (this is difficult to do, because DeleterFnPtr
+   * is not a closure, and because the context on DataPtr is
+   * only a single word, you generally don't have enough
+   * space to store both the original deleter and its context).
+   * However, in some cases, you know /exactly/ what the deleter
+   * is, and you have a new deleter that manually wraps
+   * the old one. In this case, you can safely swap the deleter
+   * after asserting that the deleters line up.
+   *
+   * What are the requirements on new_deleter? It must still
+   * properly dispose of the void* pointer passed in as its argument,
+   * where void* is whatever the context of the original deleter
+   * is. So in general, you expect the new deleter to look something
+   * like this:
+   *
+   *   [](void* ptr) {
+   *     some_new_stuff(ptr);
+   *     get_orig_allocator()->raw_deleter(ptr);
+   *   }
+   *
+   * Note that it won't work to close over the original
+   * allocator; you don't have enough space to do that! Also,
+   * it's unsafe to assume that the passed in pointer in
+   * question is the memory pointer in question; it might not
+   * be; be sure to read the source code of the Allocator
+   * in question to confirm this.
+   */
+  C10_NODISCARD bool compare_exchange_deleter(
+      DeleterFnPtr expected_deleter,
+      DeleterFnPtr new_deleter) {
+    return ptr_.compare_exchange_deleter(expected_deleter, new_deleter);
+  }
+  Device device() const {
+    return device_;
+  }
+  // Unsafely mutates the device on a DataPtr. Under normal use,
+  // you should never actually need to call this function.
+  // We need this for the implementation of the hack detailed
+  // in Note [Masquerading as CUDA]
+  void unsafe_set_device(Device device) {
+    device_ = device;
+  }
+};
+
+// NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a
+// CPU nullptr
+
+inline bool operator==(const DataPtr& dp, std::nullptr_t) noexcept {
+  return !dp;
+}
+inline bool operator==(std::nullptr_t, const DataPtr& dp) noexcept {
+  return !dp;
+}
+inline bool operator!=(const DataPtr& dp, std::nullptr_t) noexcept {
+  return dp;
+}
+inline bool operator!=(std::nullptr_t, const DataPtr& dp) noexcept {
+  return dp;
+}
+
+// Note [raw_allocate/raw_deallocate and Thrust]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Thrust's support for custom allocators requires us to write something
+// like this:
+//
+//   class ThrustAllocator {
+//     char* allocate(size_t);
+//     void deallocate(char*, size_t);
+//   };
+//
+// This is not good for our unique_ptr based allocator interface, as
+// there is no way to get to the context when we free.
+//
+// However, in some cases the context is exactly the same as
+// the data pointer. In this case, we can support the "raw"
+// allocate and deallocate interface. This is what
+// raw_deleter signifies. By default, it returns a nullptr, which means that
+// the raw interface is not implemented. Be sure to implement it whenever
+// possible, or the raw interface will incorrectly reported as unsupported,
+// when it is actually possible.
+
+struct C10_API Allocator {
+  virtual ~Allocator() = default;
+
+  virtual DataPtr allocate(size_t n) = 0;
+
+  // Clones an allocation that came from this allocator.
+  //
+  // To perform the copy, this function calls `copy_data`, which
+  // must be implemented by derived classes.
+  //
+  // Note that this explicitly ignores any context that may have been
+  // attached to the input data.
+  //
+  // Requires: input data was allocated by the same allocator.
+  DataPtr clone(const void* data, std::size_t n);
+
+  // Checks if DataPtr has a simple context, not wrapped with any out of the
+  // ordinary contexts.
+  virtual bool is_simple_data_ptr(const DataPtr& data_ptr) const;
+
+  // If this returns a non nullptr, it means that allocate()
+  // is guaranteed to return a unique_ptr with this deleter attached;
+  // it means the rawAllocate and rawDeallocate APIs are safe to use.
+  // This function MUST always return the same BoundDeleter.
+  virtual DeleterFnPtr raw_deleter() const {
+    return nullptr;
+  }
+  void* raw_allocate(size_t n) {
+    auto dptr = allocate(n);
+    AT_ASSERT(dptr.get() == dptr.get_context());
+    return dptr.release_context();
+  }
+  void raw_deallocate(void* ptr) {
+    auto d = raw_deleter();
+    AT_ASSERT(d);
+    d(ptr);
+  }
+
+  // Copies data from one allocation to another.
+  // Pure virtual, so derived classes must define behavior.
+  // Derived class implementation can simply call `default_copy_data`
+  // to use `std::memcpy`.
+  //
+  // Requires: src and dest were allocated by this allocator
+  // Requires: src and dest both have length >= count
+  virtual void copy_data(void* dest, const void* src, std::size_t count)
+      const = 0;
+
+ protected:
+  // Uses `std::memcpy` to copy data.
+  // Child classes can use this as `copy_data` when an alternative copy
+  // API is not needed.
+  void default_copy_data(void* dest, const void* src, std::size_t count) const;
+};
+
+// This context is used to generate DataPtr which have arbitrary
+// std::function deleters associated with them. In some user facing
+// functions, we give a (user-friendly) interface for constructing
+// tensors from external data which take an arbitrary std::function
+// deleter. Grep for InefficientStdFunctionContext to find these
+// occurrences.
+//
+// This context is inefficient because we have to do a dynamic
+// allocation InefficientStdFunctionContext, on top of the dynamic
+// allocation which is implied by std::function itself.
+struct C10_API InefficientStdFunctionContext {
+  void* ptr_;
+  std::function<void(void*)> deleter_;
+  InefficientStdFunctionContext(void* ptr, std::function<void(void*)> deleter)
+      : ptr_(ptr), deleter_(std::move(deleter)) {}
+  ~InefficientStdFunctionContext() {
+    if (deleter_) {
+      deleter_(ptr_);
+    }
+  }
+  static DataPtr makeDataPtr(
+      void* ptr,
+      std::function<void(void*)> deleter,
+      Device device);
+};
+
+/** Set the allocator for DeviceType `t`. The passed in allocator pointer is
+ *  expected to have static lifetime; this function does NOT take ownership
+ *  of the raw pointer. (The reason for this is to prevent existing pointers
+ *  to an allocator of a particular device from being invalidated when
+ *  SetAllocator is called.)
+ *
+ *  Also note that this is not thread-safe, and we assume this function will
+ *  only be called during initialization.
+ *
+ *  The 'priority' flag is introduced when we want to overwrite the default
+ *  allocator, since the allocators are set statically. The default priority
+ *  is 0, which means the lowest. Only higher or equal priority can overwrite
+ *  existing ones.
+ */
+C10_API void SetAllocator(DeviceType t, Allocator* alloc, uint8_t priority = 0);
+C10_API Allocator* GetAllocator(const DeviceType& t);
+
+template <DeviceType t>
+struct AllocatorRegisterer {
+  explicit AllocatorRegisterer(Allocator* alloc) {
+    SetAllocator(t, alloc);
+  }
+};
+
+#define REGISTER_ALLOCATOR(t, f)                         \
+  namespace {                                            \
+  static c10::AllocatorRegisterer<t> g_allocator_d(f);   \
+  }
+
+// An interface for reporting thread local memory usage
+// per device
+struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase {
+  MemoryReportingInfoBase();
+  ~MemoryReportingInfoBase() override = default;
+
+  /**
+   * alloc_size corresponds to the size of the ptr.
+   *
+   * total_allocated corresponds to total allocated memory.
+   *
+   * total_reserved corresponds to total size of memory pool, both used and
+   * unused, if applicable.
+   */
+  virtual void reportMemoryUsage(
+      void* ptr,
+      int64_t alloc_size,
+      size_t total_allocated,
+      size_t total_reserved,
+      Device device) = 0;
+
+  virtual void reportOutOfMemory(
+      int64_t alloc_size,
+      size_t total_allocated,
+      size_t total_reserved,
+      Device device);
+
+  virtual bool memoryProfilingEnabled() const = 0;
+};
+
+C10_API bool memoryProfilingEnabled();
+C10_API void reportMemoryUsageToProfiler(
+    void* ptr,
+    int64_t alloc_size,
+    size_t total_allocated,
+    size_t total_reserved,
+    Device device);
+
+C10_API void reportOutOfMemoryToProfiler(
+    int64_t alloc_size,
+    size_t total_allocated,
+    size_t total_reserved,
+    Device device);
+
+// used to hold traceback information in allocators
+struct GatheredContext {
+  virtual ~GatheredContext() = default;
+};
+
+} // namespace c10
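The Allocator contract above is compact: implement allocate(), implement copy_data() (or forward to default_copy_data()), and expose raw_deleter() only when the deleter context is the data pointer itself, which is what legalizes raw_allocate()/raw_deallocate(). Below is a minimal sketch of a conforming CPU allocator; it is not part of this commit, the MallocAllocator name and std::malloc backing are illustrative assumptions, and libc10 must be linked for default_copy_data.

// Sketch only: a malloc-backed c10::Allocator whose context equals its data
// pointer, so the raw interface is available.
#include <c10/core/Allocator.h>
#include <cstdlib>

namespace {

void delete_malloc(void* ctx) {
  // Context and data coincide for this allocator, so freeing the context
  // frees the allocation itself.
  std::free(ctx);
}

struct MallocAllocator final : c10::Allocator {
  c10::DataPtr allocate(size_t n) override {
    void* data = std::malloc(n);
    return {data, data, &delete_malloc, c10::Device(c10::DeviceType::CPU)};
  }
  c10::DeleterFnPtr raw_deleter() const override {
    return &delete_malloc;  // same deleter for every allocation, as required
  }
  void copy_data(void* dest, const void* src, std::size_t count) const override {
    default_copy_data(dest, src, count);  // plain memcpy suffices on CPU
  }
};

} // namespace

A real backend would keep a static instance alive and register it via REGISTER_ALLOCATOR or SetAllocator, per the lifetime comment above.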
videochat2/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h ADDED
@@ -0,0 +1,72 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+
+namespace c10 {
+
+// Structure used to pack all the thread local boolean
+// flags used by autograd
+struct C10_API AutogradState {
+  static AutogradState& get_tls_state();
+  static void set_tls_state(AutogradState state);
+
+  AutogradState(
+      bool grad_mode,
+      bool inference_mode,
+      bool fw_grad_mode,
+      bool multithreading_enabled)
+      : grad_mode_(grad_mode),
+        inference_mode_(inference_mode),
+        fw_grad_mode_(fw_grad_mode),
+        multithreading_enabled_(multithreading_enabled),
+        view_replay_enabled_(false) {}
+
+  void set_grad_mode(bool enabled) {
+    grad_mode_ = enabled;
+  }
+
+  void set_fw_grad_mode(bool enabled) {
+    fw_grad_mode_ = enabled;
+  }
+
+  void set_inference_mode(bool enabled) {
+    inference_mode_ = enabled;
+  }
+
+  void set_multithreading_enabled(bool multithreading_enabled) {
+    multithreading_enabled_ = multithreading_enabled;
+  }
+
+  void set_view_replay_enabled(bool view_replay_enabled) {
+    view_replay_enabled_ = view_replay_enabled;
+  }
+
+  bool get_grad_mode() const {
+    return grad_mode_;
+  }
+
+  bool get_fw_grad_mode() const {
+    return fw_grad_mode_;
+  }
+
+  bool get_inference_mode() const {
+    return inference_mode_;
+  }
+
+  bool get_multithreading_enabled() const {
+    return multithreading_enabled_;
+  }
+
+  bool get_view_replay_enabled() const {
+    return view_replay_enabled_;
+  }
+
+ private:
+  bool grad_mode_ : 1;
+  bool inference_mode_ : 1;
+  bool fw_grad_mode_ : 1;
+  bool multithreading_enabled_ : 1;
+  bool view_replay_enabled_ : 1;
+};
+
+} // namespace c10
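AutogradState packs five thread-local autograd flags into single-bit fields behind two static TLS accessors. As a hedged sketch of how those accessors compose into an RAII guard (PyTorch's real guards, e.g. torch::NoGradGuard, live elsewhere; NoGradSketch is a hypothetical name):

// Sketch only: save, flip, and restore the TLS grad-mode bit.
#include <c10/core/AutogradState.h>

struct NoGradSketch {
  NoGradSketch() : prev_(c10::AutogradState::get_tls_state().get_grad_mode()) {
    c10::AutogradState::get_tls_state().set_grad_mode(false);
  }
  ~NoGradSketch() {
    // Restore whatever was active when the guard was constructed.
    c10::AutogradState::get_tls_state().set_grad_mode(prev_);
  }
  bool prev_;
};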
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Backend.h ADDED
@@ -0,0 +1,387 @@
+#pragma once
+
+#include <c10/core/DeviceType.h>
+#include <c10/core/DispatchKey.h>
+#include <c10/core/DispatchKeySet.h>
+#include <c10/util/Exception.h>
+
+#include <stdexcept>
+
+namespace c10 {
+
+/**
+ * This legacy enum class defines the set of backends supported by old school,
+ * code generated Type-based ATen. A "backend" in this sense roughly
+ * corresponds to the cartesian product of (device type, layout), but restricted
+ * only to combinations which we actually have kernels for. Backend does NOT
+ * include dtype.
+ *
+ * The reason we are sunsetting this enum class is because it doesn't allow for
+ * open registration; e.g., if you want to add SparseXLA, you'd have to
+ * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is
+ * the replacement for Backend which supports open registration.
+ *
+ * NB: The concept of 'Backend' here disagrees with the notion of backend
+ * exposed to users in torch.backends. Backend here is something like "CPU"
+ * or "SparseCUDA"; backend in torch.backends is something like "MKL" or
+ * "CUDNN".
+ */
+enum class Backend {
+  CPU,
+  CUDA,
+  HIP,
+  VE,
+  FPGA,
+  IPU,
+  XPU,
+  SparseCPU,
+  SparseCUDA,
+  SparseCsrCPU,
+  SparseCsrCUDA,
+  SparseHIP,
+  SparseVE,
+  SparseXPU,
+  SparsePrivateUse1,
+  SparseCsrHIP,
+  SparseCsrVE,
+  SparseCsrXPU,
+  SparseCsrPrivateUse1,
+  MAIA,
+  XLA,
+  Vulkan,
+  Metal,
+  Meta,
+  QuantizedCPU,
+  QuantizedCUDA,
+  QuantizedXPU,
+  QuantizedPrivateUse1,
+  Undefined,
+  MkldnnCPU,
+  MPS,
+  HPU,
+  Lazy,
+  MTIA,
+  PrivateUse1,
+  NumOptions
+};
+
+inline Backend dispatchKeyToBackend(DispatchKey t) {
+  if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) {
+    return Backend::CPU;
+  } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) {
+    return Backend::CUDA;
+  } else if (t == DispatchKey::HIP) {
+    return Backend::HIP;
+  } else if (t == DispatchKey::VE) {
+    return Backend::VE;
+  } else if (t == DispatchKey::FPGA) {
+    return Backend::FPGA;
+  } else if (t == DispatchKey::MAIA) {
+    return Backend::MAIA;
+  } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) {
+    return Backend::XLA;
+  } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) {
+    return Backend::Lazy;
+  } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) {
+    return Backend::MPS;
+  } else if (t == DispatchKey::Vulkan) {
+    return Backend::Vulkan;
+  } else if (t == DispatchKey::Metal) {
+    return Backend::Metal;
+  } else if (t == DispatchKey::Meta) {
+    return Backend::Meta;
+  } else if (t == DispatchKey::SparseCPU) {
+    return Backend::SparseCPU;
+  } else if (t == DispatchKey::SparseCUDA) {
+    return Backend::SparseCUDA;
+  } else if (t == DispatchKey::SparseHIP) {
+    return Backend::SparseHIP;
+  } else if (t == DispatchKey::SparseVE) {
+    return Backend::SparseVE;
+  } else if (t == DispatchKey::SparsePrivateUse1) {
+    return Backend::SparsePrivateUse1;
+  } else if (t == DispatchKey::SparseCsrCPU) {
+    return Backend::SparseCsrCPU;
+  } else if (t == DispatchKey::SparseCsrCUDA) {
+    return Backend::SparseCsrCUDA;
+  } else if (t == DispatchKey::SparseCsrHIP) {
+    return Backend::SparseCsrHIP;
+  } else if (t == DispatchKey::SparseCsrVE) {
+    return Backend::SparseCsrVE;
+  } else if (t == DispatchKey::SparseCsrPrivateUse1) {
+    return Backend::SparseCsrPrivateUse1;
+  } else if (t == DispatchKey::MkldnnCPU) {
+    return Backend::MkldnnCPU;
+  } else if (t == DispatchKey::QuantizedCPU) {
+    return Backend::QuantizedCPU;
+  } else if (t == DispatchKey::QuantizedCUDA) {
+    return Backend::QuantizedCUDA;
+  } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) {
+    return Backend::IPU;
+  } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) {
+    return Backend::XPU;
+  } else if (t == DispatchKey::SparseXPU) {
+    return Backend::SparseXPU;
+  } else if (t == DispatchKey::SparseCsrXPU) {
+    return Backend::SparseCsrXPU;
+  } else if (t == DispatchKey::QuantizedXPU) {
+    return Backend::QuantizedXPU;
+  } else if (t == DispatchKey::QuantizedPrivateUse1) {
+    return Backend::QuantizedPrivateUse1;
+  } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) {
+    return Backend::HPU;
+  } else if (t == DispatchKey::MTIA || t == DispatchKey::AutogradMTIA) {
+    return Backend::MTIA;
+  } else if (
+      t == DispatchKey::PrivateUse1 || t == DispatchKey::AutogradPrivateUse1) {
+    return Backend::PrivateUse1;
+  } else if (t == DispatchKey::Undefined) {
+    return Backend::Undefined;
+  } else {
+    TORCH_CHECK(false, "Unrecognized tensor type ID: ", t);
+  }
+}
+
+inline DispatchKey backendToDispatchKey(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return DispatchKey::CPU;
+    case Backend::CUDA:
+      return DispatchKey::CUDA;
+    case Backend::HIP:
+      return DispatchKey::HIP;
+    case Backend::VE:
+      return DispatchKey::VE;
+    case Backend::FPGA:
+      return DispatchKey::FPGA;
+    case Backend::MAIA:
+      return DispatchKey::MAIA;
+    case Backend::XLA:
+      return DispatchKey::XLA;
+    case Backend::Lazy:
+      return DispatchKey::Lazy;
+    case Backend::IPU:
+      return DispatchKey::IPU;
+    case Backend::XPU:
+      return DispatchKey::XPU;
+    case Backend::SparseXPU:
+      return DispatchKey::SparseXPU;
+    case Backend::SparseCsrXPU:
+      return DispatchKey::SparseCsrXPU;
+    case Backend::SparseCPU:
+      return DispatchKey::SparseCPU;
+    case Backend::SparseCUDA:
+      return DispatchKey::SparseCUDA;
+    case Backend::SparseHIP:
+      return DispatchKey::SparseHIP;
+    case Backend::SparseVE:
+      return DispatchKey::SparseVE;
+    case Backend::SparsePrivateUse1:
+      return DispatchKey::SparsePrivateUse1;
+    case Backend::SparseCsrCPU:
+      return DispatchKey::SparseCsrCPU;
+    case Backend::SparseCsrCUDA:
+      return DispatchKey::SparseCsrCUDA;
+    case Backend::SparseCsrHIP:
+      return DispatchKey::SparseCsrHIP;
+    case Backend::SparseCsrVE:
+      return DispatchKey::SparseCsrVE;
+    case Backend::SparseCsrPrivateUse1:
+      return DispatchKey::SparseCsrPrivateUse1;
+    case Backend::MkldnnCPU:
+      return DispatchKey::MkldnnCPU;
+    case Backend::Vulkan:
+      return DispatchKey::Vulkan;
+    case Backend::Metal:
+      return DispatchKey::Metal;
+    case Backend::Meta:
+      return DispatchKey::Meta;
+    case Backend::QuantizedCPU:
+      return DispatchKey::QuantizedCPU;
+    case Backend::QuantizedCUDA:
+      return DispatchKey::QuantizedCUDA;
+    case Backend::QuantizedPrivateUse1:
+      return DispatchKey::QuantizedPrivateUse1;
+    case Backend::Undefined:
+      return DispatchKey::Undefined;
+    case Backend::MPS:
+      return DispatchKey::MPS;
+    case Backend::HPU:
+      return DispatchKey::HPU;
+    case Backend::MTIA:
+      return DispatchKey::MTIA;
+    case Backend::PrivateUse1:
+      return DispatchKey::PrivateUse1;
+    default:
+      throw std::runtime_error("Unknown backend");
+  }
+}
+
+inline DeviceType backendToDeviceType(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+    case Backend::MkldnnCPU:
+    case Backend::SparseCPU:
+    case Backend::SparseCsrCPU:
+    case Backend::QuantizedCPU:
+      return DeviceType::CPU;
+    case Backend::CUDA:
+    case Backend::SparseCUDA:
+    case Backend::QuantizedCUDA:
+    case Backend::SparseCsrCUDA:
+      return DeviceType::CUDA;
+    case Backend::HIP:
+      return DeviceType::HIP;
+    case Backend::VE:
+      return DeviceType::VE;
+    case Backend::FPGA:
+      return DeviceType::FPGA;
+    case Backend::MAIA:
+      return DeviceType::MAIA;
+    case Backend::XLA:
+      return DeviceType::XLA;
+    case Backend::Lazy:
+      return DeviceType::Lazy;
+    case Backend::SparseHIP:
+      return DeviceType::HIP;
+    case Backend::SparseVE:
+      return DeviceType::VE;
+    case Backend::SparseCsrHIP:
+      return DeviceType::HIP;
+    case Backend::SparseCsrVE:
+      return DeviceType::VE;
+    case Backend::IPU:
+      return DeviceType::IPU;
+    case Backend::XPU:
+    case Backend::SparseXPU:
+    case Backend::SparseCsrXPU:
+    case Backend::QuantizedXPU:
+      return DeviceType::XPU;
+    case Backend::Vulkan:
+      return DeviceType::Vulkan;
+    case Backend::Metal:
+      return DeviceType::Metal;
+    case Backend::Meta:
+      return DeviceType::Meta;
+    case Backend::MPS:
+      return DeviceType::MPS;
+    case Backend::HPU:
+      return DeviceType::HPU;
+    case Backend::MTIA:
+      return DeviceType::MTIA;
+    case Backend::PrivateUse1:
+    case Backend::SparsePrivateUse1:
+    case Backend::SparseCsrPrivateUse1:
+    case Backend::QuantizedPrivateUse1:
+      return DeviceType::PrivateUse1;
+    case Backend::Undefined:
+      TORCH_CHECK(false, "Undefined backend is not a valid device type");
+    default:
+      TORCH_CHECK(false, "Unknown backend");
+  }
+}
+
+inline const char* toString(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return "CPU";
+    case Backend::CUDA:
+      return "CUDA";
+    case Backend::HIP:
+      return "HIP";
+    case Backend::VE:
+      return "VE";
+    case Backend::FPGA:
+      return "FPGA";
+    case Backend::XPU:
+      return "XPU";
+    case Backend::IPU:
+      return "IPU";
+    case Backend::MAIA:
+      return "MAIA";
+    case Backend::XLA:
+      return "XLA";
+    case Backend::Lazy:
+      return "Lazy";
+    case Backend::MPS:
+      return "MPS";
+    case Backend::SparseCPU:
+      return "SparseCPU";
+    case Backend::SparseCUDA:
+      return "SparseCUDA";
+    case Backend::SparseHIP:
+      return "SparseHIP";
+    case Backend::SparseVE:
+      return "SparseVE";
+    case Backend::SparseXPU:
+      return "SparseXPU";
+    case Backend::SparsePrivateUse1:
+      return "SparsePrivateUse1";
+    case Backend::SparseCsrCPU:
+      return "SparseCsrCPU";
+    case Backend::SparseCsrCUDA:
+      return "SparseCsrCUDA";
+    case Backend::SparseCsrHIP:
+      return "SparseCsrHIP";
+    case Backend::SparseCsrVE:
+      return "SparseCsrVE";
+    case Backend::SparseCsrXPU:
+      return "SparseCsrXPU";
+    case Backend::SparseCsrPrivateUse1:
+      return "SparseCsrPrivateUse1";
+    case Backend::MkldnnCPU:
+      return "MkldnnCPU";
+    case Backend::Vulkan:
+      return "Vulkan";
+    case Backend::Metal:
+      return "Metal";
+    case Backend::Meta:
+      return "Meta";
+    case Backend::QuantizedCPU:
+      return "QuantizedCPU";
+    case Backend::QuantizedCUDA:
+      return "QuantizedCUDA";
+    case Backend::QuantizedXPU:
+      return "QuantizedXPU";
+    case Backend::QuantizedPrivateUse1:
+      return "QuantizedPrivateUse1";
+    case Backend::HPU:
+      return "HPU";
+    case Backend::MTIA:
+      return "MTIA";
+    case Backend::PrivateUse1:
+      return "PrivateUseOne";
+    default:
+      return "UNKNOWN_BACKEND";
+  }
+}
+
+inline bool isSparse(Backend b) {
+  switch (b) {
+    case Backend::SparseXPU:
+    case Backend::SparseCPU:
+    case Backend::SparseCUDA:
+    case Backend::SparseHIP:
+    case Backend::SparseVE:
+    case Backend::SparsePrivateUse1:
+      return true;
+    default:
+      return false;
+  }
+}
+
+inline bool isSparseCsr(Backend b) {
+  switch (b) {
+    case Backend::SparseCsrXPU:
+    case Backend::SparseCsrCPU:
+    case Backend::SparseCsrCUDA:
+    case Backend::SparseCsrHIP:
+    case Backend::SparseCsrVE:
+    case Backend::SparseCsrPrivateUse1:
+      return true;
+    default:
+      return false;
+  }
+}
+
+} // namespace c10
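For every valid Backend b, backendToDispatchKey followed by dispatchKeyToBackend round-trips, and backendToDeviceType projects the (device type, layout) pair back down to its device type. A small sketch exercising all three helpers (assumes these headers are on the include path and libc10 is linked for the TORCH_CHECK failure paths):

// Sketch only: Backend -> DispatchKey -> Backend round trip, plus the
// device-type projection.
#include <c10/core/Backend.h>
#include <cassert>
#include <iostream>

int main() {
  const auto b = c10::Backend::SparseCUDA;
  const auto key = c10::backendToDispatchKey(b);  // DispatchKey::SparseCUDA
  const auto back = c10::dispatchKeyToBackend(key);
  assert(back == b);                              // round-trips exactly
  const auto dev = c10::backendToDeviceType(b);   // layout projected away
  assert(dev == c10::DeviceType::CUDA);
  std::cout << c10::toString(back) << '\n';       // prints "SparseCUDA"
  return 0;
}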
videochat2/lib/python3.10/site-packages/torch/include/c10/core/CachingDeviceAllocator.h ADDED
@@ -0,0 +1,131 @@
+#pragma once
+
+#include <c10/core/Allocator.h>
+#include <c10/util/irange.h>
+
+#include <array>
+
+namespace c10::CachingDeviceAllocator {
+
+struct Stat {
+  void increase(size_t amount) {
+    current += static_cast<int64_t>(amount);
+    peak = std::max(current, peak);
+    allocated += static_cast<int64_t>(amount);
+  }
+
+  void decrease(size_t amount) {
+    current -= static_cast<int64_t>(amount);
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        current >= 0,
+        "Negative tracked stat in device allocator (likely logic error).");
+    freed += static_cast<int64_t>(amount);
+  }
+
+  void reset_accumulated() {
+    allocated = 0;
+    freed = 0;
+  }
+
+  void reset_peak() {
+    peak = current;
+  }
+
+  int64_t current = 0;
+  int64_t peak = 0;
+  int64_t allocated = 0;
+  int64_t freed = 0;
+};
+
+enum struct StatType : uint64_t {
+  AGGREGATE = 0,
+  SMALL_POOL = 1,
+  LARGE_POOL = 2,
+  NUM_TYPES = 3 // remember to update this whenever a new stat type is added
+};
+
+using StatArray = std::array<Stat, static_cast<size_t>(StatType::NUM_TYPES)>;
+using StatTypes = std::array<bool, static_cast<size_t>(StatType::NUM_TYPES)>;
+
+template <typename Func>
+void for_each_selected_stat_type(const StatTypes& stat_types, Func f) {
+  for (const auto stat_type : c10::irange(stat_types.size())) {
+    if (stat_types[stat_type]) {
+      f(stat_type);
+    }
+  }
+}
+
+// Struct containing memory allocator summary statistics for a device.
+struct DeviceStats {
+  // COUNT: allocations requested by client code
+  StatArray allocation;
+  // COUNT: number of allocated segments from device memory allocation.
+  StatArray segment;
+  // COUNT: number of active memory blocks (allocated or used by stream)
+  StatArray active;
+  // COUNT: number of inactive, split memory blocks (unallocated but can't be
+  // released via device memory deallocation)
+  StatArray inactive_split;
+
+  // SUM: bytes allocated by this memory alocator
+  StatArray allocated_bytes;
+  // SUM: bytes reserved by this memory allocator (both free and used)
+  StatArray reserved_bytes;
+  // SUM: bytes within active memory blocks
+  StatArray active_bytes;
+  // SUM: bytes within inactive, split memory blocks
+  StatArray inactive_split_bytes;
+  // SUM: bytes requested by client code
+  StatArray requested_bytes;
+
+  // COUNT: total number of failed calls to device malloc necessitating cache
+  // flushes.
+  int64_t num_alloc_retries = 0;
+
+  // COUNT: total number of OOMs (i.e. failed calls to device memory allocation
+  // after cache flush)
+  int64_t num_ooms = 0;
+
+  // COUNT: total number of oversize blocks allocated from pool
+  Stat oversize_allocations;
+
+  // COUNT: total number of oversize blocks requiring malloc
+  Stat oversize_segments;
+
+  // COUNT: total number of synchronize_and_free_events() calls
+  int64_t num_sync_all_streams = 0;
+
+  // COUNT: total number of device memory allocation calls. This includes both
+  // mapped and malloced memory.
+  int64_t num_device_alloc = 0;
+
+  // COUNT: total number of device memory deallocation calls. This includes both
+  // un-mapped and free memory.
+  int64_t num_device_free = 0;
+
+  // SIZE: maximum block size that is allowed to be split.
+  int64_t max_split_size = 0;
+};
+
+// Size pretty-printer
+inline std::string format_size(uint64_t size) {
+  std::ostringstream os;
+  os.precision(2);
+  os << std::fixed;
+  if (size <= 1024) {
+    os << size << " bytes";
+  } else if (size <= 1048576) {
+    os << (static_cast<double>(size) / 1024.0);
+    os << " KiB";
+  } else if (size <= 1073741824ULL) {
+    os << static_cast<double>(size) / 1048576.0;
+    os << " MiB";
+  } else {
+    os << static_cast<double>(size) / 1073741824.0;
+    os << " GiB";
+  }
+  return os.str();
+}
+
+} // namespace c10::CachingDeviceAllocator
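Stat, the StatArray/StatTypes aliases, and for_each_selected_stat_type are the bookkeeping core behind DeviceStats: each allocator event updates the AGGREGATE slot plus the slot of the pool the block belongs to. A hedged sketch of that update pattern (the pool choice and byte counts are invented for illustration):

// Sketch only: bump an 8 MiB allocation into the aggregate and large-pool
// slots, then pretty-print the aggregate numbers.
#include <c10/core/CachingDeviceAllocator.h>
#include <iostream>

int main() {
  using namespace c10::CachingDeviceAllocator;
  StatArray allocated_bytes{};  // one Stat per StatType
  StatTypes selected{};
  selected[static_cast<size_t>(StatType::AGGREGATE)] = true;
  selected[static_cast<size_t>(StatType::LARGE_POOL)] = true;

  for_each_selected_stat_type(selected, [&](size_t t) {
    allocated_bytes[t].increase(8 * 1024 * 1024);
  });

  const auto& agg = allocated_bytes[static_cast<size_t>(StatType::AGGREGATE)];
  std::cout << "current " << format_size(static_cast<uint64_t>(agg.current))
            << ", peak " << format_size(static_cast<uint64_t>(agg.peak))
            << '\n';  // both print "8.00 MiB"
  return 0;
}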
videochat2/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h ADDED
@@ -0,0 +1,57 @@
+#pragma once
+
+#include <c10/util/TypeTraits.h>
+#include <type_traits>
+
+namespace c10 {
+
+/**
+ * Represent a function pointer as a C++ type.
+ * This allows using the function pointer as a type
+ * in a template and calling it from inside the template
+ * allows the compiler to inline the call because it
+ * knows the function pointer at compile time.
+ *
+ * Example 1:
+ *  int add(int a, int b) {return a + b;}
+ *  using Add = TORCH_FN_TYPE(add);
+ *  template<class Func> struct Executor {
+ *    int execute(int a, int b) {
+ *      return Func::func_ptr()(a, b);
+ *    }
+ *  };
+ *  Executor<Add> executor;
+ *  EXPECT_EQ(3, executor.execute(1, 2));
+ *
+ * Example 2:
+ *  int add(int a, int b) {return a + b;}
+ *  template<class Func> int execute(Func, int a, int b) {
+ *    return Func::func_ptr()(a, b);
+ *  }
+ *  EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2));
+ */
+template <class FuncType_, FuncType_* func_ptr_>
+struct CompileTimeFunctionPointer final {
+  static_assert(
+      guts::is_function_type<FuncType_>::value,
+      "TORCH_FN can only wrap function types.");
+  using FuncType = FuncType_;
+
+  static constexpr FuncType* func_ptr() {
+    return func_ptr_;
+  }
+};
+
+template <class T>
+struct is_compile_time_function_pointer : std::false_type {};
+template <class FuncType, FuncType* func_ptr>
+struct is_compile_time_function_pointer<
+    CompileTimeFunctionPointer<FuncType, func_ptr>> : std::true_type {};
+
+} // namespace c10
+
+#define TORCH_FN_TYPE(func)                                           \
+  ::c10::CompileTimeFunctionPointer<                                  \
+      std::remove_pointer_t<std::remove_reference_t<decltype(func)>>, \
+      func>
+#define TORCH_FN(func) TORCH_FN_TYPE(func)()
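The two examples in the doc comment compile as written once gtest is available; here is a consolidated, plain-assert variant (a sketch, not part of the commit):

// Sketch only: the header's Example 1 with assert in place of EXPECT_EQ.
#include <c10/core/CompileTimeFunctionPointer.h>
#include <cassert>

int add(int a, int b) {
  return a + b;
}

using Add = TORCH_FN_TYPE(add);
static_assert(c10::is_compile_time_function_pointer<Add>::value);

template <class Func>
struct Executor {
  int execute(int a, int b) {
    // func_ptr() is constexpr, so the compiler can inline through it.
    return Func::func_ptr()(a, b);
  }
};

int main() {
  Executor<Add> executor;
  assert(executor.execute(1, 2) == 3);
  return 0;
}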
videochat2/lib/python3.10/site-packages/torch/include/c10/core/ConstantSymNodeImpl.h ADDED
@@ -0,0 +1,110 @@
+#pragma once
+
+#include <c10/core/SymNodeImpl.h>
+#include <c10/macros/Export.h>
+#include <c10/util/Exception.h>
+#include <cstdint>
+#include <optional>
+#include <string>
+#include <variant>
+
+namespace c10 {
+
+// Unlike other SymNodeImpl, this cannot be "dispatched" conventionally,
+// as it typically needs to defer to another SymNodeImpl
+//
+// Can either represent a bool, int (don't support float yet) this is useful
+// for representing otherwise unrepresentable large negative integer constant.
+template <typename T>
+class C10_API ConstantSymNodeImpl : public SymNodeImpl {
+  static_assert(
+      ::std::is_same_v<T, int64_t> || ::std::is_same_v<T, bool>,
+      "ConstantSymNodeImpl can only accept int64_t or bool types");
+
+ public:
+  ConstantSymNodeImpl(T val) : value_(val) {}
+
+  bool is_int() override {
+    return is_int_();
+  }
+  bool is_bool() override {
+    return is_bool_();
+  }
+  bool is_float() override {
+    return false;
+  }
+  int64_t guard_int(
+      const char* file [[maybe_unused]],
+      int64_t line [[maybe_unused]]) override {
+    TORCH_CHECK(is_int(), "not an int");
+    return int_();
+  }
+  bool guard_bool(
+      const char* file [[maybe_unused]],
+      int64_t line [[maybe_unused]]) override {
+    TORCH_CHECK(is_bool(), "not a bool");
+    return bool_();
+  }
+  double guard_float(
+      const char* file [[maybe_unused]],
+      int64_t line [[maybe_unused]]) override {
+    TORCH_CHECK(false, "not a float");
+  }
+  int64_t int_() override {
+    TORCH_CHECK(is_int(), "not an int");
+    return ::std::get<int64_t>(value_);
+  }
+  bool bool_() override {
+    TORCH_CHECK(is_bool(), "not a bool");
+    return ::std::get<bool>(value_);
+  }
+  bool has_hint() override {
+    return true;
+  }
+  c10::SymNode eq(const c10::SymNode& other) override;
+  c10::SymNode ne(const c10::SymNode& other) override;
+  c10::SymNode ge(const c10::SymNode& other) override;
+  c10::SymNode le(const c10::SymNode& other) override;
+  c10::SymNode lt(const c10::SymNode& other) override;
+  c10::SymNode gt(const c10::SymNode& other) override;
+  c10::SymNode mul(const c10::SymNode& other) override;
+  ::std::string str() override {
+    if constexpr (is_int_()) {
+      return ::std::to_string(::std::get<int64_t>(value_));
+    } else {
+      return ::std::get<bool>(value_) ? "true" : "false";
+    }
+  }
+  std::optional<int64_t> constant_int() override {
+    if constexpr (is_int_()) {
+      return ::std::get<int64_t>(value_);
+    } else {
+      return std::nullopt;
+    }
+  }
+  std::optional<bool> constant_bool() override {
+    if constexpr (is_bool_()) {
+      return ::std::get<bool>(value_);
+    } else {
+      return std::nullopt;
+    }
+  }
+  bool is_constant() override {
+    return true;
+  }
+  bool is_symbolic() override {
+    return false;
+  }
+
+ private:
+  ::std::variant<int64_t, bool> value_;
+
+  static constexpr bool is_int_() {
+    return ::std::is_same_v<T, int64_t>;
+  }
+  static constexpr bool is_bool_() {
+    return ::std::is_same_v<T, bool>;
+  }
+};
+
+} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h ADDED
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <c10/core/ScalarType.h>
+#include <c10/macros/Export.h>
+
+namespace caffe2 {
+class TypeMeta;
+} // namespace caffe2
+
+namespace c10 {
+C10_API void set_default_dtype(caffe2::TypeMeta dtype);
+C10_API const caffe2::TypeMeta get_default_dtype();
+C10_API ScalarType get_default_dtype_as_scalartype();
+C10_API const caffe2::TypeMeta get_default_complex_dtype();
+} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h ADDED
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <c10/core/Device.h>
+#include <c10/core/DeviceType.h>
+#include <c10/core/Layout.h>
+#include <c10/core/ScalarType.h>
+#include <c10/util/typeid.h>
+
+namespace c10 {
+
+struct TensorOptions;
+
+/// Like TensorOptions, but all fields are guaranteed to be filled.
+struct DefaultTensorOptions {
+  DefaultTensorOptions() = default;
+
+  caffe2::TypeMeta dtype() const noexcept {
+    return dtype_;
+  }
+  Device device() const noexcept {
+    return device_;
+  }
+  Layout layout() const noexcept {
+    return layout_;
+  }
+  bool requires_grad() const noexcept {
+    return requires_grad_;
+  }
+
+  // Defined in TensorOptions.h
+  inline DefaultTensorOptions& merge(const TensorOptions& options);
+
+ private:
+  caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 64-bit
+  Device device_ = at::kCPU; // 32-bit
+  Layout layout_ = at::kStrided; // 8-bit
+  bool requires_grad_ = false; // 8-bit
+};
+
+inline const DefaultTensorOptions& getDefaultTensorOptions() {
+  static const auto options = DefaultTensorOptions();
+  return options;
+}
+
+} // namespace c10
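Because every field has a default member initializer, getDefaultTensorOptions() always describes a float32, strided, CPU, requires_grad=false tensor. A hedged sanity-check sketch (assumes libc10 is linked and that the at:: aliases used by the header itself are visible):

// Sketch only: confirm the four defaults listed in the struct above.
#include <c10/core/DefaultTensorOptions.h>
#include <cassert>

int main() {
  const auto& opts = c10::getDefaultTensorOptions();
  assert(opts.dtype() == caffe2::TypeMeta::Make<float>());
  assert(opts.device() == c10::Device(at::kCPU));  // index -1: "current" CPU
  assert(opts.layout() == at::kStrided);
  assert(!opts.requires_grad());
  return 0;
}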
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Device.h ADDED
@@ -0,0 +1,216 @@
+#pragma once
+
+#include <c10/core/DeviceType.h>
+#include <c10/macros/Export.h>
+#include <c10/util/Exception.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <iosfwd>
+#include <string>
+
+namespace c10 {
+
+/// An index representing a specific device; e.g., the 1 in GPU 1.
+/// A DeviceIndex is not independently meaningful without knowing
+/// the DeviceType it is associated; try to use Device rather than
+/// DeviceIndex directly.
+using DeviceIndex = int8_t;
+
+/// Represents a compute device on which a tensor is located. A device is
+/// uniquely identified by a type, which specifies the type of machine it is
+/// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
+/// specific compute device when there is more than one of a certain type. The
+/// device index is optional, and in its defaulted state represents (abstractly)
+/// "the current device". Further, there are two constraints on the value of the
+/// device index, if one is explicitly stored:
+/// 1. A negative index represents the current device, a non-negative index
+///    represents a specific, concrete device,
+/// 2. When the device type is CPU, the device index must be zero.
+struct C10_API Device final {
+  using Type = DeviceType;
+
+  /// Constructs a new `Device` from a `DeviceType` and an optional device
+  /// index.
+  /* implicit */ Device(DeviceType type, DeviceIndex index = -1)
+      : type_(type), index_(index) {
+    validate();
+  }
+
+  /// Constructs a `Device` from a string description, for convenience.
+  /// The string supplied must follow the following schema:
+  /// `(cpu|cuda)[:<device-index>]`
+  /// where `cpu` or `cuda` specifies the device type, and
+  /// `:<device-index>` optionally specifies a device index.
+  /* implicit */ Device(const std::string& device_string);
+
+  /// Returns true if the type and index of this `Device` matches that of
+  /// `other`.
+  bool operator==(const Device& other) const noexcept {
+    return this->type_ == other.type_ && this->index_ == other.index_;
+  }
+
+  /// Returns true if the type or index of this `Device` differs from that of
+  /// `other`.
+  bool operator!=(const Device& other) const noexcept {
+    return !(*this == other);
+  }
+
+  /// Sets the device index.
+  void set_index(DeviceIndex index) {
+    index_ = index;
+  }
+
+  /// Returns the type of device this is.
+  DeviceType type() const noexcept {
+    return type_;
+  }
+
+  /// Returns the optional index.
+  DeviceIndex index() const noexcept {
+    return index_;
+  }
+
+  /// Returns true if the device has a non-default index.
+  bool has_index() const noexcept {
+    return index_ != -1;
+  }
+
+  /// Return true if the device is of CUDA type.
+  bool is_cuda() const noexcept {
+    return type_ == DeviceType::CUDA;
+  }
+
+  /// Return true if the device is of PrivateUse1 type.
+  bool is_privateuseone() const noexcept {
+    return type_ == DeviceType::PrivateUse1;
+  }
+
+  /// Return true if the device is of MPS type.
+  bool is_mps() const noexcept {
+    return type_ == DeviceType::MPS;
+  }
+
+  /// Return true if the device is of HIP type.
+  bool is_hip() const noexcept {
+    return type_ == DeviceType::HIP;
+  }
+
+  /// Return true if the device is of VE type.
+  bool is_ve() const noexcept {
+    return type_ == DeviceType::VE;
+  }
+
+  /// Return true if the device is of XPU type.
+  bool is_xpu() const noexcept {
+    return type_ == DeviceType::XPU;
+  }
+
+  /// Return true if the device is of IPU type.
+  bool is_ipu() const noexcept {
+    return type_ == DeviceType::IPU;
+  }
+
+  /// Return true if the device is of XLA type.
+  bool is_xla() const noexcept {
+    return type_ == DeviceType::XLA;
+  }
+
+  /// Return true if the device is of MTIA type.
+  bool is_mtia() const noexcept {
+    return type_ == DeviceType::MTIA;
+  }
+
+  /// Return true if the device is of HPU type.
+  bool is_hpu() const noexcept {
+    return type_ == DeviceType::HPU;
+  }
+
+  /// Return true if the device is of Lazy type.
+  bool is_lazy() const noexcept {
+    return type_ == DeviceType::Lazy;
+  }
+
+  /// Return true if the device is of Vulkan type.
+  bool is_vulkan() const noexcept {
+    return type_ == DeviceType::Vulkan;
+  }
+
+  /// Return true if the device is of Metal type.
+  bool is_metal() const noexcept {
+    return type_ == DeviceType::Metal;
+  }
+
+  /// Return true if the device is of MAIA type.
+  bool is_maia() const noexcept {
+    return type_ == DeviceType::MAIA;
+  }
+
+  /// Return true if the device is of META type.
+  bool is_meta() const noexcept {
+    return type_ == DeviceType::Meta;
+  }
+
+  /// Return true if the device is of CPU type.
+  bool is_cpu() const noexcept {
+    return type_ == DeviceType::CPU;
+  }
+
+  /// Return true if the device supports arbitrary strides.
+  bool supports_as_strided() const noexcept {
+    return type_ != DeviceType::IPU && type_ != DeviceType::XLA &&
+        type_ != DeviceType::Lazy && type_ != DeviceType::MTIA;
+  }
+
+  /// Same string as returned from operator<<.
+  std::string str() const;
+
+ private:
+  DeviceType type_;
+  DeviceIndex index_ = -1;
+  void validate() {
+    // Removing these checks in release builds noticeably improves
+    // performance in micro-benchmarks.
+    // This is safe to do, because backends that use the DeviceIndex
+    // have a later check when we actually try to switch to that device.
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        index_ >= -1,
+        "Device index must be -1 or non-negative, got ",
+        static_cast<int>(index_));
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        !is_cpu() || index_ <= 0,
+        "CPU device index must be -1 or zero, got ",
+        static_cast<int>(index_));
+  }
+};
+
+C10_API std::ostream& operator<<(std::ostream& stream, const Device& device);
+
+} // namespace c10
+
+namespace std {
+template <>
+struct hash<c10::Device> {
+  size_t operator()(c10::Device d) const noexcept {
+    // Are you here because this static assert failed? Make sure you ensure
+    // that the bitmasking code below is updated accordingly!
+    static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit");
+    static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit");
+    // Note [Hazard when concatenating signed integers]
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // We must first convert to a same-sized unsigned type, before promoting to
+    // the result type, to prevent sign extension when any of the values is -1.
+    // If sign extension occurs, you'll clobber all of the values in the MSB
+    // half of the resulting integer.
+    //
+    // Technically, by C/C++ integer promotion rules, we only need one of the
+    // uint32_t casts to the result type, but we put in both for explicitness's
+    // sake.
+    uint32_t bits = static_cast<uint32_t>(static_cast<uint8_t>(d.type()))
+            << 16 |
+        static_cast<uint32_t>(static_cast<uint8_t>(d.index()));
+    return std::hash<uint32_t>{}(bits);
+  }
+};
+} // namespace std
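Device is two bytes wide (one for the type, one for the index), which is why the std::hash specialization above can pack it into a single 32-bit word. A short sketch of the string constructor and the -1 "current device" convention (assumes libc10 is linked, since that constructor is defined out of line):

// Sketch only: construct Devices from strings and compare them.
#include <c10/core/Device.h>
#include <cassert>

int main() {
  c10::Device current("cuda");  // no index: abstract "current CUDA device"
  c10::Device first("cuda:0");  // explicit ordinal 0
  assert(!current.has_index() && current.index() == -1);
  assert(first.has_index() && first.index() == 0);
  assert(current != first);     // same type, different index
  assert(first.is_cuda() && !first.is_cpu());
  return 0;
}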
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h ADDED
@@ -0,0 +1,28 @@
+#include <c10/core/Allocator.h>
+#include <c10/util/Exception.h>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+namespace c10 {
+
+template <typename T>
+class DeviceArray {
+ public:
+  DeviceArray(c10::Allocator& allocator, size_t size)
+      : data_ptr_(allocator.allocate(size * sizeof(T))) {
+    static_assert(std::is_trivial<T>::value, "T must be a trivial type");
+    TORCH_INTERNAL_ASSERT(
+        0 == (reinterpret_cast<intptr_t>(data_ptr_.get()) % alignof(T)),
+        "c10::DeviceArray: Allocated memory is not aligned for this data type");
+  }
+
+  T* get() {
+    return static_cast<T*>(data_ptr_.get());
+  }
+
+ private:
+  c10::DataPtr data_ptr_;
+};
+
+} // namespace c10
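DeviceArray ties one Allocator::allocate call to a typed pointer, with the DataPtr member releasing the memory on scope exit. A hedged sketch of a CPU scratch buffer (relies on libc10 having registered a CPU allocator at startup, which it does):

// Sketch only: a 1024-float scratch buffer from the registered CPU allocator.
#include <c10/core/Allocator.h>
#include <c10/core/DeviceArray.h>

int main() {
  c10::Allocator* alloc = c10::GetAllocator(c10::DeviceType::CPU);
  c10::DeviceArray<float> scratch(*alloc, 1024);  // freed when scratch dies
  scratch.get()[0] = 1.0f;  // safe on CPU; on GPU allocators the pointer is
                            // device memory and must not be dereferenced here
  return 0;
}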
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h ADDED
@@ -0,0 +1,199 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/core/impl/InlineDeviceGuard.h>
+ #include <c10/core/impl/VirtualGuardImpl.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+
+ /// RAII guard that sets a certain default device in its constructor, and
+ /// changes it back to the device that was originally active upon destruction.
+ ///
+ /// The device is always reset to the one that was active at the time of
+ /// construction of the guard. Even if you `reset_device` after construction,
+ /// the destructor will still reset the device to the one that was active at
+ /// construction time.
+ ///
+ /// This device guard does NOT have an uninitialized state; it is guaranteed
+ /// to reset a device on exit. If you are in a situation where you *might*
+ /// want to set up a guard (i.e., are looking for the moral equivalent
+ /// of std::optional<DeviceGuard>), see OptionalDeviceGuard.
+ class DeviceGuard {
+  public:
+   /// No default constructor; see Note [Omitted default constructor from RAII]
+   explicit DeviceGuard() = delete;
+
+   /// Set the current device to the passed Device.
+   explicit DeviceGuard(Device device) : guard_(device) {}
+
+   /// This constructor is for testing only.
+   explicit DeviceGuard(
+       Device device,
+       const impl::DeviceGuardImplInterface* impl)
+       : guard_(device, impl) {}
+
+   /// Copy is disallowed
+   DeviceGuard(const DeviceGuard&) = delete;
+   DeviceGuard& operator=(const DeviceGuard&) = delete;
+
+   /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
+   /// which is required for moves on types with nontrivial destructors.
+   DeviceGuard(DeviceGuard&& other) = delete;
+   DeviceGuard& operator=(DeviceGuard&& other) = delete;
+
+   /// Sets the device to the given one. The specified device must be consistent
+   /// with the device type originally specified during guard construction.
+   ///
+   /// TODO: The consistency check here is inconsistent with StreamGuard's
+   /// behavior with set_stream, where a stream on a different device than
+   /// the original one isn't an error; we just reset the stream and then
+   /// switch devices.
+   void reset_device(at::Device device) {
+     guard_.reset_device(device);
+   }
+
+   /// This method is for testing only.
+   void reset_device(
+       at::Device device,
+       const impl::DeviceGuardImplInterface* impl) {
+     guard_.reset_device(device, impl);
+   }
+
+   /// Sets the device index to the given one. The device type is inferred
+   /// from the original device type the guard was constructed with.
+   void set_index(DeviceIndex index) {
+     guard_.set_index(index);
+   }
+
+   /// Returns the device that was set at the time the guard was constructed.
+   Device original_device() const {
+     return guard_.original_device();
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via reset_device.
+   Device current_device() const {
+     return guard_.current_device();
+   }
+
+  private:
+   impl::InlineDeviceGuard<impl::VirtualGuardImpl> guard_;
+ };
+
+ /**
+  * An OptionalDeviceGuard is an RAII class that sets a device to some value on
+  * initialization, and resets the device to its original value on destruction.
+  * Morally, an OptionalDeviceGuard is equivalent to std::optional<DeviceGuard>,
+  * but with extra constructors and methods as appropriate.
+  *
+  * Besides its obvious use (optionally applying a DeviceGuard),
+  * OptionalDeviceGuard is often also used for the following idiom:
+  *
+  *    OptionalDeviceGuard g;
+  *    for (const auto& t : tensors) {
+  *      g.reset_device(t.device());
+  *      do_something_with(t);
+  *    }
+  *
+  * This usage is marginally more efficient than constructing a DeviceGuard on
+  * every iteration of the for loop, as it avoids an unnecessary device reset.
+  *
+  * Unlike DeviceGuard, an OptionalDeviceGuard may be uninitialized. This occurs
+  * when you use the nullary constructor, or pass a nullopt to the constructor.
+  * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the
+  * original device was and they do not reset on destruction. This is why
+  * original_device() and current_device() return std::optional<Device> rather
+  * than Device (as they do in DeviceGuard), and also is why we didn't just
+  * provide OptionalDeviceGuard by default and hide DeviceGuard from users.
+  *
+  * The semantics of an OptionalDeviceGuard are exactly explained by thinking
+  * of it as an std::optional<DeviceGuard>. In particular, an initialized
+  * OptionalDeviceGuard doesn't restore the device to its value at construction;
+  * it restores the device to its value *at initialization*. So if you have the
+  * program:
+  *
+  *    setDevice(1);
+  *    OptionalDeviceGuard g;
+  *    setDevice(2);
+  *    g.reset_device(Device(DeviceType::CUDA, 3)); // initializes!
+  *
+  * On destruction, g will reset the device to 2, rather than 1.
+  *
+  * An uninitialized OptionalDeviceGuard is distinct from an (initialized)
+  * DeviceGuard whose original_device_ and current_device_ match, since the
+  * DeviceGuard will still reset the device to original_device_.
+  */
+ class OptionalDeviceGuard {
+  public:
+   /// Create an uninitialized guard. Set the guard later using reset_device.
+   explicit OptionalDeviceGuard() = default;
+
+   /// Initialize the guard, setting the current device to the passed Device.
+   explicit OptionalDeviceGuard(Device device) : guard_(device) {}
+
+   /// Initialize the guard if a Device is passed; otherwise leave the
+   /// guard uninitialized.
+   explicit OptionalDeviceGuard(std::optional<Device> device) : guard_(device) {}
+
+   /// Constructor for testing only.
+   explicit OptionalDeviceGuard(
+       Device device,
+       const impl::DeviceGuardImplInterface* impl)
+       : guard_(device, impl) {}
+
+   /// Copy is disallowed
+   OptionalDeviceGuard(const OptionalDeviceGuard&) = delete;
+   OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete;
+
+   /// Move is disallowed
+   /// See Note [Explicit initialization of optional fields]
+   /// and Note [Move construction for RAII guards is tricky]
+   /// for rationale.
+   OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete;
+   OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete;
+
+   /// Sets the device to the given one. The specified device must be consistent
+   /// with the device type originally specified during guard construction.
+   void reset_device(at::Device device) {
+     guard_.reset_device(device);
+   }
+
+   /// For testing only
+   void reset_device(
+       at::Device device,
+       const impl::DeviceGuardImplInterface* impl) {
+     guard_.reset_device(device, impl);
+   }
+
+   /// Returns the device that was set at the time the guard was constructed.
+   std::optional<Device> original_device() const {
+     return guard_.original_device();
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via reset_device.
+   std::optional<Device> current_device() const {
+     return guard_.current_device();
+   }
+
+  private:
+   impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl> guard_{};
+ };
+
+ // Note [Whither the DeviceGuard boilerplate]
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // Design note: in principle, we could avoid these wrappers using:
+ //
+ // using DeviceGuard = impl::InlineDeviceGuard<impl::VirtualGuardImpl>;
+ // using OptionalDeviceGuard =
+ //     impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl>;
+ //
+ // But the error messages are worse, and our users can't just look at the
+ // header file to find out what's going on. Furthermore, for specializations
+ // like CUDAStreamGuard, it can be profitable to replace some interfaces with
+ // refined types (e.g., return CUDAStream instead of Stream). So, we eat
+ // the boilerplate and write out the API explicitly.
+
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h ADDED
@@ -0,0 +1,747 @@
+ #pragma once
+
+ #include <c10/core/DeviceType.h>
+ #include <c10/macros/Export.h>
+ #include <cstddef>
+ #include <cstdint>
+ #include <functional>
+ #include <ostream>
+ #include <string>
+
+ namespace c10 {
+
+ // Semantically, each value of BackendComponent identifies a "backend" for our
+ // dispatch. Some functionalities that we may dispatch to are allowed to
+ // register different handlers for each backend. The BackendComponent is then
+ // used to figure out which backend implementation to dispatch to.
+
+ // In implementation terms, the backend component identifies a specific "bit" in
+ // a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom
+ // ~12 "BackendComponent" bits, while the remaining upper bits are assigned to
+ // functionalities. When we encounter a functionality bit that is known to be
+ // customizable per-backend, then we also look at the lower BackendComponent
+ // bits and take the highest bit to determine which backend's implementation to
+ // use.
+
+ // WARNING! If you add a new backend component to the end of this list,
+ // make sure you register it before Meta.
+ // Meta must be at the end so that the meta key in TLS triggers meta kernels.
+ // (But you shouldn't: private use keys should have higher precedence than all
+ // built-in keys)
+
+ // If you add a new (non-privateuse) backend here,
+ // make sure to add an Autograd<Backend> fallthrough kernel
+ // in aten/src/ATen/core/VariableFallbackKernel.cpp
+
+ #define C10_FORALL_BACKEND_COMPONENTS(_, extra) \
+   _(CPU, extra) \
+   _(CUDA, extra) \
+   _(HIP, extra) \
+   _(XLA, extra) \
+   _(MPS, extra) \
+   _(IPU, extra) \
+   _(XPU, extra) \
+   _(HPU, extra) \
+   _(VE, extra) \
+   _(Lazy, extra) \
+   _(MTIA, extra) \
+   _(PrivateUse1, extra) \
+   _(PrivateUse2, extra) \
+   _(PrivateUse3, extra) \
+   _(Meta, extra)
+
+ // WARNING! If we add a new per-backend functionality key that has higher
+ // priority than Autograd, then make sure you update EndOfRuntimeBackendKeys
+
+ #define C10_FORALL_FUNCTIONALITY_KEYS(_) \
+   _(Dense, ) \
+   _(Quantized, Quantized) \
+   _(Sparse, Sparse) \
+   _(SparseCsr, SparseCsr) \
+   _(NestedTensor, NestedTensor) \
+   _(AutogradFunctionality, Autograd)
+
+ enum class BackendComponent : uint8_t {
+
+   // A "backend" is colloquially used to refer to handlers for dispatch
+   // which actually implement the numerics of an operation in question.
+   //
+   // Due to the nature of the enum, these backends are specified in
+   // an ordered way, but for most backends this order is not semantically
+   // meaningful (e.g., it's valid to reorder these backends without changing
+   // semantics). The only situation when backend ordering is meaningful
+   // is when the backend participates in multiple dispatch with another
+   // backend; e.g., CPU and CUDA (cuda must have higher priority).
+
+   // These keys don't correspond to individual kernels.
+   // Instead, they represent the backends that are allowed to override specific
+   // pieces of functionality:
+   // - dense kernels (e.g. DispatchKey::CPU)
+   // - sparse kernels (e.g. DispatchKey::SparseCPU)
+   // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
+   // - autograd kernels (e.g. DispatchKey::AutogradCPU)
+   // We reserve space in the runtime operator table for this full cross product
+   // of
+   // [backends in this enum] x [keys below that are explicitly marked as having
+   // per-backend functionality]
+   //
+   // A meta tensor is a tensor without any data associated with it. (They
+   // have also colloquially been referred to as tensors on the "null" device).
+   // A meta tensor can be used to dry run operators without actually doing any
+   // computation, e.g., add on two meta tensors would give you another meta
+   // tensor with the output shape and dtype, but wouldn't actually add anything.
+
+   InvalidBit = 0,
+ #define DEFINE_BACKEND_COMPONENT(n, _) n##Bit,
+   C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused)
+ #undef DEFINE_BACKEND_COMPONENT
+
+   // Define an alias to represent end of backend dispatch keys.
+   // If you add new backend keys after PrivateUse3, please also update it here.
+   EndOfBackendKeys = MetaBit,
+ };
+
+ // Semantically, a dispatch key identifies a possible "level" in our
+ // dispatch, for which a handler may be registered. Each handler corresponds
+ // to a type of functionality.
+ //
+ // In implementation terms, the dispatch key identifies a specific "bit" in a
+ // DispatchKeySet. Higher bit indexes get handled by dispatching first (because
+ // we "count leading zeros" when we extract the highest priority dispatch
+ // key.)
+ //
+ // Note [DispatchKey Classification]
+ // This enum actually contains several types of keys, which are explained
+ // in more detail further down:
+ // (1) non-customizable backends (e.g. FPGA)
+ // (2) non-customizable functionalities (e.g. Functionalize)
+ // (3) functionalities that are customizable per backend (e.g. Dense, Sparse,
+ //     AutogradFunctionality)
+ // (4) per-backend instances of customizable functionalities (e.g. CPU,
+ //     SparseCPU, AutogradCPU)
+ // (5) alias keys (e.g. CompositeImplicitAutograd)
+ //
+ // Of the categories above, it's important to note:
+ // (a) which keys are assigned individual bits in a DispatchKeySet
+ // (b) which keys are assigned individual slots in the runtime operator table
+ //     ("Runtime keys")
+ //
+ // (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet.
+ // (1), (2) and (4) all get their own dedicated slots in the runtime operator
+ // table.
+
+ // See Note [DispatchKeySet Internal Representation] for more details.
+ //
+ // NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py
+ enum class DispatchKey : uint16_t {
+
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+   // This is not a "real" functionality, but it exists to give us a "nullopt"
+   // element we can return for cases when a DispatchKeySet contains no elements.
+   // You can think of a more semantically accurate definition of DispatchKey as:
+   //
+   //    using DispatchKey = std::optional<RealDispatchKey>
+   //
+   // and Undefined == nullopt. We didn't actually represent
+   // it this way because std::optional<RealDispatchKey> would take two
+   // words, when DispatchKey fits in eight bits.
+
+   Undefined = 0,
+
+   // Define an alias for Undefined to represent CatchAll (long term
+   // this will get eliminated, but for now it's convenient)
+   CatchAll = Undefined,
+
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ //
+   // Every value in the enum (up to EndOfFunctionalityKeys)
+   // corresponds to an individual "functionality" that can be dispatched to.
+   // This is represented in the DispatchKeySet by assigning each of these enum
+   // values to each of the remaining (64 - len(BackendComponent)) bits.
+   //
+   // Most of these functionalities have a single handler assigned to them,
+   // making them "runtime keys" that map to a single slot in the runtime
+   // operator table.
+   //
+   // A few functionalities are allowed to be customizable per backend.
+   // See [Note: Per-Backend Functionality Dispatch Keys] for details.
+
+   // See [Note: Per-Backend Functionality Dispatch Keys]
+   Dense,
+
+   // Below are non-extensible backends.
+   // These are backends that currently don't have their own overrides for
+   // Autograd/Sparse/Quantized kernels,
+   // and we therefore don't waste space in the runtime operator table allocating
+   // space for them.
+   // If any of these backends ever need to customize, e.g., Autograd, then we'll
+   // need to add a DispatchKey::*Bit for them.
+
+   // TODO: put this in BackendComponents
+   FPGA, // Xilinx support lives out of tree at
+         // https://gitlab.com/pytorch-complex/vitis_kernels
+
+   // TODO: put this in BackendComponents
+   // MAIA backend lives out of tree
+   // - test/cpp_extensions/maia_extension.cpp
+   // - test/test_torch.py
+   // - aten/src/ATen/test/extension_backend_test.cpp
+   MAIA,
+
+   Vulkan, // TODO: put this in BackendComponents
+   Metal, // TODO: put this in BackendComponents
+
+   // See [Note: Per-Backend Functionality Dispatch Keys]
+   Quantized,
+
+   // This backend is to support custom RNGs; it lets you go
+   // to a different kernel if you pass in a generator that is not a
+   // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this
+   // key:
+   // 1) set it as a second parameter of at::Generator constructor call in
+   //    the user-defined PRNG class.
+   // 2) use it as a dispatch key while registering custom kernels
+   //    (templatized kernels specialized for user-defined PRNG class)
+   // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp
+   CustomRNGKeyId,
+
+   // TODO: Make Mkldnn a functionality key, so we can give it Meta
+   // support
+   // Here are backends which specify more specialized operators
+   // based on the layout of the tensor. Note that the sparse backends
+   // are one case where ordering matters: sparse multi-dispatches with
+   // the corresponding dense tensors, and must be handled before them.
+   MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp
+              // NB: not to be confused with MKLDNN, which is Caffe2 only
+
+   // See [Note: Per-Backend Functionality Dispatch Keys]
+   Sparse,
+
+   SparseCsr,
+
+   NestedTensor,
+
+   // In some situations, it is not immediately obvious what the correct
+   // backend for a function is, because the function in question doesn't
+   // have any "tensor" arguments. In this case, a BackendSelect function
+   // can be registered to implement the custom determination of the
+   // correct backend.
+   BackendSelect,
+
+   Python,
+
+   // Out-of-core key for Fake Tensor in torchdistx.
+   // See https://pytorch.org/torchdistx/latest/fake_tensor.html
+   // TODO: delete this in favor of Python-implemented fake tensor
+   Fake,
+   // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key
+   // is to insert code after the "autograd subsystem" runs, so this key should
+   // be directly after ADInplaceOrView and all of the autograd keys.
+   FuncTorchDynamicLayerBackMode,
+
+   // Alias and mutation removal.
+   // If some backends want to opt into only alias removal or only mutation
+   // removal, we can consider adding separate keys dedicated to those
+   // individual passes.
+   // See Note [Functionalization Pass In Core] for details.
+   Functionalize,
+
+   // The named dispatch key is set for any tensors with named dimensions.
+   // Although we have a dispatch key for named tensors, for historical reasons,
+   // this dispatch key doesn't do any of the substantive functionality for named
+   // tensors (though, hypothetically, it could!) At the moment, it's just
+   // responsible for letting us give good error messages when operations
+   // don't support named tensors.
+   //
+   // NB: If you ever consider moving named tensor functionality into
+   // this dispatch key, note that it might be necessary to add another dispatch
+   // key that triggers before composite operators, in case a composite operator
+   // has named dimension propagation that doesn't match that of its
+   // constituent parts.
+   // TODO: delete this once torchdim lands in functorch
+   Named,
+
+   // The Conjugate dispatch key is set for any tensors that need to perform
+   // conjugation.
+   // This is implemented at a dispatch level right before any backends run.
+   Conjugate,
+
+   // The Negative dispatch key is set for any tensors that need to perform
+   // negation.
+   // This is implemented at a dispatch level right before any backends run.
+   Negative,
+
+   ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp
+
+   // Note [ADInplaceOrView key]
+   // ADInplaceOrView key is used by inplace or view ops to register a kernel
+   // that does additional setup for future autograd computation.
+   //
+   // 1. For inplace ops this kernel does a version bump
+   // 2. For view ops this kernel does `as_view` setup where we properly set up
+   //    DifferentiableViewMeta on the view tensors.
+   //
+   // For other ops it's a fallthrough kernel, since there's no extra
+   // work to do.
+   //
+   // Note [Dream: skip VariableType kernel when requires_grad=false]
+   //
+   // In an ideal world where we can skip the VariableType kernel for inputs
+   // with requires_grad=false, instead of a fallthrough kernel, we'll
+   // register a kernel shown below to all functional ops as well:
+   //   torch::Tensor my_functional_op(...) {
+   //     {
+   //       // Note for every op in VariableType, you need to go through
+   //       // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the
+   //       // key to the TLS excluded set. If you don't go through it at all,
+   //       // inplace/view ops called through `at::` inside your backend
+   //       // kernel will dispatch to ADInplaceOrView kernels and do a lot
+   //       // of extra work.
+   //       at::AutoDispatchBelowADInplaceOrView guard;
+   //       at::redispatch::my_functional_op(...);
+   //     }
+   //   }
+   // But this work is currently blocked since it adds an extra dispatch
+   // for all ops, and that is non-trivial overhead at the model level (a few
+   // percent). Thus our current approach takes advantage of the fact that
+   // every kernel goes through the VariableType kernel first, and pulls the
+   // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops
+   // up into the `VariableType` kernel. Thus we only add the extra dispatch
+   // to view/inplace ops to minimize its perf impact on real models.
+   ADInplaceOrView,
+   // Note [Alias Dispatch Key : Autograd]
+   // All backends are oblivious to autograd; autograd is handled as a
+   // layer which happens on top of all backends. It inspects the autograd
+   // metadata of all inputs, determines what autograd metadata should be
+   // constructed by the output, and otherwise defers to the backend to
+   // actually do the numeric computation. Autograd contains
+   // the bulk of this logic.
+
+   // Autograd is now an alias dispatch key which by default maps to all
+   // backend-specific autograd keys.
+   // Backend-specific keys allow backends to override the default kernel
+   // registered to the Autograd key as needed.
+   // For example, XLA wants to define autograd for einsum directly.
+   // Registering a custom autograd implementation at the XLA key won't work
+   // because we process Autograd before XLA. This key has higher priority and
+   // gets processed first. You generally should NOT redispatch after handling
+   // autograd here (since that would result in execution of the Autograd
+   // operator, which you're trying to skip). In AutogradXLA implementations,
+   // you are responsible for handling autograd yourself, or deferring to other
+   // operators which support autograd.
+
+   // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and
+   // reserved user-defined backends. All other in-tree backends share the
+   // AutogradOther key. We can add specific autograd keys for those backends
+   // upon request.
+   AutogradOther,
+
+   // See [Note: Per-Backend Functionality Dispatch Keys]
+   AutogradFunctionality,
+
+   // NestedTensor is an example of something that isn't a "real backend"
+   // (because it mostly consists of redispatching kernels)
+   // but it would like to override autograd functionality in C++.
+   // We can handle cases like this by adding an extra functionality key
+   // exclusively for handling autograd for NestedTensor.
+   // lives out of tree at
+   // https://github.com/pytorch/nestedtensor
+   AutogradNestedTensor,
+
+   Tracer,
+
+   // TODO: make Autocast a functionality key
+   // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed
+   // and inputs are saved for backward in the post-autocast type.
+   AutocastCPU,
+   AutocastXPU,
+   AutocastIPU,
+   AutocastHPU,
+   AutocastXLA,
+   // AutocastXLA is only being used for TPUs. XLA GPUs continue to use
+   // AutocastCUDA.
+   AutocastMPS,
+   AutocastCUDA,
+   AutocastPrivateUse1,
+
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+   // There are a number of alternative modes which may want to handle before
+   // autograd; for example, error checking, tracing, profiling or vmap. They
+   // go here.
+
+   FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype]
+
+   // Dispatch key for BatchedTensorImpl wrapping a nested tensor.
+   BatchedNestedTensor,
+
+   FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype]
+
+   // This is the dispatch key for BatchedTensorImpl, which is used to implement
+   // batching rules for vmap.
+   Batched,
+
+   // When we are inside a vmap, all tensors dispatch on this key.
+   // See Note: [DispatchKey::VmapMode usage] for more details.
+   VmapMode,
+
+   FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype]
+
+   // Out-of-core key for Deferred Module Initialization in torchdistx.
+   // See https://pytorch.org/torchdistx/latest/deferred_init.html
+   DeferredInit,
+
+   // Used by Python key logic to know the set of tls on entry to the dispatcher
+   // This kernel assumes it is the top-most non-functorch-related DispatchKey.
+   // If you add a key above, make sure to update the fallback implementation for
+   // this.
+   PythonTLSSnapshot,
+
+   // This key should be at the very top of the dispatcher
+   FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype]
+
+   // TESTING: This is intended to be a generic testing tensor type id.
+   // Don't use it for anything real; its only acceptable use is within a single
+   // process test. Use it by creating a TensorImpl with this DispatchKey, and
+   // then registering operators to operate on this type id. See
+   // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example.
+   TESTING_ONLY_GenericWrapper,
+
+   // TESTING: This is intended to be a generic testing tensor type id.
+   // Don't use it for anything real; its only acceptable use is within a single
+   // process test. Use it by toggling the mode on and off via
+   // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators
+   // to operate on this type id. See
+   // aten/src/ATen/core/dispatch/backend_fallback_test.cpp
+   // for a usage example
+   TESTING_ONLY_GenericMode,
+
+   // This key is used for pre-dispatch tracing in make_fx.
+   // It has lower priority than the PythonDispatcher key
+   // because we use the PythonDispatcher to intercept the key from python,
+   // and avoid having to implement it in C++.
+   PreDispatch,
+
+   // This is a bypass that allows you to skip running the C++ dispatcher
+   // entirely
+   PythonDispatcher,
+
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+   EndOfFunctionalityKeys, // End of functionality keys.
+
+   // ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ //
+   // Here are backends which you think of as traditionally specifying
+   // how to implement operations on some device.
+
+ #define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n,
+
+ #define DEFINE_PER_BACKEND_KEYS(fullname, prefix) \
+   StartOf##fullname##Backends, \
+       C10_FORALL_BACKEND_COMPONENTS( \
+           DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \
+       EndOf##fullname##Backends = prefix##Meta,
+
+   C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS)
+
+ #undef DEFINE_PER_BACKEND_KEYS
+ #undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND
+
+   EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends,
+
+   // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+   // Note [Alias Dispatch Keys]
+   // Alias dispatch keys are synthetic dispatch keys which map to multiple
+   // runtime dispatch keys. Alias keys have precedence, but they are always
+   // lower precedence than runtime keys. If you register a kernel to an
+   // alias key, the kernel may be populated into the mapped runtime keys
+   // during dispatch table computation.
+   // If a runtime dispatch key has multiple kernels from alias keys, which
+   // kernel wins is decided based on the precedence of the alias keys (but
+   // runtime keys always have precedence over alias keys).
+   // Alias keys won't be directly called during runtime.
+
+   // See Note [Alias Dispatch Key : Autograd]
+   Autograd,
+   CompositeImplicitAutograd, // registered at
+   // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp
+
+   // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from
+   // all other alias keysets, and so precedence order doesn't matter.
+   FuncTorchBatchedDecomposition, // registered at
+   // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp
+   // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is
+   // disjoint from all other alias keysets
+   CompositeImplicitAutogradNestedTensor, // registered at
+   // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp
+   CompositeExplicitAutograd, // registered at
+   // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
+   // See Note [CompositeExplicitAutogradNonFunctional Key]
+   CompositeExplicitAutogradNonFunctional, // registered at
+   // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
+
+   // Define an alias key to represent end of alias dispatch keys.
+   // If you add new alias keys after Autograd, please also update it here.
+   StartOfAliasKeys = Autograd,
+   EndOfAliasKeys = CompositeExplicitAutogradNonFunctional,
+
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+   // The aliases exist for backwards compatibility reasons; they shouldn't
+   // be used
+   CPUTensorId = CPU,
+   CUDATensorId = CUDA,
+   DefaultBackend = CompositeExplicitAutograd,
+   PrivateUse1_PreAutograd = AutogradPrivateUse1,
+   PrivateUse2_PreAutograd = AutogradPrivateUse2,
+   PrivateUse3_PreAutograd = AutogradPrivateUse3,
+   Autocast = AutocastCUDA,
+ };
+
+ // Note [Private use DispatchKey]
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // Private use tensor IDs are preallocated tensor type IDs for use in user
+ // applications. Similar to private use fields in HTTP, they can be used
+ // by end users for experimental or private applications, without needing
+ // to "standardize" the tensor ID (which would be done by submitting a PR
+ // to PyTorch to add your type ID).
+ //
+ // Private use tensor IDs are appropriate to use if you want to experiment
+ // with adding a new tensor type (without having to patch PyTorch first) or
+ // have a private, non-distributed application that needs to make use of a
+ // new tensor type. Private use tensor IDs are NOT appropriate to use for
+ // libraries intended to be distributed to further users: please contact
+ // the PyTorch developers to get a type ID registered in this case.
+ //
+ // We provide two classes of private use tensor id: regular DispatchKeys
+ // and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend"
+ // DispatchKeys; if you were adding support for a new type of accelerator, you
+ // would use a backend DispatchKey, and ideally automatically reuse
+ // AutogradOther definitions already defined in PyTorch. AutogradPrivateUse
+ // DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for
+ // tensors that compose multiple internal tensors, and for cases when the
+ // built-in autograd formulas for operators are not appropriate.
+
+ static_assert(
+     (static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) +
+      static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys)) <= 64,
+     "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)"
+     " both map to backend and functionality bits"
+     " into a 64-bit bitmask; you must have less than 64 total entries between them");
+
+ // Check if a DispatchKey is an alias mapping to other runtime keys.
+ constexpr bool isAliasDispatchKey(DispatchKey k) {
+   return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys;
+ }
+
+ // [Note: Per-Backend Functionality Dispatch Keys]
+ // Check if a DispatchKey is a per-backend functionality key.
+ // Any functionalities that can be customized per-backend should be added here.
+ // These keys correspond to functionalities that can be customized individually
+ // per backend. While they only take up one bit in the `DispatchKeySet` bitset,
+ // they map to (# backends) slots in the operator table.
+ // Each of these keys also has a separate set of "runtime keys" in the dispatch
+ // key enum, per backend, which *do* map to the individual operator table slots.
+ // For example, the "Sparse" key maps to an individual bit in the
+ // DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc. all map to individual
+ // slots in the runtime operator table.
+
+ constexpr bool isPerBackendFunctionalityKey(DispatchKey k) {
+   if (k == DispatchKey::Dense || k == DispatchKey::Quantized ||
+       k == DispatchKey::Sparse || k == DispatchKey::SparseCsr ||
+       k == DispatchKey::AutogradFunctionality ||
+       k == DispatchKey::NestedTensor) {
+     return true;
+   } else {
+     return false;
+   }
+ }
+
+ // Note that this includes Undefined in the total count.
+ // BUT EndOfFunctionalityKeys is its own (placeholder) key.
+ // e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3.
+ // In the above example, there are 3 total functionality keys.
+ constexpr uint8_t num_functionality_keys =
+     static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys);
+
+ constexpr uint8_t num_backends =
+     static_cast<uint8_t>(BackendComponent::EndOfBackendKeys);
+
+ // Note [No More Than 16 Backends]
+ // Search for this note to find places in the code where the "no more than 16
+ // backends" invariant is baked in.
+ static_assert(
+     static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) <= 16,
+     "BackendComponent currently only supports <= 16 backends. If we really need to extend this, \
+ there are a few places where this invariant is baked in");
+
+ constexpr uint8_t numPerBackendFunctionalityKeys() {
+   uint8_t count = 0;
+   for (uint8_t k = 0; k <= num_functionality_keys; ++k) {
+     if (isPerBackendFunctionalityKey(static_cast<DispatchKey>(k)))
+       ++count;
+   }
+   return count;
+ }
+
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
+ // See [Note: Trimmed Mobile Dispatch Keys]
+ constexpr uint16_t num_runtime_entries = 8;
+ #else
+ constexpr uint16_t num_runtime_entries = num_functionality_keys +
+     (numPerBackendFunctionalityKeys() * (num_backends - 1));
+ #endif
+
+ // See Note [No More Than 16 Backends]
+ constexpr uint16_t full_backend_mask =
+     (static_cast<uint16_t>(1) << num_backends) - 1;
+
+ C10_API const char* toString(DispatchKey);
+ C10_API const char* toString(BackendComponent);
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
+ C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
+
+ C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
+
+ // Parses a string into a dispatch key.
+ // If the string cannot be correctly parsed, throws an exception.
+ C10_API c10::DispatchKey parseDispatchKey(const std::string& k);
+
+ // These are some convenience identifiers for dispatch keys which are
+ // shorter to type than their long counterparts. Note that some of these
+ // dispatch keys directly correspond to DeviceType; and most APIs that
+ // accept DispatchKey also accept DeviceType; e.g.,
+ // torch::dispatch(torch::kCPU, ...) is also valid.
+ constexpr DispatchKey kAutograd = DispatchKey::Autograd;
+
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
+ // This function relies on the invariant that the dispatch keys between
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
+ // in the same order as `BackendComponent`.
+ constexpr BackendComponent toBackendComponent(DispatchKey k) {
+   if (k >= DispatchKey::StartOfDenseBackends &&
+       k <= DispatchKey::EndOfDenseBackends) {
+     return static_cast<BackendComponent>(
+         static_cast<uint8_t>(k) -
+         static_cast<uint8_t>(DispatchKey::StartOfDenseBackends));
+   } else if (
+       k >= DispatchKey::StartOfQuantizedBackends &&
+       k <= DispatchKey::EndOfQuantizedBackends) {
+     return static_cast<BackendComponent>(
+         static_cast<uint8_t>(k) -
+         static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends));
+   } else if (
+       k >= DispatchKey::StartOfSparseBackends &&
+       k <= DispatchKey::EndOfSparseBackends) {
+     return static_cast<BackendComponent>(
+         static_cast<uint8_t>(k) -
+         static_cast<uint8_t>(DispatchKey::StartOfSparseBackends));
+   } else if (
+       k >= DispatchKey::StartOfSparseCsrBackends &&
+       k <= DispatchKey::EndOfSparseCsrBackends) {
+     return static_cast<BackendComponent>(
+         static_cast<uint8_t>(k) -
+         static_cast<uint8_t>(DispatchKey::StartOfSparseCsrBackends));
+   } else if (
+       k >= DispatchKey::StartOfNestedTensorBackends &&
+       k <= DispatchKey::EndOfNestedTensorBackends) {
+     return static_cast<BackendComponent>(
+         static_cast<uint8_t>(k) -
+         static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends));
+   } else if (
+       k >= DispatchKey::StartOfAutogradFunctionalityBackends &&
+       k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
+     return static_cast<BackendComponent>(
+         static_cast<uint8_t>(k) -
+         static_cast<uint8_t>(
+             DispatchKey::StartOfAutogradFunctionalityBackends));
+   } else {
+     return BackendComponent::InvalidBit;
+   }
+ }
+
+ constexpr DispatchKey toFunctionalityKey(DispatchKey k) {
+   if (k <= DispatchKey::EndOfFunctionalityKeys) {
+     return k;
+   } else if (k <= DispatchKey::EndOfDenseBackends) {
+     return DispatchKey::Dense;
+   } else if (k <= DispatchKey::EndOfQuantizedBackends) {
+     return DispatchKey::Quantized;
+   } else if (k <= DispatchKey::EndOfSparseBackends) {
+     return DispatchKey::Sparse;
+   } else if (k <= DispatchKey::EndOfSparseCsrBackends) {
+     return DispatchKey::SparseCsr;
+   } else if (k <= DispatchKey::EndOfNestedTensorBackends) {
+     return DispatchKey::NestedTensor;
+   } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
+     return DispatchKey::AutogradFunctionality;
+   } else {
+     return DispatchKey::Undefined;
+   }
+ }
+
+ BackendComponent toBackendComponent(DeviceType device_type);
+
+ // Given (DispatchKey::Dense, BackendComponent::CUDABit), returns
+ // DispatchKey::CUDA.
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
+ // This function relies on the invariant that the dispatch keys between
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
+ // in the same order as `BackendComponent`.
+ constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
+     DispatchKey functionality_k,
+     BackendComponent backend_k) {
+   if (functionality_k == DispatchKey::Dense) {
+     return static_cast<DispatchKey>(
+         static_cast<uint8_t>(DispatchKey::StartOfDenseBackends) +
+         static_cast<uint8_t>(backend_k));
+   }
+   if (functionality_k == DispatchKey::Sparse) {
+     return static_cast<DispatchKey>(
+         static_cast<uint8_t>(DispatchKey::StartOfSparseBackends) +
+         static_cast<uint8_t>(backend_k));
+   }
+   if (functionality_k == DispatchKey::SparseCsr) {
+     return static_cast<DispatchKey>(
+         static_cast<uint8_t>(DispatchKey::StartOfSparseCsrBackends) +
+         static_cast<uint8_t>(backend_k));
+   }
+   if (functionality_k == DispatchKey::Quantized) {
+     return static_cast<DispatchKey>(
+         static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends) +
+         static_cast<uint8_t>(backend_k));
+   }
+   if (functionality_k == DispatchKey::NestedTensor) {
+     return static_cast<DispatchKey>(
+         static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends) +
+         static_cast<uint8_t>(backend_k));
+   }
+   if (functionality_k == DispatchKey::AutogradFunctionality) {
+     return static_cast<DispatchKey>(
+         static_cast<uint8_t>(
+             DispatchKey::StartOfAutogradFunctionalityBackends) +
+         static_cast<uint8_t>(backend_k));
+   }
+   return DispatchKey::Undefined;
+ }
+
+ } // namespace c10
+
+ namespace torch {
+ // Expose the constant, but not the TYPE (DispatchKey is an implementation
+ // detail!)
+ // NOLINTNEXTLINE(misc-unused-using-decls)
+ using c10::kAutograd;
+ } // namespace torch
+
+ // NB: You really shouldn't use this instance; this enum is guaranteed
+ // to be pretty small so a regular array should be acceptable.
+ namespace std {
+ template <>
+ struct hash<c10::DispatchKey> {
+   typedef size_t result_type;
+   typedef c10::DispatchKey argument_type;
+
+   size_t operator()(c10::DispatchKey x) const {
+     return static_cast<size_t>(x);
+   }
+ };
+ } // namespace std
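A minimal sketch exercising the constexpr mapping helpers above; since they are constexpr, the round-trips between per-backend runtime keys and their (functionality, backend) building blocks can be checked entirely at compile time:

```cpp
#include <c10/core/DispatchKey.h>

using c10::BackendComponent;
using c10::DispatchKey;

// DispatchKey::CUDA is the Dense functionality instantiated for CUDA.
static_assert(
    c10::toRuntimePerBackendFunctionalityKey(
        DispatchKey::Dense, BackendComponent::CUDABit) == DispatchKey::CUDA);
// The decomposition runs in the other direction, too.
static_assert(c10::toFunctionalityKey(DispatchKey::CUDA) == DispatchKey::Dense);
static_assert(
    c10::toBackendComponent(DispatchKey::CUDA) == BackendComponent::CUDABit);
// Alias keys (Autograd etc.) map to many runtime keys and are never
// dispatched to directly.
static_assert(c10::isAliasDispatchKey(DispatchKey::Autograd));
```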
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h ADDED
@@ -0,0 +1,949 @@
1
+ #pragma once
2
+ #include <c10/core/DispatchKey.h>
3
+ #include <c10/macros/Export.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Metaprogramming.h>
7
+ #include <c10/util/TypeList.h>
8
+ #include <c10/util/llvmMathExtras.h>
9
+ #include <array>
10
+ #include <cstddef>
11
+ #include <cstdint>
12
+ #include <initializer_list>
13
+ #include <iterator>
14
+ #include <ostream>
15
+ #include <string>
16
+ #include <type_traits>
17
+
18
+ namespace c10 {
19
+
20
+ struct FunctionalityOffsetAndMask {
21
+ // empty constructor shouldn't be used; only needed to initialize
22
+ // the array before populating it.
23
+ FunctionalityOffsetAndMask() = default;
24
+ FunctionalityOffsetAndMask(uint16_t offset, uint16_t mask)
25
+ : offset(offset), mask(mask) {}
26
+ // This needs to big enough to cover the size of the operator table.
27
+ uint16_t offset{};
28
+ // See Note [No More Than 16 Backends]
29
+ // This mask needs to be big enough to mask all of the backend bits.
30
+ // We probably don't ever want to have more than 16 backend bits, so uint16_t
31
+ // should be enough.
32
+ uint16_t mask{};
33
+ };
34
+ static_assert(
35
+ c10::num_runtime_entries < 65536,
36
+ "The dispatcher currently only supports up to 2^16 runtime entries");
37
+
38
+ C10_API std::array<FunctionalityOffsetAndMask, num_functionality_keys>
39
+ initializeFunctionalityOffsetsAndMasks();
40
+
41
+ C10_ALWAYS_INLINE static const std::
42
+ array<FunctionalityOffsetAndMask, num_functionality_keys>&
43
+ offsetsAndMasks() {
44
+ static auto offsets_and_masks_ = initializeFunctionalityOffsetsAndMasks();
45
+ return offsets_and_masks_;
46
+ }
47
+
48
+ // A representation of a set of DispatchKeys. A DispatchKeySet contains both
49
+ // "functionality" bits and "backend bits", and every tensor holds its own
50
+ // DispatchKeySet. The Dispatcher implements multiple dispatch by grabbing the
51
+ // keyset on every input tensor, or’ing them together, and dispatching to a
52
+ // specific piece of functionality. The functionality bits are *ordered*. When
53
+ // multiple functionality bits are set, we use the highest priority
54
+ // functionality. Similarly, multiple backend bits can theoretically be set if
55
+ // you call an operator with multiple tensors from difference devices (e.g. CPU
56
+ // and CUDA), although support for mixed device dispatch is limited (the only
57
+ // kernels that gracefully handle mixed device inputs for now are cuda kernels
58
+ // that take in a scalar cpu tensor).
59
+
60
+ // A representation of a set of DispatchKeys. A tensor may have multiple
61
+ // tensor type ids, e.g., a Variable tensor can also be a CPU tensor; the
62
+ // DispatchKeySet specifies what type ids apply. The internal representation is
63
+ // as a 64-bit bit set (this means only 64 tensor type ids are supported).
64
+ //
65
+ // As mentioned above, DispatchKeys are ordered; thus, we can ask questions like
66
+ // "what is the highest priority DispatchKey in the set"? (The set itself is
67
+ // not ordered; two sets with the same ids will always have the ids ordered in
68
+ // the same way.)
69
+ //
70
+ // Note [DispatchKeySet Internal Representation]
71
+ // Internally, dispatch keys are packed into 64-bit DispatchKeySet objects
72
+ // that get passed around at runtime.
73
+ // However, there isn't necessarily a 1-to-1 mapping between bits in the keyset
74
+ // and individual dispatch keys.
75
+ //
76
+ // First: why do we have this distinction, and why not map every dispatch key
77
+ // directly to a bit? This is mostly because we have several types of
78
+ // functionalities that different backends would like to customize. For example,
79
+ // we have:
80
+ // - "Dense": CPU, CUDA, XLA, ... (~12 keys)
81
+ // - "Sparse": SparseCPU, SparseCUDA, ...
82
+ // - "SparseCsr": SparseCsrCPU, SparseCsrCUDA, ...
83
+ // - "Quantized": QuantizedCPU, QuantizedCUDA, QuantizedXLA, ...
84
+ // - "Autograd": AutogradCPU, AutogradCUDA, Autograd XLA, ...
85
+ // The problem is that total number of keys grows quadratically with [#
86
+ // backends] x [# functionalities], making it very difficult to map each key
87
+ // directly to a bit in a bitset without dramatically increasing the size of the
88
+ // bitset over time.
89
+ //
90
+ // The two enums (BackendComponent and DispatchKey) can be divided roughly into
91
+ // 5 categories.
92
+ //
93
+ // (1) "Building block" keys
94
+ // (a) backends: Everything in the BackendComponent enum (e.g. CPUBit,
95
+ // CUDABit) (b) functionalities: (per-backend) functionality-bit DispatchKeys
96
+ // (e.g. AutogradFunctionality, SparseCsr, Sparse, Dense)
97
+ // (2) "Runtime" keys
98
+ // (a) "non-customizable backends" (e.g. FPGA)
99
+ // (b) "non-customizable functionalities" (e.g. Functionalize)
100
+ // (c) "per-backend instances of customizable functionalities" (e.g. CPU,
101
+ // SparseCPU, AutogradCPU)
102
+ // (3) "Alias" DispatchKeys (see Note [Alias Dispatch Keys])
103
+ //
104
+ // (1) Building block keys always correspond to individual bits in a
105
+ // DispatchKeySet. They can also be combined in a DispatchKeySet to form actual
106
+ // runtime keys. e.g.
107
+ // auto dense_cpu_ks = DispatchKeySet({DispatchKey::CPUBit,
108
+ // DispatchKey::Dense});
109
+ // // The keyset has the runtime dense-cpu key.
110
+ // dense_cpu_ks.has(DispatchKey::CPU);
111
+ // // And it contains the building block keys too.
112
+ // dense_cpu_ks.has(DispatchKey::CPUBit);
113
+ // dense_cpu_ks.has(DispatchKey::Dense);
114
+ //
115
+ // Not every backend and not every functionality counts as a "building block
116
+ // key". This is mostly to give us more levers to pull in the design space.
117
+ // Backend keys and functionality keys that count as "building blocks" will
118
+ // contribute to a full cross product of functionality that can be overriden.
119
+ //
120
+ // For example, right now we have at least 12 "backend" building
121
+ // blocks (CPU, CUDA, XLA, ...) and at least 5 "functionality"
122
+ // building blocks (Dense, Sparse, SparseCsr, Quantized,
123
+ // AutogradFunctionality, ...). These keys together allow every
124
+ // dispatcher operator to be customized in up to 12*4 different
125
+ // ways. Each of those requires a slot in the operator table of every
126
+ // dispatcher operator. Not every piece of functionality necessarily
127
+ // needs to be customizable per-backend, and not every backend
128
+ // necessarily needs to be able to customize every type of
129
+ // functionality.
130
+ //
131
+ //
132
+ // (2) Every runtime key corresponds directly to a slot in an operator's runtime
133
+ // dispatch table, and you can directly register kernels to a runtime dispatch
134
+ // key.
135
+ //
136
+ // For per-backend functionalities like "Dense" or "AutogradFunctionality",
137
+ // you can think of the corresponding runtime dispatch keys as "instances" of
138
+ // that functionality, per backend. E.g. "CPU", "CUDA", "XLA", etc. are all
139
+ // runtime instances of the "Dense" building block key.
140
+
141
+ // (2a) and (2b) are represented identically in the DispatchKeySet logic:
142
+ // - backend-agnostic functionalities (e.g. FuncTorchBatched) are NOT
143
+ // customizable per backend.
144
+ // In order to do so, we'd need to promote it to a per-backend functionality
145
+ // "building block" key.
146
+ // - non-customizable backends (e.g. FPGA) can NOT customize existing
147
+ // functionality like Sparse, Autograd, etc.
148
+ // In order to do so, we'd need to promote it to a backend "building block"
149
+ // key.
150
+ //
151
+ // In both cases, these keys directly correspond to runtime slots in the
152
+ // operator table.
153
+ //
154
+ //
155
+ // (3) "Alias" keys
156
+ // See Note [Alias Dispatch Keys]
157
+ //
158
+ // Final note: for anyone making future changes to the Dispatcher +
159
+ // DispatchKeySet internals, there's a closed PR with a basic
160
+ // python-implementation of the Dispatcher that might be useful in quickly
161
+ // testing out and validating changes. See it at
162
+ // https://github.com/pytorch/pytorch/pull/68743
163
+
+ // An undefined tensor is one with an empty tensor type set.
+ class DispatchKeySet final {
+  public:
+   enum Full { FULL };
+   enum FullAfter { FULL_AFTER };
+   enum Raw { RAW };
+
+   // NB: default constructor representation as zero is MANDATORY as
+   // use of DispatchKeySet in TLS requires this.
+   constexpr DispatchKeySet() = default;
+
+   constexpr DispatchKeySet(Full)
+       : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}
+
+   constexpr DispatchKeySet(FullAfter, DispatchKey t)
+       // LSB after t are OK, but not t itself.
+       // "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
+       // Quantized > Dense). But backends don't really have an ordering.
+       // Therefore, we're enforcing that FullAfter can only be used on
+       // "functionality" keys.
+       : repr_(
+             (1ULL
+              << (num_backends + static_cast<uint8_t>(toFunctionalityKey(t)) -
+                  1)) -
+             1) {
+     *this = add(DispatchKey::PythonDispatcher);
+   }
+
+   // Public version of DispatchKeySet(uint64_t) API; external users
+   // must be explicit when they do this!
+   constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
+
+   constexpr explicit DispatchKeySet(BackendComponent k) {
+     if (k == BackendComponent::InvalidBit) {
+       repr_ = 0;
+     } else {
+       repr_ = 1ULL << (static_cast<uint8_t>(k) - 1);
+     }
+   }
+
+   constexpr explicit DispatchKeySet(DispatchKey k) {
+     // NOLINTNEXTLINE(bugprone-branch-clone)
+     if (k == DispatchKey::Undefined) {
+       // Case 1: handle Undefined specifically
+       repr_ = 0;
+     } else if (k <= DispatchKey::EndOfFunctionalityKeys) {
+       // Case 2: handle "functionality-only" keys.
+       // These keys have a functionality bit set, but no backend bits.
+       // These can technically be either:
+       // - valid runtime keys (e.g. DispatchKey::AutogradOther,
+       //   DispatchKey::FuncTorchBatched, etc)
+       // - "building block" keys that aren't actual runtime keys (e.g.
+       //   DispatchKey::Dense or Sparse)
+       uint64_t functionality_val = 1ULL
+           << (num_backends + static_cast<uint8_t>(k) - 1);
+       repr_ = functionality_val;
+     } else if (k <= DispatchKey::EndOfRuntimeBackendKeys) {
+       // Case 3: "runtime" keys that have a functionality bit AND a backend
+       // bit, i.e. the runtime instances of "per-backend functionality" keys.
+       // First compute which bit to flip for the functionality.
+       auto functionality_k = toFunctionalityKey(k);
+       // The - 1 is because Undefined is technically a "functionality" that
+       // doesn't show up in the bitset. So e.g. Dense is technically the
+       // second functionality, but the lowest functionality bit.
+       uint64_t functionality_val = 1ULL
+           << (num_backends + static_cast<uint8_t>(functionality_k) - 1);
+
+       // Then compute which bit to flip for the backend. For example, given
+       // DispatchKey::CPU, we should set:
+       // - the Dense functionality bit
+       // - the CPUBit backend bit
+       auto backend_k = toBackendComponent(k);
+       uint64_t backend_val = backend_k == BackendComponent::InvalidBit
+           ? 0
+           : 1ULL << (static_cast<uint8_t>(backend_k) - 1);
+       repr_ = functionality_val + backend_val;
+     } else {
+       // At this point, we should have covered every case except for alias
+       // keys. Technically it would be possible to add alias dispatch keys to
+       // a DispatchKeySet, but the semantics are a little confusing and this
+       // currently isn't needed anywhere.
+       repr_ = 0;
+     }
+   }
+
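+   // Worked example of the constructors above (a sketch; exact bit positions
+   // depend on the enum order in DispatchKey.h):
+   //
+   //   // DispatchKey::CPU is the runtime "Dense" key for the CPU backend,
+   //   // so its keyset carries one functionality bit and one backend bit:
+   //   constexpr DispatchKeySet cpu_ks(DispatchKey::CPU);
+   //   static_assert(cpu_ks.has_backend(BackendComponent::CPUBit), "");
+   //   static_assert(cpu_ks.has_all(DispatchKeySet(DispatchKey::Dense)), "");
+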
+   constexpr uint64_t keys_to_repr(std::initializer_list<DispatchKey> ks) {
+     uint64_t repr = 0;
+     for (auto k : ks) {
+       repr |= DispatchKeySet(k).repr_;
+     }
+     return repr;
+   }
+
+   constexpr uint64_t backend_bits_to_repr(
+       std::initializer_list<BackendComponent> ks) {
+     uint64_t repr = 0;
+     for (auto k : ks) {
+       repr |= DispatchKeySet(k).repr_;
+     }
+     return repr;
+   }
+
+   explicit constexpr DispatchKeySet(std::initializer_list<DispatchKey> ks)
+       : repr_(keys_to_repr(ks)) {}
+
+   explicit constexpr DispatchKeySet(std::initializer_list<BackendComponent> ks)
+       // Note: for some reason, putting this logic directly in the constructor
+       // appears to fail to compile on CUDA 10.1.
+       // See an example internal failure at
+       // https://www.internalfb.com/intern/skycastle/run/76561193669136035/artifact/actionlog.76561193742069401.stderr
+       : repr_(backend_bits_to_repr(ks)) {}
+
+   // Test if a DispatchKey is in the set
+   inline bool has(DispatchKey t) const {
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(t != DispatchKey::Undefined);
+     return has_all(DispatchKeySet(t));
+   }
+   constexpr bool has_backend(BackendComponent t) const {
+     return has_all(DispatchKeySet(t));
+   }
+
+   // Given a DispatchKeySet of functionality keys and (potentially) backend
+   // keys, tests if all of them are in the current set.
+   constexpr bool has_all(DispatchKeySet ks) const {
+     return static_cast<bool>((repr_ & ks.repr_) == ks.repr_);
+   }
+
+   // Given a DispatchKeySet of functionality keys and (potentially) backend
+   // keys, tests if any of them are in the current set. This could technically
+   // be pretty easily implemented using has(). It is strictly a perf
+   // optimization though. There are many places in the code base where we want
+   // to test for multiple functionality keys together. HOWEVER, runtime
+   // per-backend functionality keys aren't allowed to be used with this
+   // function, because you can end up with weird results. e.g.
+   // DispatchKeySet(DispatchKey::AutogradCPU).has_any(DispatchKeySet(DispatchKey::CPU))
+   // would return true.
+   inline bool has_any(DispatchKeySet ks) const {
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+         // Either there are no backend bits in the input keyset
+         ((ks.repr_ & full_backend_mask) == 0) ||
+         // or there are no per-backend-functionality bits
+         // See [Note: Per-Backend Functionality Dispatch Keys]
+         ((ks &
+           DispatchKeySet({
+               DispatchKey::Dense,
+               DispatchKey::Quantized,
+               DispatchKey::Sparse,
+               DispatchKey::SparseCsr,
+               DispatchKey::AutogradFunctionality,
+           })
+               .repr_) == 0));
+     return static_cast<bool>((repr_ & ks.repr_) != 0);
+   }
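+   // Usage sketch for the membership tests above (illustrative):
+   //
+   //   // AutogradCPU decomposes into (AutogradFunctionality, CPUBit), and
+   //   // has_all() requires BOTH bits:
+   //   constexpr DispatchKeySet cpu_ks(DispatchKey::CPU);
+   //   static_assert(!cpu_ks.has_all(DispatchKeySet(DispatchKey::AutogradCPU)), "");
+   //   // has_any() would see the shared CPU backend bit and report true,
+   //   // which is exactly the "weird result" the debug assert above forbids
+   //   // for per-backend functionality keys.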
+   // Test if DispatchKeySet is a superset of ks.
+   bool isSupersetOf(DispatchKeySet ks) const {
+     return (repr_ & ks.repr_) == ks.repr_;
+   }
+   // Perform set union
+   constexpr DispatchKeySet operator|(DispatchKeySet other) const {
+     return DispatchKeySet(repr_ | other.repr_);
+   }
+   // Perform set intersection
+   constexpr DispatchKeySet operator&(DispatchKeySet other) const {
+     return DispatchKeySet(repr_ & other.repr_);
+   }
+   // Compute the set difference self - other,
+   // but ONLY for the functionality keys.
+   // Any backend bits set on self will remain unchanged.
+   // See Note [Removing keys from DispatchKeySet Only Affects Functionality
+   // Keys]
+   constexpr DispatchKeySet operator-(DispatchKeySet other) const {
+     return DispatchKeySet(repr_ & (full_backend_mask | ~other.repr_));
+   }
+
+   // Compute self ^ other
+   constexpr DispatchKeySet operator^(DispatchKeySet other) const {
+     return DispatchKeySet(repr_ ^ other.repr_);
+   }
+   bool operator==(DispatchKeySet other) const {
+     return repr_ == other.repr_;
+   }
+   bool operator!=(DispatchKeySet other) const {
+     return repr_ != other.repr_;
+   }
+   // Add a DispatchKey to the DispatchKey set. Does NOT mutate;
+   // returns the extended DispatchKeySet!
+   C10_NODISCARD constexpr DispatchKeySet add(DispatchKey t) const {
+     return *this | DispatchKeySet(t);
+   }
+   C10_NODISCARD constexpr DispatchKeySet add(DispatchKeySet ks) const {
+     return *this | ks;
+   }
+
+   // Remove a DispatchKey from the DispatchKey set.
+   // This is generally not an operation you should be doing
+   // (it's used to implement the printing overload, operator<<)
+   //
+   // Note [Removing keys from DispatchKeySet Only Affects Functionality Keys]
+   // Only functionality bits are allowed to be removed from a keyset.
+   // For now, we're only allowing removal of "functionality bits" from the
+   // keyset, which is specifically needed by the fallthrough key calculation
+   // logic. Why is removing backend bits problematic? Consider this example:
+   //
+   //   DispatchKeySet([DispatchKey.CPU, DispatchKey.AutogradCUDA,
+   //     DispatchKey.CUDA]).remove(DispatchKey.AutogradCUDA)
+   //   DispatchKeySet([DispatchKey.CPU,
+   //     DispatchKey.AutogradCUDA]).remove(DispatchKey.AutogradCUDA)
+   //
+   // What do we want to happen?
+   // Technically, we'd like it to be true that after removal,
+   // the first keyset still has the CUDA dispatch key while the second
+   // doesn't. Unfortunately there's no way to represent that, because the two
+   // keysets are represented the same way internally:
+   //   functionality bits: Autograd, Dense
+   //   backend bits: CPU, CUDA
+   //
+   // Instead, remove(DispatchKey.AutogradCPU) will only remove the "Autograd"
+   // bit from the bitset.
+   C10_NODISCARD constexpr DispatchKeySet remove(DispatchKey t) const {
+     return DispatchKeySet(
+         repr_ & ~(DispatchKeySet(t).repr_ & ~full_backend_mask));
+   }
+   // You're allowed to remove a backend bit from a DispatchKeySet,
+   // but you have to be explicit about it (remove_backend() instead of
+   // remove()).
+   constexpr DispatchKeySet remove_backend(BackendComponent b) const {
+     return DispatchKeySet(repr_ & ~(DispatchKeySet(b).repr_));
+   }
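+   // Worked example of the remove() semantics described above (sketch):
+   //
+   //   constexpr auto ks =
+   //       DispatchKeySet({DispatchKey::CPU, DispatchKey::AutogradCPU});
+   //   constexpr auto r = ks.remove(DispatchKey::AutogradCPU);
+   //   // Only the Autograd functionality bit is cleared; the CPU backend bit
+   //   // (and hence DispatchKey::CPU == Dense + CPUBit) survives:
+   //   static_assert(r.has_all(DispatchKeySet(DispatchKey::CPU)), "");
+   //   static_assert(!r.has_all(DispatchKeySet(DispatchKey::AutogradCPU)), "");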
+   // Is the set empty? (AKA undefined tensor)
+   bool empty() const {
+     return repr_ == 0;
+   }
+   uint64_t raw_repr() {
+     return repr_;
+   }
+
+   DispatchKey highestFunctionalityKey() const {
+     auto functionality_idx = indexOfHighestBit();
+     // This means that none of the functionality bits were set.
+     if (functionality_idx < num_backends)
+       return DispatchKey::Undefined;
+     // The first num_backends bits in the keyset don't correspond to real
+     // dispatch keys.
+     return static_cast<DispatchKey>(functionality_idx - num_backends);
+   }
+
+   // This is similar to toBackendComponent(DispatchKey), but less restrictive.
+   // toBackendComponent() errors out if the key that it was passed has no
+   // backend bits, which is useful for error checking. We need a version of
+   // that here that can also handle "fake" backends like FPGA, because they
+   // need to map to the AutogradOther key. For those backends, we return
+   // BackendComponent::InvalidBit.
+   BackendComponent highestBackendKey() const {
+     // mask out functionality bits
+     auto backend_idx =
+         DispatchKeySet(repr_ & full_backend_mask).indexOfHighestBit();
+     // all zeros across the backend bits means that no backend bits are set.
+     if (backend_idx == 0)
+       return BackendComponent::InvalidBit;
+     return static_cast<BackendComponent>(backend_idx);
+   }
+
+   // returns the DispatchKey of highest priority in the set.
+   DispatchKey highestPriorityTypeId() const {
+     auto functionality_k = highestFunctionalityKey();
+     if (isPerBackendFunctionalityKey(functionality_k)) {
+       return toRuntimePerBackendFunctionalityKey(
+           functionality_k, highestBackendKey());
+     }
+     return functionality_k;
+   }
+
+   // Returns the index of the most-significant bit in the keyset.
+   // This is used as part of the calculation into the operator table to get:
+   // - the highest "functionality" bit in the keyset.
+   // - the highest "backend" bit in the keyset.
+   uint8_t indexOfHighestBit() const {
+     return 64 - llvm::countLeadingZeros(repr_);
+   }
+
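+   // Worked example of the 1-based convention above (sketch):
+   //
+   //   // For repr_ == 0b0100, llvm::countLeadingZeros returns 61, so
+   //   // indexOfHighestBit() == 64 - 61 == 3: indices are 1-based, and 0 is
+   //   // reserved for the empty set (countLeadingZeros(0) == 64).
+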
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
+   // [Note: Trimmed Mobile Dispatch Keys]
+   /**
+    * The method below maps the dispatch key in the enum DispatchKey to an
+    * integer index in the dispatchTable_ array in OperatorEntry. The array
+    * is trimmed for mobile to reduce peak memory usage since it's
+    * unnecessary to reserve additional space for dispatch keys that will
+    * never be used on mobile.
+    */
+   int getDispatchTableIndexForDispatchKeySet() const {
+     auto dk = highestPriorityTypeId();
+     switch (dk) {
+       case DispatchKey::Undefined:
+         return 0;
+       case DispatchKey::CPU:
+         return 1;
+       case DispatchKey::QuantizedCPU:
+         return 2;
+       case DispatchKey::SparseCPU:
+         return 3;
+       case DispatchKey::BackendSelect:
+         return 4;
+       case DispatchKey::ADInplaceOrView:
+         return 5;
+       case DispatchKey::AutogradOther:
+         return 6;
+       case DispatchKey::AutogradCPU:
+         return 7;
+       default:
+         return -1;
+     }
+   }
+ #else
+   // Returns the index in the operator table of the highest priority key in
+   // the keyset. Note that we could in theory implement this using
+   // highestPriorityTypeId(), but this code is very hotpath and we can do it
+   // faster without it.
+   int getDispatchTableIndexForDispatchKeySet() const {
+     auto functionality_idx =
+         DispatchKeySet(repr_ >> num_backends).indexOfHighestBit();
+     auto offset_and_mask = offsetsAndMasks()[functionality_idx];
+     // Mask the functionality bits out first, then right-shift by 1.
+     // right-shifting by 1 because everything is zero-indexed.
+     // E.g. 000001 (CPU) should give us an offset of 0, 000010 (CUDA) should
+     // give us an offset of 1, etc.
+     auto backend_idx =
+         DispatchKeySet((repr_ & offset_and_mask.mask) >> 1).indexOfHighestBit();
+     return offset_and_mask.offset + backend_idx;
+   }
+ #endif
+
+   // returns the "index" of the highest priority backend in the keyset.
+   // This is pretty similar to getBackendKey(), but:
+   // - It's hotpath code (part of the runtime bitset calculation)
+   // - It returns an integer index, not an enum value
+   // - Everything is shifted to the right by 1.
+   //   BackendComponent::InvalidBit is technically the lowest enum value,
+   //   but it isn't included in the runtime table. So CPUBit = 1, CUDABit = 2,
+   //   etc.
+   uint64_t getBackendIndex() const {
+     return DispatchKeySet((repr_ & full_backend_mask) >> 1).indexOfHighestBit();
+   }
+
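+   // Worked sketch of the indexing above (illustrative; offsetsAndMasks() is
+   // the per-functionality offset/mask table used by the non-mobile path):
+   //
+   //   // If the highest functionality bit is a per-backend one (say Dense)
+   //   // and the highest backend bit is the k-th backend, the slot is
+   //   //   offsetsAndMasks()[functionality_idx].offset + k
+   //   // Per-backend functionalities thus occupy num_backends consecutive
+   //   // slots; backend-agnostic functionalities have a zero mask, so
+   //   // backend_idx is 0 and they occupy a single slot.
+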
+  private:
+   constexpr DispatchKeySet(uint64_t repr) : repr_(repr) {}
+   uint64_t repr_ = 0;
+
+  public:
+   // STL iterator for DispatchKeySet. Iterates through all runtime
+   // DispatchKeys in the set. The iterator is only invalidated by the
+   // destruction of the underlying DispatchKeySet, as the iterator stores a
+   // pointer to the raw representation of the DispatchKeySet. Note: When we
+   // encounter a per-backend functionality (e.g. Dense or Sparse), we will
+   // iterate through EVERY backend in the keyset, for that functionality.
+   // For example, if the next functionality key to iterate over is Autograd,
+   // and the backend bits in the keyset correspond to
+   // [BackendComponent::CPUBit, BackendComponent::CUDABit], then the next two
+   // keys we return will be DispatchKey::AutogradCPU, DispatchKey::AutogradCUDA
+   // (CPU first because it has lower precedence than CUDA in DispatchKey.h).
+   class iterator {
+    public:
+     using self_type = iterator;
+     using iterator_category = std::input_iterator_tag;
+     using value_type = DispatchKey;
+     using difference_type = ptrdiff_t;
+     using reference = value_type&;
+     using pointer = value_type*;
+     // final mask value should mask out the entire keyset
+     static const uint8_t end_iter_mask_val =
+         num_backends + num_functionality_keys;
+     // final key value should be the last DispatchKey
+     static const uint8_t end_iter_key_val = num_functionality_keys;
+
+     // current_dispatchkey_idx_ will iterate through all functionality bits.
+     // current_backendcomponent_idx_ will iterate through all backend bits.
+     explicit iterator(
+         const uint64_t* data_ptr,
+         uint8_t next_functionality = num_backends,
+         uint8_t next_backend = 0)
+         : data_ptr_(data_ptr),
+           next_functionality_(next_functionality),
+           next_backend_(next_backend),
+           // These are in an invalid state at construction time, and set by
+           // the first increment call
+           current_dispatchkey_idx_(end_iter_key_val),
+           current_backendcomponent_idx_(end_iter_key_val) {
+       // Go to the first key in the set
+       TORCH_INTERNAL_ASSERT(
+           next_functionality_ >= num_backends,
+           "num_backends=",
+           static_cast<uint32_t>(num_backends),
+           "next_functionality_=",
+           static_cast<uint32_t>(next_functionality_));
+       ++(*this);
+     }
+
+     C10_API self_type& operator++();
+
+     self_type operator++(int) {
+       self_type previous_iterator = *this;
+       ++(*this);
+       return previous_iterator;
+     }
+
+     bool operator==(const self_type& rhs) const {
+       return next_functionality_ == rhs.next_functionality_ &&
+           current_dispatchkey_idx_ == rhs.current_dispatchkey_idx_ &&
+           next_backend_ == rhs.next_backend_ &&
+           current_backendcomponent_idx_ == rhs.current_backendcomponent_idx_;
+     }
+     bool operator!=(const self_type& rhs) const {
+       return next_functionality_ != rhs.next_functionality_ ||
+           current_dispatchkey_idx_ != rhs.current_dispatchkey_idx_ ||
+           next_backend_ != rhs.next_backend_ ||
+           current_backendcomponent_idx_ != rhs.current_backendcomponent_idx_;
+     }
+     DispatchKey operator*() const {
+       auto functionality_key =
+           static_cast<DispatchKey>(current_dispatchkey_idx_);
+       if (isPerBackendFunctionalityKey(functionality_key)) {
+         auto next_key = toRuntimePerBackendFunctionalityKey(
+             functionality_key,
+             static_cast<BackendComponent>(current_backendcomponent_idx_));
+         // We expect all of the Dense, Sparse, Quantized, and Autograd keys
+         // to be ordered the same way with respect to their backends
+         TORCH_INTERNAL_ASSERT(
+             toBackendComponent(next_key) ==
+                 static_cast<BackendComponent>(current_backendcomponent_idx_),
+             "Tried to map functionality key ",
+             toString(functionality_key),
+             " and backend bit ",
+             toString(
+                 static_cast<BackendComponent>(current_backendcomponent_idx_)),
+             " to a runtime key, but ended up with ",
+             toString(next_key),
+             ". This can happen if the order of the backend dispatch keys in DispatchKey.h isn't consistent.",
+             " Please double check that enum for inconsistencies.");
+         return next_key;
+       } else {
+         return functionality_key;
+       }
+     }
+
+    private:
+     const uint64_t* data_ptr_;
+     uint8_t next_functionality_;
+     uint8_t next_backend_;
+     uint8_t current_dispatchkey_idx_;
+     uint8_t current_backendcomponent_idx_;
+   };
+
+  public:
+   // Returns iterator to the first key in the set. If no keys are in the
+   // set, then will return the end iterator.
+   iterator begin() const {
+     return iterator(&repr_);
+   }
+
+   // We do not need to iterate beyond EndOfFunctionalityKeys so we will treat
+   // this as the end iterator.
+   iterator end() const {
+     return iterator(&repr_, iterator::end_iter_mask_val);
+   }
+ };
+
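+ // Iteration sketch (illustrative): per-backend functionalities are expanded
+ // once per backend bit in the set.
+ //
+ //   DispatchKeySet ks({DispatchKey::AutogradFunctionality,
+ //                      DispatchKey::FuncTorchBatched});
+ //   ks = ks | DispatchKeySet(BackendComponent::CPUBit) |
+ //       DispatchKeySet(BackendComponent::CUDABit);
+ //   for (DispatchKey k : ks) {
+ //     // visits DispatchKey::AutogradCPU and DispatchKey::AutogradCUDA (CPU
+ //     // before CUDA, as noted above) plus DispatchKey::FuncTorchBatched
+ //   }
+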
+ C10_API std::string toString(DispatchKeySet);
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
+
+ C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
+   return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
+ }
+
+ // Alias key DispatchKey::Autograd maps to
+ // (autograd_dispatch_keyset x full_backend_mask)
+ // NB: keys in this set also get associated with CompositeImplicitAutograd
+ //
+ // Note [autograd_dispatch_keyset Does Not Include Backend Bits]
+ // We don't want to include any backend bits (BackendComponent::CPUBit, etc)
+ // directly in autograd_dispatch_keyset.
+ // Why? keysets like autograd_dispatch_keyset are commonly used to remove
+ // autograd keys from a DispatchKeySet throughout the code base. However, you
+ // are only allowed to remove functionality bits from a keyset, not backend
+ // bits. See Note [Removing keys from DispatchKeySet Only Affects
+ // Functionality Keys] for details. To be consistent and avoid confusion,
+ // we're explicitly setting up autograd_dispatch_keyset to not have any
+ // backend bits.
+ constexpr DispatchKeySet autograd_dispatch_keyset = DispatchKeySet({
+     DispatchKey::AutogradFunctionality,
+     DispatchKey::AutogradOther,
+     DispatchKey::AutogradNestedTensor,
+ });
+
+ constexpr DispatchKeySet autocast_dispatch_keyset = DispatchKeySet({
+     DispatchKey::AutocastCPU,
+     DispatchKey::AutocastMPS,
+     DispatchKey::AutocastCUDA,
+     DispatchKey::AutocastXPU,
+     DispatchKey::AutocastIPU,
+     DispatchKey::AutocastHPU,
+     DispatchKey::AutocastXLA,
+     DispatchKey::AutocastPrivateUse1,
+ });
+
+ // See Note [TLS Initialization]
+ constexpr DispatchKeySet default_included_set = DispatchKeySet({
+     DispatchKey::BackendSelect,
+     DispatchKey::ADInplaceOrView,
+ });
+
+ constexpr DispatchKeySet default_excluded_set = DispatchKeySet({
+     DispatchKey::AutocastCPU,
+     DispatchKey::AutocastMPS,
+     DispatchKey::AutocastCUDA,
+     DispatchKey::AutocastXPU,
+     DispatchKey::AutocastIPU,
+     DispatchKey::AutocastHPU,
+     DispatchKey::AutocastXLA,
+     DispatchKey::AutocastPrivateUse1,
+ });
+
+ constexpr DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView =
+     autograd_dispatch_keyset | DispatchKeySet(DispatchKey::ADInplaceOrView);
+
+ constexpr DispatchKeySet python_ks = DispatchKeySet({
+     DispatchKey::Python,
+     DispatchKey::PythonTLSSnapshot,
+ });
+
+ constexpr DispatchKeySet sparse_ks = DispatchKeySet(DispatchKey::Sparse);
+
+ constexpr DispatchKeySet sparse_csr_ks = DispatchKeySet(DispatchKey::SparseCsr);
+
+ constexpr DispatchKeySet mkldnn_ks = DispatchKeySet(DispatchKey::MkldnnCPU);
+
+ // backend dispatch keys that map to DispatchKey::AutogradOther
+ // NB: keys in this set also get associated with CompositeImplicitAutograd
+ constexpr DispatchKeySet autogradother_backends =
+     DispatchKeySet(
+         // HIP and VE aren't in this list: they now have their own backend
+         // bits, which means that they can now have their own Autograd keys.
+         // Technically, HIP will now redispatch to its own custom AutogradHIP
+         // slot in the runtime table.
+         {DispatchKey::FPGA,
+          DispatchKey::MAIA,
+          DispatchKey::Vulkan,
+          DispatchKey::Metal,
+          DispatchKey::CustomRNGKeyId,
+          DispatchKey::MkldnnCPU,
+          // Sparse and Quantized backends also live here.
+          DispatchKey::Sparse,
+          DispatchKey::SparseCsr,
+          DispatchKey::Quantized})
+     // Including the backend bits because this keyset is used during op
+     // registration, which requires looping over all runtime autogradother
+     // backend keys.
+     | DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
+
+ // The set of dispatch keys that come after autograd
+ // n.b. this relies on the fact that AutogradOther is currently the lowest
+ // Autograd key
+ constexpr DispatchKeySet after_autograd_keyset =
+     DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::AutogradOther);
+
+ // The set of dispatch keys that come after ADInplaceOrView
+ constexpr DispatchKeySet after_ADInplaceOrView_keyset = DispatchKeySet(
+     DispatchKeySet::FULL_AFTER,
+     c10::DispatchKey::ADInplaceOrView);
+
+ // The set of dispatch keys that come after Functionalize
+ constexpr DispatchKeySet after_func_keyset =
+     DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::Functionalize)
+         .remove(
+             // NOTE: we also need to remove ADInplaceOrView from the keyset
+             // when redispatching after the func kernels. This is because
+             // we're not calling the same op; we originally called an inplace
+             // op, and now we aren't. The original key calculation figured out
+             // which keys were Fallthrough based on the inplace op. That means
+             // that it did not include the ADInPlaceOrView kernel as a
+             // fallthrough key. However, we WANT the ADInPlaceOrView kernel to
+             // be ignored now that we're calling an out-of-place op.
+             // Re-invoking Dispatcher::call would re-run the Fallthrough key
+             // calculation and get us that, but at::redispatch is more
+             // performant. We can get away with it by explicitly removing the
+             // key here.
+             c10::DispatchKey::ADInplaceOrView);
+
+ constexpr DispatchKeySet backend_bitset_mask =
+     DispatchKeySet(DispatchKeySet::RAW, (1ULL << num_backends) - 1);
+
+ constexpr auto inplace_or_view_ks =
+     DispatchKeySet(DispatchKey::ADInplaceOrView);
+ constexpr auto autograd_cpu_ks = DispatchKeySet(DispatchKey::AutogradCPU);
+ constexpr auto autograd_ipu_ks = DispatchKeySet(DispatchKey::AutogradIPU);
+ constexpr auto autograd_xpu_ks = DispatchKeySet(DispatchKey::AutogradXPU);
+ constexpr auto autograd_cuda_ks = DispatchKeySet(DispatchKey::AutogradCUDA);
+ constexpr auto autograd_xla_ks = DispatchKeySet(DispatchKey::AutogradXLA);
+ constexpr auto autograd_lazy_ks = DispatchKeySet(DispatchKey::AutogradLazy);
+ constexpr auto autograd_meta_ks = DispatchKeySet(DispatchKey::AutogradMeta);
+ constexpr auto autograd_mps_ks = DispatchKeySet(DispatchKey::AutogradMPS);
+ constexpr auto autograd_hpu_ks = DispatchKeySet(DispatchKey::AutogradHPU);
+ constexpr auto autograd_privateuse1_ks =
+     DispatchKeySet(DispatchKey::AutogradPrivateUse1);
+ constexpr auto autograd_privateuse2_ks =
+     DispatchKeySet(DispatchKey::AutogradPrivateUse2);
+ constexpr auto autograd_privateuse3_ks =
+     DispatchKeySet(DispatchKey::AutogradPrivateUse3);
+ constexpr auto autograd_other_ks = DispatchKeySet(DispatchKey::AutogradOther);
+ constexpr auto autograd_nested =
+     DispatchKeySet(DispatchKey::AutogradNestedTensor);
+ // keyset corresponding to functorch keys that have their own dedicated
+ // TensorImpl subclass.
+ constexpr auto functorch_transforms_ks = DispatchKeySet(
+     {DispatchKey::FuncTorchBatched,
+      DispatchKey::FuncTorchVmapMode,
+      DispatchKey::Batched,
+      DispatchKey::VmapMode,
+      DispatchKey::FuncTorchGradWrapper});
+
+ constexpr auto functorch_batched_ks =
+     DispatchKeySet({DispatchKey::FuncTorchBatched});
+
+ // This keyset has:
+ // (1) the functionality bits corresponding to backends (dense, sparse,
+ //     quantized)
+ // (2) all of the backend bits set
+ constexpr DispatchKeySet backend_functionality_keys =
+     DispatchKeySet({
+         DispatchKey::Dense,
+         DispatchKey::Quantized,
+         DispatchKey::Sparse,
+         DispatchKey::SparseCsr,
+     }) |
+     DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
+
+ struct OpTableOffsetAndMask {
+   uint16_t offset;
+   uint16_t backend_mask;
+ };
+
+ static_assert(
+     num_backends <= 16,
+     "Right now we expect the number of backends not to exceed 16. In the (unlikely) event"
+     " that this changes, the size of OpTableOffsetAndMask::backend_mask needs to be increased too.");
+
+ // true if t is a backend dispatch key
+ C10_API bool isBackendDispatchKey(DispatchKey t);
+
+ // Resolve alias dispatch key to DispatchKeySet if applicable
+ C10_API DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t);
+
+ // Resolve alias dispatch key to DispatchKeySet if applicable,
+ // and check if k is a part of that set
+ C10_API bool runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k);
+
+ // Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch
+ // key t. The DispatchKeySet is empty if t is not an alias of
+ // DispatchKey::Autograd.
+ C10_API DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t);
+
+ // Returns a DispatchKeySet of autograd related keys mapped to backend.
+ // For a given backend key, use the associated autograd key.
+ // For non-backend keys, use AutogradOther as a default.
+ // Note: it's convenient and fast to return a default here rather than (say)
+ // returning an std::optional<DispatchKey>, or throwing. But it makes callers
+ // responsible for either a) enforcing the invariant that only backend keys
+ // be passed as arguments, or b) interpreting our return value carefully.
+ inline DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t) {
+   switch (t) {
+     case BackendComponent::CPUBit:
+       return inplace_or_view_ks | autograd_cpu_ks;
+     case BackendComponent::IPUBit:
+       return inplace_or_view_ks | autograd_ipu_ks;
+     case BackendComponent::XPUBit:
+       return inplace_or_view_ks | autograd_xpu_ks;
+     case BackendComponent::CUDABit:
+       return inplace_or_view_ks | autograd_cuda_ks;
+     case BackendComponent::XLABit:
+       return inplace_or_view_ks | autograd_xla_ks;
+     case BackendComponent::LazyBit:
+       return inplace_or_view_ks | autograd_lazy_ks;
+     case BackendComponent::MetaBit:
+       return inplace_or_view_ks | autograd_meta_ks;
+     case BackendComponent::MPSBit:
+       return inplace_or_view_ks | autograd_mps_ks;
+     case BackendComponent::HPUBit:
+       return inplace_or_view_ks | autograd_hpu_ks;
+     case BackendComponent::PrivateUse1Bit:
+       return inplace_or_view_ks | autograd_privateuse1_ks;
+     case BackendComponent::PrivateUse2Bit:
+       return inplace_or_view_ks | autograd_privateuse2_ks;
+     case BackendComponent::PrivateUse3Bit:
+       return inplace_or_view_ks | autograd_privateuse3_ks;
+     default:
+       return inplace_or_view_ks | autograd_other_ks;
+   }
+ }
+
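+ // Usage sketch (illustrative): a caller computing the full keyset for a
+ // CUDA tensor might fold in the autograd-related keys like so:
+ //
+ //   DispatchKeySet ks = DispatchKeySet(DispatchKey::CUDA) |
+ //       getAutogradRelatedKeySetFromBackend(BackendComponent::CUDABit);
+ //   // ks now holds the Dense and AutogradFunctionality bits, the
+ //   // ADInplaceOrView bit, and the CUDA backend bit (i.e. it contains both
+ //   // DispatchKey::CUDA and DispatchKey::AutogradCUDA).
+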
+ // Returns a DispatchKeySet of autocast related keys mapped to backend.
+ inline DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t) {
+   constexpr auto autocast_cpu_ks = DispatchKeySet(DispatchKey::AutocastCPU);
+   constexpr auto autocast_xpu_ks = DispatchKeySet(DispatchKey::AutocastXPU);
+   constexpr auto autocast_ipu_ks = DispatchKeySet(DispatchKey::AutocastIPU);
+   constexpr auto autocast_hpu_ks = DispatchKeySet(DispatchKey::AutocastHPU);
+   constexpr auto autocast_cuda_ks = DispatchKeySet(DispatchKey::AutocastCUDA);
+   constexpr auto autocast_xla_ks = DispatchKeySet(DispatchKey::AutocastXLA);
+   constexpr auto autocast_privateuse1_ks =
+       DispatchKeySet(DispatchKey::AutocastPrivateUse1);
+   constexpr auto autocast_mps_ks = DispatchKeySet(DispatchKey::AutocastMPS);
+   switch (t) {
+     case BackendComponent::CPUBit:
+       return autocast_cpu_ks;
+     case BackendComponent::XPUBit:
+       return autocast_xpu_ks;
+     case BackendComponent::IPUBit:
+       return autocast_ipu_ks;
+     case BackendComponent::HPUBit:
+       return autocast_hpu_ks;
+     case BackendComponent::CUDABit:
+       return autocast_cuda_ks;
+     case BackendComponent::XLABit:
+       return autocast_xla_ks;
+     case BackendComponent::PrivateUse1Bit:
+       return autocast_privateuse1_ks;
+     case BackendComponent::MPSBit:
+       return autocast_mps_ks;
+     default:
+       return DispatchKeySet();
+   }
+ }
+
+ // returns the "backend" DispatchKey of highest priority in the set.
+ // This is basically like highestBackendKey(), except that we have some
+ // "functionality" bits that correspond to backends (Sparse, Quantized)
+ inline DispatchKey highestPriorityBackendTypeId(DispatchKeySet ks) {
+   return (ks & backend_functionality_keys).highestPriorityTypeId();
+ }
+
+ // This API exists because we have a use case for checking
+ // getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined)
+ // in OperatorEntry.cpp but we disallow it in has() API.
+ C10_API bool isIncludedInAlias(DispatchKey k, DispatchKey alias);
+
+ // Historically, every tensor only had a single DispatchKey, and it was
+ // always something like CPU, and there wasn't any of this business where TLS
+ // could cause the DispatchKey of a tensor to change. But we still have some
+ // legacy code that is still using DispatchKey for things like instanceof
+ // checks; if at all possible, refactor the code to stop using DispatchKey in
+ // those cases.
+ inline DispatchKey legacyExtractDispatchKey(DispatchKeySet s) {
+   // NB: If you add any extra keys that can be stored in TensorImpl on
+   // top of existing "backend" keys like CPU/CUDA, you need to add it
+   // here. At the moment, autograd keys and ADInplaceOrView key need this
+   // treatment;
+   return (s - autograd_dispatch_keyset_with_ADInplaceOrView -
+           autocast_dispatch_keyset -
+           DispatchKeySet(
+               {DispatchKey::Functionalize,
+                DispatchKey::PythonTLSSnapshot,
+                DispatchKey::FuncTorchGradWrapper,
+                DispatchKey::FuncTorchVmapMode,
+                DispatchKey::FuncTorchBatched,
+                DispatchKey::Python}))
+       .highestPriorityTypeId();
+ }
+
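+ // Worked example (sketch): for a keyset of
+ //   {Dense, AutogradFunctionality, ADInplaceOrView, CUDABit},
+ // the subtraction above strips the autograd/autocast/functorch/python
+ // functionality bits (operator- never touches backend bits), leaving
+ // {Dense, CUDABit}, so highestPriorityTypeId() reports plain
+ // DispatchKey::CUDA.
+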
+ template <class T>
+ using is_not_DispatchKeySet = std::negation<std::is_same<DispatchKeySet, T>>;
+
+ // Given a function type, constructs a function_traits type that drops the
+ // first parameter type if the first parameter is of type DispatchKeySet. NB:
+ // DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid
+ // pushing unnecessary arguments on the stack - see Note [ Plumbing Keys
+ // Through the Dispatcher] for details). If at any point in the future we
+ // need to expose this type to JIT, revisit the usage of this type alias.
+ template <class FuncType>
+ using remove_DispatchKeySet_arg_from_func = guts::make_function_traits_t<
+     typename guts::infer_function_traits_t<FuncType>::return_type,
+     typename std::conditional_t<
+         std::is_same_v<
+             DispatchKeySet,
+             typename guts::typelist::head_with_default_t<
+                 void,
+                 typename guts::infer_function_traits_t<
+                     FuncType>::parameter_types>>,
+         guts::typelist::drop_if_nonempty_t<
+             typename guts::infer_function_traits_t<FuncType>::parameter_types,
+             1>,
+         typename guts::infer_function_traits_t<FuncType>::parameter_types>>;
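+ // Sketch of the effect (illustrative; `Kernel` is a hypothetical signature):
+ //
+ //   using Kernel = int (*)(DispatchKeySet, double);
+ //   // remove_DispatchKeySet_arg_from_func<Kernel> has the function traits
+ //   // of `int(double)`; a signature without a leading DispatchKeySet
+ //   // parameter is passed through unchanged.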
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Event.h ADDED
@@ -0,0 +1,137 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/Stream.h>
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/core/impl/InlineEvent.h>
+ #include <c10/core/impl/VirtualGuardImpl.h>
+
+ namespace c10 {
+
+ /**
+  * A backend-generic movable, not copyable, not thread-safe event.
+  *
+  * The design of this event follows that of CUDA and HIP events. These events
+  * are recorded and waited on by streams and can be rerecorded to,
+  * each rerecording essentially creating a new version of the event.
+  * For example, if (in CPU time) stream X is asked to record E,
+  * stream Y waits on E, and stream X is asked to record E again, then Y will
+  * wait for X to finish the first call to record and not the second, because
+  * it's waiting on the first version of event E, not the second.
+  * Querying an event only returns the status of its most recent version.
+  *
+  * Backend-generic events are implemented by this class and
+  * impl::InlineEvent. In addition to these events there are also
+  * some backend-specific events, like ATen's CUDAEvent. Each of these
+  * classes has its own use.
+  *
+  * impl::InlineEvent<...> or a backend-specific event should be
+  * preferred when the backend is known at compile time and known to
+  * be compiled. Backend-specific events may have additional functionality.
+  *
+  * This Event should be used if a particular backend may not be available,
+  * or the backend required is not known at compile time.
+  *
+  * These generic events are built on top of DeviceGuardImpls, analogous
+  * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls"
+  * is no longer entirely accurate, as these classes implement the
+  * backend-specific logic for a generic backend interface.
+  *
+  * See DeviceGuardImplInterface.h for a list of all supported flags.
+  */
+
+ struct Event final {
+   // Constructors
+   Event() = delete;
+   Event(
+       const DeviceType _device_type,
+       const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
+       : impl_{_device_type, _flag} {}
+
+   // Copy constructor and copy assignment operator (deleted)
+   Event(const Event&) = delete;
+   Event& operator=(const Event&) = delete;
+
+   // Move constructor and move assignment operator
+   Event(Event&&) noexcept = default;
+   Event& operator=(Event&&) noexcept = default;
+
+   // Destructor
+   ~Event() = default;
+
+   // Getters
+   Device device() const noexcept {
+     return Device(device_type(), device_index());
+   }
+   DeviceType device_type() const noexcept {
+     return impl_.device_type();
+   }
+   DeviceIndex device_index() const noexcept {
+     return impl_.device_index();
+   }
+   EventFlag flag() const noexcept {
+     return impl_.flag();
+   }
+   bool was_marked_for_recording() const noexcept {
+     return impl_.was_marked_for_recording();
+   }
+
+   /**
+    * Calls record() if and only if record() has never been called for this
+    * event. Note: because Event is not thread-safe, recordOnce() may call
+    * record() multiple times if called from multiple threads.
+    */
+   void recordOnce(const Stream& stream) {
+     impl_.recordOnce(stream);
+   }
+
+   /**
+    * Increments the event's version and enqueues a job with this version
+    * in the stream's work queue. When the stream processes that job,
+    * it notifies all streams waiting on / blocked by that version of the
+    * event to continue, and marks that version as recorded.
+    */
+   void record(const Stream& stream) {
+     impl_.record(stream);
+   }
+
+   /**
+    * Does nothing if the event has not been scheduled to be recorded.
+    * If the event was previously enqueued to be recorded, a command
+    * to wait for the version of the event that exists at the time of this
+    * call is inserted in the stream's work queue.
+    * When the stream reaches this command it will stop processing
+    * additional commands until that version of the event is marked as
+    * recorded.
+    */
+   void block(const Stream& stream) const {
+     impl_.block(stream);
+   }
+
+   /**
+    * Returns true if (and only if)
+    *  (1) the event has never been scheduled to be recorded, or
+    *  (2) the current version is marked as recorded.
+    * Returns false otherwise.
+    */
+   bool query() const {
+     return impl_.query();
+   }
+
+   double elapsedTime(const Event& event) const {
+     return impl_.elapsedTime(event.impl_);
+   }
+
+   void* eventId() const {
+     return impl_.eventId();
+   }
+
+   void synchronize() const {
+     return impl_.synchronize();
+   }
+
+  private:
+   impl::InlineEvent<impl::VirtualGuardImpl> impl_;
+ };
+
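+ // Usage sketch (illustrative; requires a backend with real streams, e.g.
+ // CUDA; `producer` and `consumer` are hypothetical c10::Stream values):
+ //
+ //   c10::Event ev(c10::DeviceType::CUDA);
+ //   ev.record(producer);     // version 1 of ev
+ //   ev.block(consumer);      // consumer waits for version 1
+ //   ev.record(producer);     // version 2; the wait above is unaffected
+ //   bool done = ev.query();  // status of the most recent version only
+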
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h ADDED
@@ -0,0 +1,110 @@
+ #pragma once
+
+ #include <cstdint>
+ #include <mutex>
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DispatchKeySet.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <c10/util/python_stub.h>
+
+ /**
+  * Note [Generator]
+  * ~~~~~~~~~~~~~~~~
+  * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm
+  * to generate a seemingly random sequence of numbers, which may later be used
+  * in creating a random distribution. Such an engine almost always maintains a
+  * state and requires a seed to start off the creation of random numbers.
+  * Oftentimes, users have found it beneficial to be able to explicitly create,
+  * retain, and destroy PRNG states and also be able to have control over the
+  * seed value.
+  *
+  * A Generator in ATen gives users the ability to read, write and modify a
+  * PRNG engine. For instance, it does so by letting users seed a PRNG engine,
+  * fork the state of the engine, etc.
+  *
+  * By default, there is one generator per device, and a device's generator is
+  * lazily created. A user can use the torch.Generator() api to create their
+  * own generator. Currently torch.Generator() can only create a
+  * CPUGeneratorImpl.
+  */
+
+ /**
+  * Note [Acquire lock when using random generators]
+  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  * Generator and its derived classes are NOT thread-safe. Please note that
+  * most of the places where we have inserted locking for generators are
+  * historically based, and we haven't actually checked that everything is
+  * truly thread safe (and it probably isn't). Please use the public mutex_
+  * when using any methods from these classes, except for the read-only
+  * methods. You can learn about the usage by looking into the unittests
+  * (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used
+  * lock_guard.
+  *
+  * TODO: Look into changing the threading semantics of Generators in ATen
+  * (e.g., making them non-thread safe and instead making the generator state
+  * splittable, to accommodate forks into other threads).
+  */
+
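+ // Usage sketch for the locking discipline above (illustrative; `gen` is a
+ // hypothetical c10::GeneratorImpl instance):
+ //
+ //   {
+ //     std::lock_guard<std::mutex> lock(gen.mutex_);
+ //     gen.set_current_seed(42);      // mutating call: hold the public mutex_
+ //   }
+ //   uint64_t s = gen.current_seed(); // read-only accessors may skip the lock
+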
+ namespace c10 {
+
+ // The default seed is selected to be a large number
+ // with good distribution of 0s and 1s in bit representation
+ constexpr uint64_t default_rng_seed_val = 67280421310721;
+
+ struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {
+   // Constructors
+   GeneratorImpl(Device device_in, DispatchKeySet key_set);
+
+   // Delete all copy and move assignment in favor of clone()
+   // method
+   GeneratorImpl(const GeneratorImpl& other) = delete;
+   GeneratorImpl(GeneratorImpl&& other) = delete;
+   GeneratorImpl& operator=(const GeneratorImpl& other) = delete;
+
+   ~GeneratorImpl() override = default;
+   c10::intrusive_ptr<GeneratorImpl> clone() const;
+
+   // Common methods for all generators
+   virtual void set_current_seed(uint64_t seed) = 0;
+   virtual void set_offset(uint64_t offset) = 0;
+   virtual uint64_t get_offset() const = 0;
+   virtual uint64_t current_seed() const = 0;
+   virtual uint64_t seed() = 0;
+   virtual void set_state(const c10::TensorImpl& new_state) = 0;
+   virtual c10::intrusive_ptr<c10::TensorImpl> get_state() const = 0;
+   virtual void graphsafe_set_state(
+       const c10::intrusive_ptr<c10::GeneratorImpl>& new_state);
+   virtual c10::intrusive_ptr<c10::GeneratorImpl> graphsafe_get_state() const;
+   Device device() const;
+
+   // See Note [Acquire lock when using random generators]
+   std::mutex mutex_;
+
+   DispatchKeySet key_set() const {
+     return key_set_;
+   }
+
+   inline void set_pyobj(PyObject* pyobj) noexcept {
+     pyobj_ = pyobj;
+   }
+
+   inline PyObject* pyobj() const noexcept {
+     return pyobj_;
+   }
+
+  protected:
+   Device device_;
+   DispatchKeySet key_set_;
+   PyObject* pyobj_ = nullptr;
+
+   virtual GeneratorImpl* clone_impl() const = 0;
+ };
+
+ namespace detail {
+
+ C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
+
+ } // namespace detail
+
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ #include <c10/core/AutogradState.h>
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+
+ struct C10_API GradMode {
+   static bool is_enabled();
+   static void set_enabled(bool enabled);
+ };
+
+ // A RAII, thread local (!) guard that enables or disables grad mode upon
+ // construction, and sets it back to the original value upon destruction.
+ struct C10_API AutoGradMode {
+   AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
+     GradMode::set_enabled(enabled);
+   }
+   ~AutoGradMode() {
+     GradMode::set_enabled(prev_mode);
+   }
+   bool prev_mode;
+ };
+
+ // A RAII, thread local (!) guard that stops future operations from building
+ // gradients.
+ struct C10_API NoGradGuard : public AutoGradMode {
+   NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
+ };
+
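+ // Usage sketch (illustrative):
+ //
+ //   {
+ //     c10::NoGradGuard no_grad;          // GradMode::is_enabled() == false
+ //     // ... run work that must not build a graph ...
+ //   }                                    // previous mode restored here
+ //   c10::AutoGradMode enable_grad(true); // explicitly (re-)enable grad mode
+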
+ // A RAII, thread local (!) guard that enables or disables forward grad mode
+ // upon construction, and sets it back to the original value upon destruction.
+ struct C10_API AutoFwGradMode {
+   AutoFwGradMode(bool enabled)
+       : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
+     AutogradState::get_tls_state().set_fw_grad_mode(enabled);
+   }
+   ~AutoFwGradMode() {
+     AutogradState::get_tls_state().set_fw_grad_mode(prev_mode);
+   }
+   bool prev_mode;
+ };
+
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h ADDED
@@ -0,0 +1,86 @@
+ #pragma once
+
+ #include <c10/core/AutogradState.h>
+ #include <c10/core/DispatchKey.h>
+ #include <c10/core/DispatchKeySet.h>
+ #include <c10/core/impl/LocalDispatchKeySet.h>
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+
+ // A RAII, thread local (!) guard that enables or disables inference mode upon
+ // construction, and sets it back to the original value upon destruction.
+ struct C10_API InferenceMode {
+   // Note [Expected TLS state in InferenceMode]:
+   // InferenceMode: ADInplaceOrView not in
+   //                raw_local_dispatch_key_set.included(),
+   //                Autograd in raw_local_dispatch_key_set.excluded(),
+   //                GradMode is disabled.
+   // NormalMode:    ADInplaceOrView in raw_local_dispatch_key_set.included(),
+   //                Autograd not in raw_local_dispatch_key_set.excluded(),
+   //                GradMode is enabled by default unless toggled manually
+   //                through other APIs, e.g. NoGradGuard.
+   //
+   // Invariant:
+   // - ADInplaceOrView is never in the excluded set
+   // - Autograd is never in the included set
+   // - Setting InferenceMode will set GradMode accordingly, but not vice versa.
+   //
+   // 1. Why do we put ADInplaceOrView in the included set outside
+   //    InferenceMode?
+   //
+   //    An inplace update to an inference tensor outside InferenceMode is not
+   //    allowed. See Note [Inplace update inference tensor] for more details.
+   //    Without going through the ADInplaceOrView kernel, we cannot throw an
+   //    error for the `inference_tensor.add_(1)` case.
+   //
+   // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode?
+   //
+   //    For example:
+   //      torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true);
+   //      torch::Tensor k = a + 2;
+   //      {
+   //        c10::InferenceMode guard(true);
+   //        k.add_(2);
+   //      }
+   //    `k.add_(2)` still needs to go through the ADInplaceOrView kernel so
+   //    that it's prepared for future autograd.
+   //
+   // 3. Why does setting InferenceMode also set GradMode?
+   //
+   //    This is required since InferenceMode is a faster and more restrictive
+   //    version of NoGradGuard. All runtime checks using GradMode::is_enabled()
+   //    are applicable to InferenceMode as well, e.g.
+   //    `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
+   InferenceMode(bool enabled = true)
+       : prev_mode(AutogradState::get_tls_state()),
+         prev_keyset(c10::impl::tls_local_dispatch_key_set()) {
+     // Enabling inference mode means disabling grad modes,
+     // and disabling inference mode means enabling grad modes.
+     AutogradState::set_tls_state(AutogradState(
+         /* grad_mode */ !enabled,
+         /* inference_mode */ enabled,
+         /* fw_grad_mode */ !enabled,
+         /* multithreading_enabled*/ !enabled));
+     DispatchKeySet included = enabled
+         ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView)
+         : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView);
+     DispatchKeySet excluded = enabled
+         ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset)
+         : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset);
+     c10::impl::PODLocalDispatchKeySet cur_keyset{};
+     cur_keyset.set_included(included);
+     cur_keyset.set_excluded(excluded);
+     c10::impl::_force_tls_local_dispatch_key_set(cur_keyset);
+   }
+
+   ~InferenceMode() {
+     AutogradState::set_tls_state(prev_mode);
+     c10::impl::_force_tls_local_dispatch_key_set(prev_keyset);
+   }
+   static bool is_enabled();
+
+  private:
+   AutogradState prev_mode;
+   c10::impl::LocalDispatchKeySet prev_keyset;
+ };
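+
+ // Usage sketch (illustrative):
+ //
+ //   {
+ //     c10::InferenceMode guard;  // grad/fw-grad modes off, Autograd keys
+ //                                // excluded and ADInplaceOrView dropped
+ //                                // from the TLS included set
+ //     // ... inference-only work ...
+ //   }  // previous AutogradState and dispatch-key TLS restored here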
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Layout.h ADDED
@@ -0,0 +1,78 @@
+ #pragma once
+
+ #include <c10/core/Backend.h>
+ #include <c10/util/Exception.h>
+
+ #include <cstdint>
+ #include <ostream>
+
+ namespace c10 {
+ enum class Layout : int8_t {
+   Strided,
+   Sparse,
+   SparseCsr,
+   Mkldnn,
+   SparseCsc,
+   SparseBsr,
+   SparseBsc,
+   Jagged,
+   NumOptions
+ };
+
+ constexpr auto kStrided = Layout::Strided;
+ constexpr auto kSparse = Layout::Sparse;
+ constexpr auto kSparseCsr = Layout::SparseCsr;
+ constexpr auto kMkldnn = Layout::Mkldnn;
+ constexpr auto kSparseCsc = Layout::SparseCsc;
+ constexpr auto kSparseBsr = Layout::SparseBsr;
+ constexpr auto kSparseBsc = Layout::SparseBsc;
+ constexpr auto kJagged = Layout::Jagged;
+
+ inline Layout layout_from_backend(Backend backend) {
+   switch (backend) {
+     case Backend::SparseCPU:
+     case Backend::SparseCUDA:
+     case Backend::SparseHIP:
+     case Backend::SparseVE:
+     case Backend::SparseXPU:
+     case Backend::SparsePrivateUse1:
+       return Layout::Sparse;
+     case Backend::MkldnnCPU:
+       return Layout::Mkldnn;
+     case Backend::SparseCsrCPU:
+     case Backend::SparseCsrCUDA:
+     case Backend::SparseCsrHIP:
+     case Backend::SparseCsrVE:
+     case Backend::SparseCsrXPU:
+       TORCH_CHECK(
+           false,
+           "Cannot map Backend SparseCsr(CPU|CUDA|HIP|VE|XPU) to a unique layout.");
+     default:
+       return Layout::Strided;
+   }
+ }
+
+ inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
+   switch (layout) {
+     case at::kStrided:
+       return stream << "Strided";
+     case at::kSparse:
+       return stream << "Sparse";
+     case at::kSparseCsr:
+       return stream << "SparseCsr";
+     case at::kSparseCsc:
+       return stream << "SparseCsc";
+     case at::kSparseBsr:
+       return stream << "SparseBsr";
+     case at::kSparseBsc:
+       return stream << "SparseBsc";
+     case at::kMkldnn:
+       return stream << "Mkldnn";
+     case at::kJagged:
+       return stream << "Jagged";
+     default:
+       TORCH_CHECK(false, "Unknown layout");
+   }
+ }
+
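+ // Usage sketch (illustrative):
+ //
+ //   c10::Layout layout = c10::layout_from_backend(c10::Backend::SparseCPU);
+ //   std::cout << layout;  // prints "Sparse" via the operator<< above
+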
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h ADDED
@@ -0,0 +1,76 @@
+ #pragma once
+
+ #include <c10/core/impl/PyInterpreter.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/python_stub.h>
+
+ #include <atomic>
+
+ namespace c10 {
+
+ // A PyHandleCache represents a cached pointer from a C++ object to
+ // a Python object that represents that object analogously in Python.
+ // Upon a cache hit, the relevant object can be retrieved after a test
+ // and then a memory load. Two conditions must hold to be able to use this
+ // class:
+ //
+ // - This must truly be a cache; e.g., the caller must be able to produce
+ //   the object some other way if the cache lookup misses.
+ //
+ // - This must truly be a handle; e.g., the Python object referenced by
+ //   this class must have static lifetime. This means we don't have to
+ //   maintain strong ownership or deallocate the object when the C++ object
+ //   dies. Static lifetime is a good idea in conjunction with the cache,
+ //   since if you are producing a fresh object on miss you won't be
+ //   maintaining object identity. If you need bidirectional ownership,
+ //   you will want to factor out the pattern in TensorImpl with
+ //   resurrection.
+ //
+ // This cache is expected to not improve perf under torchdeploy, as one
+ // interpreter will fill up the cache, and all the interpreters will be
+ // unable to use the slot. A potential improvement is to have multiple
+ // slots (one per interpreter), which will work in deployment scenarios
+ // where there is a stable, fixed number of interpreters. You can also
+ // store the relevant state in the Python library, rather than in the
+ // non-Python library (although in many cases, this is not convenient, as
+ // there may not be a way to conveniently index based on the object).
+ class PyHandleCache {
+  public:
+   PyHandleCache() : pyinterpreter_(nullptr) {}
+
+   // Attempt to fetch the pointer from the cache, if the PyInterpreter
+   // matches. If it doesn't exist, or the cache entry is not valid,
+   // use slow_accessor to get the real pointer value and return that
+   // (possibly writing it to the cache, if the cache entry is
+   // available.)
+   template <typename F>
+   PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor)
+       const {
+     // Note [Memory ordering on Python interpreter tag]
+     impl::PyInterpreter* interpreter =
+         pyinterpreter_.load(std::memory_order_acquire);
+     if (C10_LIKELY(interpreter == self_interpreter)) {
+       return data_;
+     } else if (interpreter == nullptr) {
+       auto* r = slow_accessor();
+       impl::PyInterpreter* expected = nullptr;
+       // attempt to claim this cache entry with the specified interpreter tag
+       if (pyinterpreter_.compare_exchange_strong(
+               expected, self_interpreter, std::memory_order_acq_rel)) {
+         data_ = r;
+       }
+       // This shouldn't be possible, as you should be GIL protected
+       TORCH_INTERNAL_ASSERT(expected != self_interpreter);
+       return r;
+     } else {
+       return slow_accessor();
+     }
+   }
+
+  private:
+   mutable std::atomic<impl::PyInterpreter*> pyinterpreter_;
+   mutable PyObject* data_{nullptr};
+ };
+
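+ // Usage sketch (illustrative; `self_interpreter` and `make_pyobj` are
+ // hypothetical):
+ //
+ //   PyHandleCache cache;
+ //   PyObject* obj = cache.ptr_or(self_interpreter, [&]() {
+ //     return make_pyobj();  // slow path; must reproduce the same handle
+ //   });
+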
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+
+ #include <c10/util/Exception.h>
+ #include <cstdint>
+ #include <string>
+
+ namespace c10 {
+
+ /**
+  * QEngine is an enum that is used to select the engine to run quantized ops.
+  * Keep this enum in sync with get_qengine_id() in
+  * torch/backends/quantized/__init__.py
+  */
+ enum class QEngine : uint8_t {
+   NoQEngine = 0,
+   FBGEMM = 1,
+   QNNPACK = 2,
+   ONEDNN = 3,
+   X86 = 4,
+ };
+
+ constexpr auto kNoQEngine = QEngine::NoQEngine;
+ constexpr auto kFBGEMM = QEngine::FBGEMM;
+ constexpr auto kQNNPACK = QEngine::QNNPACK;
+ constexpr auto kONEDNN = QEngine::ONEDNN;
+ constexpr auto kX86 = QEngine::X86;
+
+ inline std::string toString(QEngine qengine) {
+   switch (qengine) {
+     case kNoQEngine:
+       return "NoQEngine";
+     case kFBGEMM:
+       return "FBGEMM";
+     case kQNNPACK:
+       return "QNNPACK";
+     case kONEDNN:
+       return "ONEDNN";
+     case kX86:
+       return "X86";
+     default:
+       TORCH_CHECK(
+           false, "Unrecognized Quantized Engine: ", static_cast<int>(qengine));
+   }
+ }
+
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h ADDED
@@ -0,0 +1,52 @@
+ #pragma once
+
+ #include <c10/core/Storage.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/UniqueVoidPtr.h>
+
+ #include <atomic>
+ #include <memory>
+
+ namespace c10 {
+
+ // A RefcountedDeleterContext object is used as the `ctx` argument for DataPtr
+ // to implement a shared DataPtr. Normally, a DataPtr is unique, but we use
+ // this custom context and the `refcounted_deleter` function below to make the
+ // DataPtr act like a non-unique DataPtr. This context object holds onto an
+ // inner context and deleter function which handle the actual deletion of the
+ // data when the refcount reaches 0.
+ //
+ // This shared DataPtr feature is only used when storages are shared between
+ // multiple Python interpreters in MultiPy. Before storages had PyObject
+ // preservation, interpreters could just share the same StorageImpl instance.
+ // But now a StorageImpl can only be associated with one interpreter in order
+ // to properly manage a zombie PyObject. So we share storages across Python
+ // interpreters by creating a different StorageImpl instance for each one, but
+ // they all point to the same data.
+ struct C10_API RefcountedDeleterContext {
+   RefcountedDeleterContext(void* other_ctx, c10::DeleterFnPtr other_deleter)
+       : other_ctx(other_ctx, other_deleter), refcount(1) {}
+
+   std::unique_ptr<void, c10::DeleterFnPtr> other_ctx;
+   std::atomic_int refcount;
+ };
+
+ // `refcounted_deleter` is used as the `ctx_deleter` for DataPtr to implement
+ // a shared DataPtr.
+ //
+ // Warning: This should only be called on a pointer to
+ // a RefcountedDeleterContext that was allocated on the heap with `new`,
+ // because when the refcount reaches 0, the context is deleted with `delete`.
+ C10_API void refcounted_deleter(void* ctx_);
+
+ // If the storage's DataPtr does not use `refcounted_deleter`, replace it with
+ // a DataPtr that does, so it can be shared between multiple StorageImpls.
+ C10_API void maybeApplyRefcountedDeleter(const c10::Storage& storage);
+
+ // Create a new StorageImpl that points to the same data. If the original
+ // StorageImpl's DataPtr does not use `refcounted_deleter`, it will be
+ // replaced with one that does.
+ C10_API c10::Storage newStorageImplFromRefcountedDataPtr(
+     const c10::Storage& storage);
+
52
+ } // namespace c10
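A standalone sketch of the refcounted-deleter idea (not the c10 implementation itself): several owners share one heap-allocated context, and the last decref frees both the data and the context. The names here are illustrative stand-ins:

#include <atomic>
#include <cstdio>
#include <cstdlib>

struct RefcountedCtx {
  void* data;
  void (*deleter)(void*);
  std::atomic_int refcount{1};
};

void refcounted_delete(void* ctx_) {
  auto* ctx = static_cast<RefcountedCtx*>(ctx_);
  if (ctx->refcount.fetch_sub(1) == 1) {  // we were the last owner
    ctx->deleter(ctx->data);
    delete ctx;
  }
}

int main() {
  auto* ctx = new RefcountedCtx{std::malloc(16), std::free};
  ctx->refcount.fetch_add(1);  // a second "storage" shares the data
  refcounted_delete(ctx);      // first owner drops: data survives
  refcounted_delete(ctx);      // last owner drops: data freed here
  std::puts("done");
}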
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h ADDED
@@ -0,0 +1,118 @@
+ #pragma once
+
+ #include <c10/core/impl/PyInterpreter.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/python_stub.h>
+ #include <utility>
+
+ namespace c10 {
+
+ // This is a safe owning holder for a PyObject, akin to pybind11's
+ // py::object, with two major differences:
+ //
+ // - It is in c10/core; i.e., you can use this type in contexts where
+ //   you do not have a libpython dependency
+ //
+ // - It is multi-interpreter safe (ala torchdeploy); when you fetch
+ //   the underlying PyObject* you are required to specify what the current
+ //   interpreter context is and we will check that you match it.
+ //
+ // It is INVALID to store a reference to a Tensor object in this way;
+ // you should just use TensorImpl directly in that case!
+ struct C10_API SafePyObject {
+   // Steals a reference to data
+   SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
+       : data_(data), pyinterpreter_(pyinterpreter) {}
+   SafePyObject(SafePyObject&& other) noexcept
+       : data_(std::exchange(other.data_, nullptr)),
+         pyinterpreter_(other.pyinterpreter_) {}
+   // For now it's not used, so we just disallow it.
+   SafePyObject& operator=(SafePyObject&&) = delete;
+
+   SafePyObject(SafePyObject const& other)
+       : data_(other.data_), pyinterpreter_(other.pyinterpreter_) {
+     if (data_ != nullptr) {
+       (*pyinterpreter_)->incref(data_);
+     }
+   }
+
+   SafePyObject& operator=(SafePyObject const& other) {
+     if (this == &other) {
+       return *this; // Handle self-assignment
+     }
+     if (other.data_ != nullptr) {
+       (*other.pyinterpreter_)->incref(other.data_);
+     }
+     if (data_ != nullptr) {
+       (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false);
+     }
+     data_ = other.data_;
+     pyinterpreter_ = other.pyinterpreter_;
+     return *this;
+   }
+
+   ~SafePyObject() {
+     if (data_ != nullptr) {
+       (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false);
+     }
+   }
+
+   c10::impl::PyInterpreter& pyinterpreter() const {
+     return *pyinterpreter_;
+   }
+   PyObject* ptr(const c10::impl::PyInterpreter*) const;
+
+   // stop tracking the current object, and return it
+   PyObject* release() {
+     auto rv = data_;
+     data_ = nullptr;
+     return rv;
+   }
+
+  private:
+   PyObject* data_;
+   c10::impl::PyInterpreter* pyinterpreter_;
+ };
+
+ // A newtype wrapper around SafePyObject for type safety when a python object
+ // represents a specific type. Note that `T` is only used as a tag and isn't
+ // actually used for any true purpose.
+ template <typename T>
+ struct SafePyObjectT : private SafePyObject {
+   SafePyObjectT(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
+       : SafePyObject(data, pyinterpreter) {}
+   SafePyObjectT(SafePyObjectT&& other) noexcept
+       : SafePyObject(std::move(other)) {}
+   SafePyObjectT(SafePyObjectT const&) = delete;
+   SafePyObjectT& operator=(SafePyObjectT const&) = delete;
+
+   using SafePyObject::ptr;
+   using SafePyObject::pyinterpreter;
+   using SafePyObject::release;
+ };
+
+ // Like SafePyObject, but non-owning. Good for references to global PyObjects
+ // that will be leaked on interpreter exit. You get a copy constructor/assign
+ // this way.
+ struct C10_API SafePyHandle {
+   SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {}
+   SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
+       : data_(data), pyinterpreter_(pyinterpreter) {}
+
+   c10::impl::PyInterpreter& pyinterpreter() const {
+     return *pyinterpreter_;
+   }
+   PyObject* ptr(const c10::impl::PyInterpreter*) const;
+   void reset() {
+     data_ = nullptr;
+     pyinterpreter_ = nullptr;
+   }
+   operator bool() {
+     return data_;
+   }
+
+  private:
+   PyObject* data_;
+   c10::impl::PyInterpreter* pyinterpreter_;
+ };
+
+ } // namespace c10
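A standalone sketch of the steal-on-construct / exchange-on-move ownership pattern SafePyObject uses. FakePyObject and the global incref/decref counters are illustrative stand-ins for the interpreter's real refcounting, which the c10 class reaches through PyInterpreter:

#include <cstdio>
#include <utility>

static int g_refs = 0;
struct FakePyObject {};
void incref(FakePyObject*) { ++g_refs; }
void decref(FakePyObject*) { --g_refs; }

struct SafeHolder {
  explicit SafeHolder(FakePyObject* data) : data_(data) {}  // steals a ref
  SafeHolder(SafeHolder&& other) noexcept
      : data_(std::exchange(other.data_, nullptr)) {}       // no ref churn
  ~SafeHolder() {
    if (data_) decref(data_);
  }
  FakePyObject* release() { return std::exchange(data_, nullptr); }

 private:
  FakePyObject* data_;
};

int main() {
  FakePyObject obj;
  incref(&obj);                      // the caller owns one reference
  SafeHolder a(&obj);                // the holder steals it
  SafeHolder b(std::move(a));        // move transfers ownership
  std::printf("refs=%d\n", g_refs);  // 1 while b is alive
}  // b's destructor drops the reference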
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h ADDED
@@ -0,0 +1,467 @@
+ #pragma once
+
+ #include <cstdint>
+ #include <stdexcept>
+ #include <type_traits>
+ #include <utility>
+
+ #include <c10/core/OptionalRef.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/SymBool.h>
+ #include <c10/core/SymFloat.h>
+ #include <c10/core/SymInt.h>
+ #include <c10/core/SymNodeImpl.h>
+ #include <c10/macros/Export.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Half.h>
+ #include <c10/util/TypeCast.h>
+ #include <c10/util/complex.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace c10 {
+
+ /**
+  * Scalar represents a 0-dimensional tensor which contains a single element.
+  * Unlike a tensor, numeric literals (in C++) are implicitly convertible to
+  * Scalar (which is why, for example, we provide both add(Tensor) and
+  * add(Scalar) overloads for many operations). It may also be used in
+  * circumstances where you statically know a tensor is 0-dim and single size,
+  * but don't know its type.
+  */
+ class C10_API Scalar {
+  public:
+   Scalar() : Scalar(int64_t(0)) {}
+
+   void destroy() {
+     if (Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag) {
+       raw::intrusive_ptr::decref(v.p);
+       v.p = nullptr;
+     }
+   }
+
+   ~Scalar() {
+     destroy();
+   }
+
+ #define DEFINE_IMPLICIT_CTOR(type, name) \
+   Scalar(type vv) : Scalar(vv, true) {}
+
+   AT_FORALL_SCALAR_TYPES_AND7(
+       Half,
+       BFloat16,
+       Float8_e5m2,
+       Float8_e4m3fn,
+       Float8_e5m2fnuz,
+       Float8_e4m3fnuz,
+       ComplexHalf,
+       DEFINE_IMPLICIT_CTOR)
+   AT_FORALL_COMPLEX_TYPES(DEFINE_IMPLICIT_CTOR)
+
+   // Helper constructors to allow Scalar creation from long and long long types
+   // As std::is_same_v<long, long long> is false (except on Android), one needs
+   // to provide a constructor from either long or long long in addition to one
+   // from int64_t
+ #if defined(__APPLE__) || defined(__MACOSX)
+   static_assert(
+       std::is_same_v<long long, int64_t>,
+       "int64_t is the same as long long on MacOS");
+   Scalar(long vv) : Scalar(vv, true) {}
+ #endif
+ #if defined(_MSC_VER)
+   static_assert(
+       std::is_same_v<long long, int64_t>,
+       "int64_t is the same as long long on Windows");
+   Scalar(long vv) : Scalar(vv, true) {}
+ #endif
+ #if defined(__linux__) && !defined(__ANDROID__)
+   static_assert(
+       std::is_same_v<long, int64_t>,
+       "int64_t is the same as long on Linux");
+   Scalar(long long vv) : Scalar(vv, true) {}
+ #endif
+
+   Scalar(uint16_t vv) : Scalar(vv, true) {}
+   Scalar(uint32_t vv) : Scalar(vv, true) {}
+   Scalar(uint64_t vv) {
+     if (vv > static_cast<uint64_t>(INT64_MAX)) {
+       tag = Tag::HAS_u;
+       v.u = vv;
+     } else {
+       tag = Tag::HAS_i;
+       // NB: no need to use convert, we've already tested convertibility
+       v.i = static_cast<int64_t>(vv);
+     }
+   }
+
+ #undef DEFINE_IMPLICIT_CTOR
+
+   // Value* is both implicitly convertible to SymbolicVariable and bool which
+   // causes ambiguity error. Specialized constructor for bool resolves this
+   // problem.
+   template <
+       typename T,
+       typename std::enable_if_t<std::is_same_v<T, bool>, bool>* = nullptr>
+   Scalar(T vv) : tag(Tag::HAS_b) {
+     v.i = convert<int64_t, bool>(vv);
+   }
+
+   template <
+       typename T,
+       typename std::enable_if_t<std::is_same_v<T, c10::SymBool>, bool>* =
+           nullptr>
+   Scalar(T vv) : tag(Tag::HAS_sb) {
+     v.i = convert<int64_t, c10::SymBool>(vv);
+   }
+
+ #define DEFINE_ACCESSOR(type, name) \
+   type to##name() const { \
+     if (Tag::HAS_d == tag) { \
+       return checked_convert<type, double>(v.d, #type); \
+     } else if (Tag::HAS_z == tag) { \
+       return checked_convert<type, c10::complex<double>>(v.z, #type); \
+     } \
+     if (Tag::HAS_b == tag) { \
+       return checked_convert<type, bool>(v.i, #type); \
+     } else if (Tag::HAS_i == tag) { \
+       return checked_convert<type, int64_t>(v.i, #type); \
+     } else if (Tag::HAS_u == tag) { \
+       return checked_convert<type, uint64_t>(v.u, #type); \
+     } else if (Tag::HAS_si == tag) { \
+       return checked_convert<type, int64_t>( \
+           toSymInt().guard_int(__FILE__, __LINE__), #type); \
+     } else if (Tag::HAS_sd == tag) { \
+       return checked_convert<type, int64_t>( \
+           toSymFloat().guard_float(__FILE__, __LINE__), #type); \
+     } else if (Tag::HAS_sb == tag) { \
+       return checked_convert<type, int64_t>( \
+           toSymBool().guard_bool(__FILE__, __LINE__), #type); \
+     } \
+     TORCH_CHECK(false) \
+   }
+
+   // TODO: Support ComplexHalf accessor
+   AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ACCESSOR)
+   DEFINE_ACCESSOR(uint16_t, UInt16)
+   DEFINE_ACCESSOR(uint32_t, UInt32)
+   DEFINE_ACCESSOR(uint64_t, UInt64)
+
+ #undef DEFINE_ACCESSOR
+
+   SymInt toSymInt() const {
+     if (Tag::HAS_si == tag) {
+       return c10::SymInt(intrusive_ptr<SymNodeImpl>::reclaim_copy(
+           static_cast<SymNodeImpl*>(v.p)));
+     } else {
+       return toLong();
+     }
+   }
+
+   SymFloat toSymFloat() const {
+     if (Tag::HAS_sd == tag) {
+       return c10::SymFloat(intrusive_ptr<SymNodeImpl>::reclaim_copy(
+           static_cast<SymNodeImpl*>(v.p)));
+     } else {
+       return toDouble();
+     }
+   }
+
+   SymBool toSymBool() const {
+     if (Tag::HAS_sb == tag) {
+       return c10::SymBool(intrusive_ptr<SymNodeImpl>::reclaim_copy(
+           static_cast<SymNodeImpl*>(v.p)));
+     } else {
+       return toBool();
+     }
+   }
+
+   // also support scalar.to<int64_t>();
+   // Deleted for unsupported types, but specialized below for supported types
+   template <typename T>
+   T to() const = delete;
+
+   // audit uses of data_ptr
+   const void* data_ptr() const {
+     TORCH_INTERNAL_ASSERT(!isSymbolic());
+     return static_cast<const void*>(&v);
+   }
+
+   bool isFloatingPoint() const {
+     return Tag::HAS_d == tag || Tag::HAS_sd == tag;
+   }
+
+   C10_DEPRECATED_MESSAGE(
+       "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")
+   bool isIntegral() const {
+     return Tag::HAS_i == tag || Tag::HAS_si == tag || Tag::HAS_u == tag;
+   }
+   bool isIntegral(bool includeBool) const {
+     return Tag::HAS_i == tag || Tag::HAS_si == tag || Tag::HAS_u == tag ||
+         (includeBool && isBoolean());
+   }
+
+   bool isComplex() const {
+     return Tag::HAS_z == tag;
+   }
+   bool isBoolean() const {
+     return Tag::HAS_b == tag || Tag::HAS_sb == tag;
+   }
+
+   // you probably don't actually want these; they're mostly for testing
+   bool isSymInt() const {
+     return Tag::HAS_si == tag;
+   }
+   bool isSymFloat() const {
+     return Tag::HAS_sd == tag;
+   }
+   bool isSymBool() const {
+     return Tag::HAS_sb == tag;
+   }
+
+   bool isSymbolic() const {
+     return Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag;
+   }
+
+   C10_ALWAYS_INLINE Scalar& operator=(Scalar&& other) noexcept {
+     if (&other == this) {
+       return *this;
+     }
+
+     destroy();
+     moveFrom(std::move(other));
+     return *this;
+   }
+
+   C10_ALWAYS_INLINE Scalar& operator=(const Scalar& other) {
+     if (&other == this) {
+       return *this;
+     }
+
+     *this = Scalar(other);
+     return *this;
+   }
+
+   Scalar operator-() const;
+   Scalar conj() const;
+   Scalar log() const;
+
+   template <
+       typename T,
+       typename std::enable_if_t<!c10::is_complex<T>::value, int> = 0>
+   bool equal(T num) const {
+     if (isComplex()) {
+       TORCH_INTERNAL_ASSERT(!isSymbolic());
+       auto val = v.z;
+       return (val.real() == num) && (val.imag() == T());
+     } else if (isFloatingPoint()) {
+       TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
+       return v.d == num;
+     } else if (tag == Tag::HAS_i) {
+       if (overflows<T>(v.i, /* strict_unsigned */ true)) {
+         return false;
+       } else {
+         return static_cast<T>(v.i) == num;
+       }
+     } else if (tag == Tag::HAS_u) {
+       if (overflows<T>(v.u, /* strict_unsigned */ true)) {
+         return false;
+       } else {
+         return static_cast<T>(v.u) == num;
+       }
+     } else if (tag == Tag::HAS_si) {
+       TORCH_INTERNAL_ASSERT(false, "NYI SymInt equality");
+     } else if (isBoolean()) {
+       // a boolean scalar never equals a non-boolean value
+       TORCH_INTERNAL_ASSERT(!isSymbolic());
+       return false;
+     } else {
+       TORCH_INTERNAL_ASSERT(false);
+     }
+   }
+
+   template <
+       typename T,
+       typename std::enable_if_t<c10::is_complex<T>::value, int> = 0>
+   bool equal(T num) const {
+     if (isComplex()) {
+       TORCH_INTERNAL_ASSERT(!isSymbolic());
+       return v.z == num;
+     } else if (isFloatingPoint()) {
+       TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
+       return (v.d == num.real()) && (num.imag() == T());
+     } else if (tag == Tag::HAS_i) {
+       if (overflows<T>(v.i, /* strict_unsigned */ true)) {
+         return false;
+       } else {
+         return static_cast<T>(v.i) == num.real() && num.imag() == T();
+       }
+     } else if (tag == Tag::HAS_u) {
+       if (overflows<T>(v.u, /* strict_unsigned */ true)) {
+         return false;
+       } else {
+         return static_cast<T>(v.u) == num.real() && num.imag() == T();
+       }
+     } else if (tag == Tag::HAS_si) {
+       TORCH_INTERNAL_ASSERT(false, "NYI SymInt equality");
+     } else if (isBoolean()) {
+       // a boolean scalar never equals a non-boolean value
+       TORCH_INTERNAL_ASSERT(!isSymbolic());
+       return false;
+     } else {
+       TORCH_INTERNAL_ASSERT(false);
+     }
+   }
+
+   bool equal(bool num) const {
+     if (isBoolean()) {
+       TORCH_INTERNAL_ASSERT(!isSymbolic());
+       return static_cast<bool>(v.i) == num;
+     } else {
+       return false;
+     }
+   }
+
+   ScalarType type() const {
+     if (isComplex()) {
+       return ScalarType::ComplexDouble;
+     } else if (isFloatingPoint()) {
+       return ScalarType::Double;
+     } else if (isIntegral(/*includeBool=*/false)) {
+       // Represent all integers as long, UNLESS it is unsigned and therefore
+       // unrepresentable as long
+       if (Tag::HAS_u == tag) {
+         return ScalarType::UInt64;
+       }
+       return ScalarType::Long;
+     } else if (isBoolean()) {
+       return ScalarType::Bool;
+     } else {
+       throw std::runtime_error("Unknown scalar type.");
+     }
+   }
+
+   Scalar(Scalar&& rhs) noexcept : tag(rhs.tag) {
+     moveFrom(std::move(rhs));
+   }
+
+   Scalar(const Scalar& rhs) : tag(rhs.tag), v(rhs.v) {
+     if (isSymbolic()) {
+       c10::raw::intrusive_ptr::incref(v.p);
+     }
+   }
+
+   Scalar(c10::SymInt si) {
+     if (auto m = si.maybe_as_int()) {
+       tag = Tag::HAS_i;
+       v.i = *m;
+     } else {
+       tag = Tag::HAS_si;
+       v.p = std::move(si).release();
+     }
+   }
+
+   Scalar(c10::SymFloat sd) {
+     if (sd.is_symbolic()) {
+       tag = Tag::HAS_sd;
+       v.p = std::move(sd).release();
+     } else {
+       tag = Tag::HAS_d;
+       v.d = sd.as_float_unchecked();
+     }
+   }
+
+   Scalar(c10::SymBool sb) {
+     if (auto m = sb.maybe_as_bool()) {
+       tag = Tag::HAS_b;
+       v.i = *m;
+     } else {
+       tag = Tag::HAS_sb;
+       v.p = std::move(sb).release();
+     }
+   }
+
+   // We can't set v in the initializer list using the
+   // syntax v{ .member = ... } because it doesn't work on MSVC
+  private:
+   enum class Tag { HAS_d, HAS_i, HAS_u, HAS_z, HAS_b, HAS_sd, HAS_si, HAS_sb };
+
+   // Note [Meaning of HAS_u]
+   // ~~~~~~~~~~~~~~~~~~~~~~~
+   // HAS_u is a bit special. On its face, it just means that we
+   // are holding an unsigned integer. However, we generally don't
+   // distinguish between different bit sizes in Scalar (e.g., we represent
+   // float as double); instead, a tag represents a mathematical notion
+   // of some quantity (integral versus floating point). So actually,
+   // HAS_u is used solely to represent unsigned integers that could
+   // not be represented as a signed integer. That means only uint64_t
+   // can potentially get this tag; smaller types like uint8_t fit into a
+   // regular int, and so for BC reasons we keep them as int.
+
+   // NB: assumes that self has already been cleared
+   // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
+   C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept {
+     v = rhs.v;
+     tag = rhs.tag;
+     if (rhs.tag == Tag::HAS_si || rhs.tag == Tag::HAS_sd ||
+         rhs.tag == Tag::HAS_sb) {
+       // Move out of scalar
+       rhs.tag = Tag::HAS_i;
+       rhs.v.i = 0;
+     }
+   }
+
+   Tag tag;
+
+   union v_t {
+     double d{};
+     int64_t i;
+     // See Note [Meaning of HAS_u]
+     uint64_t u;
+     c10::complex<double> z;
+     c10::intrusive_ptr_target* p;
+     // NOLINTNEXTLINE(modernize-use-equals-default)
+     v_t() {} // default constructor
+   } v;
+
+   template <
+       typename T,
+       typename std::enable_if_t<
+           std::is_integral_v<T> && !std::is_same_v<T, bool>,
+           bool>* = nullptr>
+   Scalar(T vv, bool) : tag(Tag::HAS_i) {
+     v.i = convert<decltype(v.i), T>(vv);
+   }
+
+   template <
+       typename T,
+       typename std::enable_if_t<
+           !std::is_integral_v<T> && !c10::is_complex<T>::value,
+           bool>* = nullptr>
+   Scalar(T vv, bool) : tag(Tag::HAS_d) {
+     v.d = convert<decltype(v.d), T>(vv);
+   }
+
+   template <
+       typename T,
+       typename std::enable_if_t<c10::is_complex<T>::value, bool>* = nullptr>
+   Scalar(T vv, bool) : tag(Tag::HAS_z) {
+     v.z = convert<decltype(v.z), T>(vv);
+   }
+ };
+
+ using OptionalScalarRef = c10::OptionalRef<Scalar>;
+
+ // define the scalar.to<int64_t>() specializations
+ #define DEFINE_TO(T, name) \
+   template <> \
+   inline T Scalar::to<T>() const { \
+     return to##name(); \
+   }
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_TO)
+ DEFINE_TO(uint16_t, UInt16)
+ DEFINE_TO(uint32_t, UInt32)
+ DEFINE_TO(uint64_t, UInt64)
+ #undef DEFINE_TO
+
+ } // namespace c10
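A brief usage sketch for the tagged-union behavior above, assuming a program built against libtorch/c10 (the build setup is an assumption, not part of this diff):

#include <c10/core/Scalar.h>
#include <iostream>

int main() {
  c10::Scalar i = 3;     // implicit ctor, stored under the integer tag
  c10::Scalar d = 2.5;   // floating values are widened and stored as double
  c10::Scalar b = true;  // bool gets its own tag

  std::cout << i.to<int64_t>() << "\n";                      // 3
  std::cout << d.toDouble() << "\n";                         // 2.5
  std::cout << i.isIntegral(/*includeBool=*/false) << "\n";  // 1
  std::cout << i.equal(3) << "\n";                           // 1
  std::cout << (b.type() == c10::ScalarType::Bool) << "\n";  // 1
}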
videochat2/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h ADDED
@@ -0,0 +1,57 @@
+ #pragma once
+
+ #include <c10/core/ScalarType.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/typeid.h>
+
+ // these just expose TypeMeta/ScalarType bridge functions in c10
+ // TODO move to typeid.h (or codemod away) when TypeMeta et al
+ // are moved from caffe2 to c10 (see note at top of typeid.h)
+
+ namespace c10 {
+
+ /**
+  * convert ScalarType enum values to TypeMeta handles
+  */
+ inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) {
+   return caffe2::TypeMeta::fromScalarType(scalar_type);
+ }
+
+ /**
+  * convert TypeMeta handles to ScalarType enum values
+  */
+ inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
+   return dtype.toScalarType();
+ }
+
+ /**
+  * typeMetaToScalarType(), lifted to optional
+  */
+ inline std::optional<at::ScalarType> optTypeMetaToScalarType(
+     std::optional<caffe2::TypeMeta> type_meta) {
+   if (!type_meta.has_value()) {
+     return std::nullopt;
+   }
+   return type_meta->toScalarType();
+ }
+
+ /**
+  * convenience: equality across TypeMeta/ScalarType conversion
+  */
+ inline bool operator==(ScalarType t, caffe2::TypeMeta m) {
+   return m.isScalarType(t);
+ }
+
+ inline bool operator==(caffe2::TypeMeta m, ScalarType t) {
+   return t == m;
+ }
+
+ inline bool operator!=(ScalarType t, caffe2::TypeMeta m) {
+   return !(t == m);
+ }
+
+ inline bool operator!=(caffe2::TypeMeta m, ScalarType t) {
+   return !(t == m);
+ }
+
+ } // namespace c10
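A round-trip sketch for the bridge functions above, under the same built-against-libtorch assumption:

#include <c10/core/ScalarTypeToTypeMeta.h>
#include <iostream>

int main() {
  caffe2::TypeMeta meta = c10::scalarTypeToTypeMeta(c10::ScalarType::Float);
  c10::ScalarType st = c10::typeMetaToScalarType(meta);
  std::cout << (st == c10::ScalarType::Float) << "\n";    // 1: round-trips
  std::cout << (meta == c10::ScalarType::Float) << "\n";  // 1: mixed == overload
}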
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Storage.h ADDED
@@ -0,0 +1,272 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/StorageImpl.h>
+ #include <c10/core/SymInt.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/ExclusivelyOwned.h>
+ #include <c10/util/MaybeOwned.h>
+ #include <c10/util/UniqueVoidPtr.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <cstddef>
+ #include <utility>
+
+ namespace c10 {
+
+ struct Storage;
+
+ C10_API bool isSharedStorageAlias(
+     const Storage& storage0,
+     const Storage& storage1);
+
+ struct C10_API Storage {
+  public:
+   struct use_byte_size_t {};
+   struct unsafe_borrow_t {
+     explicit unsafe_borrow_t() = default;
+   };
+
+   Storage() = default;
+   Storage(c10::intrusive_ptr<StorageImpl> ptr)
+       : storage_impl_(std::move(ptr)) {}
+
+   // Allocates memory buffer using given allocator and creates a storage with it
+   Storage(
+       use_byte_size_t /*use_byte_size*/,
+       const SymInt& size_bytes,
+       Allocator* allocator = nullptr,
+       bool resizable = false)
+       : storage_impl_(c10::make_intrusive<StorageImpl>(
+             StorageImpl::use_byte_size_t(),
+             size_bytes,
+             allocator,
+             resizable)) {}
+
+   // Creates storage with pre-allocated memory buffer. Allocator is given for
+   // potential future reallocations, however it can be nullptr if the storage
+   // is non-resizable
+   Storage(
+       use_byte_size_t /*use_byte_size*/,
+       size_t size_bytes,
+       at::DataPtr data_ptr,
+       at::Allocator* allocator = nullptr,
+       bool resizable = false)
+       : storage_impl_(c10::make_intrusive<StorageImpl>(
+             StorageImpl::use_byte_size_t(),
+             size_bytes,
+             std::move(data_ptr),
+             allocator,
+             resizable)) {}
+
+  protected:
+   explicit Storage(unsafe_borrow_t, const Storage& rhs)
+       : storage_impl_(c10::intrusive_ptr<c10::StorageImpl>::reclaim(
+             rhs.storage_impl_.get())) {}
+
+   friend MaybeOwnedTraits<Storage>;
+
+  public:
+   // Legacy constructor for partially initialized (dtype or memory) storages
+   // that can be temporarily created with Caffe2 APIs. See the note on top of
+   // TensorImpl.h for details.
+   static Storage create_legacy(at::Device device) {
+     auto allocator = GetAllocator(device.type());
+     return Storage(c10::make_intrusive<StorageImpl>(
+         StorageImpl::use_byte_size_t(),
+         0,
+         allocator->allocate(0), // materialize a non-default Device.
+         allocator,
+         true));
+   }
+
+   // Mimic create_legacy, but without requiring a newly-created StorageImpl.
+   void reset_legacy() {
+     TORCH_CHECK(resizable() && allocator());
+     set_nbytes(0);
+     set_data_ptr_noswap(allocator()->allocate(0));
+   }
+
+   // TODO: remove later
+   void set_nbytes(size_t size_bytes) const {
+     storage_impl_->set_nbytes(size_bytes);
+   }
+
+   void set_nbytes(c10::SymInt size_bytes) const {
+     storage_impl_->set_nbytes(std::move(size_bytes));
+   }
+
+   bool resizable() const {
+     return storage_impl_->resizable();
+   }
+
+   size_t nbytes() const {
+     return storage_impl_->nbytes();
+   }
+
+   SymInt sym_nbytes() const {
+     return storage_impl_->sym_nbytes();
+   }
+   // get() use here is to get const-correctness
+
+   const void* data() const {
+     return storage_impl_->data();
+   }
+
+   void* mutable_data() const {
+     return storage_impl_->mutable_data();
+   }
+
+   at::DataPtr& mutable_data_ptr() const {
+     return storage_impl_->mutable_data_ptr();
+   }
+
+   const at::DataPtr& data_ptr() const {
+     return storage_impl_->data_ptr();
+   }
+
+   // Returns the previous data_ptr
+   at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const {
+     return storage_impl_->set_data_ptr(std::move(data_ptr));
+   }
+
+   void set_data_ptr_noswap(at::DataPtr&& data_ptr) const {
+     return storage_impl_->set_data_ptr_noswap(std::move(data_ptr));
+   }
+
+   DeviceType device_type() const {
+     return storage_impl_->device_type();
+   }
+
+   at::Allocator* allocator() const {
+     return storage_impl_->allocator();
+   }
+
+   at::Device device() const {
+     return storage_impl_->device();
+   }
+
+   StorageImpl* unsafeReleaseStorageImpl() {
+     return storage_impl_.release();
+   }
+
+   StorageImpl* unsafeGetStorageImpl() const noexcept {
+     return storage_impl_.get();
+   }
+
+   c10::weak_intrusive_ptr<StorageImpl> getWeakStorageImpl() const {
+     return c10::weak_intrusive_ptr<StorageImpl>(storage_impl_);
+   }
+
+   operator bool() const {
+     return storage_impl_;
+   }
+
+   size_t use_count() const {
+     return storage_impl_.use_count();
+   }
+
+   inline bool unique() const {
+     return storage_impl_.unique();
+   }
+
+   bool is_alias_of(const Storage& other) const {
+     return (
+         storage_impl_ == other.storage_impl_ ||
+         isSharedStorageAlias(*this, other));
+   }
+
+   void UniqueStorageShareExternalPointer(
+       void* src,
+       size_t capacity,
+       DeleterFnPtr d = nullptr) {
+     if (!storage_impl_.unique()) {
+       TORCH_CHECK(
+           false,
+           "UniqueStorageShareExternalPointer can only be called when use_count == 1");
+     }
+     storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d);
+   }
+
+   void UniqueStorageShareExternalPointer(
+       at::DataPtr&& data_ptr,
+       size_t capacity) {
+     if (!storage_impl_.unique()) {
+       TORCH_CHECK(
+           false,
+           "UniqueStorageShareExternalPointer can only be called when use_count == 1");
+     }
+     storage_impl_->UniqueStorageShareExternalPointer(
+         std::move(data_ptr), capacity);
+   }
+
+  protected:
+   c10::intrusive_ptr<StorageImpl> storage_impl_;
+ };
+
+ template <>
+ struct MaybeOwnedTraits<c10::Storage> {
+   using owned_type = c10::Storage;
+   using borrow_type = c10::Storage;
+
+   static borrow_type createBorrow(const owned_type& from) {
+     return borrow_type(borrow_type::unsafe_borrow_t{}, from);
+   }
+
+   static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
+     lhs.unsafeReleaseStorageImpl();
+     lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
+   }
+
+   static void destroyBorrow(borrow_type& toDestroy) {
+     toDestroy.unsafeReleaseStorageImpl(); // "leak" it, but it was already +0.
+   }
+
+   static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
+     return borrow;
+   }
+
+   static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
+     return &borrow;
+   }
+
+   static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
+     return true;
+   }
+ };
+
+ template <>
+ struct ExclusivelyOwnedTraits<c10::Storage> {
+   using repr_type = c10::Storage;
+   using pointer_type = c10::Storage*;
+   using const_pointer_type = const c10::Storage*;
+
+   static repr_type nullRepr() {
+     return c10::Storage();
+   }
+
+   template <class... Args>
+   static repr_type createInPlace(Args&&... args) {
+     return c10::Storage(std::forward<Args>(args)...);
+   }
+
+   static repr_type moveToRepr(c10::Storage&& x) {
+     return std::move(x);
+   }
+
+   static c10::Storage take(c10::Storage& x) {
+     return std::move(x);
+   }
+
+   static pointer_type getImpl(repr_type& x) {
+     return &x;
+   }
+
+   static const_pointer_type getImpl(const repr_type& x) {
+     return &x;
+   }
+ };
+
+ } // namespace c10
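A minimal usage sketch for the Storage handle above; it assumes linking against libc10, which registers the CPU allocator returned by c10::GetAllocator:

#include <c10/core/Storage.h>
#include <iostream>

int main() {
  auto* allocator = c10::GetAllocator(c10::DeviceType::CPU);
  c10::Storage s(
      c10::Storage::use_byte_size_t(),
      c10::SymInt(64),  // allocate 64 bytes
      allocator,
      /*resizable=*/true);
  std::cout << s.nbytes() << "\n";            // 64
  c10::Storage alias = s;                     // shares the same StorageImpl
  std::cout << s.use_count() << "\n";         // 2
  std::cout << s.is_alias_of(alias) << "\n";  // 1
}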
videochat2/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h ADDED
@@ -0,0 +1,330 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/SymInt.h>
+ #include <c10/core/impl/COW.h>
+ #include <c10/core/impl/COWDeleter.h>
+ #include <c10/core/impl/PyObjectSlot.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/UniqueVoidPtr.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <cstddef>
+ #include <utility>
+
+ namespace c10 {
+
+ C10_API void throwNullDataPtrError();
+ C10_API void warnDeprecatedDataPtr();
+
+ // A storage represents the underlying backing data buffer for a
+ // tensor. This concept was inherited from the original Torch7
+ // codebase; we'd kind of like to get rid of the concept
+ // (see https://github.com/pytorch/pytorch/issues/14797) but
+ // it's hard work and no one has gotten around to doing it.
+ //
+ // NB: storage is supposed to uniquely own a data pointer; e.g.,
+ // two non-null data pointers alias if and only if they are from
+ // the same storage. Technically you can violate this invariant
+ // (e.g., you can create a non-owning StorageImpl with at::from_blob)
+ // but a lot of things won't work correctly, including:
+ //
+ // - An ordinary deleter on such a storage is wrong, because normal deleters
+ //   assume unique ownership, but if you have two storages at the same data,
+ //   that implies there is some sort of shared ownership. So your deleter
+ //   would have to actually be internally doing some sort of refcount thing
+ // - Deepcopy on the Python side relies on storage equality and not data
+ //   pointer equality; so if there are two separate storages pointing to the
+ //   same data, the data will actually get duplicated in that case (one data
+ //   ptr before, two data ptrs after)
+ // - Version counts won't work correctly, because we do all VC tracking at
+ //   the level of storages (unless you explicitly disconnect the VC with
+ //   detach); mutations are totally untracked when the data pointers are the
+ //   same
+ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
+  public:
+   struct use_byte_size_t {};
+
+   StorageImpl(
+       use_byte_size_t /*use_byte_size*/,
+       SymInt size_bytes,
+       at::DataPtr data_ptr,
+       at::Allocator* allocator,
+       bool resizable)
+       : data_ptr_(std::move(data_ptr)),
+         size_bytes_(std::move(size_bytes)),
+         size_bytes_is_heap_allocated_(size_bytes_.is_heap_allocated()),
+         resizable_(resizable),
+         received_cuda_(false),
+         allocator_(allocator) {
+     if (resizable) {
+       TORCH_INTERNAL_ASSERT(
+           allocator_, "For resizable storage, allocator must be provided");
+     }
+     refresh_has_data_ptr_check();
+   }
+
+   StorageImpl(
+       use_byte_size_t /*use_byte_size*/,
+       const SymInt& size_bytes,
+       at::Allocator* allocator,
+       bool resizable)
+       : StorageImpl(
+             use_byte_size_t(),
+             size_bytes,
+             size_bytes.is_heap_allocated()
+                 ? allocator->allocate(0)
+                 : allocator->allocate(size_bytes.as_int_unchecked()),
+             allocator,
+             resizable) {}
+
+   StorageImpl& operator=(StorageImpl&& other) = delete;
+   StorageImpl& operator=(const StorageImpl&) = delete;
+   StorageImpl() = delete;
+   StorageImpl(StorageImpl&& other) = delete;
+   StorageImpl(const StorageImpl&) = delete;
+   ~StorageImpl() override = default;
+
+   void reset() {
+     data_ptr_.clear();
+     size_bytes_ = 0;
+     size_bytes_is_heap_allocated_ = false;
+   }
+
+   // Destructor doesn't call release_resources because it's
+   // unnecessary; don't forget to change that if needed!
+   void release_resources() override {
+     data_ptr_.clear();
+   }
+
+   size_t nbytes() const {
+     // OK to do this instead of maybe_as_int as nbytes is guaranteed positive
+     TORCH_CHECK(!size_bytes_is_heap_allocated_);
+     return size_bytes_.as_int_unchecked();
+   }
+
+   SymInt sym_nbytes() const {
+     return size_bytes_;
+   }
+
+   // TODO: remove later
+   void set_nbytes(size_t size_bytes) {
+     size_bytes_ = static_cast<int64_t>(size_bytes);
+     size_bytes_is_heap_allocated_ = false;
+   }
+
+   void set_nbytes(c10::SymInt size_bytes) {
+     size_bytes_ = std::move(size_bytes);
+   }
+
+   bool resizable() const {
+     return resizable_;
+   }
+
+   const at::DataPtr& data_ptr() const {
+     return data_ptr_;
+   }
+
+   at::DataPtr& mutable_data_ptr() {
+     if (C10_UNLIKELY(has_data_ptr_check_)) {
+       if (throw_on_mutable_data_ptr_) {
+         throwNullDataPtrError();
+       }
+       if (warn_deprecated_on_mutable_data_ptr_) {
+         warnDeprecatedDataPtr();
+       }
+       maybe_materialize_cow();
+     }
+     return data_ptr_;
+   }
+
+   // Returns the data_ptr. Bypasses all checks.
+   at::DataPtr& _mutable_data_ptr_no_checks() {
+     return data_ptr_;
+   }
+
+   // Returns the previous data_ptr
+   at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) {
+     // We need to materialize the old COW DataPtr because it is
+     // being returned as mutable.
+     maybe_materialize_cow();
+     return set_data_ptr_no_materialize_cow(std::move(data_ptr));
+   }
+
+   void set_data_ptr_noswap(at::DataPtr&& data_ptr) {
+     data_ptr_ = std::move(data_ptr);
+     refresh_has_data_ptr_check();
+   }
+
+   const void* data() const {
+     return data_ptr_.get();
+   }
+
+   void* mutable_data() {
+     if (C10_UNLIKELY(has_data_ptr_check_)) {
+       if (throw_on_mutable_data_ptr_) {
+         throwNullDataPtrError();
+       }
+       if (warn_deprecated_on_mutable_data_ptr_) {
+         warnDeprecatedDataPtr();
+       }
+       maybe_materialize_cow();
+     }
+     return data_ptr_.mutable_get();
+   }
+
+   at::DeviceType device_type() const {
+     return data_ptr_.device().type();
+   }
+
+   at::Allocator* allocator() {
+     return allocator_;
+   }
+
+   const at::Allocator* allocator() const {
+     return allocator_;
+   }
+
+   // You generally shouldn't use this method, but it is occasionally
+   // useful if you want to override how a tensor will be reallocated,
+   // after it was already allocated (and its initial allocator was
+   // set)
+   void set_allocator(at::Allocator* allocator) {
+     allocator_ = allocator;
+   }
+
+   Device device() const {
+     return data_ptr_.device();
+   }
+
+   void set_resizable(bool resizable) {
+     if (resizable) {
+       // We need an allocator to be resizable
+       AT_ASSERT(allocator_);
+     }
+     resizable_ = resizable;
+   }
+
+   /**
+    * Can only be called when use_count is 1
+    */
+   void UniqueStorageShareExternalPointer(
+       void* src,
+       size_t size_bytes,
+       DeleterFnPtr d = nullptr) {
+     UniqueStorageShareExternalPointer(
+         at::DataPtr(src, src, d, data_ptr_.device()), size_bytes);
+   }
+
+   /**
+    * Can only be called when use_count is 1
+    */
+   void UniqueStorageShareExternalPointer(
+       at::DataPtr&& data_ptr,
+       size_t size_bytes) {
+     data_ptr_ = std::move(data_ptr);
+     size_bytes_ = static_cast<int64_t>(size_bytes);
+     size_bytes_is_heap_allocated_ = false;
+     allocator_ = nullptr;
+     resizable_ = false;
+   }
+
+   // This method can be used only after storage construction and cannot be used
+   // to modify storage status
+   void set_received_cuda(bool received_cuda) {
+     received_cuda_ = received_cuda;
+   }
+
+   bool received_cuda() {
+     return received_cuda_;
+   }
+
+   impl::PyObjectSlot* pyobj_slot() {
+     return &pyobj_slot_;
+   }
+
+   const impl::PyObjectSlot* pyobj_slot() const {
+     return &pyobj_slot_;
+   }
+
+   void set_throw_on_mutable_data_ptr() {
+     throw_on_mutable_data_ptr_ = true;
+     refresh_has_data_ptr_check();
+   }
+
+   void set_warn_deprecated_on_mutable_data_ptr() {
+     warn_deprecated_on_mutable_data_ptr_ = true;
+     refresh_has_data_ptr_check();
+   }
+
+  protected:
+   // materialize_cow_storage needs to call set_data_ptr_no_materialize_cow
+   friend void c10::impl::cow::materialize_cow_storage(StorageImpl& storage);
+
+   // Returns the previous data_ptr. If the old data_ptr was COW,
+   // this avoids materializing it
+   at::DataPtr set_data_ptr_no_materialize_cow(at::DataPtr&& data_ptr) {
+     at::DataPtr old_data_ptr(std::move(data_ptr_));
+     data_ptr_ = std::move(data_ptr);
+     refresh_has_data_ptr_check();
+     return old_data_ptr;
+   }
+
+  private:
+   void refresh_has_data_ptr_check() {
+     has_data_ptr_check_ = is_cow() || throw_on_mutable_data_ptr_ ||
+         warn_deprecated_on_mutable_data_ptr_;
+   }
+
+   inline bool is_cow() const {
+     return c10::impl::cow::is_cow_data_ptr(data_ptr_);
+   }
+
+   // Triggers a copy if this is a copy-on-write tensor.
+   void maybe_materialize_cow() {
+     if (is_cow()) {
+       impl::cow::materialize_cow_storage(*this);
+     }
+   }
+
+   DataPtr data_ptr_;
+   SymInt size_bytes_;
+   bool size_bytes_is_heap_allocated_;
+   bool resizable_;
+   // Identifies that Storage was received from another process and doesn't
+   // have a local-to-process CUDA memory allocation
+   bool received_cuda_;
+   // All special checks in data/data_ptr calls are guarded behind this single
+   // boolean. This is for performance: .data/.data_ptr calls are commonly in
+   // the hot-path.
+   bool has_data_ptr_check_ = false;
+   // If we should throw when mutable_data_ptr() or mutable_data() is called.
+   bool throw_on_mutable_data_ptr_ = false;
+   // If we warn when mutable_data_ptr() or mutable_data() is called.
+   bool warn_deprecated_on_mutable_data_ptr_ = false;
+   Allocator* allocator_;
+   impl::PyObjectSlot pyobj_slot_;
+ };
+
+ // Declare StorageImpl create function pointer types.
+ using StorageImplCreateHelper = intrusive_ptr<StorageImpl> (*)(
+     StorageImpl::use_byte_size_t,
+     SymInt size_bytes,
+     DataPtr data_ptr,
+     Allocator* allocator,
+     bool resizable);
+
+ C10_API void SetStorageImplCreate(DeviceType t, StorageImplCreateHelper fptr);
+
+ C10_API StorageImplCreateHelper GetStorageImplCreate(DeviceType t);
+
+ C10_API c10::intrusive_ptr<c10::StorageImpl> make_storage_impl(
+     c10::StorageImpl::use_byte_size_t use_byte_size,
+     c10::SymInt size_bytes,
+     c10::DataPtr data_ptr,
+     c10::Allocator* allocator,
+     bool resizable,
+     std::optional<at::Device> device_opt);
+
+ } // namespace c10
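A sketch of constructing a StorageImpl directly via the allocating constructor above; like the Storage example, it assumes libc10 is linked so the CPU allocator is registered:

#include <c10/core/StorageImpl.h>
#include <iostream>

int main() {
  auto* allocator = c10::GetAllocator(c10::DeviceType::CPU);
  auto impl = c10::make_intrusive<c10::StorageImpl>(
      c10::StorageImpl::use_byte_size_t(),
      c10::SymInt(32),
      allocator,
      /*resizable=*/true);
  std::cout << impl->nbytes() << "\n";                                 // 32
  std::cout << (impl->device_type() == c10::DeviceType::CPU) << "\n";  // 1
  impl->set_nbytes(static_cast<size_t>(16));  // TODO-flagged above, but usable
  std::cout << impl->nbytes() << "\n";                                 // 16
}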
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Stream.h ADDED
@@ -0,0 +1,176 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/Exception.h>
+ #include <cstddef>
+ #include <cstdint>
+ #include <functional>
+ #include <ostream>
+
+ namespace c10 {
+
+ /// An index representing a specific stream. A StreamId is not independently
+ /// meaningful without knowing the Device it is associated with; try to
+ /// use Stream rather than StreamId directly.
+ ///
+ /// StreamIds are opaque; they are assigned by some DeviceType-specific
+ /// numbering system which is not visible to the user. HOWEVER, we
+ /// guarantee that StreamId 0 is always a valid stream, and corresponds
+ /// to some sort of "default" stream.
+ using StreamId = int64_t;
+
+ struct C10_API StreamData3 {
+   StreamId stream_id;
+   DeviceIndex device_index;
+   DeviceType device_type;
+ };
+
+ // NB: I decided not to call the above StreamIndex to avoid confusion with
+ // DeviceIndex. This way, you access device index with index(), and stream id
+ // with id()
+
+ /**
+  * A stream is a software mechanism used to synchronize launched kernels
+  * without requiring explicit synchronizations between kernels. The basic
+  * model is that every kernel launch is associated with a stream: every
+  * kernel on the same stream is implicitly synchronized so that if I launch
+  * kernels A and B on the same stream, A is guaranteed to finish before B
+  * launches. If I want B to run concurrently with A, I must schedule
+  * it on a different stream.
+  *
+  * The Stream class is a backend agnostic value class representing a stream
+  * which I may schedule a kernel on. Every stream is associated with a device,
+  * which is recorded in the stream, and which is used to avoid confusion about
+  * which device a stream refers to.
+  *
+  * Streams are explicitly thread-safe, in the sense that it is OK to pass
+  * a Stream from one thread to another, and kernels queued from two different
+  * threads will still get serialized appropriately. (Of course, the
+  * time when the kernels get queued is undetermined unless you synchronize
+  * host side ;)
+  *
+  * Stream does NOT have a default constructor. Streams are for expert
+  * users; if you want to use Streams, we're going to assume you know
+  * how to deal with C++ template error messages if you try to
+  * resize() a vector of Streams.
+  *
+  * Known instances of streams in backends:
+  *
+  * - cudaStream_t (CUDA)
+  * - hipStream_t (HIP)
+  * - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration
+  *   does NOT support command queues.)
+  *
+  * Because this class is device agnostic, it cannot provide backend-specific
+  * functionality (e.g., get the cudaStream_t of a CUDA stream.) There are
+  * wrapper classes which provide this functionality, e.g., CUDAStream.
+  */
+ class C10_API Stream final {
+  private:
+   Device device_;
+   StreamId id_;
+
+  public:
+   enum Unsafe { UNSAFE };
+   enum Default { DEFAULT };
+
+   /// Unsafely construct a stream from a Device and a StreamId. In
+   /// general, only specific implementations of streams for a
+   /// backend should manufacture Stream directly in this way; other users
+   /// should use the provided APIs to get a stream. In particular,
+   /// we don't require backends to give any guarantees about non-zero
+   /// StreamIds; they are welcome to allocate in whatever way they like.
+   explicit Stream(Unsafe, Device device, StreamId id)
+       : device_(device), id_(id) {}
+
+   /// Construct the default stream of a Device. The default stream is
+   /// NOT the same as the current stream; default stream is a fixed stream
+   /// that never changes, whereas the current stream may be changed by
+   /// StreamGuard.
+   explicit Stream(Default, Device device) : device_(device), id_(0) {}
+
+   bool operator==(const Stream& other) const noexcept {
+     return this->device_ == other.device_ && this->id_ == other.id_;
+   }
+   bool operator!=(const Stream& other) const noexcept {
+     return !(*this == other);
+   }
+
+   Device device() const noexcept {
+     return device_;
+   }
+   DeviceType device_type() const noexcept {
+     return device_.type();
+   }
+   DeviceIndex device_index() const noexcept {
+     return device_.index();
+   }
+   StreamId id() const noexcept {
+     return id_;
+   }
+
+   // Enqueues a wait instruction in the stream's work queue.
+   // This instruction is a no-op unless the event is marked
+   // for recording. In that case the stream stops processing
+   // until the event is recorded.
+   template <typename T>
+   void wait(const T& event) const {
+     event.block(*this);
+   }
+
+   // Return whether all asynchronous work previously enqueued on this stream
+   // has completed running on the device.
+   bool query() const;
+
+   // Wait (by blocking the calling thread) until all asynchronous work enqueued
+   // on this stream has completed running on the device.
+   void synchronize() const;
+
+   // The purpose of this function is to more conveniently permit binding
+   // of Stream to and from Python. Without packing, I have to setup a whole
+   // class with two fields (device and stream id); with packing I can just
+   // store a single uint64_t.
+   //
+   // The particular way we pack streams into a uint64_t is considered an
+   // implementation detail and should not be relied upon.
+   uint64_t hash() const noexcept {
+     // Concat these together into a 64-bit integer
+     uint64_t bits = static_cast<uint64_t>(device_type()) << 56 |
+         static_cast<uint64_t>(device_index()) << 48 |
+         // Remove the sign extension part of the 64-bit address because
+         // the id might be used to hold a pointer.
+         (static_cast<uint64_t>(id()) & ((1ull << 48) - 1));
+     return bits;
+   }
+
+   struct StreamData3 pack3() const {
+     return {id(), device_index(), device_type()};
+   }
+
+   static Stream unpack3(
+       StreamId stream_id,
+       DeviceIndex device_index,
+       DeviceType device_type) {
+     TORCH_CHECK(isValidDeviceType(device_type));
+     return Stream(UNSAFE, Device(device_type, device_index), stream_id);
+   }
+
+   // I decided NOT to provide setters on this class, because really,
+   // why would you change the device of a stream? Just construct
+   // it correctly from the beginning dude.
+ };
+
+ C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s);
+
+ } // namespace c10
+
+ namespace std {
+ template <>
+ struct hash<c10::Stream> {
+   size_t operator()(c10::Stream s) const noexcept {
+     return std::hash<uint64_t>{}(s.hash());
+   }
+ };
+ } // namespace std
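A sketch of the pack3/unpack3 round trip described above, again assuming a program linked against libc10; it sticks to the value-type API and avoids query()/synchronize(), which require a backend guard implementation:

#include <c10/core/Stream.h>
#include <iostream>

int main() {
  c10::Device dev(c10::DeviceType::CPU);
  c10::Stream s(c10::Stream::DEFAULT, dev);  // StreamId 0 = default stream
  std::cout << s.id() << "\n";               // 0

  auto packed = s.pack3();
  c10::Stream t = c10::Stream::unpack3(
      packed.stream_id, packed.device_index, packed.device_type);
  std::cout << (s == t) << "\n";             // 1: round-trips exactly
  std::cout << s.hash() << "\n";             // packed 64-bit form
}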
videochat2/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/Stream.h>
5
+ #include <c10/core/impl/InlineStreamGuard.h>
6
+ #include <c10/core/impl/VirtualGuardImpl.h>
7
+ #include <c10/util/ArrayRef.h>
8
+ #include <c10/util/Optional.h>
9
+
10
+ namespace c10 {
11
+
12
+ /**
13
+ * A StreamGuard is an RAII class that changes the current device
14
+ * to the device corresponding to some stream, and changes the
15
+ * default stream on that device to be this stream.
16
+ *
17
+ * Use of StreamGuard is HIGHLY discouraged in operator definitions. In
18
+ * a single operator, you probably don't know enough about the global
19
+ * state of the world to profitably decide how to set streams. Let
20
+ * the caller handle this appropriately, and just use the current stream
21
+ * in your operator code.
22
+ *
23
+ * This StreamGuard does NOT have an uninitialized state; it is guaranteed
24
+ * to reset the stream and device on exit. If you are in a situation
25
+ * where you *might* want to setup a stream guard, see OptionalStreamGuard.
26
+ */
27
+ struct StreamGuard {
28
+ /// No default constructor, see Note [Omitted default constructor from RAII]
29
+ explicit StreamGuard() = delete;
30
+
31
+ /// Set the current device to the device associated with the passed stream,
32
+ /// and set the current stream on that device to the passed stream.
33
+ explicit StreamGuard(Stream stream) : guard_(stream) {}
34
+
35
+ /// Copy is disallowed
36
+ StreamGuard(const StreamGuard&) = delete;
37
+ StreamGuard& operator=(const StreamGuard&) = delete;
38
+
39
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
40
+ /// which is required for moves on types with nontrivial destructors.
41
+ StreamGuard(StreamGuard&& other) = delete;
42
+   StreamGuard& operator=(StreamGuard&& other) = delete;
+
+   /// Resets the currently set stream to the original stream and
+   /// the currently set device to the original device. Then,
+   /// set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   ///
+   /// NOTE: this implementation may skip some stream/device setting if
+   /// it can prove that it is unnecessary.
+   ///
+   /// WARNING: reset_stream does NOT preserve previously set streams on
+   /// different devices. If you need to set streams on multiple devices,
+   /// use MultiStreamGuard instead.
+   void reset_stream(Stream stream) {
+     guard_.reset_stream(stream);
+   }
+
+   /// Returns the stream that was set at the time the guard was constructed.
+   Stream original_stream() const {
+     return guard_.original_stream();
+   }
+
+   /// Returns the most recent stream that was set using this stream guard,
+   /// either from construction, or via reset_stream.
+   Stream current_stream() const {
+     return guard_.current_stream();
+   }
+
+   /// Returns the most recent device that was set using this stream guard,
+   /// either from construction, or via reset_stream.
+   Device current_device() const {
+     return guard_.current_device();
+   }
+
+   /// Returns the device that was set at the most recent reset_stream(),
+   /// or otherwise the device at construction time.
+   Device original_device() const {
+     return guard_.original_device();
+   }
+
+  private:
+   c10::impl::InlineStreamGuard<impl::VirtualGuardImpl> guard_;
+ };
+
+ /**
+  * An OptionalStreamGuard is an RAII class that sets the current device and
+  * stream to some values on initialization, and resets them to their original
+  * values on destruction.
+  * See OptionalDeviceGuard for more guidance on how to use this class.
+  */
+ struct OptionalStreamGuard {
+   /// Create an uninitialized guard.
+   explicit OptionalStreamGuard() = default;
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   explicit OptionalStreamGuard(Stream stream) : guard_(stream) {}
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream,
+   /// if the passed stream is not nullopt.
+   explicit OptionalStreamGuard(std::optional<Stream> stream_opt)
+       : guard_(stream_opt) {}
+
+   /// Copy is disallowed
+   OptionalStreamGuard(const OptionalStreamGuard&) = delete;
+   OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete;
+
+   // See Note [Move construction for RAII guards is tricky]
+   OptionalStreamGuard(OptionalStreamGuard&& other) = delete;
+
+   // See Note [Move assignment for RAII guards is tricky]
+   OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete;
+
+   /// Resets the currently set stream to the original stream and
+   /// the currently set device to the original device. Then,
+   /// set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   /// Initializes the guard if it was not previously initialized.
+   void reset_stream(Stream stream) {
+     guard_.reset_stream(stream);
+   }
+
+   /// Returns the stream that was set at the time the guard was most recently
+   /// initialized, or nullopt if the guard is uninitialized.
+   std::optional<Stream> original_stream() const {
+     return guard_.original_stream();
+   }
+
+   /// Returns the most recent stream that was set using this stream guard,
+   /// either from construction, or via reset_stream, if the guard is
+   /// initialized, or nullopt if the guard is uninitialized.
+   std::optional<Stream> current_stream() const {
+     return guard_.current_stream();
+   }
+
+   /// Restore the original device and stream, resetting this guard to
+   /// uninitialized state.
+   void reset() {
+     guard_.reset();
+   }
+
+  private:
+   c10::impl::InlineOptionalStreamGuard<impl::VirtualGuardImpl> guard_{};
+ };
+
+ /**
+  * A MultiStreamGuard is an RAII class that sets the current streams of a set
+  * of devices all at once, and resets them to their original values on
+  * destruction.
+  */
+ struct MultiStreamGuard {
+   /// Set the current streams to the passed streams on each of their
+   /// respective devices.
+   explicit MultiStreamGuard(ArrayRef<Stream> streams) : guard_(streams) {}
+
+   /// Copy is disallowed
+   MultiStreamGuard(const MultiStreamGuard&) = delete;
+   MultiStreamGuard& operator=(const MultiStreamGuard&) = delete;
+
+   // See Note [Move construction for RAII guards is tricky]
+   MultiStreamGuard(MultiStreamGuard&& other) = delete;
+
+   // See Note [Move assignment for RAII guards is tricky]
+   MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete;
+
+  private:
+   c10::impl::InlineMultiStreamGuard<impl::VirtualGuardImpl> guard_;
+ };
+
+ } // namespace c10
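
As a quick orientation for readers of this diff, here is a minimal usage sketch of the guard above (my own illustration, not part of the diff; it assumes a PyTorch build with a stream-capable backend such as CUDA, and `work_stream` is a hypothetical placeholder):

    #include <c10/core/StreamGuard.h>

    void enqueue_on(c10::Stream work_stream) {
      // Switches the current device to work_stream's device and makes
      // work_stream current on that device, for this scope only.
      c10::StreamGuard guard(work_stream);
      // ... launch work on guard.current_stream() ...
    } // destructor restores the original device and stream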
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h ADDED
@@ -0,0 +1,89 @@
+ #pragma once
+
+ #include <c10/core/SymInt.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/DimVector.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/irange.h>
+ #include <cstdint>
+ #include <optional>
+
+ namespace c10 {
+ using SymIntArrayRef = ArrayRef<SymInt>;
+
+ inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) {
+   return IntArrayRef(reinterpret_cast<const int64_t*>(ar.data()), ar.size());
+ }
+
+ // TODO: a SymIntArrayRef containing a heap allocated large negative integer
+ // can actually technically be converted to an IntArrayRef... but not with
+ // the non-owning API we have here. We can't reinterpret cast; we have to
+ // allocate another buffer and write the integers into it. If you need it,
+ // we can do it. But I don't think you need it.
+
+ inline std::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
+     c10::SymIntArrayRef ar) {
+   for (const c10::SymInt& sci : ar) {
+     if (sci.is_heap_allocated()) {
+       return std::nullopt;
+     }
+   }
+
+   return {asIntArrayRefUnchecked(ar)};
+ }
+
+ inline at::IntArrayRef asIntArrayRefSlow(
+     c10::SymIntArrayRef ar,
+     const char* file,
+     int64_t line) {
+   for (const c10::SymInt& sci : ar) {
+     TORCH_CHECK(
+         !sci.is_heap_allocated(),
+         file,
+         ":",
+         line,
+         ": SymIntArrayRef expected to contain only concrete integers");
+   }
+   return asIntArrayRefUnchecked(ar);
+ }
+
+ // Even slower than asIntArrayRefSlow, as it forces an allocation for a
+ // destination buffer, BUT it is able to force specialization (it never
+ // errors)
+ inline c10::DimVector asIntArrayRefSlowAlloc(
+     c10::SymIntArrayRef ar,
+     const char* file,
+     int64_t line) {
+   c10::DimVector res(ar.size(), 0);
+   for (const auto i : c10::irange(ar.size())) {
+     res[i] = ar[i].guard_int(file, line);
+   }
+   return res;
+ }
+
+ #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__)
+ #define C10_AS_INTARRAYREF_SLOW_ALLOC(a) \
+   c10::asIntArrayRefSlowAlloc(a, __FILE__, __LINE__)
+
+ // Prefer using a more semantic constructor, like
+ // fromIntArrayRefKnownNonNegative
+ inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) {
+   return SymIntArrayRef(
+       reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
+ }
+
+ inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) {
+   return fromIntArrayRefUnchecked(array_ref);
+ }
+
+ inline SymIntArrayRef fromIntArrayRefSlow(IntArrayRef array_ref) {
+   for (long i : array_ref) {
+     TORCH_CHECK(
+         SymInt::check_range(i),
+         "IntArrayRef contains an int that cannot be represented as a SymInt: ",
+         i);
+   }
+   return SymIntArrayRef(
+       reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
+ }
+
+ } // namespace c10
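
A small sketch of how the conversion macro above is typically consumed (my own illustration; `numel_of` is a hypothetical helper):

    #include <c10/core/SymIntArrayRef.h>
    #include <cstdint>

    int64_t numel_of(c10::SymIntArrayRef sizes) {
      // Fails with file:line context if any element is a symbolic SymInt.
      at::IntArrayRef concrete = C10_AS_INTARRAYREF_SLOW(sizes);
      int64_t n = 1;
      for (int64_t s : concrete) {
        n *= s;
      }
      return n;
    }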
videochat2/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h ADDED
@@ -0,0 +1,787 @@
+ #pragma once
+
+ #include <c10/core/Backend.h>
+ #include <c10/core/DefaultDtype.h>
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/DispatchKey.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/ScalarTypeToTypeMeta.h>
+
+ #include <c10/macros/Export.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+ #include <optional>
+
+ #include <cstdint>
+ #include <iosfwd>
+ #include <string>
+ #include <type_traits>
+ #include <utility>
+
+ namespace c10 {
+
+ DispatchKey computeDispatchKey(
+     std::optional<ScalarType> dtype,
+     std::optional<Layout> layout,
+     std::optional<Device> device);
+
+ inline ScalarType dtype_or_default(std::optional<ScalarType> dtype) {
+   return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); });
+ }
+
+ inline caffe2::TypeMeta dtype_or_default(
+     std::optional<caffe2::TypeMeta> dtype) {
+   return value_or_else(dtype, [] { return get_default_dtype(); });
+ }
+
+ inline Layout layout_or_default(std::optional<Layout> layout) {
+   return layout.value_or(kStrided);
+ }
+
+ inline Device device_or_default(std::optional<Device> device) {
+   return value_or_else(device, [] { return Device(kCPU); });
+ }
+
+ inline bool pinned_memory_or_default(std::optional<bool> pinned_memory) {
+   return pinned_memory.value_or(false);
+ }
+
+ /// A class to encapsulate construction axes of a Tensor. TensorOptions was
+ /// designed to support the Python style API for specifying construction
+ /// options on factory functions, e.g.,
+ ///
+ ///     torch.zeros(2, 3, dtype=torch.int32)
+ ///
+ /// Because C++ doesn't natively support keyword arguments, there must be
+ /// another way of specifying keyword-like arguments. TensorOptions is a
+ /// builder class which can be used to construct this "dictionary" of keyword
+ /// arguments: functions which support TensorOptions conventionally take this
+ /// argument optionally as their last argument.
+ ///
+ /// WARNING: In PyTorch, there are `torch::` variants of factory functions,
+ /// e.g., torch::zeros for at::zeros. These return Variables (while the
+ /// stock ATen functions return plain Tensors). If you mix these functions
+ /// up, you WILL BE SAD.
+ ///
+ /// Rather than use the constructor of this class directly, you should prefer
+ /// to use the constructor functions, and then chain setter methods on top of
+ /// them.
+ ///
+ ///     at::device(at::kCUDA).dtype(kInt)
+ ///     at::dtype(at::kInt)
+ ///
+ /// Additionally, anywhere a TensorOptions is expected, you can directly
+ /// pass at::kCUDA / at::kInt, and it will implicitly convert to a
+ /// TensorOptions.
+ ///
+ /// Here are some recommended ways to create a 2x2 tensor of zeros
+ /// with certain properties. These all *implicitly* make use of
+ /// TensorOptions, even if they don't mention the class explicitly:
+ ///
+ ///     at::zeros({2,2}, at::kCUDA);
+ ///     at::zeros({2,2}, at::kLong);
+ ///     at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong));
+ ///     at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1
+ ///     at::zeros({2,2}, at::requires_grad());
+ ///
+
+ /// NOTE [ TensorOptions Constructors ]
+ ///
+ /// TensorOptions is like a dictionary with entries from the set:
+ /// {requires_grad, device, dtype, layout}, where each entry may be
+ /// unspecified (i.e., is optional). It is used to specify the properties of
+ /// tensors in many places both in C++ internal and API, e.g., tensor factory
+ /// methods like `at::empty({10}, options)`, tensor conversions like
+ /// `tensor.to(...)`, etc.
+ ///
+ /// To provide a simple API that is consistent with Python, where one can do
+ /// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or
+ /// a `torch.layout`, we want TensorOptions to be implicitly convertible from
+ /// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we
+ /// have three implicit constructors, one from each of these three types.
+ ///
+ /// This is sufficient for `ScalarType` and `Layout` as they are simple Enum
+ /// classes. However, `Device` is an ordinary class with implicit constructors
+ /// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be
+ /// consistent with Python API, where strings are treated as equivalent with a
+ /// `torch.device` object (e.g., "cuda:1" can be passed to everywhere a
+ /// `torch.device("cuda:1")` is accepted). To support the syntax
+ /// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure
+ /// that `TensorOptions` is implicitly constructible with any arguments that a
+ /// `Device` can be constructed from. So we have,
+ ///
+ ///     /* implicit */ TensorOptions(T&& device) : TensorOptions() {
+ ///       this->set_device(device);
+ ///     }
+ ///
+ ///     template <typename... Args,
+ ///               typename = std::enable_if_t<std::is_constructible<Device,
+ ///               Args&&...>::value>>
+ ///     /* implicit */ TensorOptions(Args&&... args)
+ ///         : TensorOptions(Device(std::forward<Args>(args)...)) {}
+ ///
+ ///
+ /// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`.
+ /// Compiler will complain about ambiguity between the copy constructor and
+ /// the `Device` constructor because `{kCUDA, 1}` can be converted to both a
+ /// `TensorOptions` and a `Device`.
+ ///
+ /// To get around this, we templatize the `Device` constructor. Since overload
+ /// resolution is done before template resolution, our problem is solved.
+
+ DispatchKey computeDispatchKey(
+     std::optional<ScalarType> dtype,
+     std::optional<Layout> layout,
+     std::optional<Device> device);
+
+ struct C10_API TensorOptions {
+   TensorOptions()
+       : requires_grad_(false),
+         pinned_memory_(false),
+         has_device_(false),
+         has_dtype_(false),
+         has_layout_(false),
+         has_requires_grad_(false),
+         has_pinned_memory_(false),
+         has_memory_format_(false) {}
+
+   /// Constructs a `TensorOptions` object with the given layout.
+   /* implicit */ TensorOptions(Layout layout) : TensorOptions() {
+     this->set_layout(layout);
+   }
+
+   /// Constructs a `TensorOptions` object with the given device.
+   /// See NOTE [ TensorOptions Constructors ] on why this is templatized.
+   template <
+       typename T,
+       typename = std::enable_if_t<std::is_same_v<std::decay_t<T>, Device>>>
+   /* implicit */ TensorOptions(T&& device) : TensorOptions() {
+     this->set_device(std::forward<T>(device));
+   }
+
+   /// Constructs a `TensorOptions` object from arguments allowed in `Device`
+   /// constructors.
+   ///
+   /// See NOTE [ TensorOptions Constructors ].
+   ///
+   /// NB: Ideally we only allow implicit constructors here. But there is no
+   /// easy way to detect them. So we have this one that allows explicit
+   /// constructors too.
+   template <
+       typename... Args,
+       typename = std::enable_if_t<std::is_constructible_v<Device, Args&&...>>>
+   /* implicit */ TensorOptions(Args&&... args)
+       : TensorOptions(Device(std::forward<Args>(args)...)) {}
+
+   /// Constructs a `TensorOptions` object with the given dtype.
+   /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() {
+     this->set_dtype(dtype);
+   }
+
+   /// legacy constructor to support ScalarType
+   /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() {
+     this->set_dtype(dtype);
+   }
+
+   /// Constructs a `TensorOptions` object with the given memory format.
+   /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() {
+     set_memory_format(memory_format);
+   }
+
+   /// Return a copy of `TensorOptions` with `device` set to the given one, or
+   /// cleared if `device` is `nullopt`.
+   C10_NODISCARD TensorOptions
+   device(std::optional<Device> device) const noexcept {
+     TensorOptions r = *this;
+     r.set_device(device);
+     return r;
+   }
+
+   /// Return a copy of `TensorOptions` with `device` set to the given one.
+   /// (This overload ensures that the variadic template std::optional
+   /// constructor for Device works correctly.)
+   template <typename... Args>
+   C10_NODISCARD TensorOptions device(Args&&... args) const noexcept {
+     return device(
+         std::optional<Device>(std::in_place, std::forward<Args>(args)...));
+   }
+
+   /// Return a copy of `TensorOptions`, but with device set to CUDA, and the
+   /// device index set to the given one.
+   ///
+   /// TODO: This function encourages bad behavior (assuming CUDA is
+   /// the only device that matters). Get rid of it / rename it.
+   C10_NODISCARD TensorOptions
+   device_index(c10::DeviceIndex device_index) const noexcept {
+     return device(Device::Type::CUDA, device_index);
+   }
+
+   /// Return a copy of `TensorOptions` with `dtype` set to the given one.
+   C10_NODISCARD TensorOptions
+   dtype(std::optional<caffe2::TypeMeta> dtype) const noexcept {
+     TensorOptions r = *this;
+     r.set_dtype(dtype);
+     return r;
+   }
+
+   // legacy function to support ScalarType
+   C10_NODISCARD TensorOptions
+   dtype(std::optional<ScalarType> dtype) const noexcept {
+     TensorOptions r = *this;
+     r.set_dtype(dtype);
+     return r;
+   }
+
+   // Since dtype is taken...
+   template <typename T>
+   TensorOptions& dtype() {
+     dtype_ = caffe2::TypeMeta::Make<T>();
+     has_dtype_ = true;
+     return *this;
+   }
+
+   /// Return a copy of `TensorOptions` with `layout` set to the given one.
+   C10_NODISCARD TensorOptions
+   layout(std::optional<Layout> layout) const noexcept {
+     TensorOptions r = *this;
+     r.set_layout(layout);
+     return r;
+   }
+
+   /// Return a copy of `TensorOptions` with the `requires_grad` property set
+   /// to the given one.
+   C10_NODISCARD TensorOptions
+   requires_grad(std::optional<bool> requires_grad) const noexcept {
+     TensorOptions r = *this;
+     r.set_requires_grad(requires_grad);
+     return r;
+   }
+
+   /// Return a copy of `TensorOptions` with the `pinned_memory` property set
+   /// to the given one.
+   C10_NODISCARD TensorOptions
+   pinned_memory(std::optional<bool> pinned_memory) const noexcept {
+     TensorOptions r = *this;
+     r.set_pinned_memory(pinned_memory);
+     return r;
+   }
+
+   /// Return a copy of `TensorOptions` with the `memory_format` property set
+   /// to the given one.
+   C10_NODISCARD TensorOptions
+   memory_format(std::optional<MemoryFormat> memory_format) const noexcept {
+     TensorOptions r = *this;
+     r.set_memory_format(memory_format);
+     return r;
+   }
+
+   /// Returns the device of the `TensorOptions`.
+   Device device() const noexcept {
+     return device_or_default(device_opt());
+   }
+
+   /// Returns whether the device is specified.
+   bool has_device() const noexcept {
+     return has_device_;
+   }
+
+   /// Returns the device of the `TensorOptions`, or `std::nullopt` if
+   /// device is not specified.
+   std::optional<Device> device_opt() const noexcept {
+     return has_device_ ? std::make_optional(device_) : std::nullopt;
+   }
+
+   /// Returns the device index of the `TensorOptions`.
+   c10::DeviceIndex device_index() const noexcept {
+     return device().index();
+   }
+
+   /// Returns the dtype of the `TensorOptions`.
+   caffe2::TypeMeta dtype() const noexcept {
+     return dtype_or_default(dtype_opt());
+   }
+
+   /// Returns whether the dtype is specified.
+   bool has_dtype() const noexcept {
+     return has_dtype_;
+   }
+
+   /// Returns the dtype of the `TensorOptions`, or `std::nullopt` if
+   /// dtype is not specified.
+   std::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
+     return has_dtype_ ? std::make_optional(dtype_) : std::nullopt;
+   }
+
+   /// Returns the layout of the `TensorOptions`.
+   Layout layout() const noexcept {
+     return layout_or_default(layout_opt());
+   }
+
+   /// Returns whether the layout is specified.
+   bool has_layout() const noexcept {
+     return has_layout_;
+   }
+
+   /// Returns the layout of the `TensorOptions`, or `std::nullopt` if
+   /// layout is not specified.
+   std::optional<Layout> layout_opt() const noexcept {
+     return has_layout_ ? std::make_optional(layout_) : std::nullopt;
+   }
+
+   /// Returns the `requires_grad` property of the `TensorOptions`.
+   bool requires_grad() const noexcept {
+     return has_requires_grad_ ? requires_grad_ : false;
+   }
+
+   /// Returns whether the `requires_grad` is specified.
+   bool has_requires_grad() const noexcept {
+     return has_requires_grad_;
+   }
+
+   /// Returns the `requires_grad` property of the `TensorOptions`, or
+   /// `std::nullopt` if `requires_grad` is not specified.
+   std::optional<bool> requires_grad_opt() const noexcept {
+     return has_requires_grad_ ? std::make_optional(requires_grad_)
+                               : std::nullopt;
+   }
+
+   /// Returns the `pinned_memory` property of the `TensorOptions`.
+   bool pinned_memory() const noexcept {
+     return pinned_memory_or_default(pinned_memory_opt());
+   }
+
+   /// Returns whether the `pinned_memory` is specified.
+   bool has_pinned_memory() const noexcept {
+     return has_pinned_memory_;
+   }
+
+   /// Returns if the layout is sparse
+   bool is_sparse() const {
+     return layout_ == c10::Layout::Sparse;
+   }
+
+   /// Returns if the layout is sparse CSR. Deprecated; use
+   /// is_sparse_compressed() instead.
+   bool is_sparse_csr() const {
+     return layout_ == c10::Layout::SparseCsr;
+   }
+
+   bool is_sparse_compressed() const {
+     return layout_ == c10::Layout::SparseCsr ||
+         layout_ == c10::Layout::SparseCsc ||
+         layout_ == c10::Layout::SparseBsr || layout_ == c10::Layout::SparseBsc;
+   }
+
+   // For compatibility with legacy tensor.type() comparisons
+   bool type_equal(const TensorOptions& other) const {
+     return computeDispatchKey() == other.computeDispatchKey() &&
+         typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype());
+   }
+
+   /// Returns the `pinned_memory` property of the `TensorOptions`, or
+   /// `std::nullopt` if `pinned_memory` is not specified.
+   std::optional<bool> pinned_memory_opt() const noexcept {
+     return has_pinned_memory_ ? std::make_optional(pinned_memory_)
+                               : std::nullopt;
+   }
+
+   /// Returns whether the `memory_format` is specified.
+   bool has_memory_format() const noexcept {
+     return has_memory_format_;
+   }
+
+   // NB: memory_format() getter is PURPOSELY not defined, as the default
+   // behavior of memory_format varies from function to function.
+
+   /// Returns the `memory_format` property of `TensorOptions`, or
+   /// `std::nullopt` if `memory_format` is not specified.
+   std::optional<MemoryFormat> memory_format_opt() const noexcept {
+     return has_memory_format_ ? std::make_optional(memory_format_)
+                               : std::nullopt;
+   }
+
+   // Resolves the ATen backend specified by the current construction axes.
+   // TODO: Deprecate this
+   Backend backend() const {
+     return at::dispatchKeyToBackend(computeDispatchKey());
+   }
+
+   /// Return the right-biased merge of two TensorOptions. This has the
+   /// effect of overwriting settings from `self` with the specified fields
+   /// of `options`.
+   ///
+   /// NB: This merging operation does NOT respect device merges.
+   /// For example, if you call `device({kCUDA, 1}).merge_in(kCUDA)`,
+   /// you will get kCUDA in the end! Functions like Tensor.new_empty
+   /// ensure the right device is selected anyway by way of a
+   /// device guard.
+   ///
+   TensorOptions merge_in(TensorOptions options) const noexcept {
+     TensorOptions merged = *this;
+     if (options.has_device())
+       merged.set_device(options.device_opt());
+     if (options.has_dtype())
+       merged.set_dtype(options.dtype_opt());
+     if (options.has_layout())
+       merged.set_layout(options.layout_opt());
+     // NB: requires grad is right biased; not a logical AND/OR!
+     if (options.has_requires_grad())
+       merged.set_requires_grad(options.requires_grad_opt());
+     if (options.has_pinned_memory())
+       merged.set_pinned_memory(options.pinned_memory_opt());
+     if (options.has_memory_format())
+       merged.set_memory_format(options.memory_format_opt());
+     return merged;
+   }
+
+   // TODO remove after TensorOptions rationalization
+   TensorOptions merge_memory_format(
+       std::optional<MemoryFormat> optional_memory_format) const noexcept {
+     TensorOptions merged = *this;
+     if (optional_memory_format.has_value()) {
+       merged.set_memory_format(*optional_memory_format);
+     }
+     return merged;
+   }
+
+   // INVARIANT: computeDispatchKey returns only the subset of dispatch keys
+   // for which dispatchKeyToBackend is injective, if it is defined at all
+   // (for the most part, this just means that this function never returns
+   // an Autograd key)
+   DispatchKey computeDispatchKey() const {
+     return c10::computeDispatchKey(
+         optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt());
+   }
+
+  private:
+   // These methods are currently private because I'm not sure if it's wise
+   // to actually publish them. They are methods because I need them in
+   // the constructor and the functional API implementation.
+   //
+   // If you really, really need it, you can make these public, but check if
+   // you couldn't just do what you need with the functional API. Similarly,
+   // these methods are not chainable, because if you wanted chaining, you
+   // probably want to use the functional API instead. (It's probably OK to
+   // make these chainable, because these functions are all explicitly
+   // annotated with a ref-qualifier, the trailing &, that makes them illegal
+   // to call on temporaries.)
+
+   /// Mutably set the device of `TensorOptions`.
+   void set_device(std::optional<Device> device) & noexcept {
+     if (device) {
+       device_ = *device;
+       has_device_ = true;
+     } else {
+       has_device_ = false;
+     }
+   }
+
+   /// Mutably set the dtype of `TensorOptions`.
+   void set_dtype(std::optional<caffe2::TypeMeta> dtype) & noexcept {
+     if (dtype) {
+       dtype_ = *dtype;
+       has_dtype_ = true;
+     } else {
+       has_dtype_ = false;
+     }
+   }
+
+   // legacy function to support ScalarType
+   void set_dtype(std::optional<ScalarType> dtype) & noexcept {
+     if (dtype) {
+       dtype_ = scalarTypeToTypeMeta(*dtype);
+       has_dtype_ = true;
+     } else {
+       has_dtype_ = false;
+     }
+   }
+
+   /// Mutably set the layout of `TensorOptions`.
+   void set_layout(std::optional<Layout> layout) & noexcept {
+     if (layout) {
+       layout_ = *layout;
+       has_layout_ = true;
+     } else {
+       has_layout_ = false;
+     }
+   }
+
+   /// Mutably set the `requires_grad` property of `TensorOptions`.
+   void set_requires_grad(std::optional<bool> requires_grad) & noexcept {
+     if (requires_grad) {
+       requires_grad_ = *requires_grad;
+       has_requires_grad_ = true;
+     } else {
+       has_requires_grad_ = false;
+     }
+   }
+
+   /// Mutably set the `pinned_memory` property of `TensorOptions`.
+   void set_pinned_memory(std::optional<bool> pinned_memory) & noexcept {
+     if (pinned_memory) {
+       pinned_memory_ = *pinned_memory;
+       has_pinned_memory_ = true;
+     } else {
+       has_pinned_memory_ = false;
+     }
+   }
+
+   /// Mutably set the `memory_format` property of `TensorOptions`.
+   void set_memory_format(std::optional<MemoryFormat> memory_format) & noexcept {
+     if (memory_format) {
+       memory_format_ = *memory_format;
+       has_memory_format_ = true;
+     } else {
+       has_memory_format_ = false;
+     }
+   }
+
+   // WARNING: If you edit TensorOptions to add more options, you
+   // may need to adjust the implementation of Tensor::options.
+   // The criteria for whether or not Tensor::options must be adjusted
+   // is whether or not the new option you added should be preserved
+   // by functions such as empty_like(); if it should be preserved,
+   // you must adjust options().
+   //
+   // TODO: MemoryFormat is not implemented in this way
+
+   // NB: We didn't use std::optional here, because then we can't pack
+   // the has_***_ boolean fields.
+
+   Device device_ = at::kCPU; // 16-bit
+   caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 16-bit
+   Layout layout_ = at::kStrided; // 8-bit
+   MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit
+
+   // Bitmask required here to get this to fit inside 32 bits (or even 64
+   // bits, for that matter)
+
+   bool requires_grad_ : 1;
+   bool pinned_memory_ : 1;
+
+   bool has_device_ : 1;
+   bool has_dtype_ : 1;
+   bool has_layout_ : 1;
+   bool has_requires_grad_ : 1;
+   bool has_pinned_memory_ : 1;
+   bool has_memory_format_ : 1;
+ };
+
+ // We should aspire to fit in one machine-size word; but a size greater than
+ // two words is too much. (We are doing terribly on 32-bit archs, where we
+ // require three machine size words to store tensor options. Eek!)
+ static_assert(
+     sizeof(TensorOptions) <= sizeof(int64_t) * 2,
+     "TensorOptions must fit in 128-bits");
+
+ /// Convenience function that returns a `TensorOptions` object with the
+ /// `dtype` set to the given one.
+ inline TensorOptions dtype(caffe2::TypeMeta dtype) {
+   return TensorOptions().dtype(dtype);
+ }
+
+ // legacy function to support ScalarType
+ inline TensorOptions dtype(ScalarType dtype) {
+   return TensorOptions().dtype(scalarTypeToTypeMeta(dtype));
+ }
+
+ /// Convenience function that returns a `TensorOptions` object with the
+ /// `layout` set to the given one.
+ inline TensorOptions layout(Layout layout) {
+   return TensorOptions().layout(layout);
+ }
+
+ /// Convenience function that returns a `TensorOptions` object with the
+ /// `device` set to the given one.
+ inline TensorOptions device(Device device) {
+   return TensorOptions().device(device);
+ }
+
+ /// Convenience function that returns a `TensorOptions` object with the
+ /// `device` set to CUDA and the `device_index` set to the given one.
+ inline TensorOptions device_index(c10::DeviceIndex device_index) {
+   return TensorOptions().device_index(device_index);
+ }
+
+ /// Convenience function that returns a `TensorOptions` object with the
+ /// `requires_grad` set to the given one.
+ inline TensorOptions requires_grad(bool requires_grad = true) {
+   return TensorOptions().requires_grad(requires_grad);
+ }
+
+ /// Convenience function that returns a `TensorOptions` object with the
+ /// `memory_format` set to the given one.
+ inline TensorOptions memory_format(MemoryFormat memory_format) {
+   return TensorOptions().memory_format(memory_format);
+ }
+
+ C10_API std::ostream& operator<<(
+     std::ostream& stream,
+     const TensorOptions& options);
+
+ template <typename T>
+ inline TensorOptions dtype() {
+   return dtype(caffe2::TypeMeta::Make<T>());
+ }
+
+ inline std::string toString(const TensorOptions& options) {
+   std::ostringstream stream;
+   stream << options;
+   return stream.str();
+ }
+
+ // This is intended to be a centralized location by which we can determine
+ // what an appropriate DispatchKey for a tensor is.
+ inline DispatchKey computeDispatchKey(
+     std::optional<ScalarType> dtype,
+     std::optional<Layout> layout,
+     std::optional<Device> device) {
+   const auto layout_ = layout_or_default(layout);
+   const auto device_ = device_or_default(device);
+   switch (layout_) {
+     case Layout::Jagged:
+     case Layout::Strided: {
+       const auto dtype_ = dtype_or_default(dtype);
+       switch (device_.type()) {
+ #define DO_CASE(device, _)                   \
+   case c10::DeviceType::device: {            \
+     if (isQIntType(dtype_)) {                \
+       return DispatchKey::Quantized##device; \
+     }                                        \
+     return DispatchKey::device;              \
+   }
+         C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
+ #undef DO_CASE
+         case c10::DeviceType::FPGA:
+           return DispatchKey::FPGA;
+         case c10::DeviceType::MAIA:
+           return DispatchKey::MAIA;
+         case c10::DeviceType::Vulkan:
+           return DispatchKey::Vulkan;
+         case c10::DeviceType::Metal:
+           return DispatchKey::Metal;
+         case c10::DeviceType::MKLDNN:
+         case c10::DeviceType::OPENGL:
+         case c10::DeviceType::OPENCL:
+         case c10::DeviceType::IDEEP:
+           TORCH_INTERNAL_ASSERT(
+               0,
+               "This is a grandfathered Caffe2 device type ",
+               device_.type(),
+               ", it shouldn't ever convert to a DispatchKey. File a bug describing what you were doing if you think this is in error.");
+         default:
+           TORCH_CHECK_NOT_IMPLEMENTED(
+               false,
+               "Unsupported device type for dense layout: ",
+               device_.type());
+       }
+     }
+     case Layout::Sparse:
+       switch (device_.type()) {
+ #define DO_CASE(device, _)              \
+   case c10::DeviceType::device: {       \
+     return DispatchKey::Sparse##device; \
+   }
+         C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
+ #undef DO_CASE
+         default:
+           TORCH_CHECK_NOT_IMPLEMENTED(
+               false,
+               "Unsupported device type for sparse layout: ",
+               device_.type());
+       }
+     case Layout::Mkldnn:
+       switch (device_.type()) {
+         case c10::DeviceType::CPU:
+           return DispatchKey::MkldnnCPU;
+         default:
+           TORCH_CHECK_NOT_IMPLEMENTED(
+               false,
+               "Unsupported device type for mkldnn layout: ",
+               device_.type());
+       }
+     case Layout::SparseCsr:
+     case Layout::SparseCsc:
+     case Layout::SparseBsr:
+     case Layout::SparseBsc:
+       switch (device_.type()) {
+ #define DO_CASE(device, _)                 \
+   case c10::DeviceType::device: {          \
+     return DispatchKey::SparseCsr##device; \
+   }
+         C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
+ #undef DO_CASE
+         default:
+           TORCH_CHECK_NOT_IMPLEMENTED(
+               false,
+               "Unsupported device type for ",
+               layout_,
+               " layout: ",
+               device_.type());
+       }
+     default:
+       TORCH_CHECK(false, "Unsupported layout: ", layout_);
+   }
+ }
+
+ inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) {
+   switch (dispatch_key) {
+ #define DO_CASE(bc, _) case DispatchKey::Sparse##bc:
+     C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
+ #undef DO_CASE
+     return Layout::Sparse;
+ #define DO_CASE(bc, _) case DispatchKey::SparseCsr##bc:
+     C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
+ #undef DO_CASE
+     TORCH_CHECK(
+         false, "Cannot map DispatchKey ", dispatch_key, " to a unique layout.");
+     case DispatchKey::MkldnnCPU:
+       return Layout::Mkldnn;
+     default:
+       return Layout::Strided;
+   }
+ }
+
+ inline c10::DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) {
+   switch (dispatch_key) {
+     // stuff that's real
+ #define DO_CASE(suffix, prefix)     \
+   case DispatchKey::prefix##suffix: \
+     return c10::DeviceType::suffix;
+ #define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix)
+     C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES)
+ #undef DO_CASES
+ #undef DO_CASE
+
+     case DispatchKey::MkldnnCPU:
+       return c10::DeviceType::CPU;
+     case DispatchKey::Vulkan:
+       return c10::DeviceType::Vulkan;
+
+     case DispatchKey::MAIA:
+       return c10::DeviceType::MAIA;
+     default:
+       TORCH_CHECK(
+           false,
+           "DispatchKey ",
+           dispatch_key,
+           " doesn't correspond to a device");
+   }
+ }
+
+ inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) {
+   return TensorOptions()
+       .layout(dispatchKeyToLayout(dispatch_key))
+       .device(dispatchKeyToDeviceType(dispatch_key));
+ }
+
+ namespace detail {
+ inline bool backend_supports_empty_operator(const TensorOptions& options) {
+   // Quantized backends don't support at::empty().
+   // They have separate operators like at::empty_quantized() that take in
+   // extra information about how to quantize the tensor.
+   return !isQIntType(typeMetaToScalarType(options.dtype()));
+ }
+
+ } // namespace detail
+
+ } // namespace c10
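
To illustrate the right-biased merge described in merge_in above (my own sketch, not part of the diff; `with_overrides` is a hypothetical helper):

    #include <c10/core/TensorOptions.h>

    c10::TensorOptions with_overrides(c10::TensorOptions user) {
      c10::TensorOptions defaults =
          c10::TensorOptions().dtype(c10::kFloat).device(c10::kCPU);
      // Fields explicitly set on `user` win; unset fields fall back
      // to `defaults` (and ultimately to the library-wide defaults).
      return defaults.merge_in(user);
    }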
videochat2/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/ArrayRef.h>
+ #include <cstdint>
+
+ namespace c10 {
+
+ struct C10_API UndefinedTensorImpl final : public TensorImpl {
+  public:
+   // Without this, we get:
+   //   error: identifier "at::UndefinedTensorImpl::_singleton" is undefined
+   //   in device code
+   // (ostensibly because the constexpr tricks MSVC into trying to compile
+   // this function for device as well).
+ #ifdef _WIN32
+   static inline TensorImpl* singleton() {
+     return &getInstance();
+   }
+ #else
+   static constexpr inline TensorImpl* singleton() {
+     return &_singleton;
+   }
+ #endif
+
+ #ifdef DEBUG
+   bool has_storage() const override;
+ #endif
+   void set_storage_offset(int64_t offset) override;
+
+  protected:
+   bool is_contiguous_custom(MemoryFormat format) const override;
+   IntArrayRef strides_custom() const override;
+   SymIntArrayRef sym_strides_custom() const override;
+
+  private:
+   UndefinedTensorImpl();
+ #ifdef _WIN32
+   static UndefinedTensorImpl& getInstance();
+ #else
+   static UndefinedTensorImpl _singleton;
+ #endif
+   const char* tensorimpl_type_name() const override;
+ };
+
+ } // namespace c10
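
For context (my own note, not part of the diff): this singleton is the sentinel impl behind a default-constructed, undefined at::Tensor, so one plausible way it surfaces is

    #include <c10/core/UndefinedTensorImpl.h>

    c10::TensorImpl* undef = c10::UndefinedTensorImpl::singleton();
    // Comparing a tensor's impl pointer against this sentinel is one way
    // "is this tensor defined?" can be answered internally.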
videochat2/lib/python3.10/site-packages/torch/include/c10/core/alignment.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <cstddef>
+
+ namespace c10 {
+
+ #ifdef C10_MOBILE
+ // Use 16-byte alignment on mobile
+ // - ARM NEON AArch32 and AArch64
+ // - x86[-64] < AVX
+ constexpr size_t gAlignment = 16;
+ #else
+ // 64-byte alignment should be enough for computation up to AVX512.
+ constexpr size_t gAlignment = 64;
+ #endif
+
+ constexpr size_t gPagesize = 4096;
+ // Since the default THP page size is 2MB, enable THP only
+ // for buffers of size 2MB or larger to avoid memory bloating.
+ constexpr size_t gAlloc_threshold_thp = static_cast<size_t>(2) * 1024 * 1024;
+ } // namespace c10
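
A sketch of how these constants might be honored by an allocator (a hypothetical helper of my own, not part of c10):

    #include <c10/core/alignment.h>
    #include <cstdlib>

    void* alloc_aligned(std::size_t nbytes) {
      // std::aligned_alloc requires the size to be a multiple of the
      // alignment, so round nbytes up to a multiple of gAlignment first.
      std::size_t padded =
          (nbytes + c10::gAlignment - 1) / c10::gAlignment * c10::gAlignment;
      return std::aligned_alloc(c10::gAlignment, padded);
    }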
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h ADDED
@@ -0,0 +1,66 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <c10/util/UniqueVoidPtr.h>
+
+ #include <atomic>
+ #include <cstdint>
+ #include <memory>
+ #include <shared_mutex>
+ #include <variant>
+
+ namespace c10::impl::cow {
+
+ // A COWDeleterContext object is used as the `ctx` argument for DataPtr
+ // to implement a copy-on-write (COW) DataPtr.
+ class C10_API COWDeleterContext {
+  public:
+   // Creates an instance, holding the pair of data and original
+   // deleter.
+   //
+   // Note that the deleter will only be called in our destructor if
+   // the last reference to this goes away without getting
+   // materialized.
+   explicit COWDeleterContext(std::unique_ptr<void, DeleterFnPtr> data);
+
+   // Increments the current refcount.
+   void increment_refcount();
+
+   // See README.md in this directory to understand the locking
+   // strategy.
+
+   // Represents a reference to the context.
+   //
+   // This is returned by decrement_refcount to allow the caller to
+   // copy the data under the shared lock.
+   using NotLastReference = std::shared_lock<std::shared_mutex>;
+
+   // Represents the last reference to the context.
+   //
+   // This will be returned by decrement_refcount when it is the last
+   // reference remaining and after any pending copies have completed.
+   using LastReference = std::unique_ptr<void, DeleterFnPtr>;
+
+   // Decrements the refcount, returning a handle indicating what to
+   // do with it.
+   std::variant<NotLastReference, LastReference> decrement_refcount();
+
+  private:
+   // The destructor is hidden; this should only ever be used within
+   // UniqueVoidPtr using cow::cow_deleter as the deleter.
+   ~COWDeleterContext();
+
+   std::shared_mutex mutex_;
+   std::unique_ptr<void, DeleterFnPtr> data_;
+   std::atomic<std::int64_t> refcount_ = 1;
+ };
+
+ // `cow_deleter` is used as the `ctx_deleter` for DataPtr to implement a COW
+ // DataPtr.
+ //
+ // Warning: This should only be called on a pointer to a COWDeleterContext
+ // that was allocated on the heap with `new`, because when the refcount
+ // reaches 0, the context is deleted with `delete`.
+ C10_API void cow_deleter(void* ctx);
+
+ } // namespace c10::impl::cow
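
A sketch of how the variant returned by decrement_refcount() is meant to be consumed (a hypothetical helper of my own, not part of c10):

    #include <c10/core/impl/COWDeleter.h>
    #include <variant>

    void release_ref(c10::impl::cow::COWDeleterContext* ctx) {
      auto ref = ctx->decrement_refcount();
      if (std::holds_alternative<
              c10::impl::cow::COWDeleterContext::LastReference>(ref)) {
        // Last reference: `ref` now owns the data with its original
        // deleter, so the allocation is freed when `ref` goes out of scope.
      }
      // Otherwise `ref` holds a shared_lock that keeps the data valid
      // while a concurrent reader may still be copying from it.
    }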
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h ADDED
@@ -0,0 +1,365 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/Stream.h>
+ #include <c10/util/Exception.h>
+
+ // Just for C10_ANONYMOUS_VARIABLE
+ #include <c10/util/Registry.h>
+
+ #include <atomic>
+
+ namespace c10 {
+
+ // Forward declaration
+ class DataPtr;
+
+ /**
+  * Note [Flags defining the behavior of events]
+  *
+  * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The
+  * BACKEND_DEFAULT is what a particular backend would select if no
+  * flags were given. PYTORCH_DEFAULT is the PyTorch framework's default
+  * choice for events on that backend, which may not be the same.
+  *
+  * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each
+  * backend implementation.
+  */
+ enum class EventFlag {
+   // Disable timing
+   PYTORCH_DEFAULT,
+   // Enable timing
+   BACKEND_DEFAULT,
+   // FOR TESTING ONLY
+   INVALID
+ };
+
+ namespace impl {
+
+ /**
+  * DeviceGuardImplInterface represents the virtual interface which provides
+  * the functionality needed to implement an RAII class for device and stream
+  * switching, via DeviceGuard. Every distinct device type, e.g., CUDA and
+  * HIP, is expected to implement and register an implementation of this
+  * interface. All classes which inherit from DeviceGuardImplInterface should
+  * be declared 'final'.
+  *
+  * This class exists because we provide a unified interface for performing
+  * device guards via DeviceGuard, but we cannot assume that we have actually
+  * compiled against the, e.g., CUDA library, which actually implements
+  * this guard functionality. In this case, a dynamic dispatch is required
+  * to cross the library boundary.
+  *
+  * If possible, you should directly use implementations of this interface;
+  * those uses will be devirtualized.
+  */
+ struct C10_API DeviceGuardImplInterface {
+   DeviceGuardImplInterface() = default;
+   DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
+   DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
+       default;
+   DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
+   DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
+       default;
+
+   /**
+    * Return the type of device managed by this guard implementation.
+    */
+   virtual DeviceType type() const = 0;
+
+   /**
+    * Set the current device to Device, and return the previous Device.
+    */
+   virtual Device exchangeDevice(Device) const = 0;
+   // NB: Implementations of exchangeDevice can be a bit boilerplatey. You
+   // might consider replacing exchangeDevice with a non-virtual function
+   // with a baked in implementation; however, note that this will triple
+   // the number of virtual calls (when you implement exchangeDevice in a
+   // final subclass, the compiler gets to devirtualize everything; it won't
+   // do that if you don't define it in the subclass!) A common way to solve
+   // this problem is to use some sort of CRTP; however, we can't template
+   // DeviceGuardImplInterface since we really *do* need it to be virtual. A
+   // little boilerplate seems easiest to explain. (Another way around this
+   // problem is to provide inline functions that provide the default
+   // implementations, but this seems a little hard to explain. In any case,
+   // we're only going to have on the order of ten implementations of this
+   // anyway.)
+
+   /**
+    * Get the current device.
+    */
+   virtual Device getDevice() const = 0;
+
+   /**
+    * Set the current device to Device.
+    */
+   virtual void setDevice(Device) const = 0;
+
+   /**
+    * Set the current device to Device, without checking for errors
+    * (so, e.g., this can be called from a destructor).
+    */
+   virtual void uncheckedSetDevice(Device) const noexcept = 0;
+
+   /**
+    * Get the current stream for a given device.
+    */
+   virtual Stream getStream(Device) const noexcept = 0;
+
+   /**
+    * Get the default stream for a given device.
+    */
+   virtual Stream getDefaultStream(Device) const {
+     TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
+   }
+
+   /**
+    * Get a stream from the global pool for a given device.
+    */
+   virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
+       const {
+     (void)isHighPriority; // Suppress unused variable warning
+     TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
+   }
+
+   /**
+    * Return a new stream for a given device and priority. The stream will be
+    * copied and shared around; the device backend should be able to correctly
+    * handle the lifetime of the stream.
+    */
+   virtual Stream getNewStream(Device, int priority = 0) const {
+     (void)priority;
+     TORCH_CHECK(false, "Backend doesn't support creating a new Stream.")
+   }
+
+   /**
+    * Set a stream to be the thread local current stream for its device.
+    * Return the previous stream for that device. You are NOT required
+    * to set the current device to match the device of this stream.
+    */
+   virtual Stream exchangeStream(Stream) const noexcept = 0;
+
+   /**
+    * Destroys the given event.
+    */
+   virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
+       const noexcept {}
+
+   /**
+    * Increments the event's version and enqueues a job with this version
+    * in the stream's work queue. When the stream processes that job,
+    * it notifies all streams waiting on / blocked by that version of the
+    * event to continue and marks that version as recorded.
+    */
+   virtual void record(
+       void** /*event*/,
+       const Stream& /*stream*/,
+       const DeviceIndex /*device_index*/,
+       const c10::EventFlag /*flag*/) const {
+     TORCH_CHECK(false, "Backend doesn't support events.");
+   }
+
+   /**
+    * Does nothing if the event has not been scheduled to be recorded.
+    * If the event was previously enqueued to be recorded, a command
+    * to wait for the version of the event that exists at the time of this
+    * call is inserted in the stream's work queue.
+    * When the stream reaches this command it will stop processing
+    * additional commands until that version of the event is marked as
+    * recorded.
+    */
+   virtual void block(void* /*event*/, const Stream& /*stream*/) const {
+     TORCH_CHECK(false, "Backend doesn't support events.");
+   }
+
+   /**
+    * Returns true if (and only if)
+    * (1) the event has never been scheduled to be recorded, or
+    * (2) the current version is marked as recorded.
+    * Returns false otherwise.
+    */
+   virtual bool queryEvent(void* /*event*/) const {
+     TORCH_CHECK(false, "Backend doesn't support events.");
+   }
+
+   /**
+    * Get the number of devices. WARNING: This is REQUIRED to not raise
+    * an exception. If there is some sort of problem, e.g., driver error,
+    * you should report that there are zero available devices.
+    */
+   virtual DeviceIndex deviceCount() const noexcept = 0;
+
+   /**
+    * Return true if all the work previously enqueued on the stream for
+    * asynchronous execution has completed running on the device.
+    */
+   virtual bool queryStream(const Stream& /*stream*/) const {
+     TORCH_CHECK(false, "Backend doesn't support querying streams.");
+   }
+
+   /**
+    * Wait (by blocking the calling thread) until all the work previously
+    * enqueued on the stream has completed running on the device.
+    */
+   virtual void synchronizeStream(const Stream& /*stream*/) const {
+     TORCH_CHECK(false, "Backend doesn't support synchronizing streams.");
+   }
+
+   /**
+    * Wait (by blocking the calling thread) until all the work previously
+    * recorded on the event has completed running on the device.
+    */
+   virtual void synchronizeEvent(void* /*event*/) const {
+     TORCH_CHECK(false, "Backend doesn't support synchronizing events.");
+   }
+
+   /**
+    * Ensure the caching allocator (if any) is aware that the given DataPtr is
+    * being used on the given stream, and that it should thus avoid recycling
+    * the DataPtr until all work on that stream is done.
+    */
+   virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
+   }
+
+   /**
+    * Fetch the elapsed time between two recorded events.
+    */
+   virtual double elapsedTime(
+       void* /*event1*/,
+       void* /*event2*/,
+       const DeviceIndex /*device_index*/) const {
+     TORCH_CHECK(false, "Backend doesn't support elapsedTime.");
+   }
+
+   /**
+    * Intended use of this class is to leak the DeviceGuardImpl at program end.
+    * So you better not call the destructor, buster!
+    */
+   virtual ~DeviceGuardImplInterface() = default;
+ };
+
+ // A no-op device guard impl that doesn't do anything interesting. Useful
+ // for devices that don't actually have a concept of device index. Prominent
+ // examples are CPU and Meta.
+ template <DeviceType D>
+ struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
+   NoOpDeviceGuardImpl() = default;
+   DeviceType type() const override {
+     return D;
+   }
+   Device exchangeDevice(Device) const override {
+     return Device(D, -1); // no-op
+   }
+   Device getDevice() const override {
+     return Device(D, -1);
+   }
+   void setDevice(Device) const override {
+     // no-op
+   }
+   void uncheckedSetDevice(Device) const noexcept override {
+     // no-op
+   }
+   Stream getStream(Device) const noexcept override {
+     // no-op
+     return Stream(Stream::DEFAULT, Device(D, -1));
+   }
+
+   Stream getNewStream(Device, int priority = 0) const override {
+     // no-op
+     (void)priority;
+     return Stream(Stream::DEFAULT, Device(D, -1));
+   }
+
+   // NB: These do NOT set the current device
+   Stream exchangeStream(Stream) const noexcept override {
+     // no-op
+     return Stream(Stream::DEFAULT, Device(D, -1));
+   }
+   DeviceIndex deviceCount() const noexcept override {
+     return 1;
+   }
+
+   // Event-related functions
+   void record(
+       void** /*event*/,
+       const Stream& /*stream*/,
+       const DeviceIndex /*device_index*/,
+       const EventFlag /*flag*/) const override {
+     TORCH_CHECK(false, D, " backend doesn't support events.");
+   }
+   void block(void* /*event*/, const Stream& /*stream*/) const override {
+     TORCH_CHECK(false, D, " backend doesn't support events.")
+   }
+   bool queryEvent(void* /*event*/) const override {
+     TORCH_CHECK(false, D, " backend doesn't support events.")
+   }
+   void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
+       const noexcept override {}
+
+   // Stream-related functions
+   bool queryStream(const Stream& /*stream*/) const override {
+     return true;
+   }
+   void synchronizeStream(const Stream& /*stream*/) const override {
+     // Don't wait for anything.
+   }
+ };
+
+ // The registry is NON-owning. Each stored pointer is std::atomic so
+ // that under all interleavings of registry calls the structure is
+ // race-free. This doesn't cost us anything on reads in X86. (An
+ // unsynchronized implementation probably is OK too, but I didn't want
+ // to prove that we never read from device_guard_impl_registry at the
+ // same time some registration is occurring. Shiver.)
+ //
+ // I'd like this registry to be valid even at program destruction time
+ // (in case someone uses a DeviceGuard in a destructor to do some cleanup
+ // in the CUDA API.) Since there are no direct accesses of the underlying
+ // owning objects which I can use to enforce initialization order (unlike
+ // in a Meyer singleton), it implies that you must *leak* objects when
+ // putting them in the registry. This is done by deleting the destructor
+ // on DeviceGuardImplInterface.
+ // NOLINTNEXTLINE(*c-arrays*)
+ extern C10_API std::atomic<const DeviceGuardImplInterface*>
+     device_guard_impl_registry[static_cast<size_t>(
+         DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];
+
+ // I can't conveniently use c10/util/Registry.h for the following reason:
+ // c10/util/Registry.h gives me a slow way of Create'ing an object of some
+ // interface from the registry, but no way of quickly accessing an already
+ // created object. I'll be banging on getDeviceGuardImpl every time we do a
+ // DeviceGuard, so I really don't want to be doing an unordered_map lookup.
+ // Better if the registration mechanism directly drops its implementation
+ // into device_guard_impl_registry.
+
+ class C10_API DeviceGuardImplRegistrar {
+  public:
+   DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
+ };
+
+ #define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl)              \
+   static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \
+       g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl());
+
+ inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) {
+   // Two adjacent 8-bit fields, DeviceType and DeviceIndex, have their field
+   // access miscompiled on NVCC. To work around this issue, we apply a mask
+   // to the DeviceType. First, check that the DeviceType is in fact 8-bit.
+   // FB employees can see
+   // https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/
+   // for more details
+   static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit");
+   auto p = device_guard_impl_registry[static_cast<size_t>(type) & 0xFF].load();
+
+   // This seems to be the first place where you make use of a device
+   // when you pass devices to factory functions. Give a nicer error
+   // message in this case.
+   TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices");
+   return p;
+ }
+
+ inline bool hasDeviceGuardImpl(DeviceType type) {
+   return device_guard_impl_registry[static_cast<size_t>(type)].load();
+ }
+
+ } // namespace impl
+ } // namespace c10
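
A sketch of how a backend registers an implementation with the macro above (a plausible use of my own, not something this header does itself; PrivateUse1 is the out-of-tree extension device type):

    #include <c10/core/impl/DeviceGuardImplInterface.h>

    // Registers the built-in no-op impl for the PrivateUse1 slot at
    // static-initialization time (the registry deliberately leaks it).
    C10_REGISTER_GUARD_IMPL(
        PrivateUse1,
        c10::impl::NoOpDeviceGuardImpl<c10::DeviceType::PrivateUse1>);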
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
4
+
5
+ #include <array>
6
+
7
+ namespace c10::impl {
8
+
9
+ // FakeGuardImpl is hardcoded to have eight devices. Not for
10
+ // any good reason, just to simplify code.
11
+ constexpr DeviceIndex kFakeGuardImplMaxDevices = 8;
12
+
13
+ /**
14
+ * A fake implementation of DeviceGuardImplInterface suitable for testing.
15
+ * The current device is modeled as a mutable field in the guard implementation
16
+ * class. See DeviceGuard_test.cpp for an example use.
17
+ */
18
+ template <DeviceType T>
19
+ struct FakeGuardImpl final : public DeviceGuardImplInterface {
20
+ static constexpr DeviceType static_type = T;
21
+ // Runtime device type is not used
22
+ FakeGuardImpl(DeviceType) {}
23
+ FakeGuardImpl() = default;
24
+ DeviceType type() const override {
25
+ return T;
26
+ }
27
+ Device exchangeDevice(Device d) const override {
28
+ AT_ASSERT(d.type() == type());
29
+ AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
30
+ Device old_device = getDevice();
31
+ if (old_device.index() != d.index()) {
32
+ current_device_ = d.index();
33
+ }
34
+ return old_device;
35
+ }
36
+ Device getDevice() const override {
37
+ return Device(type(), current_device_);
38
+ }
39
+ void setDevice(Device d) const override {
40
+ AT_ASSERT(d.type() == type());
41
+ AT_ASSERT(d.index() >= 0);
42
+ AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
43
+ current_device_ = d.index();
44
+ }
45
+ void uncheckedSetDevice(Device d) const noexcept override {
46
+ current_device_ = d.index();
47
+ }
48
+ Stream getStream(Device d) const noexcept override {
49
+ return Stream(Stream::UNSAFE, d, current_streams_[d.index()]);
50
+ }
51
+ Stream exchangeStream(Stream s) const noexcept override {
52
+ auto old_id = current_streams_[s.device_index()];
53
+ current_streams_[s.device_index()] = s.id();
54
+ return Stream(Stream::UNSAFE, s.device(), old_id);
55
+ }
56
+ DeviceIndex deviceCount() const noexcept override {
57
+ return kFakeGuardImplMaxDevices;
58
+ }
59
+
60
+ // Event-related functions
61
+ void record(
62
+ void** event,
63
+ const Stream& stream,
64
+ const DeviceIndex device_index,
65
+ const EventFlag flag) const override {}
66
+ void block(void* event, const Stream& stream) const override {}
67
+ bool queryEvent(void* event) const override {
68
+ return true;
69
+ }
70
+ void destroyEvent(void* event, const DeviceIndex device_index)
71
+ const noexcept override {}
72
+
73
+ // Convenience methods for testing
74
+ static DeviceIndex getDeviceIndex() {
75
+ return current_device_;
76
+ }
77
+ static void setDeviceIndex(DeviceIndex i) {
78
+ AT_ASSERT(i >= 0);
79
+ AT_ASSERT(i < kFakeGuardImplMaxDevices);
80
+ current_device_ = i;
81
+ }
82
+ static StreamId getCurrentStreamIdFor(DeviceIndex i) {
83
+ return current_streams_.at(i);
84
+ }
85
+ static void resetStreams() {
86
+ current_streams_.fill(0);
87
+ }
88
+
89
+ private:
90
+ thread_local static DeviceIndex current_device_;
91
+ thread_local static std::array<StreamId, kFakeGuardImplMaxDevices>
92
+ current_streams_;
93
+ };
94
+
95
+ template <DeviceType T>
96
+ thread_local DeviceIndex FakeGuardImpl<T>::current_device_ = 0;
97
+
98
+ template <DeviceType T>
99
+ thread_local std::array<StreamId, kFakeGuardImplMaxDevices>
100
+ FakeGuardImpl<T>::current_streams_ = {0, 0, 0, 0, 0, 0, 0, 0};
101
+
102
+ } // namespace c10::impl
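A minimal sketch of how a test might drive FakeGuardImpl, in the spirit of the DeviceGuard_test.cpp reference in the class comment. The CUDA device type is arbitrary here, since nothing real is touched.

    #include <c10/core/impl/FakeGuardImpl.h>

    void fake_guard_smoke_test() {
      using Impl = c10::impl::FakeGuardImpl<c10::DeviceType::CUDA>;
      Impl impl;
      Impl::setDeviceIndex(0);
      // exchangeDevice returns the old device and installs the new one.
      c10::Device old = impl.exchangeDevice(c10::Device(c10::DeviceType::CUDA, 3));
      AT_ASSERT(old.index() == 0);
      AT_ASSERT(Impl::getDeviceIndex() == 3);
      Impl::resetStreams();
      AT_ASSERT(Impl::getCurrentStreamIdFor(3) == 0);
    }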
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h ADDED
@@ -0,0 +1,429 @@
+ #pragma once
+
+ // This file provides implementations of InlineDeviceGuard and
+ // InlineOptionalDeviceGuard.
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/core/impl/VirtualGuardImpl.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Optional.h>
+ #include <type_traits>
+ #include <utility>
+
+ namespace c10::impl {
+
+ /**
+  * A DeviceGuard is an RAII class that sets a device to some value
+  * on construction, and resets the device to its original value on
+  * destruction.
+  *
+  * InlineDeviceGuard is a helper class for implementing DeviceGuards.
+  * It is templated over a DeviceGuardImpl (anything that implements
+  * DeviceGuardImplInterface). There are two primary ways to instantiate
+  * InlineDeviceGuard:
+  *
+  * - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl.
+  *   This is the best way to use InlineDeviceGuard, as all calls are
+  *   devirtualized, giving you code as efficient as straight line
+  *   calls to cudaGetDevice/cudaSetDevice.
+  *
+  * - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl
+  *   retrieved from a DeviceType registry. We have explicitly instantiated
+  *   InlineDeviceGuard this way as c10::DeviceGuard.
+  *
+  * If you are in a hurry, you can use InlineDeviceGuard directly:
+  *
+  *   using CUDAGuard = impl::InlineDeviceGuard<CUDAGuardImpl>;
+  *
+  * However, you can provide a better user experience if you explicitly write a
+  * wrapper class that itself contains the template instantiation:
+  *
+  *   class CUDAGuard {
+  *    public:
+  *     // ... the API ...
+  *    private:
+  *     impl::InlineDeviceGuard<CUDAGuardImpl> guard_;
+  *   }
+  *
+  * The wrapper class provides a good place to write documentation, and helps
+  * avoid weird template instantiation errors when a user incorrectly uses the
+  * class.
+  *
+  * If you need to test this class, consider instantiating it with FakeGuardImpl.
+  */
+ template <typename T>
+ class InlineDeviceGuard {
+  public:
+   // Note [Omitted default constructor from RAII]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // In principle, we could add a default constructor to
+   // DeviceGuard which reads the current device and promises to
+   // restore to that device on exit. However, most cases where you
+   // would have written this, you probably meant to actually just
+   // use OptionalDeviceGuard (since you don't actually need the
+   // restore to happen if you don't ever actually set the device).
+   // We remove the constructor here to encourage you to think about
+   // what you actually want to happen.
+   explicit InlineDeviceGuard() = delete;
+
+   /// Set the current device to the passed Device.
+   explicit InlineDeviceGuard(Device device)
+       : impl_(device.type()),
+         original_device_(
+             device.index() == -1 ? impl_.getDevice()
+                                  : impl_.exchangeDevice(device)),
+         current_device_(device.index() == -1 ? original_device_ : device) {}
+
+   /// Set the current device index to the passed DeviceIndex. (The
+   /// device type is inferred from the template parameter T).
+   template <
+       typename U = T,
+       typename =
+           typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
+   explicit InlineDeviceGuard(DeviceIndex device_index)
+       : InlineDeviceGuard(Device(U::static_type, device_index)) {}
+
+   /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit
+   /// DeviceGuardImplInterface pointer.
+   template <
+       typename U = T,
+       typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
+   explicit InlineDeviceGuard(
+       Device device,
+       const DeviceGuardImplInterface* impl)
+       : impl_(
+             VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))),
+         original_device_(
+             device.index() == -1 ? impl_.getDevice()
+                                  : impl_.exchangeDevice(device)),
+         current_device_(device.index() == -1 ? original_device_ : device) {}
+
+   /// Copy is disallowed
+   InlineDeviceGuard(const InlineDeviceGuard<T>&) = delete;
+   InlineDeviceGuard<T>& operator=(const InlineDeviceGuard<T>&) = delete;
+
+   /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
+   /// which is required for moves on types with nontrivial destructors.
+   InlineDeviceGuard(InlineDeviceGuard<T>&& other) = delete;
+   InlineDeviceGuard& operator=(InlineDeviceGuard<T>&& other) = delete;
+
+   ~InlineDeviceGuard() {
+     impl_.uncheckedSetDevice(original_device_);
+   }
+
+   /// Sets the device to the given one.
+   template <
+       typename U = T,
+       typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>, int> = 0>
+   void set_device(at::Device device) {
+     AT_ASSERT(
+         (U::static_type == DeviceType::HIP && device.is_cuda()) ||
+         device.type() == U::static_type);
+     auto index = device.index();
+     if (index == -1)
+       return;
+     impl_.setDevice(device);
+     current_device_ = device;
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device. This is effectively equivalent to
+   /// set_device when a guard supports only a single device type.
+   template <typename U = T>
+   typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>> reset_device(
+       at::Device device) {
+     set_device(device);
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device (for a possibly different device
+   /// type).
+   ///
+   /// This method is named reset_device to highlight the fact that previous
+   /// device settings from this guard are NOT preserved, even if the device
+   /// has a different device type. For example:
+   ///
+   ///   // CUDA device is 0
+   ///   DeviceGuard g(Device(kCUDA, 1));
+   ///   g.reset_device(Device(kHIP, 2));
+   ///   // CUDA device is 0 (!!)
+   ///
+   /// NOTE: this implementation may skip some device setting if it can prove
+   /// that it is unnecessary.
+   ///
+   /// Optional argument is for testing only.
+   template <typename U = T>
+   typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>> reset_device(
+       at::Device device,
+       const impl::DeviceGuardImplInterface* impl = nullptr) {
+     auto index = device.index();
+     if (index == -1)
+       return;
+     if (device.type() == original_device_.type()) {
+       AT_ASSERT(impl == nullptr || impl->type() == device.type());
+       impl_.setDevice(device);
+       current_device_ = device;
+     } else {
+       // Destruct and reconstruct the DeviceGuard in place
+       impl_.setDevice(original_device_);
+       impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl);
+       original_device_ = impl_.exchangeDevice(device);
+       current_device_ = device;
+     }
+   }
+
+   /// Sets the device index to the given one. The device type is inferred
+   /// from the original device type.
+   void set_index(DeviceIndex index) {
+     reset_device(Device(original_device_.type(), index));
+   }
+
+   /// Returns the device that was set at the time of the most recent
+   /// reset_device() call, or otherwise the device at construction time.
+   Device original_device() const {
+     return original_device_;
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device/reset_device/set_index.
+   Device current_device() const {
+     return current_device_;
+   }
+
+  protected:
+   T impl_;
+
+  private:
+   Device original_device_;
+   Device current_device_;
+ };
+
+ /**
+  * An OptionalDeviceGuard is an RAII class that sets a device to some value on
+  * initialization, and resets the device to its original value on destruction.
+  *
+  * InlineOptionalDeviceGuard is a helper class for implementing
+  * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to
+  * use this. See OptionalDeviceGuard for user-oriented usage notes.
+  */
+ template <typename T>
+ class InlineOptionalDeviceGuard {
+  public:
+   // Note [Explicit initialization of optional fields]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // Explicit initialization of optional fields
+   // required to workaround an nvcc bug; see
+   // https://github.com/pytorch/pytorch/issues/12117
+
+   /// Creates an uninitialized OptionalDeviceGuard.
+   explicit InlineOptionalDeviceGuard()
+       : guard_() // See Note [Explicit initialization of optional fields]
+   {}
+
+   /// Set the current device to the passed Device, if it is not nullopt.
+   explicit InlineOptionalDeviceGuard(std::optional<Device> device_opt)
+       : guard_() { // See Note [Explicit initialization of optional fields]
+     if (device_opt.has_value()) {
+       guard_.emplace(device_opt.value());
+     }
+   }
+
+   /// Set the current device to the passed DeviceIndex, if it is not nullopt.
+   template <
+       typename U = T,
+       typename =
+           typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
+   explicit InlineOptionalDeviceGuard(
+       std::optional<DeviceIndex> device_index_opt)
+       : guard_() { // See Note [Explicit initialization of optional fields]
+     if (device_index_opt.has_value()) {
+       guard_.emplace(device_index_opt.value());
+     }
+   }
+
+   /// All constructors of DeviceGuard are valid for OptionalDeviceGuard
+   /// and result in an initialized OptionalDeviceGuard.
+   template <typename... Args>
+   explicit InlineOptionalDeviceGuard(Args&&... args)
+       : guard_(std::in_place, std::forward<Args>(args)...) {}
+
+   // TODO: Consider re-adding Tensor and TensorList constructors here, when
+   // Tensor moves to c10. (These are only valid on OptionalDeviceGuard,
+   // because a Tensor may be undefined, in which case we need an uninitialized
+   // tensor guard.)
+
+   // Note [Move construction for RAII guards is tricky]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // In principle, move construction is useful for terminating
+   // the lifetime of a `OptionalDeviceGuard` early; for example:
+   //
+   //     // current device is d0
+   //     OptionalDeviceGuard g1(d1);
+   //     // current device is d1
+   //     {
+   //       OptionalDeviceGuard g2(std::move(g1));
+   //     }
+   //     // current device is d0!!
+   //
+   // However, it's difficult to implement the move constructor
+   // in a way that works in all situations. For example, consider
+   // the following example:
+   //
+   //     OptionalDeviceGuard g1(d1);
+   //     {
+   //       OptionalDeviceGuard g2(d2);
+   //       {
+   //         OptionalDeviceGuard g3(std::move(g1)); // !!!
+   //       }
+   //     }
+   //
+   // What should the current device be while g3 is in scope... and what
+   // should it be after it goes out of scope? What about g2?
+   // There don't seem to be satisfactory answers for these questions.
+   //
+   // It's in principle possible to raise an error when this occurs
+   // by doing some extra thread-local bookkeeping. But why bother?
+   // Just don't provide the constructor.
+   InlineOptionalDeviceGuard(InlineOptionalDeviceGuard<T>&& other) = delete;
+
+   // Note [Move assignment for RAII guards is tricky]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // Move assignment is deleted, because you need to know which guard was
+   // defined "first", as that guard's original_device_ wins--with the current
+   // representation, we have no way of telling which is the case. (Move
+   // construction does not have this problem, as one guard is always
+   // uninitialized.)
+   //
+   // We can make this clear by way of a pair of examples:
+   //
+   // Example 1:
+   //
+   //     // initial device is n0
+   //     {
+   //       CUDAGuard g1(n1);
+   //       {
+   //         CUDAGuard g2(n2);
+   //         // current device should be n2
+   //         g1 = std::move(g2);
+   //         // current device should still be n2
+   //       }
+   //       // current device should still be n2
+   //     }
+   //     // current device should be n0
+   //
+   // Example 2 (flip the order of the two guards):
+   //
+   //     // initial device is n0
+   //     {
+   //       CUDAGuard g2(n2);
+   //       {
+   //         CUDAGuard g1(n1);
+   //         // current device should be n1
+   //         g1 = std::move(g2);
+   //         // current device should be n2
+   //       }
+   //       // current device should be n0 (since g2 has been vacated)
+   //     }
+   //
+   // In both examples, we need g1 to restore to n0 after move assignment.
+   // However, in example 1, this is determined by the restore value of g1
+   // (prior to the move). In example 2, however, it is determined by the
+   // restore value of g2 (!!). We don't know which one should win, without
+   // having a way of telling which guard was allocated first.
+   //
+   // We could solve this with an extra thread-local variable. But no one is
+   // actually using move-assignment. So just get rid of it.
+   InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) =
+       delete;
+
+   /// Sets the device to the given one. Initializes OptionalDeviceGuard if it
+   /// is not already initialized.
+   template <
+       typename U = T,
+       typename =
+           typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
+   void set_device(at::Device device) {
+     if (!guard_.has_value()) {
+       guard_.emplace(device);
+     } else {
+       guard_->set_device(device);
+     }
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device (for a possibly different device
+   /// type). Initializes OptionalDeviceGuard if it is not already initialized.
+   ///
+   /// See notes on why this is called reset_device on InlineDeviceGuard.
+   ///
+   /// Optional argument is for testing only.
+   template <
+       typename U = T,
+       typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
+   void reset_device(
+       at::Device device,
+       const DeviceGuardImplInterface* impl = nullptr) {
+     if (!guard_.has_value()) {
+       guard_.emplace(device, impl);
+     } else {
+       guard_->reset_device(device, impl);
+     }
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device. Initializes the guard if it is
+   /// not already initialized. This is effectively equivalent to set_device
+   /// when a guard supports only a single device type.
+   template <
+       typename U = T,
+       typename =
+           typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
+   void reset_device(at::Device device) {
+     if (!guard_.has_value()) {
+       guard_.emplace(device);
+     } else {
+       guard_->reset_device(device);
+     }
+   }
+
+   /// Sets the device index to the given one. The device type is statically
+   /// known.
+   template <
+       typename U = T,
+       typename =
+           typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
+   void set_index(DeviceIndex index) {
+     if (!guard_.has_value()) {
+       guard_.emplace(index);
+     } else {
+       guard_->set_index(index);
+     }
+   }
+
+   /// Returns the device that was set immediately prior to initialization of
+   /// the guard, or nullopt if the guard is uninitialized.
+   std::optional<Device> original_device() const {
+     return guard_.has_value() ? std::make_optional(guard_->original_device())
+                               : std::nullopt;
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device, if the guard is initialized,
+   /// or nullopt if the guard is uninitialized.
+   std::optional<Device> current_device() const {
+     return guard_.has_value() ? std::make_optional(guard_->current_device())
+                               : std::nullopt;
+   }
+
+   /// Restore the original device, resetting this guard to uninitialized state.
+   void reset() {
+     guard_.reset();
+   }
+
+  private:
+   std::optional<InlineDeviceGuard<T>> guard_;
+ };
+
+ } // namespace c10::impl
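Tying the two classes together, here is a minimal sketch of the optional variant instantiated with VirtualGuardImpl (the dynamic-dispatch configuration that c10::OptionalDeviceGuard wraps). It assumes the relevant backend is registered at runtime.

    #include <c10/core/impl/InlineDeviceGuard.h>
    #include <optional>

    void maybe_switch_device(std::optional<c10::Device> device_opt) {
      // Uninitialized when device_opt is nullopt: nothing is set, and the
      // destructor restores nothing.
      c10::impl::InlineOptionalDeviceGuard<c10::impl::VirtualGuardImpl> guard(
          device_opt);
      // ... run work on the (possibly unchanged) current device ...
    } // if initialized, the original device is restored here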
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h ADDED
@@ -0,0 +1,139 @@
+ #pragma once
+
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/Stream.h>
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/util/Exception.h>
+
+ namespace c10::impl {
+
+ template <typename T>
+ struct InlineEvent final {
+   InlineEvent() = delete;
+   InlineEvent(
+       const DeviceType _device_type,
+       const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
+       : backend_{_device_type}, device_type_{_device_type}, flag_{_flag} {}
+
+   // Copy constructor and copy assignment operator (deleted)
+   InlineEvent(const InlineEvent&) = delete;
+   InlineEvent& operator=(const InlineEvent&) = delete;
+
+   // Move constructor and move assignment operator
+   InlineEvent(InlineEvent&& other) noexcept
+       : event_(other.event_),
+         backend_(std::move(other.backend_)),
+         device_type_(other.device_type_),
+         device_index_(other.device_index_),
+         flag_(other.flag_),
+         was_marked_for_recording_(other.was_marked_for_recording_) {
+     other.event_ = nullptr;
+   }
+   InlineEvent& operator=(InlineEvent&& other) noexcept {
+     swap(other);
+     return *this;
+   }
+
+   void swap(InlineEvent& other) noexcept {
+     std::swap(event_, other.event_);
+     std::swap(backend_, other.backend_);
+     std::swap(device_type_, other.device_type_);
+     std::swap(device_index_, other.device_index_);
+     std::swap(flag_, other.flag_);
+     std::swap(was_marked_for_recording_, other.was_marked_for_recording_);
+   }
+
+   ~InlineEvent() noexcept {
+     if (event_)
+       backend_.destroyEvent(event_, device_index_);
+   }
+
+   DeviceType device_type() const noexcept {
+     return device_type_;
+   }
+   DeviceIndex device_index() const noexcept {
+     return device_index_;
+   }
+   EventFlag flag() const noexcept {
+     return flag_;
+   }
+   bool was_marked_for_recording() const noexcept {
+     return was_marked_for_recording_;
+   }
+
+   void recordOnce(const Stream& stream) {
+     if (!was_marked_for_recording_)
+       record(stream);
+   }
+
+   void record(const Stream& stream) {
+     TORCH_CHECK(
+         stream.device_type() == device_type_,
+         "Event device type ",
+         DeviceTypeName(device_type_),
+         " does not match recording stream's device type ",
+         DeviceTypeName(stream.device_type()),
+         ".");
+
+     backend_.record(&event_, stream, device_index_, flag_);
+     was_marked_for_recording_ = true;
+     device_index_ = stream.device_index();
+   }
+
+   void block(const Stream& stream) const {
+     if (!was_marked_for_recording_)
+       return;
+
+     TORCH_CHECK(
+         stream.device_type() == device_type_,
+         "Event device type ",
+         DeviceTypeName(device_type_),
+         " does not match blocking stream's device type ",
+         DeviceTypeName(stream.device_type()),
+         ".");
+
+     backend_.block(event_, stream);
+   }
+
+   bool query() const {
+     if (!was_marked_for_recording_)
+       return true;
+     return backend_.queryEvent(event_);
+   }
+
+   void* eventId() const {
+     return event_;
+   }
+
+   double elapsedTime(const InlineEvent& other) const {
+     TORCH_CHECK(
+         other.was_marked_for_recording(),
+         "other was not marked for recording.");
+     TORCH_CHECK(
+         was_marked_for_recording(), "self was not marked for recording.");
+     TORCH_CHECK(
+         other.device_type() == device_type_,
+         "Event device type ",
+         DeviceTypeName(device_type_),
+         " does not match other's device type ",
+         DeviceTypeName(other.device_type()),
+         ".");
+     return backend_.elapsedTime(event_, other.event_, device_index_);
+   }
+
+   void synchronize() const {
+     if (!was_marked_for_recording_)
+       return;
+     backend_.synchronizeEvent(event_);
+   }
+
+  private:
+   void* event_ = nullptr;
+   T backend_;
+   DeviceType device_type_;
+   DeviceIndex device_index_ = -1;
+   EventFlag flag_ = EventFlag::PYTORCH_DEFAULT;
+   bool was_marked_for_recording_ = false;
+ };
+
+ } // namespace c10::impl
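Since InlineEvent is templated over its backend, it can be exercised without hardware by pairing it with FakeGuardImpl from above. A minimal sketch (the device type is again arbitrary, because the fake backend touches nothing real):

    #include <c10/core/impl/FakeGuardImpl.h>
    #include <c10/core/impl/InlineEvent.h>

    void inline_event_example() {
      using Impl = c10::impl::FakeGuardImpl<c10::DeviceType::CUDA>;
      c10::impl::InlineEvent<Impl> event(c10::DeviceType::CUDA);
      Impl impl;
      c10::Stream stream = impl.getStream(c10::Device(c10::DeviceType::CUDA, 0));
      event.record(stream); // marks the event; FakeGuardImpl's record is a no-op
      AT_ASSERT(event.was_marked_for_recording());
      AT_ASSERT(event.query()); // FakeGuardImpl::queryEvent always reports done
    }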
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h ADDED
@@ -0,0 +1,263 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/DispatchKeySet.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <c10/util/python_stub.h>
+ #include <string>
+ #include <vector>
+
+ // Forward declarations
+
+ namespace c10 {
+ struct IValue;
+ class OperatorHandle;
+ struct TensorImpl;
+ } // namespace c10
+
+ namespace torch::jit {
+ using Stack = std::vector<c10::IValue>;
+ }
+
+ // Actual implementation
+
+ namespace c10::impl {
+
+ struct C10_API PyInterpreter;
+
+ // Note [Python interpreter tag]
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // Traditionally, PyTorch is layered such that our Python library
+ // (libtorch_python) references our pure C++ library (libtorch) as the
+ // natural order of things. However, sometimes this natural order is
+ // subverted: C++ objects refer to Python objects (for example, we
+ // store a PyObject* pointer on TensorImpl so that converting from a
+ // C++ Tensor to a Python Tensor is just a memory dereference).
+ //
+ // These unusual orderings must be treated with care. To start, you need to
+ // virtualize the destructor so that the PyObject can be decref'ed on
+ // destruction (because the C++ object itself doesn't know anything about
+ // Python--remember, layering!). This process itself is fraught, since
+ // acquiring the GIL could lead to deadlocks if someone is blocking on you
+ // while holding the GIL. Furthermore, if the C++ objects outlive the
+ // interpreter (which can happen if you stash them in a static global
+ // variable defined in libtorch), you may attempt to decref the object when
+ // the Python interpreter has already been shutdown.
+ //
+ // BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python
+ // interpreters in a single process. If a C++ object is accessible from
+ // multiple interpreters, we must take care not to accidentally use a
+ // PyObject from one interpreter with another interpreter.
+ //
+ // To prevent these mixups, we introduce a PyInterpreter "tag" (object with
+ // a vtable), which specifies a specific Python interpreter.
+ //
+ // - Any given object can be associated with AT MOST one Python interpreter.
+ //   We represent the interpreter tag as a memory address to an instance of
+ //   a virtual class that is allocated once per interpreter (this is so that
+ //   we can request the interpreter to perform operations for us, if
+ //   necessary).
+ //
+ // - It can be recorded with a PyObject (PyInterpreterObject) so that
+ //   we know what interpreter the object is associated with, and we can
+ //   raise an error if you try to use the PyObject from the wrong
+ //   interpreter context.
+ //
+ // - It contains a vtable that can be used to perform various Python
+ //   operations from ordinary C++ code that ordinarily wouldn't be accessible
+ //   from libtorch.
+ //
+ // A simple use case is when a C++ object must be associated with a PyObject.
+ // However, for TensorImpl, we lazily allocate a PyObject the first time the
+ // object passes into Python. The invariants for this situation are more
+ // subtle:
+ //
+ // - A given TensorImpl's interpreter tag can only go from uninitialized to
+ //   tagged; once tagged, this is a quiescent state (once tagged to an
+ //   interpreter, ALWAYS tagged to that interpreter)
+ //
+ // - A thread may mutate the PyObject field of a TensorImpl if and only if it
+ //   holds the GIL for the interpreter tagged on the TensorImpl. (If the
+ //   TensorImpl is not tagged, it must first atomically claim its tag before it
+ //   can validly write)
+ //
+ // WARNING: This class has to be written very carefully, because it may be
+ // possible for a Tensor to have a reference to an interpreter corresponding
+ // to a shared library that has ALREADY BEEN UNLOADED. This makes blindly
+ // calling virtual methods very dangerous, because the vtable may be garbage
+ // at that point (on a good day, you might get "pure virtual method called").
+ //
+ // The idea to solve this problem is that we always leak PyInterpreters (so
+ // they always stay live even after dlclose), and make sure we can disarm
+ // their virtual methods by indirecting through a separate PyInterpreterVTable
+ // object. This can be replaced with a no-op vtable from libc10.so, which
+ // is guaranteed to stick around until the bitter end.
+ //
+ // NB: The downside of representing PyInterpreter tags as full objects is that
+ // it takes an extra word on TensorImpl. If tags were instead just integer
+ // indices, on 64-bit architectures we could pack the tag and PyObject together
+ // into a single atomic word. On 32-bit architectures we could simply say that
+ // only one Python interpreter is supported (erroring if a nontrivial
+ // interpreter tag is attempted to be set).
+ //
+ // The difficulty with this scheme is we need to maintain an out-of-line table
+ // to get at the PyInterpreters so that we can do virtual method calls on them,
+ // and registration/deregistration to this table must be done in a thread-safe
+ // manner. This can be easily done if the number of possible PyInterpreters is
+ // small enough (e.g., 8-bit integer) by simply preallocating an array of
+ // sufficient size to hold all possible interpreters. Surely 128 interpreters
+ // is more than enough for anyone!
+ //
+ // I didn't decide to do this technique at the moment, because the extra word
+ // added by the PyInterpreter tag takes us to 24 words, which means that we
+ // still fit inside three eight-word cache lines. If you need to penny-pinch
+ // another word consider doing this!
+
+ struct C10_API PyInterpreterVTable {
+   virtual ~PyInterpreterVTable() = default;
+
+   // Report the name of this interpreter
+   virtual std::string name() const = 0;
+
+   // Run Py_INCREF on a PyObject.
+   virtual void incref(PyObject* pyobj) const = 0;
+   // Run Py_DECREF on a PyObject. We DO NOT assume the GIL is held on call
+   // See NOTE [PyInterpreter::decref takes a `has_pyobj_slot` arg]
+   virtual void decref(PyObject* pyobj, bool has_pyobj_slot) const = 0;
+
+   // Perform a detach by deferring to the __torch_dispatch__ implementation of
+   // detach, which will also arrange for the PyObject to get copied in this
+   // situation
+   virtual c10::intrusive_ptr<TensorImpl> detach(
+       const TensorImpl* self) const = 0;
+
+   // Invoke the Python boxed fallback dispatch to go back into Python
+   virtual void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack)
+       const = 0;
+
+   virtual void reportErrorCallback(PyObject* callback, DispatchKey key)
+       const = 0;
+
+   // This is only invoked in the multipy/torchdeploy situation from
+   // pythonOpRegistrationTrampoline; this lets us get to the Python
+   // interpreter to actually find the appropriate Python op registration
+   // entry to call.
+   virtual void python_op_registration_trampoline(
+       const c10::OperatorHandle& op,
+       c10::DispatchKey,
+       c10::DispatchKeySet keyset,
+       torch::jit::Stack* stack,
+       bool with_keyset,
+       bool with_op) const = 0;
+
+   virtual void throw_abstract_impl_not_imported_error(
+       std::string opname,
+       const char* pymodule,
+       const char* context) const = 0;
+
+   // Invoke the Python dispatcher to handle this call
+   virtual void python_dispatcher(
+       const c10::OperatorHandle& op,
+       c10::DispatchKeySet,
+       torch::jit::Stack* stack) const = 0;
+
+   virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat)
+       const = 0;
+   virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
+       const = 0;
+   virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0;
+   virtual c10::Device device(const TensorImpl* self) const = 0;
+   virtual int64_t dim(const TensorImpl* self) const = 0;
+   virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0;
+   virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0;
+   virtual c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const = 0;
+   virtual c10::Layout layout(const TensorImpl* self) const = 0;
+   virtual int64_t numel(const TensorImpl* self) const = 0;
+   virtual c10::SymInt sym_numel(const TensorImpl* self) const = 0;
+   virtual c10::SymIntArrayRef sym_strides(const TensorImpl* self) const = 0;
+   virtual c10::SymInt sym_storage_offset(const TensorImpl* self) const = 0;
+
+   virtual void trace_gpu_event_creation(
+       c10::DeviceType device_type,
+       uintptr_t event) const = 0;
+   virtual void trace_gpu_event_deletion(
+       c10::DeviceType device_type,
+       uintptr_t event) const = 0;
+   virtual void trace_gpu_event_record(
+       c10::DeviceType device_type,
+       uintptr_t event,
+       uintptr_t stream) const = 0;
+   virtual void trace_gpu_event_wait(
+       c10::DeviceType device_type,
+       uintptr_t event,
+       uintptr_t stream) const = 0;
+   virtual void trace_gpu_memory_allocation(
+       c10::DeviceType device_type,
+       uintptr_t ptr) const = 0;
+   virtual void trace_gpu_memory_deallocation(
+       c10::DeviceType device_type,
+       uintptr_t ptr) const = 0;
+   virtual void trace_gpu_stream_creation(
+       c10::DeviceType device_type,
+       uintptr_t stream) const = 0;
+   virtual void trace_gpu_device_synchronization(
+       c10::DeviceType device_type) const = 0;
+   virtual void trace_gpu_stream_synchronization(
+       c10::DeviceType device_type,
+       uintptr_t stream) const = 0;
+   virtual void trace_gpu_event_synchronization(
+       c10::DeviceType device_type,
+       uintptr_t event) const = 0;
+
+   virtual void reset_backward_hooks(const TensorImpl* self) const = 0;
+ };
+
+ struct C10_API PyInterpreter {
+   const PyInterpreterVTable* vtable_;
+
+   PyInterpreter(const PyInterpreterVTable* vtable) : vtable_(vtable) {}
+
+   const PyInterpreterVTable& operator*() const noexcept {
+     return *vtable_;
+   }
+   const PyInterpreterVTable* operator->() const noexcept {
+     return vtable_;
+   }
+
+   // Disarm this PyInterpreter, making all of its methods noops.
+   // The vtable pointer is not an atomic at the moment, which means
+   // a disarm() invocation that is concurrent with active destructors
+   // is not thread safe and will trigger TSAN. My hope is that this
+   // situation doesn't ever actually happen; tensor destruction should
+   // quiesce when a dlclose happens, and any long-lived tensors whose
+   // destructors would be disarmed here only begin the destruction process
+   // on process shutdown (long after the dlclose has occurred).
+   void disarm() noexcept;
+ };
+
+ // PyInterpreterStatus describes what the state of its interpreter tag
+ // is, relative to the thread currently holding the GIL.
+ enum class PyInterpreterStatus {
+   // We just allocated the Tensor, it hasn't escaped to other threads,
+   // we know that it definitely hasn't been tagged to be associated
+   // with an interpreter.
+   DEFINITELY_UNINITIALIZED,
+   // We queried the interpreter field and it looked uninitialized. But
+   // another thread may have raced with us to tag it with some other
+   // interpreter id. So we will have to do a CEX (compare-and-exchange) to
+   // make sure we can actually nab it.
+   MAYBE_UNINITIALIZED,
+   // We queried the interpreter field and it was tagged to belong to us.
+   // This means we have sole write access (as we hold the GIL for this
+   // interpreter)
+   TAGGED_BY_US,
+   // Someone else tagged this. We can't use this TensorImpl from Python.
+   TAGGED_BY_OTHER,
+ };
+
+ } // namespace c10::impl
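The tag-claiming protocol described in the notes above reduces to an atomic compare-exchange. A minimal free-standing sketch (this helper is illustrative only, not the actual TensorImpl code):

    #include <atomic>
    #include <c10/core/impl/PyInterpreter.h>

    c10::impl::PyInterpreterStatus claim_tag(
        std::atomic<c10::impl::PyInterpreter*>& slot,
        c10::impl::PyInterpreter* self) {
      c10::impl::PyInterpreter* expected = nullptr;
      // MAYBE_UNINITIALIZED case: try to move the slot from "untagged" to us.
      if (slot.compare_exchange_strong(expected, self)) {
        return c10::impl::PyInterpreterStatus::TAGGED_BY_US;
      }
      // Someone beat us to it; it may even have been our own interpreter.
      return expected == self
          ? c10::impl::PyInterpreterStatus::TAGGED_BY_US
          : c10::impl::PyInterpreterStatus::TAGGED_BY_OTHER;
    }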
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h ADDED
@@ -0,0 +1,67 @@
+ #pragma once
+
+ #include <c10/core/SafePyObject.h>
+ #include <c10/macros/Export.h>
+
+ namespace c10::impl {
+
+ enum class TorchDispatchModeKey : int8_t {
+   FAKE,
+   PROXY,
+   FUNCTIONAL,
+   NUM_MODE_KEYS
+ };
+
+ using PyObject_TorchDispatchMode = SafePyObjectT<TorchDispatchModeKey>;
+
+ struct C10_API TorchDispatchModeTLS {
+   // This API is NOT invariant safe.
+   // It must not take in an infra mode that uses a TorchDispatchModeKey.
+   // If you're pushing an infra mode onto the stack, we expect
+   // you to use set_mode instead.
+   static void push_non_infra_mode_onto_stack(
+       std::shared_ptr<PyObject_TorchDispatchMode> mode);
+   // Pops the top mode off the stack,
+   // giving precedence to user modes before attempting to pop
+   // any infra modes.
+   static const std::shared_ptr<PyObject_TorchDispatchMode> pop_stack();
+   // Returns the highest-priority infra mode on the stack,
+   // along with its mode key.
+   static const std::
+       tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
+       pop_highest_infra_mode();
+
+   static const std::shared_ptr<PyObject_TorchDispatchMode>& get_stack_at(
+       int64_t idx);
+   static int64_t stack_len();
+
+   static const std::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+   get_mode(TorchDispatchModeKey mode_key);
+   static const std::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+   unset_mode(TorchDispatchModeKey mode_key);
+   static void set_mode(
+       const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
+       TorchDispatchModeKey mode_key);
+
+   static const TorchDispatchModeTLS& get_state();
+   static void set_state(TorchDispatchModeTLS state);
+
+   static bool any_modes_set(bool skip_infra_modes = false);
+
+  private:
+   std::vector<std::shared_ptr<PyObject_TorchDispatchMode>> stack_;
+   // Users are allowed to push multiple ProxyTorchDispatchMode objects onto
+   // the stack. However, we only allow a single FakeTensorMode onto the stack
+   // at a time (pushing additional FakeTensorModes onto the stack is a no-op).
+   std::array<
+       std::optional<std::shared_ptr<PyObject_TorchDispatchMode>>,
+       static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS)>
+       infra_modes_;
+ };
+
+ C10_API bool dispatch_mode_enabled();
+
+ C10_API std::string to_string(TorchDispatchModeKey mode_key);
+
+ } // namespace c10::impl
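As a consumer-side sketch, C++ fast paths typically consult this TLS before committing to a non-Python route. This uses only the declarations above; the surrounding function is hypothetical.

    #include <c10/core/impl/TorchDispatchModeTLS.h>

    bool should_take_python_path() {
      // Reports whether any torch_dispatch mode (user or infra) is active.
      if (c10::impl::dispatch_mode_enabled()) {
        return true;
      }
      // A targeted query, shown only to illustrate get_mode: is a
      // FakeTensorMode installed? (nullopt here if no modes are set at all)
      return c10::impl::TorchDispatchModeTLS::get_mode(
                 c10::impl::TorchDispatchModeKey::FAKE)
          .has_value();
    }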