ZTWHHH commited on
Commit
3a9e293
·
verified ·
1 Parent(s): 0c4c9e0

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/include/python3.10/bltinmodule.h +14 -0
  2. llava_next/include/python3.10/fileobject.h +49 -0
  3. llava_next/include/python3.10/opcode.h +172 -0
  4. llava_next/include/python3.10/patchlevel.h +35 -0
  5. llava_next/include/python3.10/pylifecycle.h +74 -0
  6. llava_next/include/python3.10/structmember.h +75 -0
  7. parrot/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h +72 -0
  8. parrot/lib/python3.10/site-packages/torch/include/c10/core/Backend.h +387 -0
  9. parrot/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h +59 -0
  10. parrot/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h +57 -0
  11. parrot/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h +129 -0
  12. parrot/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h +48 -0
  13. parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h +15 -0
  14. parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h +45 -0
  15. parrot/lib/python3.10/site-packages/torch/include/c10/core/Device.h +216 -0
  16. parrot/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h +28 -0
  17. parrot/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h +746 -0
  18. parrot/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h +125 -0
  19. parrot/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h +44 -0
  20. parrot/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h +86 -0
  21. parrot/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h +290 -0
  22. parrot/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h +31 -0
  23. parrot/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h +76 -0
  24. parrot/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h +50 -0
  25. parrot/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h +52 -0
  26. parrot/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h +99 -0
  27. parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h +564 -0
  28. parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h +57 -0
  29. parrot/lib/python3.10/site-packages/torch/include/c10/core/Storage.h +272 -0
  30. parrot/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h +330 -0
  31. parrot/lib/python3.10/site-packages/torch/include/c10/core/Stream.h +176 -0
  32. parrot/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h +170 -0
  33. parrot/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h +113 -0
  34. parrot/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h +423 -0
  35. parrot/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h +236 -0
  36. parrot/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h +214 -0
  37. parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h +0 -0
  38. parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h +787 -0
  39. parrot/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h +42 -0
  40. parrot/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h +48 -0
  41. parrot/lib/python3.10/site-packages/torch/include/c10/core/alignment.h +21 -0
  42. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h +365 -0
  43. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h +59 -0
  44. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h +428 -0
  45. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h +255 -0
  46. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h +164 -0
  47. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h +190 -0
  48. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h +24 -0
  49. parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h +12 -0
  50. parrot/lib/python3.10/site-packages/torch/include/c10/macros/Export.h +160 -0
llava_next/include/python3.10/bltinmodule.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef Py_BLTINMODULE_H
2
+ #define Py_BLTINMODULE_H
3
+ #ifdef __cplusplus
4
+ extern "C" {
5
+ #endif
6
+
7
+ PyAPI_DATA(PyTypeObject) PyFilter_Type;
8
+ PyAPI_DATA(PyTypeObject) PyMap_Type;
9
+ PyAPI_DATA(PyTypeObject) PyZip_Type;
10
+
11
+ #ifdef __cplusplus
12
+ }
13
+ #endif
14
+ #endif /* !Py_BLTINMODULE_H */
llava_next/include/python3.10/fileobject.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* File object interface (what's left of it -- see io.py) */
2
+
3
+ #ifndef Py_FILEOBJECT_H
4
+ #define Py_FILEOBJECT_H
5
+ #ifdef __cplusplus
6
+ extern "C" {
7
+ #endif
8
+
9
+ #define PY_STDIOTEXTMODE "b"
10
+
11
+ PyAPI_FUNC(PyObject *) PyFile_FromFd(int, const char *, const char *, int,
12
+ const char *, const char *,
13
+ const char *, int);
14
+ PyAPI_FUNC(PyObject *) PyFile_GetLine(PyObject *, int);
15
+ PyAPI_FUNC(int) PyFile_WriteObject(PyObject *, PyObject *, int);
16
+ PyAPI_FUNC(int) PyFile_WriteString(const char *, PyObject *);
17
+ PyAPI_FUNC(int) PyObject_AsFileDescriptor(PyObject *);
18
+
19
+ /* The default encoding used by the platform file system APIs
20
+ If non-NULL, this is different than the default encoding for strings
21
+ */
22
+ PyAPI_DATA(const char *) Py_FileSystemDefaultEncoding;
23
+ #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000
24
+ PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors;
25
+ #endif
26
+ PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding;
27
+
28
+ #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
29
+ PyAPI_DATA(int) Py_UTF8Mode;
30
+ #endif
31
+
32
+ /* A routine to check if a file descriptor can be select()-ed. */
33
+ #ifdef _MSC_VER
34
+ /* On Windows, any socket fd can be select()-ed, no matter how high */
35
+ #define _PyIsSelectable_fd(FD) (1)
36
+ #else
37
+ #define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE)
38
+ #endif
39
+
40
+ #ifndef Py_LIMITED_API
41
+ # define Py_CPYTHON_FILEOBJECT_H
42
+ # include "cpython/fileobject.h"
43
+ # undef Py_CPYTHON_FILEOBJECT_H
44
+ #endif
45
+
46
+ #ifdef __cplusplus
47
+ }
48
+ #endif
49
+ #endif /* !Py_FILEOBJECT_H */
llava_next/include/python3.10/opcode.h ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Auto-generated by Tools/scripts/generate_opcode_h.py from Lib/opcode.py */
2
+ #ifndef Py_OPCODE_H
3
+ #define Py_OPCODE_H
4
+ #ifdef __cplusplus
5
+ extern "C" {
6
+ #endif
7
+
8
+
9
+ /* Instruction opcodes for compiled code */
10
+ #define POP_TOP 1
11
+ #define ROT_TWO 2
12
+ #define ROT_THREE 3
13
+ #define DUP_TOP 4
14
+ #define DUP_TOP_TWO 5
15
+ #define ROT_FOUR 6
16
+ #define NOP 9
17
+ #define UNARY_POSITIVE 10
18
+ #define UNARY_NEGATIVE 11
19
+ #define UNARY_NOT 12
20
+ #define UNARY_INVERT 15
21
+ #define BINARY_MATRIX_MULTIPLY 16
22
+ #define INPLACE_MATRIX_MULTIPLY 17
23
+ #define BINARY_POWER 19
24
+ #define BINARY_MULTIPLY 20
25
+ #define BINARY_MODULO 22
26
+ #define BINARY_ADD 23
27
+ #define BINARY_SUBTRACT 24
28
+ #define BINARY_SUBSCR 25
29
+ #define BINARY_FLOOR_DIVIDE 26
30
+ #define BINARY_TRUE_DIVIDE 27
31
+ #define INPLACE_FLOOR_DIVIDE 28
32
+ #define INPLACE_TRUE_DIVIDE 29
33
+ #define GET_LEN 30
34
+ #define MATCH_MAPPING 31
35
+ #define MATCH_SEQUENCE 32
36
+ #define MATCH_KEYS 33
37
+ #define COPY_DICT_WITHOUT_KEYS 34
38
+ #define WITH_EXCEPT_START 49
39
+ #define GET_AITER 50
40
+ #define GET_ANEXT 51
41
+ #define BEFORE_ASYNC_WITH 52
42
+ #define END_ASYNC_FOR 54
43
+ #define INPLACE_ADD 55
44
+ #define INPLACE_SUBTRACT 56
45
+ #define INPLACE_MULTIPLY 57
46
+ #define INPLACE_MODULO 59
47
+ #define STORE_SUBSCR 60
48
+ #define DELETE_SUBSCR 61
49
+ #define BINARY_LSHIFT 62
50
+ #define BINARY_RSHIFT 63
51
+ #define BINARY_AND 64
52
+ #define BINARY_XOR 65
53
+ #define BINARY_OR 66
54
+ #define INPLACE_POWER 67
55
+ #define GET_ITER 68
56
+ #define GET_YIELD_FROM_ITER 69
57
+ #define PRINT_EXPR 70
58
+ #define LOAD_BUILD_CLASS 71
59
+ #define YIELD_FROM 72
60
+ #define GET_AWAITABLE 73
61
+ #define LOAD_ASSERTION_ERROR 74
62
+ #define INPLACE_LSHIFT 75
63
+ #define INPLACE_RSHIFT 76
64
+ #define INPLACE_AND 77
65
+ #define INPLACE_XOR 78
66
+ #define INPLACE_OR 79
67
+ #define LIST_TO_TUPLE 82
68
+ #define RETURN_VALUE 83
69
+ #define IMPORT_STAR 84
70
+ #define SETUP_ANNOTATIONS 85
71
+ #define YIELD_VALUE 86
72
+ #define POP_BLOCK 87
73
+ #define POP_EXCEPT 89
74
+ #define HAVE_ARGUMENT 90
75
+ #define STORE_NAME 90
76
+ #define DELETE_NAME 91
77
+ #define UNPACK_SEQUENCE 92
78
+ #define FOR_ITER 93
79
+ #define UNPACK_EX 94
80
+ #define STORE_ATTR 95
81
+ #define DELETE_ATTR 96
82
+ #define STORE_GLOBAL 97
83
+ #define DELETE_GLOBAL 98
84
+ #define ROT_N 99
85
+ #define LOAD_CONST 100
86
+ #define LOAD_NAME 101
87
+ #define BUILD_TUPLE 102
88
+ #define BUILD_LIST 103
89
+ #define BUILD_SET 104
90
+ #define BUILD_MAP 105
91
+ #define LOAD_ATTR 106
92
+ #define COMPARE_OP 107
93
+ #define IMPORT_NAME 108
94
+ #define IMPORT_FROM 109
95
+ #define JUMP_FORWARD 110
96
+ #define JUMP_IF_FALSE_OR_POP 111
97
+ #define JUMP_IF_TRUE_OR_POP 112
98
+ #define JUMP_ABSOLUTE 113
99
+ #define POP_JUMP_IF_FALSE 114
100
+ #define POP_JUMP_IF_TRUE 115
101
+ #define LOAD_GLOBAL 116
102
+ #define IS_OP 117
103
+ #define CONTAINS_OP 118
104
+ #define RERAISE 119
105
+ #define JUMP_IF_NOT_EXC_MATCH 121
106
+ #define SETUP_FINALLY 122
107
+ #define LOAD_FAST 124
108
+ #define STORE_FAST 125
109
+ #define DELETE_FAST 126
110
+ #define GEN_START 129
111
+ #define RAISE_VARARGS 130
112
+ #define CALL_FUNCTION 131
113
+ #define MAKE_FUNCTION 132
114
+ #define BUILD_SLICE 133
115
+ #define LOAD_CLOSURE 135
116
+ #define LOAD_DEREF 136
117
+ #define STORE_DEREF 137
118
+ #define DELETE_DEREF 138
119
+ #define CALL_FUNCTION_KW 141
120
+ #define CALL_FUNCTION_EX 142
121
+ #define SETUP_WITH 143
122
+ #define EXTENDED_ARG 144
123
+ #define LIST_APPEND 145
124
+ #define SET_ADD 146
125
+ #define MAP_ADD 147
126
+ #define LOAD_CLASSDEREF 148
127
+ #define MATCH_CLASS 152
128
+ #define SETUP_ASYNC_WITH 154
129
+ #define FORMAT_VALUE 155
130
+ #define BUILD_CONST_KEY_MAP 156
131
+ #define BUILD_STRING 157
132
+ #define LOAD_METHOD 160
133
+ #define CALL_METHOD 161
134
+ #define LIST_EXTEND 162
135
+ #define SET_UPDATE 163
136
+ #define DICT_MERGE 164
137
+ #define DICT_UPDATE 165
138
+ #ifdef NEED_OPCODE_JUMP_TABLES
139
+ static uint32_t _PyOpcode_RelativeJump[8] = {
140
+ 0U,
141
+ 0U,
142
+ 536870912U,
143
+ 67125248U,
144
+ 67141632U,
145
+ 0U,
146
+ 0U,
147
+ 0U,
148
+ };
149
+ static uint32_t _PyOpcode_Jump[8] = {
150
+ 0U,
151
+ 0U,
152
+ 536870912U,
153
+ 101695488U,
154
+ 67141632U,
155
+ 0U,
156
+ 0U,
157
+ 0U,
158
+ };
159
+ #endif /* OPCODE_TABLES */
160
+
161
+ /* EXCEPT_HANDLER is a special, implicit block type which is created when
162
+ entering an except handler. It is not an opcode but we define it here
163
+ as we want it to be available to both frameobject.c and ceval.c, while
164
+ remaining private.*/
165
+ #define EXCEPT_HANDLER 257
166
+
167
+ #define HAS_ARG(op) ((op) >= HAVE_ARGUMENT)
168
+
169
+ #ifdef __cplusplus
170
+ }
171
+ #endif
172
+ #endif /* !Py_OPCODE_H */
llava_next/include/python3.10/patchlevel.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Python version identification scheme.
3
+
4
+ When the major or minor version changes, the VERSION variable in
5
+ configure.ac must also be changed.
6
+
7
+ There is also (independent) API version information in modsupport.h.
8
+ */
9
+
10
+ /* Values for PY_RELEASE_LEVEL */
11
+ #define PY_RELEASE_LEVEL_ALPHA 0xA
12
+ #define PY_RELEASE_LEVEL_BETA 0xB
13
+ #define PY_RELEASE_LEVEL_GAMMA 0xC /* For release candidates */
14
+ #define PY_RELEASE_LEVEL_FINAL 0xF /* Serial should be 0 here */
15
+ /* Higher for patch releases */
16
+
17
+ /* Version parsed out into numeric values */
18
+ /*--start constants--*/
19
+ #define PY_MAJOR_VERSION 3
20
+ #define PY_MINOR_VERSION 10
21
+ #define PY_MICRO_VERSION 16
22
+ #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL
23
+ #define PY_RELEASE_SERIAL 0
24
+
25
+ /* Version as a string */
26
+ #define PY_VERSION "3.10.16"
27
+ /*--end constants--*/
28
+
29
+ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
30
+ Use this for numeric comparisons, e.g. #if PY_VERSION_HEX >= ... */
31
+ #define PY_VERSION_HEX ((PY_MAJOR_VERSION << 24) | \
32
+ (PY_MINOR_VERSION << 16) | \
33
+ (PY_MICRO_VERSION << 8) | \
34
+ (PY_RELEASE_LEVEL << 4) | \
35
+ (PY_RELEASE_SERIAL << 0))
llava_next/include/python3.10/pylifecycle.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /* Interfaces to configure, query, create & destroy the Python runtime */
3
+
4
+ #ifndef Py_PYLIFECYCLE_H
5
+ #define Py_PYLIFECYCLE_H
6
+ #ifdef __cplusplus
7
+ extern "C" {
8
+ #endif
9
+
10
+
11
+ /* Initialization and finalization */
12
+ PyAPI_FUNC(void) Py_Initialize(void);
13
+ PyAPI_FUNC(void) Py_InitializeEx(int);
14
+ PyAPI_FUNC(void) Py_Finalize(void);
15
+ #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000
16
+ PyAPI_FUNC(int) Py_FinalizeEx(void);
17
+ #endif
18
+ PyAPI_FUNC(int) Py_IsInitialized(void);
19
+
20
+ /* Subinterpreter support */
21
+ PyAPI_FUNC(PyThreadState *) Py_NewInterpreter(void);
22
+ PyAPI_FUNC(void) Py_EndInterpreter(PyThreadState *);
23
+
24
+
25
+ /* Py_PyAtExit is for the atexit module, Py_AtExit is for low-level
26
+ * exit functions.
27
+ */
28
+ PyAPI_FUNC(int) Py_AtExit(void (*func)(void));
29
+
30
+ PyAPI_FUNC(void) _Py_NO_RETURN Py_Exit(int);
31
+
32
+ /* Bootstrap __main__ (defined in Modules/main.c) */
33
+ PyAPI_FUNC(int) Py_Main(int argc, wchar_t **argv);
34
+ PyAPI_FUNC(int) Py_BytesMain(int argc, char **argv);
35
+
36
+ /* In pathconfig.c */
37
+ PyAPI_FUNC(void) Py_SetProgramName(const wchar_t *);
38
+ PyAPI_FUNC(wchar_t *) Py_GetProgramName(void);
39
+
40
+ PyAPI_FUNC(void) Py_SetPythonHome(const wchar_t *);
41
+ PyAPI_FUNC(wchar_t *) Py_GetPythonHome(void);
42
+
43
+ PyAPI_FUNC(wchar_t *) Py_GetProgramFullPath(void);
44
+
45
+ PyAPI_FUNC(wchar_t *) Py_GetPrefix(void);
46
+ PyAPI_FUNC(wchar_t *) Py_GetExecPrefix(void);
47
+ PyAPI_FUNC(wchar_t *) Py_GetPath(void);
48
+ PyAPI_FUNC(void) Py_SetPath(const wchar_t *);
49
+ #ifdef MS_WINDOWS
50
+ int _Py_CheckPython3(void);
51
+ #endif
52
+
53
+ /* In their own files */
54
+ PyAPI_FUNC(const char *) Py_GetVersion(void);
55
+ PyAPI_FUNC(const char *) Py_GetPlatform(void);
56
+ PyAPI_FUNC(const char *) Py_GetCopyright(void);
57
+ PyAPI_FUNC(const char *) Py_GetCompiler(void);
58
+ PyAPI_FUNC(const char *) Py_GetBuildInfo(void);
59
+
60
+ /* Signals */
61
+ typedef void (*PyOS_sighandler_t)(int);
62
+ PyAPI_FUNC(PyOS_sighandler_t) PyOS_getsig(int);
63
+ PyAPI_FUNC(PyOS_sighandler_t) PyOS_setsig(int, PyOS_sighandler_t);
64
+
65
+ #ifndef Py_LIMITED_API
66
+ # define Py_CPYTHON_PYLIFECYCLE_H
67
+ # include "cpython/pylifecycle.h"
68
+ # undef Py_CPYTHON_PYLIFECYCLE_H
69
+ #endif
70
+
71
+ #ifdef __cplusplus
72
+ }
73
+ #endif
74
+ #endif /* !Py_PYLIFECYCLE_H */
llava_next/include/python3.10/structmember.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef Py_STRUCTMEMBER_H
2
+ #define Py_STRUCTMEMBER_H
3
+ #ifdef __cplusplus
4
+ extern "C" {
5
+ #endif
6
+
7
+
8
+ /* Interface to map C struct members to Python object attributes */
9
+
10
+ #include <stddef.h> /* For offsetof */
11
+
12
+ /* An array of PyMemberDef structures defines the name, type and offset
13
+ of selected members of a C structure. These can be read by
14
+ PyMember_GetOne() and set by PyMember_SetOne() (except if their READONLY
15
+ flag is set). The array must be terminated with an entry whose name
16
+ pointer is NULL. */
17
+
18
+ typedef struct PyMemberDef {
19
+ const char *name;
20
+ int type;
21
+ Py_ssize_t offset;
22
+ int flags;
23
+ const char *doc;
24
+ } PyMemberDef;
25
+
26
+ /* Types */
27
+ #define T_SHORT 0
28
+ #define T_INT 1
29
+ #define T_LONG 2
30
+ #define T_FLOAT 3
31
+ #define T_DOUBLE 4
32
+ #define T_STRING 5
33
+ #define T_OBJECT 6
34
+ /* XXX the ordering here is weird for binary compatibility */
35
+ #define T_CHAR 7 /* 1-character string */
36
+ #define T_BYTE 8 /* 8-bit signed int */
37
+ /* unsigned variants: */
38
+ #define T_UBYTE 9
39
+ #define T_USHORT 10
40
+ #define T_UINT 11
41
+ #define T_ULONG 12
42
+
43
+ /* Added by Jack: strings contained in the structure */
44
+ #define T_STRING_INPLACE 13
45
+
46
+ /* Added by Lillo: bools contained in the structure (assumed char) */
47
+ #define T_BOOL 14
48
+
49
+ #define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError
50
+ when the value is NULL, instead of
51
+ converting to None. */
52
+ #define T_LONGLONG 17
53
+ #define T_ULONGLONG 18
54
+
55
+ #define T_PYSSIZET 19 /* Py_ssize_t */
56
+ #define T_NONE 20 /* Value is always None */
57
+
58
+
59
+ /* Flags */
60
+ #define READONLY 1
61
+ #define READ_RESTRICTED 2
62
+ #define PY_WRITE_RESTRICTED 4
63
+ #define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED)
64
+
65
+ #define PY_AUDIT_READ READ_RESTRICTED
66
+
67
+ /* Current API, use this */
68
+ PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, struct PyMemberDef *);
69
+ PyAPI_FUNC(int) PyMember_SetOne(char *, struct PyMemberDef *, PyObject *);
70
+
71
+
72
+ #ifdef __cplusplus
73
+ }
74
+ #endif
75
+ #endif /* !Py_STRUCTMEMBER_H */
parrot/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ namespace c10 {
6
+
7
+ // Structure used to pack all the thread local boolean
8
+ // flags used by autograd
9
+ struct C10_API AutogradState {
10
+ static AutogradState& get_tls_state();
11
+ static void set_tls_state(AutogradState state);
12
+
13
+ AutogradState(
14
+ bool grad_mode,
15
+ bool inference_mode,
16
+ bool fw_grad_mode,
17
+ bool multithreading_enabled)
18
+ : grad_mode_(grad_mode),
19
+ inference_mode_(inference_mode),
20
+ fw_grad_mode_(fw_grad_mode),
21
+ multithreading_enabled_(multithreading_enabled),
22
+ view_replay_enabled_(false) {}
23
+
24
+ void set_grad_mode(bool enabled) {
25
+ grad_mode_ = enabled;
26
+ }
27
+
28
+ void set_fw_grad_mode(bool enabled) {
29
+ fw_grad_mode_ = enabled;
30
+ }
31
+
32
+ void set_inference_mode(bool enabled) {
33
+ inference_mode_ = enabled;
34
+ }
35
+
36
+ void set_multithreading_enabled(bool multithreading_enabled) {
37
+ multithreading_enabled_ = multithreading_enabled;
38
+ }
39
+
40
+ void set_view_replay_enabled(bool view_replay_enabled) {
41
+ view_replay_enabled_ = view_replay_enabled;
42
+ }
43
+
44
+ bool get_grad_mode() const {
45
+ return grad_mode_;
46
+ }
47
+
48
+ bool get_fw_grad_mode() const {
49
+ return fw_grad_mode_;
50
+ }
51
+
52
+ bool get_inference_mode() const {
53
+ return inference_mode_;
54
+ }
55
+
56
+ bool get_multithreading_enabled() const {
57
+ return multithreading_enabled_;
58
+ }
59
+
60
+ bool get_view_replay_enabled() const {
61
+ return view_replay_enabled_;
62
+ }
63
+
64
+ private:
65
+ bool grad_mode_ : 1;
66
+ bool inference_mode_ : 1;
67
+ bool fw_grad_mode_ : 1;
68
+ bool multithreading_enabled_ : 1;
69
+ bool view_replay_enabled_ : 1;
70
+ };
71
+
72
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/Backend.h ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/core/DispatchKey.h>
5
+ #include <c10/core/DispatchKeySet.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+ #include <stdexcept>
9
+
10
+ namespace c10 {
11
+
12
+ /**
13
+ * This legacy enum class defines the set of backends supported by old school,
14
+ * code generated Type-based ATen. A "backend" in this sense roughly
15
+ * corresponds to the cartesian product of (device type, layout), but restricted
16
+ * only to combinations which we actually have kernels for. Backend does NOT
17
+ * include dtype.
18
+ *
19
+ * The reason we are sunsetting this enum class is because it doesn't allow for
20
+ * open registration; e.g., if you want to add SparseXLA, you'd have to
21
+ * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is
22
+ * the replacement for Backend which supports open registration.
23
+ *
24
+ * NB: The concept of 'Backend' here disagrees with the notion of backend
25
+ * exposed to users in torch.backends. Backend here is something like "CPU"
26
+ * or "SparseCUDA"; backend in torch.backends is something like "MKL" or
27
+ * "CUDNN".
28
+ */
29
+ enum class Backend {
30
+ CPU,
31
+ CUDA,
32
+ HIP,
33
+ VE,
34
+ FPGA,
35
+ IPU,
36
+ XPU,
37
+ SparseCPU,
38
+ SparseCUDA,
39
+ SparseCsrCPU,
40
+ SparseCsrCUDA,
41
+ SparseHIP,
42
+ SparseVE,
43
+ SparseXPU,
44
+ SparsePrivateUse1,
45
+ SparseCsrHIP,
46
+ SparseCsrVE,
47
+ SparseCsrXPU,
48
+ SparseCsrPrivateUse1,
49
+ MAIA,
50
+ XLA,
51
+ Vulkan,
52
+ Metal,
53
+ Meta,
54
+ QuantizedCPU,
55
+ QuantizedCUDA,
56
+ QuantizedXPU,
57
+ QuantizedPrivateUse1,
58
+ Undefined,
59
+ MkldnnCPU,
60
+ MPS,
61
+ HPU,
62
+ Lazy,
63
+ MTIA,
64
+ PrivateUse1,
65
+ NumOptions
66
+ };
67
+
68
+ inline Backend dispatchKeyToBackend(DispatchKey t) {
69
+ if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) {
70
+ return Backend::CPU;
71
+ } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) {
72
+ return Backend::CUDA;
73
+ } else if (t == DispatchKey::HIP) {
74
+ return Backend::HIP;
75
+ } else if (t == DispatchKey::VE) {
76
+ return Backend::VE;
77
+ } else if (t == DispatchKey::FPGA) {
78
+ return Backend::FPGA;
79
+ } else if (t == DispatchKey::MAIA) {
80
+ return Backend::MAIA;
81
+ } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) {
82
+ return Backend::XLA;
83
+ } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) {
84
+ return Backend::Lazy;
85
+ } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) {
86
+ return Backend::MPS;
87
+ } else if (t == DispatchKey::Vulkan) {
88
+ return Backend::Vulkan;
89
+ } else if (t == DispatchKey::Metal) {
90
+ return Backend::Metal;
91
+ } else if (t == DispatchKey::Meta) {
92
+ return Backend::Meta;
93
+ } else if (t == DispatchKey::SparseCPU) {
94
+ return Backend::SparseCPU;
95
+ } else if (t == DispatchKey::SparseCUDA) {
96
+ return Backend::SparseCUDA;
97
+ } else if (t == DispatchKey::SparseHIP) {
98
+ return Backend::SparseHIP;
99
+ } else if (t == DispatchKey::SparseVE) {
100
+ return Backend::SparseVE;
101
+ } else if (t == DispatchKey::SparsePrivateUse1) {
102
+ return Backend::SparsePrivateUse1;
103
+ } else if (t == DispatchKey::SparseCsrCPU) {
104
+ return Backend::SparseCsrCPU;
105
+ } else if (t == DispatchKey::SparseCsrCUDA) {
106
+ return Backend::SparseCsrCUDA;
107
+ } else if (t == DispatchKey::SparseCsrHIP) {
108
+ return Backend::SparseCsrHIP;
109
+ } else if (t == DispatchKey::SparseCsrVE) {
110
+ return Backend::SparseCsrVE;
111
+ } else if (t == DispatchKey::SparseCsrPrivateUse1) {
112
+ return Backend::SparseCsrPrivateUse1;
113
+ } else if (t == DispatchKey::MkldnnCPU) {
114
+ return Backend::MkldnnCPU;
115
+ } else if (t == DispatchKey::QuantizedCPU) {
116
+ return Backend::QuantizedCPU;
117
+ } else if (t == DispatchKey::QuantizedCUDA) {
118
+ return Backend::QuantizedCUDA;
119
+ } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) {
120
+ return Backend::IPU;
121
+ } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) {
122
+ return Backend::XPU;
123
+ } else if (t == DispatchKey::SparseXPU) {
124
+ return Backend::SparseXPU;
125
+ } else if (t == DispatchKey::SparseCsrXPU) {
126
+ return Backend::SparseCsrXPU;
127
+ } else if (t == DispatchKey::QuantizedXPU) {
128
+ return Backend::QuantizedXPU;
129
+ } else if (t == DispatchKey::QuantizedPrivateUse1) {
130
+ return Backend::QuantizedPrivateUse1;
131
+ } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) {
132
+ return Backend::HPU;
133
+ } else if (t == DispatchKey::MTIA || t == DispatchKey::AutogradMTIA) {
134
+ return Backend::MTIA;
135
+ } else if (
136
+ t == DispatchKey::PrivateUse1 || t == DispatchKey::AutogradPrivateUse1) {
137
+ return Backend::PrivateUse1;
138
+ } else if (t == DispatchKey::Undefined) {
139
+ return Backend::Undefined;
140
+ } else {
141
+ TORCH_CHECK(false, "Unrecognized tensor type ID: ", t);
142
+ }
143
+ }
144
+
145
+ inline DispatchKey backendToDispatchKey(Backend b) {
146
+ switch (b) {
147
+ case Backend::CPU:
148
+ return DispatchKey::CPU;
149
+ case Backend::CUDA:
150
+ return DispatchKey::CUDA;
151
+ case Backend::HIP:
152
+ return DispatchKey::HIP;
153
+ case Backend::VE:
154
+ return DispatchKey::VE;
155
+ case Backend::FPGA:
156
+ return DispatchKey::FPGA;
157
+ case Backend::MAIA:
158
+ return DispatchKey::MAIA;
159
+ case Backend::XLA:
160
+ return DispatchKey::XLA;
161
+ case Backend::Lazy:
162
+ return DispatchKey::Lazy;
163
+ case Backend::IPU:
164
+ return DispatchKey::IPU;
165
+ case Backend::XPU:
166
+ return DispatchKey::XPU;
167
+ case Backend::SparseXPU:
168
+ return DispatchKey::SparseXPU;
169
+ case Backend::SparseCsrXPU:
170
+ return DispatchKey::SparseCsrXPU;
171
+ case Backend::SparseCPU:
172
+ return DispatchKey::SparseCPU;
173
+ case Backend::SparseCUDA:
174
+ return DispatchKey::SparseCUDA;
175
+ case Backend::SparseHIP:
176
+ return DispatchKey::SparseHIP;
177
+ case Backend::SparseVE:
178
+ return DispatchKey::SparseVE;
179
+ case Backend::SparsePrivateUse1:
180
+ return DispatchKey::SparsePrivateUse1;
181
+ case Backend::SparseCsrCPU:
182
+ return DispatchKey::SparseCsrCPU;
183
+ case Backend::SparseCsrCUDA:
184
+ return DispatchKey::SparseCsrCUDA;
185
+ case Backend::SparseCsrHIP:
186
+ return DispatchKey::SparseCsrHIP;
187
+ case Backend::SparseCsrVE:
188
+ return DispatchKey::SparseCsrVE;
189
+ case Backend::SparseCsrPrivateUse1:
190
+ return DispatchKey::SparseCsrPrivateUse1;
191
+ case Backend::MkldnnCPU:
192
+ return DispatchKey::MkldnnCPU;
193
+ case Backend::Vulkan:
194
+ return DispatchKey::Vulkan;
195
+ case Backend::Metal:
196
+ return DispatchKey::Metal;
197
+ case Backend::Meta:
198
+ return DispatchKey::Meta;
199
+ case Backend::QuantizedCPU:
200
+ return DispatchKey::QuantizedCPU;
201
+ case Backend::QuantizedCUDA:
202
+ return DispatchKey::QuantizedCUDA;
203
+ case Backend::QuantizedPrivateUse1:
204
+ return DispatchKey::QuantizedPrivateUse1;
205
+ case Backend::Undefined:
206
+ return DispatchKey::Undefined;
207
+ case Backend::MPS:
208
+ return DispatchKey::MPS;
209
+ case Backend::HPU:
210
+ return DispatchKey::HPU;
211
+ case Backend::MTIA:
212
+ return DispatchKey::MTIA;
213
+ case Backend::PrivateUse1:
214
+ return DispatchKey::PrivateUse1;
215
+ default:
216
+ throw std::runtime_error("Unknown backend");
217
+ }
218
+ }
219
+
220
+ inline DeviceType backendToDeviceType(Backend b) {
221
+ switch (b) {
222
+ case Backend::CPU:
223
+ case Backend::MkldnnCPU:
224
+ case Backend::SparseCPU:
225
+ case Backend::SparseCsrCPU:
226
+ case Backend::QuantizedCPU:
227
+ return DeviceType::CPU;
228
+ case Backend::CUDA:
229
+ case Backend::SparseCUDA:
230
+ case Backend::QuantizedCUDA:
231
+ case Backend::SparseCsrCUDA:
232
+ return DeviceType::CUDA;
233
+ case Backend::HIP:
234
+ return DeviceType::HIP;
235
+ case Backend::VE:
236
+ return DeviceType::VE;
237
+ case Backend::FPGA:
238
+ return DeviceType::FPGA;
239
+ case Backend::MAIA:
240
+ return DeviceType::MAIA;
241
+ case Backend::XLA:
242
+ return DeviceType::XLA;
243
+ case Backend::Lazy:
244
+ return DeviceType::Lazy;
245
+ case Backend::SparseHIP:
246
+ return DeviceType::HIP;
247
+ case Backend::SparseVE:
248
+ return DeviceType::VE;
249
+ case Backend::SparseCsrHIP:
250
+ return DeviceType::HIP;
251
+ case Backend::SparseCsrVE:
252
+ return DeviceType::VE;
253
+ case Backend::IPU:
254
+ return DeviceType::IPU;
255
+ case Backend::XPU:
256
+ case Backend::SparseXPU:
257
+ case Backend::SparseCsrXPU:
258
+ case Backend::QuantizedXPU:
259
+ return DeviceType::XPU;
260
+ case Backend::Vulkan:
261
+ return DeviceType::Vulkan;
262
+ case Backend::Metal:
263
+ return DeviceType::Metal;
264
+ case Backend::Meta:
265
+ return DeviceType::Meta;
266
+ case Backend::MPS:
267
+ return DeviceType::MPS;
268
+ case Backend::HPU:
269
+ return DeviceType::HPU;
270
+ case Backend::MTIA:
271
+ return DeviceType::MTIA;
272
+ case Backend::PrivateUse1:
273
+ case Backend::SparsePrivateUse1:
274
+ case Backend::SparseCsrPrivateUse1:
275
+ case Backend::QuantizedPrivateUse1:
276
+ return DeviceType::PrivateUse1;
277
+ case Backend::Undefined:
278
+ TORCH_CHECK(false, "Undefined backend is not a valid device type");
279
+ default:
280
+ TORCH_CHECK(false, "Unknown backend");
281
+ }
282
+ }
283
+
284
+ inline const char* toString(Backend b) {
285
+ switch (b) {
286
+ case Backend::CPU:
287
+ return "CPU";
288
+ case Backend::CUDA:
289
+ return "CUDA";
290
+ case Backend::HIP:
291
+ return "HIP";
292
+ case Backend::VE:
293
+ return "VE";
294
+ case Backend::FPGA:
295
+ return "FPGA";
296
+ case Backend::XPU:
297
+ return "XPU";
298
+ case Backend::IPU:
299
+ return "IPU";
300
+ case Backend::MAIA:
301
+ return "MAIA";
302
+ case Backend::XLA:
303
+ return "XLA";
304
+ case Backend::Lazy:
305
+ return "Lazy";
306
+ case Backend::MPS:
307
+ return "MPS";
308
+ case Backend::SparseCPU:
309
+ return "SparseCPU";
310
+ case Backend::SparseCUDA:
311
+ return "SparseCUDA";
312
+ case Backend::SparseHIP:
313
+ return "SparseHIP";
314
+ case Backend::SparseVE:
315
+ return "SparseVE";
316
+ case Backend::SparseXPU:
317
+ return "SparseXPU";
318
+ case Backend::SparsePrivateUse1:
319
+ return "SparsePrivateUse1";
320
+ case Backend::SparseCsrCPU:
321
+ return "SparseCsrCPU";
322
+ case Backend::SparseCsrCUDA:
323
+ return "SparseCsrCUDA";
324
+ case Backend::SparseCsrHIP:
325
+ return "SparseCsrHIP";
326
+ case Backend::SparseCsrVE:
327
+ return "SparseCsrVE";
328
+ case Backend::SparseCsrXPU:
329
+ return "SparseCsrXPU";
330
+ case Backend::SparseCsrPrivateUse1:
331
+ return "SparseCsrPrivateUse1";
332
+ case Backend::MkldnnCPU:
333
+ return "MkldnnCPU";
334
+ case Backend::Vulkan:
335
+ return "Vulkan";
336
+ case Backend::Metal:
337
+ return "Metal";
338
+ case Backend::Meta:
339
+ return "Meta";
340
+ case Backend::QuantizedCPU:
341
+ return "QuantizedCPU";
342
+ case Backend::QuantizedCUDA:
343
+ return "QuantizedCUDA";
344
+ case Backend::QuantizedXPU:
345
+ return "QuantizedXPU";
346
+ case Backend::QuantizedPrivateUse1:
347
+ return "QuantizedPrivateUse1";
348
+ case Backend::HPU:
349
+ return "HPU";
350
+ case Backend::MTIA:
351
+ return "MTIA";
352
+ case Backend::PrivateUse1:
353
+ return "PrivateUseOne";
354
+ default:
355
+ return "UNKNOWN_BACKEND";
356
+ }
357
+ }
358
+
359
+ inline bool isSparse(Backend b) {
360
+ switch (b) {
361
+ case Backend::SparseXPU:
362
+ case Backend::SparseCPU:
363
+ case Backend::SparseCUDA:
364
+ case Backend::SparseHIP:
365
+ case Backend::SparseVE:
366
+ case Backend::SparsePrivateUse1:
367
+ return true;
368
+ default:
369
+ return false;
370
+ }
371
+ }
372
+
373
+ inline bool isSparseCsr(Backend b) {
374
+ switch (b) {
375
+ case Backend::SparseCsrXPU:
376
+ case Backend::SparseCsrCPU:
377
+ case Backend::SparseCsrCUDA:
378
+ case Backend::SparseCsrHIP:
379
+ case Backend::SparseCsrVE:
380
+ case Backend::SparseCsrPrivateUse1:
381
+ return true;
382
+ default:
383
+ return false;
384
+ }
385
+ }
386
+
387
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <cstring>
5
+ #include <mutex>
6
+ #include <unordered_map>
7
+
8
+ #include <c10/core/Allocator.h>
9
+ #include <c10/macros/Export.h>
10
+ #include <c10/util/Flags.h>
11
+
12
+ // TODO: rename to c10
13
+ C10_DECLARE_bool(caffe2_report_cpu_memory_usage);
14
+
15
+ namespace c10 {
16
+
17
+ using MemoryDeleter = void (*)(void*);
18
+
19
+ // A helper function that is basically doing nothing.
20
+ C10_API void NoDelete(void*);
21
+
22
+ // A simple struct that is used to report C10's memory allocation,
23
+ // deallocation status and out-of-memory events to the profiler
24
+ class C10_API ProfiledCPUMemoryReporter {
25
+ public:
26
+ ProfiledCPUMemoryReporter() = default;
27
+ void New(void* ptr, size_t nbytes);
28
+ void OutOfMemory(size_t nbytes);
29
+ void Delete(void* ptr);
30
+
31
+ private:
32
+ std::mutex mutex_;
33
+ std::unordered_map<void*, size_t> size_table_;
34
+ size_t allocated_ = 0;
35
+ size_t log_cnt_ = 0;
36
+ };
37
+
38
+ C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter();
39
+
40
+ // Get the CPU Allocator.
41
+ C10_API at::Allocator* GetCPUAllocator();
42
+ // Sets the CPU allocator to the given allocator: the caller gives away the
43
+ // ownership of the pointer.
44
+ C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0);
45
+
46
+ // Get the Default CPU Allocator
47
+ C10_API at::Allocator* GetDefaultCPUAllocator();
48
+
49
+ // Get the Default Mobile CPU Allocator
50
+ C10_API at::Allocator* GetDefaultMobileCPUAllocator();
51
+
52
+ // The CPUCachingAllocator is experimental and might disappear in the future.
53
+ // The only place that uses it is in StaticRuntime.
54
+ // Set the CPU Caching Allocator
55
+ C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0);
56
+ // Get the CPU Caching Allocator
57
+ C10_API Allocator* GetCPUCachingAllocator();
58
+
59
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/TypeTraits.h>
4
+ #include <type_traits>
5
+
6
+ namespace c10 {
7
+
8
+ /**
9
+ * Represent a function pointer as a C++ type.
10
+ * This allows using the function pointer as a type
11
+ * in a template and calling it from inside the template
12
+ * allows the compiler to inline the call because it
13
+ * knows the function pointer at compile time.
14
+ *
15
+ * Example 1:
16
+ * int add(int a, int b) {return a + b;}
17
+ * using Add = TORCH_FN_TYPE(add);
18
+ * template<class Func> struct Executor {
19
+ * int execute(int a, int b) {
20
+ * return Func::func_ptr()(a, b);
21
+ * }
22
+ * };
23
+ * Executor<Add> executor;
24
+ * EXPECT_EQ(3, executor.execute(1, 2));
25
+ *
26
+ * Example 2:
27
+ * int add(int a, int b) {return a + b;}
28
+ * template<class Func> int execute(Func, int a, int b) {
29
+ * return Func::func_ptr()(a, b);
30
+ * }
31
+ * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2));
32
+ */
33
+ template <class FuncType_, FuncType_* func_ptr_>
34
+ struct CompileTimeFunctionPointer final {
35
+ static_assert(
36
+ guts::is_function_type<FuncType_>::value,
37
+ "TORCH_FN can only wrap function types.");
38
+ using FuncType = FuncType_;
39
+
40
+ static constexpr FuncType* func_ptr() {
41
+ return func_ptr_;
42
+ }
43
+ };
44
+
45
+ template <class T>
46
+ struct is_compile_time_function_pointer : std::false_type {};
47
+ template <class FuncType, FuncType* func_ptr>
48
+ struct is_compile_time_function_pointer<
49
+ CompileTimeFunctionPointer<FuncType, func_ptr>> : std::true_type {};
50
+
51
+ } // namespace c10
52
+
53
+ #define TORCH_FN_TYPE(func) \
54
+ ::c10::CompileTimeFunctionPointer< \
55
+ std::remove_pointer_t<std::remove_reference_t<decltype(func)>>, \
56
+ func>
57
+ #define TORCH_FN(func) TORCH_FN_TYPE(func)()
parrot/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/core/SymBool.h>
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/SmallVector.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ #include <algorithm>
9
+ #include <cstdint>
10
+
11
+ namespace c10 {
12
+
13
+ template <typename T>
14
+ bool _compute_contiguous(ArrayRef<T> sizes, ArrayRef<T> strides, T numel) {
15
+ bool is_contiguous = true;
16
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(numel, 0))) {
17
+ return is_contiguous;
18
+ }
19
+ T z = 1;
20
+ // NB: make sure we do signed arithmetic
21
+ for (int64_t d = int64_t(sizes.size()) - 1; d >= 0; d--) {
22
+ const auto& size_d = sizes[d];
23
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
24
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(strides[d], z))) {
25
+ z *= size_d;
26
+ } else {
27
+ is_contiguous = false;
28
+ break;
29
+ }
30
+ }
31
+ }
32
+ return is_contiguous;
33
+ }
34
+
35
+ template <typename T>
36
+ bool _compute_channels_last_contiguous_2d(
37
+ ArrayRef<T> sizes,
38
+ ArrayRef<T> strides) {
39
+ // Please don't combine these code, constant array is used here to let
40
+ // compiler fully unroll the loop to get better performance
41
+ switch (sizes.size()) {
42
+ case 4: {
43
+ T expected = 1;
44
+ for (auto& d : {1, 3, 2, 0}) {
45
+ const auto& size_d = sizes[d];
46
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
47
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) {
48
+ return false;
49
+ }
50
+ expected *= size_d;
51
+ }
52
+ }
53
+ return true;
54
+ }
55
+ // NOLINTNEXTLINE(bugprone-branch-clone)
56
+ case 3:
57
+ // TODO dim == 3 case will be enabled once it is fully tested
58
+ return false;
59
+ default:
60
+ return false;
61
+ }
62
+ }
63
+
64
+ template <typename T>
65
+ bool _compute_channels_last_contiguous_3d(
66
+ ArrayRef<T> sizes,
67
+ ArrayRef<T> strides) {
68
+ // Please don't combine these code, constant array is used here to let
69
+ // compiler fully unroll the loop to get better performance
70
+ switch (sizes.size()) {
71
+ case 5: {
72
+ T expected = 1;
73
+ for (auto& d : {1, 4, 3, 2, 0}) {
74
+ const auto& size_d = sizes[d];
75
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
76
+ if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) {
77
+ return false;
78
+ }
79
+ expected *= size_d;
80
+ }
81
+ }
82
+ return true;
83
+ }
84
+ // NOLINTNEXTLINE(bugprone-branch-clone)
85
+ case 4:
86
+ // TODO dim == 4 case will be enabled once it is fully tested
87
+ return false;
88
+ default:
89
+ return false;
90
+ }
91
+ }
92
+
93
+ template <typename T>
94
+ bool _compute_non_overlapping_and_dense(
95
+ ArrayRef<T> sizes,
96
+ ArrayRef<T> strides) {
97
+ auto dim = sizes.size();
98
+ if (dim == 1) {
99
+ return sizes[0] < 2 || strides[0] == 1;
100
+ }
101
+ SmallVector<int64_t, 5> perm;
102
+ perm.resize(dim);
103
+ for (const auto i : c10::irange(dim)) {
104
+ perm[i] = i;
105
+ }
106
+ // Sort by strides, leaving 0 and 1 sized dims at the end of the array
107
+ std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) {
108
+ if (sizes[a] < 2) {
109
+ return false;
110
+ } else if (sizes[b] < 2) {
111
+ return true;
112
+ }
113
+ return strides[a] < strides[b];
114
+ });
115
+ T require_stride = 1;
116
+ for (const auto i : c10::irange(dim)) {
117
+ const auto& size_perm_i = sizes[perm[i]];
118
+ if (size_perm_i < 2) {
119
+ return true;
120
+ }
121
+ if (strides[perm[i]] != require_stride) {
122
+ return false;
123
+ }
124
+ require_stride *= size_perm_i;
125
+ }
126
+ return true;
127
+ }
128
+
129
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/macros/Macros.h>
7
+ #include <cstddef>
8
+
9
+ namespace c10 {
10
+
11
+ using CopyBytesFunction = void (*)(
12
+ size_t nbytes,
13
+ const void* src,
14
+ Device src_device,
15
+ void* dst,
16
+ Device dst_device);
17
+
18
+ struct C10_API _CopyBytesFunctionRegisterer {
19
+ _CopyBytesFunctionRegisterer(
20
+ DeviceType from,
21
+ DeviceType to,
22
+ CopyBytesFunction func_sync,
23
+ CopyBytesFunction func_async = nullptr);
24
+ };
25
+
26
+ #define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) \
27
+ namespace { \
28
+ static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \
29
+ g_copy_function)(from, to, __VA_ARGS__); \
30
+ }
31
+
32
+ /*
33
+ * WARNING: Implementations for this function are currently registered from
34
+ * ATen and caffe2, not yet from c10. Don't use this if not either ATen
35
+ * or caffe2 is present as well.
36
+ * We can't move them yet, because the CUDA implementations aren't unified yet
37
+ * between ATen and caffe2.
38
+ * We're planning to move the implementations into c10/backend/xxx
39
+ * to make c10 self contained again.
40
+ */
41
+ C10_API void CopyBytes(
42
+ size_t nbytes,
43
+ const void* src,
44
+ Device src_device,
45
+ void* dst,
46
+ Device dst_device,
47
+ bool async);
48
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace caffe2 {
7
+ class TypeMeta;
8
+ } // namespace caffe2
9
+
10
+ namespace c10 {
11
+ C10_API void set_default_dtype(caffe2::TypeMeta dtype);
12
+ C10_API const caffe2::TypeMeta get_default_dtype();
13
+ C10_API ScalarType get_default_dtype_as_scalartype();
14
+ C10_API const caffe2::TypeMeta get_default_complex_dtype();
15
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/core/Layout.h>
6
+ #include <c10/core/ScalarType.h>
7
+ #include <c10/util/typeid.h>
8
+
9
+ namespace c10 {
10
+
11
+ struct TensorOptions;
12
+
13
+ /// Like TensorOptions, but all fields are guaranteed to be filled.
14
+ struct DefaultTensorOptions {
15
+ DefaultTensorOptions() = default;
16
+
17
+ caffe2::TypeMeta dtype() const noexcept {
18
+ return dtype_;
19
+ }
20
+ Device device() const noexcept {
21
+ return device_;
22
+ }
23
+ Layout layout() const noexcept {
24
+ return layout_;
25
+ }
26
+ bool requires_grad() const noexcept {
27
+ return requires_grad_;
28
+ }
29
+
30
+ // Defined in TensorOptions.h
31
+ inline DefaultTensorOptions& merge(const TensorOptions& options);
32
+
33
+ private:
34
+ caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 64-bit
35
+ Device device_ = at::kCPU; // 32-bit
36
+ Layout layout_ = at::kStrided; // 8-bit
37
+ bool requires_grad_ = false; // 8-bit
38
+ };
39
+
40
+ inline const DefaultTensorOptions& getDefaultTensorOptions() {
41
+ static const auto options = DefaultTensorOptions();
42
+ return options;
43
+ }
44
+
45
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/Device.h ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <cstddef>
8
+ #include <cstdint>
9
+ #include <functional>
10
+ #include <iosfwd>
11
+ #include <string>
12
+
13
+ namespace c10 {
14
+
15
+ /// An index representing a specific device; e.g., the 1 in GPU 1.
16
+ /// A DeviceIndex is not independently meaningful without knowing
17
+ /// the DeviceType it is associated; try to use Device rather than
18
+ /// DeviceIndex directly.
19
+ using DeviceIndex = int8_t;
20
+
21
+ /// Represents a compute device on which a tensor is located. A device is
22
+ /// uniquely identified by a type, which specifies the type of machine it is
23
+ /// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
24
+ /// specific compute device when there is more than one of a certain type. The
25
+ /// device index is optional, and in its defaulted state represents (abstractly)
26
+ /// "the current device". Further, there are two constraints on the value of the
27
+ /// device index, if one is explicitly stored:
28
+ /// 1. A negative index represents the current device, a non-negative index
29
+ /// represents a specific, concrete device,
30
+ /// 2. When the device type is CPU, the device index must be zero.
31
+ struct C10_API Device final {
32
+ using Type = DeviceType;
33
+
34
+ /// Constructs a new `Device` from a `DeviceType` and an optional device
35
+ /// index.
36
+ /* implicit */ Device(DeviceType type, DeviceIndex index = -1)
37
+ : type_(type), index_(index) {
38
+ validate();
39
+ }
40
+
41
+ /// Constructs a `Device` from a string description, for convenience.
42
+ /// The string supplied must follow the following schema:
43
+ /// `(cpu|cuda)[:<device-index>]`
44
+ /// where `cpu` or `cuda` specifies the device type, and
45
+ /// `:<device-index>` optionally specifies a device index.
46
+ /* implicit */ Device(const std::string& device_string);
47
+
48
+ /// Returns true if the type and index of this `Device` matches that of
49
+ /// `other`.
50
+ bool operator==(const Device& other) const noexcept {
51
+ return this->type_ == other.type_ && this->index_ == other.index_;
52
+ }
53
+
54
+ /// Returns true if the type or index of this `Device` differs from that of
55
+ /// `other`.
56
+ bool operator!=(const Device& other) const noexcept {
57
+ return !(*this == other);
58
+ }
59
+
60
+ /// Sets the device index.
61
+ void set_index(DeviceIndex index) {
62
+ index_ = index;
63
+ }
64
+
65
+ /// Returns the type of device this is.
66
+ DeviceType type() const noexcept {
67
+ return type_;
68
+ }
69
+
70
+ /// Returns the optional index.
71
+ DeviceIndex index() const noexcept {
72
+ return index_;
73
+ }
74
+
75
+ /// Returns true if the device has a non-default index.
76
+ bool has_index() const noexcept {
77
+ return index_ != -1;
78
+ }
79
+
80
+ /// Return true if the device is of CUDA type.
81
+ bool is_cuda() const noexcept {
82
+ return type_ == DeviceType::CUDA;
83
+ }
84
+
85
+ /// Return true if the device is of PrivateUse1 type.
86
+ bool is_privateuseone() const noexcept {
87
+ return type_ == DeviceType::PrivateUse1;
88
+ }
89
+
90
+ /// Return true if the device is of MPS type.
91
+ bool is_mps() const noexcept {
92
+ return type_ == DeviceType::MPS;
93
+ }
94
+
95
+ /// Return true if the device is of HIP type.
96
+ bool is_hip() const noexcept {
97
+ return type_ == DeviceType::HIP;
98
+ }
99
+
100
+ /// Return true if the device is of VE type.
101
+ bool is_ve() const noexcept {
102
+ return type_ == DeviceType::VE;
103
+ }
104
+
105
+ /// Return true if the device is of XPU type.
106
+ bool is_xpu() const noexcept {
107
+ return type_ == DeviceType::XPU;
108
+ }
109
+
110
+ /// Return true if the device is of IPU type.
111
+ bool is_ipu() const noexcept {
112
+ return type_ == DeviceType::IPU;
113
+ }
114
+
115
+ /// Return true if the device is of XLA type.
116
+ bool is_xla() const noexcept {
117
+ return type_ == DeviceType::XLA;
118
+ }
119
+
120
+ /// Return true if the device is of MTIA type.
121
+ bool is_mtia() const noexcept {
122
+ return type_ == DeviceType::MTIA;
123
+ }
124
+
125
+ /// Return true if the device is of HPU type.
126
+ bool is_hpu() const noexcept {
127
+ return type_ == DeviceType::HPU;
128
+ }
129
+
130
+ /// Return true if the device is of Lazy type.
131
+ bool is_lazy() const noexcept {
132
+ return type_ == DeviceType::Lazy;
133
+ }
134
+
135
+ /// Return true if the device is of Vulkan type.
136
+ bool is_vulkan() const noexcept {
137
+ return type_ == DeviceType::Vulkan;
138
+ }
139
+
140
+ /// Return true if the device is of Metal type.
141
+ bool is_metal() const noexcept {
142
+ return type_ == DeviceType::Metal;
143
+ }
144
+
145
+ /// Return true if the device is of MAIA type.
146
+ bool is_maia() const noexcept {
147
+ return type_ == DeviceType::MAIA;
148
+ }
149
+
150
+ /// Return true if the device is of META type.
151
+ bool is_meta() const noexcept {
152
+ return type_ == DeviceType::Meta;
153
+ }
154
+
155
+ /// Return true if the device is of CPU type.
156
+ bool is_cpu() const noexcept {
157
+ return type_ == DeviceType::CPU;
158
+ }
159
+
160
+ /// Return true if the device supports arbitrary strides.
161
+ bool supports_as_strided() const noexcept {
162
+ return type_ != DeviceType::IPU && type_ != DeviceType::XLA &&
163
+ type_ != DeviceType::Lazy && type_ != DeviceType::MTIA;
164
+ }
165
+
166
+ /// Same string as returned from operator<<.
167
+ std::string str() const;
168
+
169
+ private:
170
+ DeviceType type_;
171
+ DeviceIndex index_ = -1;
172
+ void validate() {
173
+ // Removing these checks in release builds noticeably improves
174
+ // performance in micro-benchmarks.
175
+ // This is safe to do, because backends that use the DeviceIndex
176
+ // have a later check when we actually try to switch to that device.
177
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
178
+ index_ >= -1,
179
+ "Device index must be -1 or non-negative, got ",
180
+ static_cast<int>(index_));
181
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
182
+ !is_cpu() || index_ <= 0,
183
+ "CPU device index must be -1 or zero, got ",
184
+ static_cast<int>(index_));
185
+ }
186
+ };
187
+
188
+ C10_API std::ostream& operator<<(std::ostream& stream, const Device& device);
189
+
190
+ } // namespace c10
191
+
192
+ namespace std {
193
+ template <>
194
+ struct hash<c10::Device> {
195
+ size_t operator()(c10::Device d) const noexcept {
196
+ // Are you here because this static assert failed? Make sure you ensure
197
+ // that the bitmasking code below is updated accordingly!
198
+ static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit");
199
+ static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit");
200
+ // Note [Hazard when concatenating signed integers]
201
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
202
+ // We must first convert to a same-sized unsigned type, before promoting to
203
+ // the result type, to prevent sign extension when any of the values is -1.
204
+ // If sign extension occurs, you'll clobber all of the values in the MSB
205
+ // half of the resulting integer.
206
+ //
207
+ // Technically, by C/C++ integer promotion rules, we only need one of the
208
+ // uint32_t casts to the result type, but we put in both for explicitness's
209
+ // sake.
210
+ uint32_t bits = static_cast<uint32_t>(static_cast<uint8_t>(d.type()))
211
+ << 16 |
212
+ static_cast<uint32_t>(static_cast<uint8_t>(d.index()));
213
+ return std::hash<uint32_t>{}(bits);
214
+ }
215
+ };
216
+ } // namespace std
parrot/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <c10/core/Allocator.h>
2
+ #include <c10/util/Exception.h>
3
+ #include <cstddef>
4
+ #include <cstdint>
5
+ #include <type_traits>
6
+
7
+ namespace c10 {
8
+
9
+ template <typename T>
10
+ class DeviceArray {
11
+ public:
12
+ DeviceArray(c10::Allocator& allocator, size_t size)
13
+ : data_ptr_(allocator.allocate(size * sizeof(T))) {
14
+ static_assert(std::is_trivial<T>::value, "T must be a trivial type");
15
+ TORCH_INTERNAL_ASSERT(
16
+ 0 == (reinterpret_cast<intptr_t>(data_ptr_.get()) % alignof(T)),
17
+ "c10::DeviceArray: Allocated memory is not aligned for this data type");
18
+ }
19
+
20
+ T* get() {
21
+ return static_cast<T*>(data_ptr_.get());
22
+ }
23
+
24
+ private:
25
+ c10::DataPtr data_ptr_;
26
+ };
27
+
28
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h ADDED
@@ -0,0 +1,746 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <cstddef>
6
+ #include <cstdint>
7
+ #include <functional>
8
+ #include <ostream>
9
+ #include <string>
10
+
11
+ namespace c10 {
12
+
13
+ // Semantically, each value of BackendComponent identifies a "backend" for our
14
+ // dispatch. Some functionalities that we may dispatch to are allowed to
15
+ // register different handlers for each backend. The BackendComponent is then
16
+ // used to figure out which backend implementation to dispatch to.
17
+
18
+ // In implementation terms, the backend component identifies a specific "bit" in
19
+ // a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom
20
+ // ~12 "BackendComponent" bits, while the remaining upper bits are assigned to
21
+ // functionalities. When we encounter a functionality bit that is known to be
22
+ // customizable per-backend, then we also look at the lower BackendComponent
23
+ // bits and take the highest bit to determine which backend's implementation to
24
+ // use.
25
+
26
+ // WARNING! If you add a new backend component to the end of this list,
27
+ // make sure you register it before Meta.
28
+ // Meta must be at the end so that meta key in tls triggers meta kernels.
29
+ // (But you shouldn't: private use keys should have higher precedence than all
30
+ // built-in keys)
31
+
32
+ // If you add a new (non-privateuse) backend here,
33
+ // make sure to add an Autograd<Backend> fallthrough kernel
34
+ // in aten/src/ATen/core/VariableFallbackKernel.cpp
35
+
36
+ #define C10_FORALL_BACKEND_COMPONENTS(_, extra) \
37
+ _(CPU, extra) \
38
+ _(CUDA, extra) \
39
+ _(HIP, extra) \
40
+ _(XLA, extra) \
41
+ _(MPS, extra) \
42
+ _(IPU, extra) \
43
+ _(XPU, extra) \
44
+ _(HPU, extra) \
45
+ _(VE, extra) \
46
+ _(Lazy, extra) \
47
+ _(MTIA, extra) \
48
+ _(PrivateUse1, extra) \
49
+ _(PrivateUse2, extra) \
50
+ _(PrivateUse3, extra) \
51
+ _(Meta, extra)
52
+
53
+ // WARNING! If we add a new per-backend functionality key that has higher
54
+ // priority than Autograd, then make sure you update EndOfRuntimeBackendKeys
55
+
56
+ #define C10_FORALL_FUNCTIONALITY_KEYS(_) \
57
+ _(Dense, ) \
58
+ _(Quantized, Quantized) \
59
+ _(Sparse, Sparse) \
60
+ _(SparseCsr, SparseCsr) \
61
+ _(NestedTensor, NestedTensor) \
62
+ _(AutogradFunctionality, Autograd)
63
+
64
+ enum class BackendComponent : uint8_t {
65
+
66
+ // A "backend" is colloquially used to refer to handlers for dispatch
67
+ // which actually implement the numerics of an operation in question.
68
+ //
69
+ // Due to the nature of the enum, these backends are specified in
70
+ // an ordered way, but for most backends this order is not semantically
71
+ // meaningful (e.g., it's valid to reorder these backends without changing
72
+ // semantics). The only situation when backend ordering is meaningful
73
+ // is when the backend participates in multiple dispatch with another
74
+ // backend; e.g., CPU and CUDA (cuda must have higher priority).
75
+
76
+ // These keys don't correspond to individual kernels.
77
+ // Instead, they represent the backends that are allowed to override specific
78
+ // pieces of functionality:
79
+ // - dense kernels (e.g. DispatchKey::CPU)
80
+ // - sparse kernels (e.g. DispatchKey::SparseCPU)
81
+ // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
82
+ // - autograd kernels (e.g. DispatchKey::AutogradCPU)
83
+ // We reserve space in the runtime operator table for this full cross product
84
+ // of
85
+ // [backends in this enum] x [keys below that are explicitly marked as having
86
+ // per-backend functionality]
87
+ //
88
+ // A meta tensor is a tensor without any data associated with it. (They
89
+ // have also colloquially been referred to as tensors on the "null" device).
90
+ // A meta tensor can be used to dry run operators without actually doing any
91
+ // computation, e.g., add on two meta tensors would give you another meta
92
+ // tensor with the output shape and dtype, but wouldn't actually add anything.
93
+
94
+ InvalidBit = 0,
95
+ #define DEFINE_BACKEND_COMPONENT(n, _) n##Bit,
96
+ C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused)
97
+ #undef DEFINE_BACKEND_COMPONENT
98
+
99
+ // Define an alias to represent end of backend dispatch keys.
100
+ // If you add new backend keys after PrivateUse3, please also update it here.
101
+ EndOfBackendKeys = MetaBit,
102
+ };
103
+
104
+ // Semantically, a dispatch key identifies a possible "level" in our
105
+ // dispatch, for which a handler may be registered. Each handler corresponds
106
+ // to a type of functionality.
107
+ //
108
+ // In implementation terms, the dispatch key identifies a specific "bit" in a
109
+ // DispatchKeySet. Higher bit indexes get handled by dispatching first (because
110
+ // we "count leading zeros" when we extract the highest priority dispatch
111
+ // key.)
112
+ //
113
+ // Note [DispatchKey Classification]
114
+ // This enum actually contains several types of keys, which are explained
115
+ // in more detail further down:
116
+ // (1) non-customizable backends (e.g. FPGA)
117
+ // (2) non-customizable functionalities (e.g. Functionalize)
118
+ // (3) functionalized that are customizable per backend (e.g. Dense, Sparse,
119
+ // AutogradFunctionality) (4) per-backend instances of customizable
120
+ // functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g.
121
+ // CompositeImplicitAutograd)
122
+ //
123
+ // Of the categories above, it's important to note:
124
+ // (a) which keys are assigned individual bits in a DispatchKeySet
125
+ // (b) which keys are assigned individual slots in the runtime operator table
126
+ // ("Runtime keys")
127
+ //
128
+ // (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet.
129
+ // (1), (2) and (4) all get their own dedicated slots in the runtime operator
130
+ // table.
131
+
132
+ // See Note [DispatchKeySet Internal Representation] for more details.
133
+ //
134
+ // NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py
135
+ enum class DispatchKey : uint16_t {
136
+
137
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
138
+ // This is not a "real" functionality, but it exists to give us a "nullopt"
139
+ // element we can return for cases when a DispatchKeySet contains no elements.
140
+ // You can think a more semantically accurate definition of DispatchKey is:
141
+ //
142
+ // using DispatchKey = optional<RealDispatchKey>
143
+ //
144
+ // and Undefined == nullopt. We didn't actually represent
145
+ // it this way because optional<RealDispatchKey> would take two
146
+ // words, when DispatchKey fits in eight bits.
147
+
148
+ Undefined = 0,
149
+
150
+ // Define an alias for Undefined to represent CatchAll (long term
151
+ // this will get eliminated, but for now it's convenient)
152
+ CatchAll = Undefined,
153
+
154
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ //
155
+ // Every value in the enum (up to EndOfFunctionalityKeys)
156
+ // corresponds to an individual "functionality" that can be dispatched to.
157
+ // This is represented in the DispatchKeySet by assigning each of these enum
158
+ // values
159
+ // to each of the remaining (64 - len(BackendComponent)) bits.
160
+ //
161
+ // Most of these functionalities have a single handler assigned to them,
162
+ // making them "runtime keys".
163
+ // They map to a single slot in the runtime operator table.
164
+ //
165
+ // A few functionalities are allowed to be customizable per backend.
166
+ // See [Note: Per-Backend Functionality Dispatch Keys] for details.
167
+
168
+ // See [Note: Per-Backend Functionality Dispatch Keys]
169
+ Dense,
170
+
171
+ // Below are non-extensible backends.
172
+ // These are backends that currently don't have their own overrides for
173
+ // Autograd/Sparse/Quantized kernels,
174
+ // and we therefore don't waste space in the runtime operator table allocating
175
+ // space for them.
176
+ // If any of these backends ever need to customize, e.g., Autograd, then we'll
177
+ // need to add a DispatchKey::*Bit for them.
178
+
179
+ // TODO: put this in BackendComponents
180
+ FPGA, // Xilinx support lives out of tree at
181
+ // https://gitlab.com/pytorch-complex/vitis_kernels
182
+
183
+ // TODO: put this in BackendComponents
184
+ // MAIA backend lives out of tree
185
+ // - test/cpp_extensions/maia_extension.cpp
186
+ // - test/test_torch.py
187
+ // - aten/src/ATen/test/extension_backend_test.cpp
188
+ MAIA,
189
+
190
+ Vulkan, // TODO: put this in BackendComponents
191
+ Metal, // TODO: put this in BackendComponents
192
+
193
+ // See [Note: Per-Backend Functionality Dispatch Keys]
194
+ Quantized,
195
+
196
+ // This backend is to support custom RNGs; it lets you go
197
+ // to a different kernel if you pass in a generator that is not a
198
+ // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this
199
+ // key:
200
+ // 1) set it as a second parameter of at::Generator constructor call in
201
+ // the user-defined PRNG class.
202
+ // 2) use it as a dispatch key while registering custom kernels
203
+ // (templatized kernels specialized for user-defined PRNG class)
204
+ // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp
205
+ CustomRNGKeyId,
206
+
207
+ // TODO: Make Mkldnn a functionality key, so we can give it Meta
208
+ // support
209
+ // Here are backends which specify more specialized operators
210
+ // based on the layout of the tensor. Note that the sparse backends
211
+ // are one case where ordering matters: sparse multi-dispatches with
212
+ // the corresponding dense tensors, and must be handled before them.
213
+ MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp
214
+ // NB: not to be confused with MKLDNN, which is Caffe2 only
215
+
216
+ // See [Note: Per-Backend Functionality Dispatch Keys]
217
+ Sparse,
218
+
219
+ SparseCsr,
220
+
221
+ NestedTensor,
222
+
223
+ // In some situations, it is not immediately obvious what the correct
224
+ // backend for a function is, because the function in question doesn't
225
+ // have any "tensor" arguments. In this case, a BackendSelect function
226
+ // can be registered to implement the custom determination of the
227
+ // correct backend.
228
+ BackendSelect,
229
+
230
+ Python,
231
+
232
+ // Out-of-core key for Fake Tensor in torchdistx.
233
+ // See https://pytorch.org/torchdistx/latest/fake_tensor.html
234
+ // TODO: delete this in favor of Python-implemented fake tensor
235
+ Fake,
236
+ // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key
237
+ // is to insert code after the "autograd subsystem" runs, so this key should
238
+ // be directly after ADInplaceOrView and all of the autograd keys.
239
+ FuncTorchDynamicLayerBackMode,
240
+
241
+ // Alias and mutation removal.
242
+ // If some backends want to opt into only alias removal or only mutation
243
+ // removal,
244
+ // we can consider adding separate keys dedicated to those individual passes.
245
+ // See Note [Functionalization Pass In Core] for details.
246
+ Functionalize,
247
+
248
+ // The named dispatch key is set for any tensors with named dimensions.
249
+ // Although we have a dispatch key for named tensors, for historical reasons,
250
+ // this dispatch key doesn't do any of the substantive functionality for named
251
+ // tensor (though, hypothetically, it could!) At the moment, it's just
252
+ // responsible for letting us give good error messages when operations
253
+ // don't support named tensors.
254
+ //
255
+ // NB: If you ever consider moving named tensor functionality into
256
+ // this dispatch key, note that it might be necessary to add another dispatch
257
+ // key that triggers before composite operators, in case a composite operator
258
+ // has named dimension propagation that doesn't match that of its
259
+ // constituent parts.
260
+ // TODO: delete this once torchdim lands in functorch
261
+ Named,
262
+
263
+ // The Conjugate dispatch key is set for any tensors that need to perform
264
+ // conjugation
265
+ // This is implemented at a dispatch level right before any backends run
266
+ Conjugate,
267
+
268
+ // The Negative dispatch key is set for any tensors that need to perform
269
+ // negation
270
+ // This is implemented at a dispatch level right before any backends run
271
+ Negative,
272
+
273
+ ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp
274
+
275
+ // Note [ADInplaceOrView key]
276
+ // ADInplaceOrView key is used by inplace or view ops to register a kernel
277
+ // that does additional setup for future autograd computation.
278
+ //
279
+ // 1. For inplace ops this kernel does version bump
280
+ // 2. For view ops this kernel does `as_view` setup where we properly setup
281
+ // DifferentiableViewMeta on the view tensors.
282
+ //
283
+ // For other ops it's fallthrough kernel since there's no extra
284
+ // work to do.
285
+ //
286
+ // Note [Dream: skip VariableType kernel when requires_grad=false]
287
+ //
288
+ // In an ideal world where we can skip VariableType kernel for inputs
289
+ // with requires_grad=false, instead of a fallthrough kernel, we'll
290
+ // register a kernel shown below to all functional ops as well:
291
+ // torch::Tensor my_functional_op(...) {
292
+ // {
293
+ // // Note for every op in VariableType, you need to go through
294
+ // // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the
295
+ // // key to TLS excluded set. If you don't go through it at all,
296
+ // // inplace/view ops called through `at::` inside your backend
297
+ // // kernel will dispatch to ADInplaceOrView kernels and do a lot
298
+ // // of extra work.
299
+ // at::AutoDispatchBelowADInplaceOrView guard;
300
+ // at::redispatch::my_functional_op(...);
301
+ // }
302
+ // }
303
+ // But this work is currently blocked since it adds an extra dispatch
304
+ // for all ops and it's non-trivial overhead at the model level (a few percent).
305
+ // Thus our current approach takes advantage of the fact that every kernel goes
306
+ // through VariableType kernel first and pulls the
307
+ // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops
308
+ // up to the `VariableType` kernel. Thus we only add the extra dispatch
309
+ // to view/inplace ops to minimize its perf impact to real models.
310
+ ADInplaceOrView,
311
+ // Note [Alias Dispatch Key : Autograd]
312
+ // All backends are oblivious to autograd; autograd is handled as a
313
+ // layer which happens on top of all backends. It inspects the autograd
314
+ // metadata of all inputs, determines what autograd metadata should be
315
+ // constructed by the output, and otherwise defers to the backend to
316
+ // actually do the numeric computation. Autograd contains
317
+ // the bulk of this logic.
318
+
319
+ // Autograd is now an alias dispatch key which by default maps to all
320
+ // backend-specific autograd keys.
321
+ // Backend-specific keys allow backends to override the default kernel registered
322
+ // to Autograd key as needed.
323
+ // For example, XLA wants to define autograd for einsum directly.
324
+ // Registering a custom autograd implementation at the XLA key won't work
325
+ // because we process Autograd before XLA. This key has higher priority and
326
+ // gets processed first. You generally should NOT redispatch after handling
327
+ // autograd here (since that would result in execution of the Autograd
328
+ // operator, which you're trying to skip). In AutogradXLA implementations,
329
+ // you are responsible for handling autograd yourself, or deferring to other
330
+ // operators which support autograd.
331
+
332
+ // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and
333
+ // reserved user-defined backends. All other in-tree backends share the
334
+ // AutogradOther key. We can add specific autograd key for those backends
335
+ // upon request.
336
+ AutogradOther,
337
+
338
+ // See [Note: Per-Backend Functionality Dispatch Keys]
339
+ AutogradFunctionality,
340
+
341
+ // NestedTensor is an example of something that isn't a "real backend"
342
+ // (because it mostly consists of redispatching kernels)
343
+ // but it would like to override autograd functionality in C++.
344
+ // We can handle cases like this by adding an extra functionality key
345
+ // exclusively for handling autograd for NestedTensor.
346
+ // lives out of tree at
347
+ // https://github.com/pytorch/nestedtensor
348
+ AutogradNestedTensor,
349
+
350
+ Tracer,
351
+
352
+ // TODO: make Autocast a functionality key
353
+ // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed
354
+ // and inputs are saved for backward in the post-autocast type.
355
+ AutocastCPU,
356
+ AutocastXPU,
357
+ AutocastIPU,
358
+ AutocastHPU,
359
+ AutocastXLA,
360
+ // AutocastXLA is only being used for TPUs. XLA GPUs continue to use
361
+ // AutocastCUDA.
362
+ AutocastCUDA,
363
+ AutocastPrivateUse1,
364
+
365
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
366
+ // There are a number of alternative modes which may want to run before
367
+ // autograd; for example, error checking, tracing, profiling or vmap. They
368
+ // go here.
369
+
370
+ FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype]
371
+
372
+ // Dispatch key for BatchedTensorImpl wrapping a nested tensor.
373
+ BatchedNestedTensor,
374
+
375
+ FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype]
376
+
377
+ // This is the dispatch key for BatchedTensorImpl, which is used to implement
378
+ // batching rules for vmap.
379
+ Batched,
380
+
381
+ // When we are inside a vmap, all tensors dispatch on this key.
382
+ // See Note: [DispatchKey::VmapMode usage] for more details.
383
+ VmapMode,
384
+
385
+ FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype]
386
+
387
+ // Out-of-core key for Deferred Module Initialization in torchdistx.
388
+ // See https://pytorch.org/torchdistx/latest/deferred_init.html
389
+ DeferredInit,
390
+
391
+ // Used by Python key logic to know the set of tls on entry to the dispatcher
392
+ // This kernel assumes it is the top-most non-functorch-related DispatchKey.
393
+ // If you add a key above, make sure to update the fallback implementation for
394
+ // this.
395
+ PythonTLSSnapshot,
396
+
397
+ // This key should be at the very top of the dispatcher
398
+ FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype]
399
+
400
+ // TESTING: This is intended to be a generic testing tensor type id.
401
+ // Don't use it for anything real; its only acceptable use is within a single
402
+ // process test. Use it by creating a TensorImpl with this DispatchKey, and
403
+ // then registering operators to operate on this type id. See
404
+ // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example.
405
+ TESTING_ONLY_GenericWrapper,
406
+
407
+ // TESTING: This is intended to be a generic testing tensor type id.
408
+ // Don't use it for anything real; its only acceptable use is within a single
409
+ // process test. Use it by toggling the mode on and off via
410
+ // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators
411
+ // to operate on this type id. See
412
+ // aten/src/ATen/core/dispatch/backend_fallback_test.cpp
413
+ // for a usage example
414
+ TESTING_ONLY_GenericMode,
415
+
416
+ // This key is used for pre-dispatch tracing in make_fx.
417
+ // It has lower priority than the PythonDispatcher key
418
+ // because we use the PythonDispatcher to intercept the key from python,
419
+ // and avoid having to implement it in C++.
420
+ PreDispatch,
421
+
422
+ // This is a bypass that allows you to skip running the C++ dispatcher
423
+ // entirely
424
+ PythonDispatcher,
425
+
426
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
427
+ EndOfFunctionalityKeys, // End of functionality keys.
428
+
429
+ // ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ //
430
+ // Here are backends which you think of as traditionally specifying
431
+ // how to implement operations on some device.
432
+
433
+ #define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n,
434
+
435
+ #define DEFINE_PER_BACKEND_KEYS(fullname, prefix) \
436
+ StartOf##fullname##Backends, \
437
+ C10_FORALL_BACKEND_COMPONENTS( \
438
+ DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \
439
+ EndOf##fullname##Backends = prefix##Meta,
440
+
441
+ C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS)
442
+
443
+ #undef DEFINE_PER_BACKEND_KEYS
444
+ #undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND
445
+
446
+ EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends,
447
+
448
+ // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ //
449
+ // Note [Alias Dispatch Keys]
450
+ // Alias dispatch keys are synthetic dispatch keys which map to multiple
451
+ // runtime dispatch keys. Alias keys have precedence, but they are always
452
+ // lower precedence than runtime keys. You can register a kernel to an
453
+ // alias key; the kernel might be populated to the mapped runtime keys
454
+ // during dispatch table computation.
455
+ // If a runtime dispatch key has multiple kernels from alias keys, which
456
+ // kernel wins is determined by the precedence of alias keys (but runtime
457
+ // keys always have precedence over alias keys).
458
+ // Alias keys won't be directly called during runtime.
459
+
460
+ // See Note [Alias Dispatch Key : Autograd]
461
+ Autograd,
462
+ CompositeImplicitAutograd, // registered at
463
+ // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp
464
+
465
+ // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from
466
+ // all
467
+ // other alias keysets
468
+ // and so precedence order doesn't matter
469
+ FuncTorchBatchedDecomposition, // registered at
470
+ // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp
471
+ // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is
472
+ // disjoint from all other alias keysets
473
+ CompositeImplicitAutogradNestedTensor, // registered at
474
+ // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp
475
+ CompositeExplicitAutograd, // registered at
476
+ // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
477
+ // See Note [CompositeExplicitAutogradNonFunctional Key]
478
+ CompositeExplicitAutogradNonFunctional, // registered at
479
+ // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
480
+
481
+ // Define an alias key to represent end of alias dispatch keys.
482
+ // If you add new alias keys after Autograd, please also update it here.
483
+ StartOfAliasKeys = Autograd,
484
+ EndOfAliasKeys = CompositeExplicitAutogradNonFunctional, //
485
+
486
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
487
+ // The aliases exist for backwards compatibility reasons, they shouldn't
488
+ // be used
489
+ CPUTensorId = CPU,
490
+ CUDATensorId = CUDA,
491
+ DefaultBackend = CompositeExplicitAutograd,
492
+ PrivateUse1_PreAutograd = AutogradPrivateUse1,
493
+ PrivateUse2_PreAutograd = AutogradPrivateUse2,
494
+ PrivateUse3_PreAutograd = AutogradPrivateUse3,
495
+ Autocast = AutocastCUDA,
496
+ };
497
+
498
+ // Note [Private use DispatchKey]
499
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~
500
+ // Private use tensor IDs are preallocated tensor type IDs for use in user
501
+ // applications. Similar to private use fields in HTTP, they can be used
502
+ // by end users for experimental or private applications, without needing
503
+ // to "standardize" the tensor ID (which would be done by submitting a PR
504
+ // to PyTorch to add your type ID).
505
+ //
506
+ // Private use tensor IDs are appropriate to use if you want to experiment
507
+ // with adding a new tensor type (without having to patch PyTorch first) or
508
+ // have a private, non-distributed application that needs to make use of a
509
+ // new tensor type. Private use tensor IDs are NOT appropriate to use for
510
+ // libraries intended to be distributed to further users: please contact
511
+ // the PyTorch developers to get a type ID registered in this case.
512
+ //
513
+ // We provide two classes of private user tensor id: regular DispatchKeys
514
+ // and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend"
515
+ // DispatchKeys; if you were adding support for a new type of accelerator, you
516
+ // would use a backend DispatchKey, and ideally automatically reuse
517
+ // AutogradOther definitions already defined in PyTorch. AutogradPrivateUse
518
+ // DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for
519
+ // tensors that compose multiple internal tensors, and for cases when the
520
+ // built-in autograd formulas for operators are not appropriate.
521
+
522
+ static_assert(
523
+ (static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) +
524
+ static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys)) <= 64,
525
+ "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)"
526
+ " both map to backend and functionality bits"
527
+ " into a 64-bit bitmask; you must have less than 64 total entries between them");
528
+
529
+ // Check if a DispatchKey is an alias mapping to other runtime keys.
530
+ constexpr bool isAliasDispatchKey(DispatchKey k) {
531
+ return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys;
532
+ }
533
+
534
+ // [Note: Per-Backend Functionality Dispatch Keys]
535
+ // Check if a DispatchKey is a per-backend functionality key
536
+ // Any functionalities that can be customized per-backend should be added here.
537
+ // These keys correspond to functionalities that can be customized individually
538
+ // per backend. While they only take up one bit in the `DispatchKeySet` bitset,
539
+ // they map to (# backends) slots in the operator table.
540
+ // Each of these keys also has a separate set of "runtime keys" in the dispatch
541
+ // key enum, per backend, which *do* map to the individual operator table slots.
542
+ // For example, the "Sparse" key maps to an individual bit in the
543
+ // DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual
544
+ // slots in the runtime operator table.
545
+
546
+ constexpr bool isPerBackendFunctionalityKey(DispatchKey k) {
547
+ if (k == DispatchKey::Dense || k == DispatchKey::Quantized ||
548
+ k == DispatchKey::Sparse || k == DispatchKey::SparseCsr ||
549
+ k == DispatchKey::AutogradFunctionality ||
550
+ k == DispatchKey::NestedTensor) {
551
+ return true;
552
+ } else {
553
+ return false;
554
+ }
555
+ }
556
+
557
+ // Note that this includes Undefined in the total count.
558
+ // BUT EndOfFunctionalityKeys is its own (placeholder) key.
559
+ // e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3.
560
+ // In the above example, there are 3 total functionality keys.
561
+ constexpr uint8_t num_functionality_keys =
562
+ static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys);
563
+
564
+ constexpr uint8_t num_backends =
565
+ static_cast<uint8_t>(BackendComponent::EndOfBackendKeys);
566
+
567
+ // Note [No More Than 16 Backends]
568
+ // Search for this note to find places in the code where the "no more than 16
569
+ // backends" invariant is baked in.
570
+ static_assert(
571
+ static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) <= 16,
572
+ "BackendComponent currently only supports <= 16 backends. If we really need to extend this, \
573
+ there are a few places where this invariant is baked in");
574
+
575
+ constexpr uint8_t numPerBackendFunctionalityKeys() {
576
+ uint8_t count = 0;
577
+ for (uint8_t k = 0; k <= num_functionality_keys; ++k) {
578
+ if (isPerBackendFunctionalityKey(static_cast<DispatchKey>(k)))
579
+ ++count;
580
+ }
581
+ return count;
582
+ }
583
+
584
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
585
+ // See [Note: Trimmed Mobile Dispatch Keys]
586
+ constexpr uint16_t num_runtime_entries = 8;
587
+ #else
588
+ constexpr uint16_t num_runtime_entries = num_functionality_keys +
589
+ (numPerBackendFunctionalityKeys() * (num_backends - 1));
590
+ #endif
591
+
592
+ // See Note [No More Than 16 Backends]
593
+ constexpr uint16_t full_backend_mask =
594
+ (static_cast<uint16_t>(1) << num_backends) - 1;
595
+
596
+ C10_API const char* toString(DispatchKey);
597
+ C10_API const char* toString(BackendComponent);
598
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
599
+ C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
600
+
601
+ C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
602
+
603
+ // Parses a string into a dispatch key.
604
+ // If the string cannot be correctly parsed, throws an exception.
605
+ C10_API c10::DispatchKey parseDispatchKey(const std::string& k);
606
+
607
+ // These are some convenience identifiers for dispatch keys which are
608
+ // shorter to type than their long counterparts. Note that some of these
609
+ // dispatch keys directly correspond to DeviceType; and most APIs that
610
+ // accept DispatchKey also accept DeviceType; e.g.,
611
+ // torch::dispatch(torch::kCPU, ...) is also valid.
612
+ constexpr DispatchKey kAutograd = DispatchKey::Autograd;
613
+
614
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
615
+ // This function relies on the invariant that the dispatch keys between
616
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
617
+ // in the same order as `BackendComponent`.
618
+ constexpr BackendComponent toBackendComponent(DispatchKey k) {
619
+ if (k >= DispatchKey::StartOfDenseBackends &&
620
+ k <= DispatchKey::EndOfDenseBackends) {
621
+ return static_cast<BackendComponent>(
622
+ static_cast<uint8_t>(k) -
623
+ static_cast<uint8_t>(DispatchKey::StartOfDenseBackends));
624
+ } else if (
625
+ k >= DispatchKey::StartOfQuantizedBackends &&
626
+ k <= DispatchKey::EndOfQuantizedBackends) {
627
+ return static_cast<BackendComponent>(
628
+ static_cast<uint8_t>(k) -
629
+ static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends));
630
+ } else if (
631
+ k >= DispatchKey::StartOfSparseBackends &&
632
+ k <= DispatchKey::EndOfSparseBackends) {
633
+ return static_cast<BackendComponent>(
634
+ static_cast<uint8_t>(k) -
635
+ static_cast<uint8_t>(DispatchKey::StartOfSparseBackends));
636
+ } else if (
637
+ k >= DispatchKey::StartOfSparseCsrBackends &&
638
+ k <= DispatchKey::EndOfSparseCsrBackends) {
639
+ return static_cast<BackendComponent>(
640
+ static_cast<uint8_t>(k) -
641
+ static_cast<uint8_t>(DispatchKey::StartOfSparseCsrBackends));
642
+ } else if (
643
+ k >= DispatchKey::StartOfNestedTensorBackends &&
644
+ k <= DispatchKey::EndOfNestedTensorBackends) {
645
+ return static_cast<BackendComponent>(
646
+ static_cast<uint8_t>(k) -
647
+ static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends));
648
+ } else if (
649
+ k >= DispatchKey::StartOfAutogradFunctionalityBackends &&
650
+ k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
651
+ return static_cast<BackendComponent>(
652
+ static_cast<uint8_t>(k) -
653
+ static_cast<uint8_t>(
654
+ DispatchKey::StartOfAutogradFunctionalityBackends));
655
+ } else {
656
+ return BackendComponent::InvalidBit;
657
+ }
658
+ }
659
+
660
+ constexpr DispatchKey toFunctionalityKey(DispatchKey k) {
661
+ if (k <= DispatchKey::EndOfFunctionalityKeys) {
662
+ return k;
663
+ } else if (k <= DispatchKey::EndOfDenseBackends) {
664
+ return DispatchKey::Dense;
665
+ } else if (k <= DispatchKey::EndOfQuantizedBackends) {
666
+ return DispatchKey::Quantized;
667
+ } else if (k <= DispatchKey::EndOfSparseBackends) {
668
+ return DispatchKey::Sparse;
669
+ } else if (k <= DispatchKey::EndOfSparseCsrBackends) {
670
+ return DispatchKey::SparseCsr;
671
+ } else if (k <= DispatchKey::EndOfNestedTensorBackends) {
672
+ return DispatchKey::NestedTensor;
673
+ } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
674
+ return DispatchKey::AutogradFunctionality;
675
+ } else {
676
+ return DispatchKey::Undefined;
677
+ }
678
+ }
679
+
680
+ BackendComponent toBackendComponent(DeviceType device_type);
681
+
682
+ // Given (DispatchKey::Dense, BackendComponent::CUDABit), returns
683
+ // DispatchKey::CUDA.
684
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
685
+ // This function relies on the invariant that the dispatch keys between
686
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
687
+ // in the same order as `BackendComponent`.
688
+ constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
689
+ DispatchKey functionality_k,
690
+ BackendComponent backend_k) {
691
+ if (functionality_k == DispatchKey::Dense) {
692
+ return static_cast<DispatchKey>(
693
+ static_cast<uint8_t>(DispatchKey::StartOfDenseBackends) +
694
+ static_cast<uint8_t>(backend_k));
695
+ }
696
+ if (functionality_k == DispatchKey::Sparse) {
697
+ return static_cast<DispatchKey>(
698
+ static_cast<uint8_t>(DispatchKey::StartOfSparseBackends) +
699
+ static_cast<uint8_t>(backend_k));
700
+ }
701
+ if (functionality_k == DispatchKey::SparseCsr) {
702
+ return static_cast<DispatchKey>(
703
+ static_cast<uint8_t>(DispatchKey::StartOfSparseCsrBackends) +
704
+ static_cast<uint8_t>(backend_k));
705
+ }
706
+ if (functionality_k == DispatchKey::Quantized) {
707
+ return static_cast<DispatchKey>(
708
+ static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends) +
709
+ static_cast<uint8_t>(backend_k));
710
+ }
711
+ if (functionality_k == DispatchKey::NestedTensor) {
712
+ return static_cast<DispatchKey>(
713
+ static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends) +
714
+ static_cast<uint8_t>(backend_k));
715
+ }
716
+ if (functionality_k == DispatchKey::AutogradFunctionality) {
717
+ return static_cast<DispatchKey>(
718
+ static_cast<uint8_t>(
719
+ DispatchKey::StartOfAutogradFunctionalityBackends) +
720
+ static_cast<uint8_t>(backend_k));
721
+ }
722
+ return DispatchKey::Undefined;
723
+ }
724
+
725
+ } // namespace c10
726
+
727
+ namespace torch {
728
+ // Expose the constant, but not the TYPE (DispatchKey is an implementation
729
+ // detail!)
730
+ // NOLINTNEXTLINE(misc-unused-using-decls)
731
+ using c10::kAutograd;
732
+ } // namespace torch
733
+
734
+ // NB: You really shouldn't use this instance; this enum is guaranteed
735
+ // to be pretty small so a regular array should be acceptable.
736
+ namespace std {
737
+ template <>
738
+ struct hash<c10::DispatchKey> {
739
+ typedef size_t result_type;
740
+ typedef c10::DispatchKey argument_type;
741
+
742
+ size_t operator()(c10::DispatchKey x) const {
743
+ return static_cast<size_t>(x);
744
+ }
745
+ };
746
+ } // namespace std
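As a rough illustration of the per-backend key helpers declared above, the following compile-time checks should hold given the enum layout in this header. This is a sketch added for explanation, not part of the commit:

    #include <c10/core/DispatchKey.h>

    // SparseCPU decomposes into the Sparse functionality plus the CPU backend bit,
    // and (Dense, CUDABit) recombines into the runtime key DispatchKey::CUDA.
    static_assert(
        c10::toFunctionalityKey(c10::DispatchKey::SparseCPU) ==
        c10::DispatchKey::Sparse, "");
    static_assert(
        c10::toBackendComponent(c10::DispatchKey::SparseCPU) ==
        c10::BackendComponent::CPUBit, "");
    static_assert(
        c10::toRuntimePerBackendFunctionalityKey(
            c10::DispatchKey::Dense, c10::BackendComponent::CUDABit) ==
        c10::DispatchKey::CUDA, "");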
parrot/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h ADDED
@@ -0,0 +1,125 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Load.h>
6
+ #include <c10/util/TypeCast.h>
7
+
8
+ namespace c10 {
9
+
10
+ // Dynamic type casting utils:
11
+ // - fetch_and_cast
12
+ // - cast_and_store
13
+ //
14
+ // fetch_and_cast fetches a value with dynamic type specified by a ScalarType
15
+ // from a void pointer and casts it to a static type.
16
+ //
17
+ // cast_and_store casts a static typed value into dynamic type specified
18
+ // by a ScalarType, and stores it into a void pointer.
19
+ //
20
+ // NOTE:
21
+ //
22
+ // Dynamic casting allows us to support type promotion without blowing up
23
+ // the combination space: For example, without dynamic cast, in order to
24
+ // implement `add_` with type promotion, we would need something like
25
+ //
26
+ // AT_DISPATCH_ALL_TYPES(output.dtype(),
27
+ // AT_DISPATCH_ALL_TYPES(input1.dtype(),
28
+ // AT_DISPATCH_ALL_TYPES(input2.dtype(),
29
+ // [](arg0_t a, arg1_t b) -> out_t { return a + b; }
30
+ // )
31
+ // )
32
+ // )
33
+ //
34
+ // If we support N dtypes, the above code would generate the a+b kernel for
35
+ // all the N * N * N different supported types, and the compilation time and
36
+ // binary size would become horrible.
37
+ //
38
+ // Dynamic casting might sound like a bad idea in terms of performance.
39
+ // Especially if you ever do it in a loop, you are going to do a billion tests.
40
+ // But in practice it is not as bad as it might look:
41
+ //
42
+ // - on CPU, this is a branch that always has the same outcome, therefore
43
+ // hopefully the branch predictor could do the job pretty well
44
+ // - on GPU, these branches will not diverge, so we could still have the same
45
+ // warp executing the same line of code
46
+ // - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to
47
+ // check an integer does not hurt the performance much because the ALUs would
48
+ // wait for load instructions anyway.
49
+ //
50
+ // For the discussion and benchmark, refer to:
51
+ // - https://github.com/pytorch/pytorch/pull/28343
52
+ // - https://github.com/pytorch/pytorch/pull/28344
53
+ // - https://github.com/pytorch/pytorch/pull/28345
54
+ //
55
+
56
+ #ifdef C10_HOST_DEVICE
57
+ #define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false);
58
+ #else
59
+ #define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
60
+ #endif
61
+
62
+ // Fetch a value with dynamic type src_type from ptr, and cast it to static type
63
+ // dest_t.
64
+ #define FETCH_AND_CAST_CASE(type, scalartype) \
65
+ case ScalarType::scalartype: \
66
+ return c10::convert<dest_t>(c10::load<type>(ptr));
67
+
68
+ template <typename dest_t>
69
+ C10_HOST_DEVICE inline dest_t fetch_and_cast(
70
+ const ScalarType src_type,
71
+ const void* ptr) {
72
+ switch (src_type) {
73
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE)
74
+ FETCH_AND_CAST_CASE(uint16_t, UInt16)
75
+ FETCH_AND_CAST_CASE(uint32_t, UInt32)
76
+ FETCH_AND_CAST_CASE(uint64_t, UInt64)
77
+ default:
78
+ ERROR_UNSUPPORTED_CAST
79
+ }
80
+ return dest_t(0); // just to avoid compiler warning
81
+ }
82
+
83
+ // Cast a value with static type src_t into dynamic dest_type, and store it to
84
+ // ptr.
85
+ #define CAST_AND_STORE_CASE(type, scalartype) \
86
+ case ScalarType::scalartype: \
87
+ *(type*)ptr = c10::convert<type>(value); \
88
+ return;
89
+ template <typename src_t>
90
+ C10_HOST_DEVICE inline void cast_and_store(
91
+ const ScalarType dest_type,
92
+ void* ptr,
93
+ src_t value) {
94
+ switch (dest_type) {
95
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE)
96
+ CAST_AND_STORE_CASE(uint16_t, UInt16)
97
+ CAST_AND_STORE_CASE(uint32_t, UInt32)
98
+ CAST_AND_STORE_CASE(uint64_t, UInt64)
99
+ default:;
100
+ }
101
+ ERROR_UNSUPPORTED_CAST
102
+ }
103
+
104
+ #define DEFINE_UNCASTABLE(T, scalartype_) \
105
+ template <> \
106
+ C10_HOST_DEVICE inline T fetch_and_cast<T>( \
107
+ const ScalarType src_type, const void* ptr) { \
108
+ CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \
109
+ return c10::load<T>(ptr); \
110
+ } \
111
+ template <> \
112
+ C10_HOST_DEVICE inline void cast_and_store<T>( \
113
+ const ScalarType dest_type, void* ptr, T value) { \
114
+ CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
115
+ *(T*)ptr = value; \
116
+ }
117
+
118
+ AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
119
+
120
+ #undef FETCH_AND_CAST_CASE
121
+ #undef CAST_AND_STORE_CASE
122
+ #undef DEFINE_UNCASTABLE
123
+ #undef ERROR_UNSUPPORTED_CAST
124
+
125
+ } // namespace c10
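A short sketch of how fetch_and_cast / cast_and_store from this header are typically used; the helper function names are hypothetical:

    #include <c10/core/DynamicCast.h>

    // Read one element of runtime dtype `src_type` from `ptr` as a float.
    float load_as_float(const void* ptr, c10::ScalarType src_type) {
      return c10::fetch_and_cast<float>(src_type, ptr);
    }

    // Convert `v` to the runtime dtype `dest_type` and write it to `ptr`.
    void store_double_as(void* ptr, c10::ScalarType dest_type, double v) {
      c10::cast_and_store<double>(dest_type, ptr, v);
    }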
parrot/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ #include <c10/core/AutogradState.h>
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+
+ struct C10_API GradMode {
+ static bool is_enabled();
+ static void set_enabled(bool enabled);
+ };
+
+ // A RAII, thread local (!) guard that enables or disables grad mode upon
+ // construction, and sets it back to the original value upon destruction.
+ struct C10_API AutoGradMode {
+ AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
+ GradMode::set_enabled(enabled);
+ }
+ ~AutoGradMode() {
+ GradMode::set_enabled(prev_mode);
+ }
+ bool prev_mode;
+ };
+
+ // A RAII, thread local (!) guard that stops future operations from building
+ // gradients.
+ struct C10_API NoGradGuard : public AutoGradMode {
+ NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
+ };
+
+ // A RAII, thread local (!) guard that enables or disables forward grad mode
+ // upon construction, and sets it back to the original value upon destruction.
+ struct C10_API AutoFwGradMode {
+ AutoFwGradMode(bool enabled)
+ : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
+ AutogradState::get_tls_state().set_fw_grad_mode(enabled);
+ }
+ ~AutoFwGradMode() {
+ AutogradState::get_tls_state().set_fw_grad_mode(prev_mode);
+ }
+ bool prev_mode;
+ };
+
+ } // namespace c10
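A minimal usage sketch of the guards defined above (illustrative only):

    #include <c10/core/GradMode.h>

    void grad_mode_example() {
      {
        // Grad mode is off inside this scope and restored on destruction.
        c10::NoGradGuard no_grad;
        // ... run code that should not record gradients ...
      }
      // Back to whatever the thread-local setting was before the guard.
      bool enabled = c10::GradMode::is_enabled();
      (void)enabled;
    }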
parrot/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h ADDED
@@ -0,0 +1,86 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/AutogradState.h>
4
+ #include <c10/core/DispatchKey.h>
5
+ #include <c10/core/DispatchKeySet.h>
6
+ #include <c10/core/impl/LocalDispatchKeySet.h>
7
+ #include <c10/macros/Export.h>
8
+
9
+ namespace c10 {
10
+
11
+ // A RAII, thread local (!) guard that enables or disables inference mode upon
12
+ // construction, and sets it back to the original value upon destruction.
13
+ struct C10_API InferenceMode {
14
+ // Note [Expected TLS state in InferenceMode]:
15
+ // InferenceMode: ADInplaceOrView not in
16
+ // raw_local_dispatch_key_set.included(),
17
+ // Autograd in raw_local_dispatch_key_set.excluded()
18
+ // GradMode is disabled.
19
+ // NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(),
20
+ // Autograd not in raw_local_dispatch_key_set.excluded()
21
+ // GradMode is enabled by default unless toggled manually
22
+ // through other APIs, e.g. NoGradGuard.
23
+ //
24
+ // Invariant:
25
+ // - ADInplaceOrView is never in the excluded set
26
+ // - Autograd is never in the included set
27
+ // - Setting InferenceMode will set GradMode accordingly, but not vice versa.
28
+ //
29
+ // 1. Why do we put ADInplaceOrView in included set outside InferenceMode?
30
+ //
31
+ // Inplace updates to an inference tensor outside InferenceMode are not
32
+ // allowed. See Note [Inplace update inference tensor] for more details.
33
+ // Without going through ADInplaceOrView kernel, we cannot throw error
34
+ // for `inference_tensor.add_(1)` case.
35
+ //
36
+ // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode?
37
+ //
38
+ // For example:
39
+ // torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true);
40
+ // torch::Tensor k = a + 2;
41
+ // {
42
+ // c10::InferenceMode guard(true);
43
+ // k.add_(2);
44
+ // }
45
+ // `k.add_(2)` still needs to go through the ADInplaceOrView kernel so that it's
46
+ // prepared for future autograd.
47
+ //
48
+ // 3. Why does setting InferenceMode also set GradMode?
49
+ //
50
+ // This is required since InferenceMode is a faster and more restrictive
51
+ // version of NoGradGuard. All runtime checks using GradMode::is_enabled()
52
+ // are applicable to InferenceMode as well, e.g.
53
+ // `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
54
+ InferenceMode(bool enabled = true)
55
+ : prev_mode(AutogradState::get_tls_state()),
56
+ prev_keyset(c10::impl::tls_local_dispatch_key_set()) {
57
+ // Enabling inference mode means disabling grad modes
58
+ // And disabling inference mode means enabling grad modes
59
+ AutogradState::set_tls_state(AutogradState(
60
+ /* grad_mode */ !enabled,
61
+ /* inference_mode */ enabled,
62
+ /* fw_grad_mode */ !enabled,
63
+ /* multithreading_enabled*/ !enabled));
64
+ DispatchKeySet included = enabled
65
+ ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView)
66
+ : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView);
67
+ DispatchKeySet excluded = enabled
68
+ ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset)
69
+ : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset);
70
+ c10::impl::PODLocalDispatchKeySet cur_keyset{};
71
+ cur_keyset.set_included(included);
72
+ cur_keyset.set_excluded(excluded);
73
+ c10::impl::_force_tls_local_dispatch_key_set(cur_keyset);
74
+ }
75
+
76
+ ~InferenceMode() {
77
+ AutogradState::set_tls_state(prev_mode);
78
+ c10::impl::_force_tls_local_dispatch_key_set(prev_keyset);
79
+ }
80
+ static bool is_enabled();
81
+
82
+ private:
83
+ AutogradState prev_mode;
84
+ c10::impl::LocalDispatchKeySet prev_keyset;
85
+ };
86
+ } // namespace c10
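A minimal sketch of using the guard, mirroring the note above (illustrative only):

    #include <c10/core/InferenceMode.h>

    void inference_mode_example() {
      {
        // Disables autograd tracking and enables inference mode for this thread.
        c10::InferenceMode guard(true);
        // ... run inference-only code ...
      }  // Previous TLS state (grad mode, dispatch key set) is restored here.
      bool on = c10::InferenceMode::is_enabled();
      (void)on;
    }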
parrot/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h ADDED
@@ -0,0 +1,290 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/ArrayRef.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ #include <cstdint>
7
+ #include <ostream>
8
+ #include <vector>
9
+
10
+ // Memory format is not a property of a Tensor. It is a way to tell an
11
+ // operator how the result should be organized in memory and nothing more. That
12
+ // means memory format should never be used as a return value for any tensor state
13
+ // interrogation functions (internally and externally).
14
+ //
15
+ // Possible options are:
16
+ // Preserve:
17
+ // If any of the input tensors is in channels_last format, operator output
18
+ // should be in channels_last format
19
+ //
20
+ // Contiguous:
21
+ // Regardless of input tensors format, the output should be contiguous
22
+ // Tensor.
23
+ //
24
+ // ChannelsLast:
25
+ // Regardless of input tensors format, the output should be in channels_last
26
+ // format.
27
+
28
+ namespace c10 {
29
+ enum class MemoryFormat : int8_t {
30
+ Contiguous,
31
+ Preserve,
32
+ ChannelsLast,
33
+ ChannelsLast3d,
34
+ NumOptions
35
+ };
36
+
37
+ // If you are seeing this, it means that this call site was not checked if
38
+ // the memory format could be preserved, and it was switched to old default
39
+ // behaviour of contiguous
40
+ #define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format()
41
+
42
+ inline MemoryFormat get_contiguous_memory_format() {
43
+ return MemoryFormat::Contiguous;
44
+ }
45
+
46
+ inline std::ostream& operator<<(
47
+ std::ostream& stream,
48
+ at::MemoryFormat memory_format) {
49
+ switch (memory_format) {
50
+ case MemoryFormat::Preserve:
51
+ return stream << "Preserve";
52
+ case MemoryFormat::Contiguous:
53
+ return stream << "Contiguous";
54
+ case MemoryFormat::ChannelsLast:
55
+ return stream << "ChannelsLast";
56
+ case MemoryFormat::ChannelsLast3d:
57
+ return stream << "ChannelsLast3d";
58
+ default:
59
+ TORCH_CHECK(false, "Unknown memory format ", memory_format);
60
+ }
61
+ }
62
+
63
+ // Note: Hardcoded the channel last stride indices here to get better
64
+ // performance
65
+ template <typename T>
66
+ inline std::vector<T> get_channels_last_strides_2d(ArrayRef<T> sizes) {
67
+ std::vector<T> strides(sizes.size());
68
+ switch (sizes.size()) {
69
+ case 4:
70
+ strides[1] = 1;
71
+ strides[3] = sizes[1];
72
+ strides[2] = strides[3] * sizes[3];
73
+ strides[0] = strides[2] * sizes[2];
74
+ return strides;
75
+ case 3:
76
+ strides[0] = 1;
77
+ strides[2] = sizes[0];
78
+ strides[1] = strides[2] * sizes[2];
79
+ return strides;
80
+ default:
81
+ TORCH_INTERNAL_ASSERT(
82
+ false, "ChannelsLast2d doesn't support size ", sizes.size());
83
+ }
84
+ }
85
+
86
+ inline std::vector<int64_t> get_channels_last_strides_2d(IntArrayRef sizes) {
87
+ return get_channels_last_strides_2d<int64_t>(sizes);
88
+ }
89
+
90
+ template <typename T>
91
+ std::vector<T> get_channels_last_strides_3d(ArrayRef<T> sizes) {
92
+ std::vector<T> strides(sizes.size());
93
+ switch (sizes.size()) {
94
+ case 5:
95
+ strides[1] = 1;
96
+ strides[4] = sizes[1];
97
+ strides[3] = strides[4] * sizes[4];
98
+ strides[2] = strides[3] * sizes[3];
99
+ strides[0] = strides[2] * sizes[2];
100
+ return strides;
101
+ case 4:
102
+ strides[0] = 1;
103
+ strides[3] = sizes[0];
104
+ strides[2] = strides[3] * sizes[3];
105
+ strides[1] = strides[2] * sizes[2];
106
+ return strides;
107
+ default:
108
+ TORCH_INTERNAL_ASSERT(
109
+ false, "ChannelsLast3d doesn't support size ", sizes.size());
110
+ }
111
+ }
112
+
113
+ inline std::vector<int64_t> get_channels_last_strides_3d(IntArrayRef sizes) {
114
+ return get_channels_last_strides_3d<int64_t>(sizes);
115
+ }
116
+
117
+ // NOTE:
118
+ // Below are Helper functions for is_channels_last_strides_xd.
119
+ // 1. Please do not combine these helper functions, each helper function handles
120
+ // exactly one case of sizes + memory_format, by doing this, the strides indices
121
+ // will be a constant array and we can access it using constant index number,
122
+ // the compiler will fully unroll the loop on strides indices to gain a better
123
+ // performance.
124
+ // 2. No error check in helper function, caller ensures the correctness of the
125
+ // input
126
+ // 3. All helper functions have similar comments, only 1st helper function is
127
+ // commented here.
128
+ template <typename T>
129
+ inline bool is_channels_last_strides_2d_s4(
130
+ const ArrayRef<T> sizes,
131
+ const ArrayRef<T> strides) {
132
+ T min = 0;
133
+ // special case for trivial C dimension. default to NCHW
134
+ if (strides[1] == 0) {
135
+ return false;
136
+ }
137
+ // loop strides indices
138
+ for (auto& d : {1, 3, 2, 0}) {
139
+ if (sizes[d] == 0) {
140
+ return false;
141
+ }
142
+ if (strides[d] < min) {
143
+ return false;
144
+ }
145
+ // Fallback to NCHW as default layout for ambiguous cases
146
+ // This is the flaw of implicit memory_format from strides.
147
+ // N111 tensor with identical strides for size 1 dimension;
148
+ // Two cases could lead us here:
149
+ // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
150
+ // b. N11W contiguous Tensor sliced on the W-dimension.
151
+ // ([N,1,1,1]@[W,W,W,W])
152
+ if (d == 0 && min == strides[1]) {
153
+ return false;
154
+ }
155
+ // This is necessary to:
156
+ // 1. distinguish the memory_format of N1H1;
157
+ // [H, 1, 1, 1] channels_last stride
158
+ // [H, H, 1, 1] contiguous stride
159
+ // 2. permutation of 1C1W:
160
+ // [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
161
+ // [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last
162
+ min = strides[d];
163
+ if (sizes[d] > 1) {
164
+ min *= sizes[d];
165
+ }
166
+ }
167
+ return true;
168
+ }
169
+
170
+ template <typename T>
171
+ inline bool is_channels_last_strides_3d_s5(
172
+ const ArrayRef<T> sizes,
173
+ const ArrayRef<T> strides) {
174
+ T min = 0;
175
+ if (strides[1] == 0) {
176
+ return false;
177
+ }
178
+ for (auto& d : {1, 4, 3, 2, 0}) {
179
+ if (sizes[d] == 0) {
180
+ return false;
181
+ }
182
+ if (strides[d] < min) {
183
+ return false;
184
+ }
185
+ if (d == 0 && min == strides[1]) {
186
+ return false;
187
+ }
188
+ min = strides[d];
189
+ if (sizes[d] > 1) {
190
+ min *= sizes[d];
191
+ }
192
+ }
193
+ return true;
194
+ }
195
+
196
+ // Note [Ambiguous is_channels_last_strides_xd]
197
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
198
+ // The flaw of carrying memory_format implicitly through strides is very hard
199
+ // to WAR properly. issue #24090
200
+ // Without the history of permutation, we can't infer the memory_format of a
201
+ // tensor from the snapshot of its size & stride
202
+ // e.g.
203
+ //
204
+ // 1. We can NOT specify the memory_format of N111 tensor through strides in a
205
+ // meaningful way;
206
+ //
207
+ // 2. Two path that ended up with identical size/stride
208
+ // N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W]
209
+ // NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C]
210
+ // So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer
211
+ // the memory_format of the original tensor.
212
+ //
213
+ // Due to the limitations, our temporary WAR `is_channels_last_strides` does the
214
+ // best effort to infer whether the original memory_format of a tensor is
215
+ // at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered
216
+ // by their importance):
217
+ // 1. Ensure that normal shape manipulation does not accidentally change the
218
+ // MemoryFormat of an existing tensor.
219
+ // 2. Allows user to mark MemoryFormat::ChannelsLast to tensors;
220
+ //
221
+ // The function does so via checking strides of the tensor, including strides of
222
+ // size-1 dimensions. Although conventionally PyTorch implies no restriction on
223
+ // trivial stride (stride for size-1 dimension).
224
+ //
225
+ // Note that this approach is a compromise. We did not solve the problem
226
+ // completely. Many cases we will not be able to infer the correct memory
227
+ // format.
228
+ // The implementation of `is_channels_last_strides` is to serve the objectives:
229
+ // MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental
230
+ // conversion); Best effort to maintain the ChannelsLast flag.
231
+ //
232
+ // Due to the fact that this is not a bulletproof solution, through testing
233
+ // (aten/src/ATen/test/memory_format_test.cpp)
234
+ // a. we ensure that the common tasks are supported;
235
+ // b. we identify corner cases where the implementation compromises.
236
+ //
237
+ // By the time accumulated permutation is enabled to replace implicit
238
+ // memory_format through strides, we should be updating our tests and fixing the
239
+ // issues in our tests.
240
+ //
241
+ // We use Channels Last 2d as an example above.
242
+ // This is a general problem for all the is_channels_last_strides_xd
243
+ // implementation. Please check the helper functions
244
+ // (is_channels_last_strides_*d_s*) for more details.
245
+
246
+ template <typename T>
247
+ inline bool is_channels_last_strides_2d(
248
+ const ArrayRef<T> sizes,
249
+ const ArrayRef<T> strides) {
250
+ switch (sizes.size()) {
251
+ case 4:
252
+ return is_channels_last_strides_2d_s4(sizes, strides);
253
+ // NOLINTNEXTLINE(bugprone-branch-clone)
254
+ case 3:
255
+ // TODO dim == 3 case will be enabled once it is fully tested
256
+ return false;
257
+ default:
258
+ return false;
259
+ }
260
+ }
261
+
262
+ template <typename T>
263
+ inline bool is_channels_last_strides_3d(
264
+ const ArrayRef<T> sizes,
265
+ const ArrayRef<T> strides) {
266
+ switch (sizes.size()) {
267
+ case 5:
268
+ return is_channels_last_strides_3d_s5(sizes, strides);
269
+ // NOLINTNEXTLINE(bugprone-branch-clone)
270
+ case 4:
271
+ // TODO dim == 4 case will be enabled once it is fully tested
272
+ return false;
273
+ default:
274
+ return false;
275
+ }
276
+ }
277
+
278
+ inline bool is_channels_last_strides_2d(
279
+ const IntArrayRef sizes,
280
+ const IntArrayRef strides) {
281
+ return is_channels_last_strides_2d<int64_t>(sizes, strides);
282
+ }
283
+
284
+ inline bool is_channels_last_strides_3d(
285
+ const IntArrayRef sizes,
286
+ const IntArrayRef strides) {
287
+ return is_channels_last_strides_3d<int64_t>(sizes, strides);
288
+ }
289
+
290
+ } // namespace c10
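A small sketch of the stride helpers above, for an assumed NCHW shape of {2, 3, 4, 5} (the shape is an illustrative assumption):

    #include <c10/core/MemoryFormat.h>
    #include <cstdint>
    #include <vector>

    void channels_last_example() {
      std::vector<int64_t> sizes = {2, 3, 4, 5}; // N, C, H, W
      // For this shape the result is {60, 1, 15, 3}: C is the fastest-moving dim.
      std::vector<int64_t> strides =
          c10::get_channels_last_strides_2d(c10::IntArrayRef(sizes));
      // These strides are recognized as channels-last for this shape.
      bool cl = c10::is_channels_last_strides_2d(
          c10::IntArrayRef(sizes), c10::IntArrayRef(strides));
      (void)cl;
    }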
parrot/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h ADDED
@@ -0,0 +1,31 @@
+ #pragma once
+
+ namespace c10 {
+
+ template <typename T>
+ class OptionalRef {
+ public:
+ OptionalRef() : data_(nullptr) {}
+ OptionalRef(const T* data) : data_(data) {
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_);
+ }
+ OptionalRef(const T& data) : data_(&data) {}
+
+ bool has_value() const {
+ return data_ != nullptr;
+ }
+
+ const T& get() const {
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_);
+ return *data_;
+ }
+
+ operator bool() const {
+ return has_value();
+ }
+
+ private:
+ const T* data_;
+ };
+
+ } // namespace c10
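A small sketch of OptionalRef usage. The wrapper does not own the referenced object, so the referent must outlive it; the helper below is hypothetical:

    #include <c10/core/OptionalRef.h>
    #include <c10/util/Exception.h> // for the assert macro used by OptionalRef
    #include <cstddef>
    #include <string>

    std::size_t name_length(const std::string* maybe_name) {
      // Empty when no value is supplied; otherwise a non-owning reference.
      c10::OptionalRef<std::string> ref = maybe_name
          ? c10::OptionalRef<std::string>(*maybe_name)
          : c10::OptionalRef<std::string>();
      if (ref) {
        return ref.get().size();
      }
      return 0;
    }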
parrot/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h ADDED
@@ -0,0 +1,76 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/python_stub.h>
7
+
8
+ #include <atomic>
9
+
10
+ namespace c10 {
11
+
12
+ // A PyHandleCache represents a cached pointer from a C++ object to
13
+ // a Python object that represents that object analogously in Python.
14
+ // Upon a cache hit, the relevant object can be retrieved after a test
15
+ // and then a memory load. Two conditions must hold to be able to use this
16
+ // class:
17
+ //
18
+ // - This must truly be a cache; e.g., the caller must be able to produce
19
+ // the object some other way if the cache misses.
20
+ //
21
+ // - This must truly be a handle; e.g., the Python object referenced by
22
+ // this class must have static lifetime. This means we don't have to
23
+ // maintain strong ownership or deallocate the object when the C++ object
24
+ // dies. Static lifetime is a good idea in conjunction with the cache,
25
+ // since if you are producing a fresh object on miss you won't be
26
+ // maintaining object identity. If you need bidirectional ownership,
27
+ // you will want to factor out the pattern in TensorImpl with
28
+ // resurrection.
29
+ //
30
+ // This cache is expected to not improve perf under torchdeploy, as one
31
+ // interpreter will fill up the cache, and all the interpreters will be
32
+ // unable to use the slot. A potential improvement is to have multiple
33
+ // slots (one per interpreter), which will work in deployment scenarios
34
+ // where there is a stable, fixed number of interpreters. You can also store
35
+ // the relevant state in the Python library, rather than in the non-Python
36
+ // library (although in many cases, this is not convenient, as there may
37
+ // not be a way to conveniently index based on the object.)
38
+ class PyHandleCache {
39
+ public:
40
+ PyHandleCache() : pyinterpreter_(nullptr) {}
41
+
42
+ // Attempt to fetch the pointer from the cache, if the PyInterpreter
43
+ // matches. If it doesn't exist, or the cache entry is not valid,
44
+ // use slow_accessor to get the real pointer value and return that
45
+ // (possibly writing it to the cache, if the cache entry is
46
+ // available.)
47
+ template <typename F>
48
+ PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor)
49
+ const {
50
+ // Note [Memory ordering on Python interpreter tag]
51
+ impl::PyInterpreter* interpreter =
52
+ pyinterpreter_.load(std::memory_order_acquire);
53
+ if (C10_LIKELY(interpreter == self_interpreter)) {
54
+ return data_;
55
+ } else if (interpreter == nullptr) {
56
+ auto* r = slow_accessor();
57
+ impl::PyInterpreter* expected = nullptr;
58
+ // attempt to claim this cache entry with the specified interpreter tag
59
+ if (pyinterpreter_.compare_exchange_strong(
60
+ expected, self_interpreter, std::memory_order_acq_rel)) {
61
+ data_ = r;
62
+ }
63
+ // This shouldn't be possible, as you should be GIL protected
64
+ TORCH_INTERNAL_ASSERT(expected != self_interpreter);
65
+ return r;
66
+ } else {
67
+ return slow_accessor();
68
+ }
69
+ }
70
+
71
+ private:
72
+ mutable std::atomic<impl::PyInterpreter*> pyinterpreter_;
73
+ mutable PyObject* data_{nullptr};
74
+ };
75
+
76
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+ #include <cstdint>
5
+ #include <string>
6
+
7
+ namespace c10 {
8
+
9
+ /**
10
+ * QScheme is an enum that specifies the type of quantization. This has a one
11
+ * to one correspondence with Quantizer
12
+ * Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes.
13
+ * Keep this file in sync with torch/nn/_qscheme.py
14
+ */
15
+ enum class QScheme : uint8_t {
16
+ PER_TENSOR_AFFINE = 0,
17
+ PER_CHANNEL_AFFINE = 1,
18
+ PER_TENSOR_SYMMETRIC = 2,
19
+ PER_CHANNEL_SYMMETRIC = 3,
20
+ PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4,
21
+ COMPILE_TIME_NUM_QSCHEMES = 5,
22
+ };
23
+
24
+ constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE;
25
+ constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE;
26
+ constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC;
27
+ constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC;
28
+ constexpr auto kPerChannelAffineFloatQParams =
29
+ QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS;
30
+ constexpr int COMPILE_TIME_NUM_QSCHEMES =
31
+ static_cast<int>(QScheme::COMPILE_TIME_NUM_QSCHEMES);
32
+
33
+ inline std::string toString(QScheme qscheme) {
34
+ switch (qscheme) {
35
+ case kPerTensorAffine:
36
+ return "per_tensor_affine";
37
+ case kPerChannelAffine:
38
+ return "per_channel_affine";
39
+ case kPerTensorSymmetric:
40
+ return "per_tensor_symmetric";
41
+ case kPerChannelSymmetric:
42
+ return "per_channel_symmetric";
43
+ case kPerChannelAffineFloatQParams:
44
+ return "per_channel_affine_float_qparams";
45
+ default:
46
+ TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast<int>(qscheme));
47
+ }
48
+ }
49
+
50
+ } // namespace c10
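A tiny sketch (not part of the diff) that walks the enum and prints each scheme's name via `toString`; it only assumes the header above is on the include path.

```cpp
#include <c10/core/QScheme.h>
#include <iostream>

int main() {
  // COMPILE_TIME_NUM_QSCHEMES (5) is the count, so values 0..4 are real schemes.
  for (int i = 0; i < c10::COMPILE_TIME_NUM_QSCHEMES; ++i) {
    auto q = static_cast<c10::QScheme>(i);
    std::cout << i << " -> " << c10::toString(q) << "\n";
  }
}
```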
parrot/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Storage.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/UniqueVoidPtr.h>
6
+
7
+ #include <atomic>
8
+ #include <memory>
9
+
10
+ namespace c10 {
11
+
12
+ // A RefcountedDeleterContext object is used as the `ctx` argument for DataPtr
13
+ // to implement a shared DataPtr. Normally, a DataPtr is unique, but we use
14
+ // this custom context and the `refcounted_deleter` function below to make the
15
+ // DataPtr act like a non-unique DataPtr. This context object holds onto an
16
+ // inner context and deleter function which handle the actual deletion of the
17
+ // data when the refcount reaches 0.
18
+ //
19
+ // This shared DataPtr feature is only used when storages are shared between
20
+ // multiple Python interpreters in MultiPy. Before storages had PyObject
21
+ // preservation, interpreters could just share the same StorageImpl instance.
22
+ // But now a StorageImpl can only be associated with one interpreter in order
23
+ // to properly manage a zombie PyObject. So we share storages across Python
24
+ // interpreters by creating a different StorageImpl instance for each one, but
25
+ // they all point to the same data.
26
+ struct C10_API RefcountedDeleterContext {
27
+ RefcountedDeleterContext(void* other_ctx, c10::DeleterFnPtr other_deleter)
28
+ : other_ctx(other_ctx, other_deleter), refcount(1) {}
29
+
30
+ std::unique_ptr<void, c10::DeleterFnPtr> other_ctx;
31
+ std::atomic_int refcount;
32
+ };
33
+
34
+ // `refcounted_deleter` is used as the `ctx_deleter` for DataPtr to implement
35
+ // a shared DataPtr.
36
+ //
37
+ // Warning: This should only be called on a pointer to
38
+ // a RefcountedDeleterContext that was allocated on the heap with `new`,
39
+ // because when the refcount reaches 0, the context is deleted with `delete`
40
+ C10_API void refcounted_deleter(void* ctx_);
41
+
42
+ // If the storage's DataPtr does not use `refcounted_deleter`, replace it with
43
+ // a DataPtr that does, so it can be shared between multiple StorageImpls
44
+ C10_API void maybeApplyRefcountedDeleter(const c10::Storage& storage);
45
+
46
+ // Create a new StorageImpl that points to the same data. If the original
47
+ // StorageImpl's DataPtr does not use `refcounted_deleter`, it will be replaced
48
+ // with one that does
49
+ C10_API c10::Storage newStorageImplFromRefcountedDataPtr(
50
+ const c10::Storage& storage);
51
+
52
+ } // namespace c10
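A hedged sketch (not in the diff) of how these helpers compose: it assumes a CPU storage created through the public Storage API and needs to link against the c10 library.

```cpp
#include <c10/core/CPUAllocator.h>
#include <c10/core/RefcountedDeleter.h>
#include <c10/core/Storage.h>

int main() {
  c10::Storage a(
      c10::Storage::use_byte_size_t(), /*size_bytes=*/16,
      c10::GetCPUAllocator(), /*resizable=*/false);
  // Mint a second StorageImpl that shares a's bytes; the underlying buffer is
  // released only once the shared refcount drops to zero.
  c10::Storage b = c10::newStorageImplFromRefcountedDataPtr(a);
  // b.is_alias_of(a) is expected to hold via isSharedStorageAlias.
  return b.is_alias_of(a) ? 0 : 1;
}
```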
parrot/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h ADDED
@@ -0,0 +1,99 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/python_stub.h>
6
+ #include <utility>
7
+
8
+ namespace c10 {
9
+
10
+ // This is a safe owning holder for a PyObject, akin to pybind11's
11
+ // py::object, with two major differences:
12
+ //
13
+ // - It is in c10/core; i.e., you can use this type in contexts where
14
+ // you do not have a libpython dependency
15
+ //
16
+ // - It is multi-interpreter safe (ala torchdeploy); when you fetch
17
+ // the underlying PyObject* you are required to specify what the current
18
+ // interpreter context is and we will check that you match it.
19
+ //
20
+ // It is INVALID to store a reference to a Tensor object in this way;
21
+ // you should just use TensorImpl directly in that case!
22
+ struct C10_API SafePyObject {
23
+ // Steals a reference to data
24
+ SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
25
+ : data_(data), pyinterpreter_(pyinterpreter) {}
26
+ SafePyObject(SafePyObject&& other) noexcept
27
+ : data_(std::exchange(other.data_, nullptr)),
28
+ pyinterpreter_(other.pyinterpreter_) {}
29
+
30
+ // In principle this could be copyable if we add an incref to PyInterpreter
31
+ // but for now it's easier to just disallow it.
32
+ SafePyObject(SafePyObject const&) = delete;
33
+ SafePyObject& operator=(SafePyObject const&) = delete;
34
+
35
+ ~SafePyObject() {
36
+ if (data_ != nullptr) {
37
+ (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false);
38
+ }
39
+ }
40
+
41
+ c10::impl::PyInterpreter& pyinterpreter() const {
42
+ return *pyinterpreter_;
43
+ }
44
+ PyObject* ptr(const c10::impl::PyInterpreter*) const;
45
+
46
+ // stop tracking the current object, and return it
47
+ PyObject* release() {
48
+ auto rv = data_;
49
+ data_ = nullptr;
50
+ return rv;
51
+ }
52
+
53
+ private:
54
+ PyObject* data_;
55
+ c10::impl::PyInterpreter* pyinterpreter_;
56
+ };
57
+
58
+ // A newtype wrapper around SafePyObject for type safety when a python object
59
+ // represents a specific type. Note that `T` is only used as a tag and isn't
60
+ // actually used for any true purpose.
61
+ template <typename T>
62
+ struct SafePyObjectT : private SafePyObject {
63
+ SafePyObjectT(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
64
+ : SafePyObject(data, pyinterpreter) {}
65
+ SafePyObjectT(SafePyObjectT&& other) noexcept : SafePyObject(std::move(other)) {}
66
+ SafePyObjectT(SafePyObjectT const&) = delete;
67
+ SafePyObjectT& operator=(SafePyObjectT const&) = delete;
68
+
69
+ using SafePyObject::ptr;
70
+ using SafePyObject::pyinterpreter;
71
+ using SafePyObject::release;
72
+ };
73
+
74
+ // Like SafePyObject, but non-owning. Good for references to global PyObjects
75
+ // that will be leaked on interpreter exit. You get a copy constructor/assign
76
+ // this way.
77
+ struct C10_API SafePyHandle {
78
+ SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {}
79
+ SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
80
+ : data_(data), pyinterpreter_(pyinterpreter) {}
81
+
82
+ c10::impl::PyInterpreter& pyinterpreter() const {
83
+ return *pyinterpreter_;
84
+ }
85
+ PyObject* ptr(const c10::impl::PyInterpreter*) const;
86
+ void reset() {
87
+ data_ = nullptr;
88
+ pyinterpreter_ = nullptr;
89
+ }
90
+ operator bool() {
91
+ return data_;
92
+ }
93
+
94
+ private:
95
+ PyObject* data_;
96
+ c10::impl::PyInterpreter* pyinterpreter_;
97
+ };
98
+
99
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h ADDED
@@ -0,0 +1,564 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Deprecated.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Float8_e4m3fn.h>
7
+ #include <c10/util/Float8_e4m3fnuz.h>
8
+ #include <c10/util/Float8_e5m2.h>
9
+ #include <c10/util/Float8_e5m2fnuz.h>
10
+ #include <c10/util/Half.h>
11
+ #include <c10/util/bits.h>
12
+ #include <c10/util/complex.h>
13
+ #include <c10/util/qint32.h>
14
+ #include <c10/util/qint8.h>
15
+ #include <c10/util/quint2x4.h>
16
+ #include <c10/util/quint4x2.h>
17
+ #include <c10/util/quint8.h>
18
+
19
+ #include <array>
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <limits>
23
+ #include <ostream>
24
+ #include <type_traits>
25
+
26
+ namespace c10 {
27
+
28
+ // dummy struct for uint1 to uint7, actual functionality
29
+ // of these dtypes will be implemented in python with Tensor subclass
30
+ template <unsigned int N>
31
+ struct dummy_uint1_7_t {};
32
+
33
+ // For the macros below:
34
+ //
35
+ // For users: If you want to macro some code for all non-QInt scalar types
36
+ // (i.e. types with complete information), you probably want one of the
37
+ // AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND macros below, which are
38
+ // designed to behave similarly to the Dispatch macros with the same name.
39
+ //
40
+ // For adding a new dtype: In the beginning, we had an idea that there was a
41
+ // list of all scalar types, and you could use AT_FORALL_SCALAR_TYPES to
42
+ // iterate over them. But over the years we added weird types which couldn't
43
+ // be handled uniformly everywhere and so in the end we ended up with some
44
+ // mish-mosh of some helper macros, but mostly use sites making a call about
45
+ // what dtypes they can or can't support. So if you want to add a new dtype,
46
+ // the preferred resolution is to find a dtype similar to what you want,
47
+ // grep for it and edit all the sites you find this way. If you need to add
48
+ // a completely new kind of dtype, you're going to have to laboriously audit
49
+ // all of the sites everywhere to figure out how it should work. Consulting
50
+ // some old PRs where we added new dtypes (check history of this file) can
51
+ // help give you an idea where to start.
52
+
53
+ // NB: Order matters for this macro; it is relied upon in
54
+ // _promoteTypesLookup and the serialization format.
55
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \
56
+ _(uint8_t, Byte) /* 0 */ \
57
+ _(int8_t, Char) /* 1 */ \
58
+ _(int16_t, Short) /* 2 */ \
59
+ _(int, Int) /* 3 */ \
60
+ _(int64_t, Long) /* 4 */ \
61
+ _(at::Half, Half) /* 5 */ \
62
+ _(float, Float) /* 6 */ \
63
+ _(double, Double) /* 7 */ \
64
+ _(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
65
+ _(c10::complex<float>, ComplexFloat) /* 9 */ \
66
+ _(c10::complex<double>, ComplexDouble) /* 10 */ \
67
+ _(bool, Bool) /* 11 */ \
68
+ _(c10::qint8, QInt8) /* 12 */ \
69
+ _(c10::quint8, QUInt8) /* 13 */ \
70
+ _(c10::qint32, QInt32) /* 14 */ \
71
+ _(at::BFloat16, BFloat16) /* 15 */ \
72
+ _(c10::quint4x2, QUInt4x2) /* 16 */ \
73
+ _(c10::quint2x4, QUInt2x4) /* 17 */ \
74
+ _(c10::bits1x8, Bits1x8) /* 18 */ \
75
+ _(c10::bits2x4, Bits2x4) /* 19 */ \
76
+ _(c10::bits4x2, Bits4x2) /* 20 */ \
77
+ _(c10::bits8, Bits8) /* 21 */ \
78
+ _(c10::bits16, Bits16) /* 22 */ \
79
+ _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ \
80
+ _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ \
81
+ _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ \
82
+ _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */ \
83
+ _(uint16_t, UInt16) /* 27 */ \
84
+ _(uint32_t, UInt32) /* 28 */ \
85
+ _(uint64_t, UInt64) /* 29 */ \
86
+ _(c10::dummy_uint1_7_t<1>, UInt1) /* 30 */ \
87
+ _(c10::dummy_uint1_7_t<2>, UInt2) /* 31 */ \
88
+ _(c10::dummy_uint1_7_t<3>, UInt3) /* 32 */ \
89
+ _(c10::dummy_uint1_7_t<4>, UInt4) /* 33 */ \
90
+ _(c10::dummy_uint1_7_t<5>, UInt5) /* 34 */ \
91
+ _(c10::dummy_uint1_7_t<6>, UInt6) /* 35 */ \
92
+ _(c10::dummy_uint1_7_t<7>, UInt7) /* 36 */
93
+
94
+ // If you want to support ComplexHalf for real, add ComplexHalf
95
+ // into this macro (and change the name). But beware: convert()
96
+ // doesn't work for all the conversions you need...
97
+ //
98
+ // TODO: To add unsigned int types here, we must define accumulate type.
99
+ // But uint8 currently accumulates into int64, so we would have to make
100
+ // an inconsistent choice for the larger types. Difficult.
101
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) \
102
+ _(uint8_t, Byte) \
103
+ _(int8_t, Char) \
104
+ _(int16_t, Short) \
105
+ _(int, Int) \
106
+ _(int64_t, Long) \
107
+ _(at::Half, Half) \
108
+ _(float, Float) \
109
+ _(double, Double) \
110
+ _(c10::complex<float>, ComplexFloat) \
111
+ _(c10::complex<double>, ComplexDouble) \
112
+ _(bool, Bool) \
113
+ _(at::BFloat16, BFloat16) \
114
+ _(at::Float8_e5m2, Float8_e5m2) \
115
+ _(at::Float8_e4m3fn, Float8_e4m3fn)
116
+
117
+ // This macro controls many of our C++ APIs, including constructors
118
+ // for Scalar as well as the data() and item() accessors on Tensor
119
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
120
+ _(uint8_t, Byte) \
121
+ _(int8_t, Char) \
122
+ _(int16_t, Short) \
123
+ _(int, Int) \
124
+ _(int64_t, Long) \
125
+ _(at::Half, Half) \
126
+ _(float, Float) \
127
+ _(double, Double) \
128
+ _(c10::complex<c10::Half>, ComplexHalf) \
129
+ _(c10::complex<float>, ComplexFloat) \
130
+ _(c10::complex<double>, ComplexDouble) \
131
+ _(bool, Bool) \
132
+ _(at::BFloat16, BFloat16) \
133
+ _(at::Float8_e5m2, Float8_e5m2) \
134
+ _(at::Float8_e4m3fn, Float8_e4m3fn) \
135
+ _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) \
136
+ _(at::Float8_e4m3fnuz, Float8_e4m3fnuz)
137
+
138
+ enum class ScalarType : int8_t {
139
+ #define DEFINE_ST_ENUM_VAL_(_1, n) n,
140
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ST_ENUM_VAL_)
141
+ #undef DEFINE_ST_ENUM_VAL_
142
+ Undefined,
143
+ NumOptions
144
+ };
145
+
146
+ constexpr uint16_t NumScalarTypes =
147
+ static_cast<uint16_t>(ScalarType::NumOptions);
148
+
149
+ namespace impl {
150
+
151
+ // These are used to map ScalarTypes to C++ types.
152
+
153
+ template <c10::ScalarType N>
154
+ struct ScalarTypeToCPPType;
155
+
156
+ #define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \
157
+ template <> \
158
+ struct ScalarTypeToCPPType<c10::ScalarType::scalar_type> { \
159
+ using type = cpp_type; \
160
+ \
161
+ /* This is a workaround for the CUDA bug which prevents */ \
162
+ /* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
163
+ /* ambiguous reference which can't to be resolved. For some reason it */ \
164
+ /* can't pick between at::detail and at::cuda::detail. */ \
165
+ /* For repro example, please see: */ \
166
+ /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
167
+ /* TODO: remove once the bug is fixed. */ \
168
+ static type t; \
169
+ };
170
+
171
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType)
172
+
173
+ #undef SPECIALIZE_ScalarTypeToCPPType
174
+
175
+ template <c10::ScalarType N>
176
+ using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType<N>::type;
177
+
178
+ } // namespace impl
179
+
180
+ template <typename T>
181
+ struct CppTypeToScalarType;
182
+
183
+ #define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \
184
+ template <> \
185
+ struct CppTypeToScalarType<cpp_type> \
186
+ : std:: \
187
+ integral_constant<c10::ScalarType, c10::ScalarType::scalar_type> { \
188
+ };
189
+
190
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType)
191
+
192
+ #undef SPECIALIZE_CppTypeToScalarType
193
+
194
+ // NB: despite its generic sounding name, the macros that don't take _AND
195
+ // are mostly only used by tensorexpr
196
+ #define AT_FORALL_INT_TYPES(_) \
197
+ _(uint8_t, Byte) \
198
+ _(int8_t, Char) \
199
+ _(int16_t, Short) \
200
+ _(int, Int) \
201
+ _(int64_t, Long)
202
+
203
+ #define AT_FORALL_SCALAR_TYPES(_) \
204
+ _(uint8_t, Byte) \
205
+ _(int8_t, Char) \
206
+ _(int16_t, Short) \
207
+ _(int, Int) \
208
+ _(int64_t, Long) \
209
+ _(float, Float) \
210
+ _(double, Double)
211
+
212
+ // These macros are often controlling how many template instantiations we
213
+ // create for kernels. It is typically inappropriate to add new dtypes here,
214
+ // instead, new types should be added to use sites on a case-by-case basis.
215
+ // We generally are not accepting new dtypes due to binary size concerns.
216
+
217
+ #define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \
218
+ _(uint8_t, Byte) \
219
+ _(int8_t, Char) \
220
+ _(int16_t, Short) \
221
+ _(int, Int) \
222
+ _(int64_t, Long) \
223
+ _(float, Float) \
224
+ _(double, Double) \
225
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
226
+ ::c10::ScalarType::SCALARTYPE>::t), \
227
+ SCALARTYPE)
228
+
229
+ #define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \
230
+ _(uint8_t, Byte) \
231
+ _(int8_t, Char) \
232
+ _(int16_t, Short) \
233
+ _(int, Int) \
234
+ _(int64_t, Long) \
235
+ _(float, Float) \
236
+ _(double, Double) \
237
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
238
+ ::c10::ScalarType::SCALARTYPE1>::t), \
239
+ SCALARTYPE1) \
240
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
241
+ ::c10::ScalarType::SCALARTYPE2>::t), \
242
+ SCALARTYPE2)
243
+
244
+ #define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \
245
+ _(uint8_t, Byte) \
246
+ _(int8_t, Char) \
247
+ _(int16_t, Short) \
248
+ _(int, Int) \
249
+ _(int64_t, Long) \
250
+ _(float, Float) \
251
+ _(double, Double) \
252
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
253
+ ::c10::ScalarType::SCALARTYPE1>::t), \
254
+ SCALARTYPE1) \
255
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
256
+ ::c10::ScalarType::SCALARTYPE2>::t), \
257
+ SCALARTYPE2) \
258
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
259
+ ::c10::ScalarType::SCALARTYPE3>::t), \
260
+ SCALARTYPE3)
261
+
262
+ #define AT_FORALL_SCALAR_TYPES_AND7( \
263
+ SCALARTYPE1, \
264
+ SCALARTYPE2, \
265
+ SCALARTYPE3, \
266
+ SCALARTYPE4, \
267
+ SCALARTYPE5, \
268
+ SCALARTYPE6, \
269
+ SCALARTYPE7, \
270
+ _) \
271
+ _(uint8_t, Byte) \
272
+ _(int8_t, Char) \
273
+ _(int16_t, Short) \
274
+ _(int, Int) \
275
+ _(int64_t, Long) \
276
+ _(float, Float) \
277
+ _(double, Double) \
278
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
279
+ ::c10::ScalarType::SCALARTYPE1>::t), \
280
+ SCALARTYPE1) \
281
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
282
+ ::c10::ScalarType::SCALARTYPE2>::t), \
283
+ SCALARTYPE2) \
284
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
285
+ ::c10::ScalarType::SCALARTYPE3>::t), \
286
+ SCALARTYPE3) \
287
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
288
+ ::c10::ScalarType::SCALARTYPE4>::t), \
289
+ SCALARTYPE4) \
290
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
291
+ ::c10::ScalarType::SCALARTYPE5>::t), \
292
+ SCALARTYPE5) \
293
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
294
+ ::c10::ScalarType::SCALARTYPE6>::t), \
295
+ SCALARTYPE6) \
296
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
297
+ ::c10::ScalarType::SCALARTYPE7>::t), \
298
+ SCALARTYPE7)
299
+
300
+ #define AT_FORALL_QINT_TYPES(_) \
301
+ _(c10::qint8, QInt8) \
302
+ _(c10::quint8, QUInt8) \
303
+ _(c10::qint32, QInt32) \
304
+ _(c10::quint4x2, QUInt4x2) \
305
+ _(c10::quint2x4, QUInt2x4)
306
+
307
+ #define AT_FORALL_COMPLEX_TYPES(_) \
308
+ _(c10::complex<float>, ComplexFloat) \
309
+ _(c10::complex<double>, ComplexDouble)
310
+
311
+ #define DEFINE_CONSTANT(_, name) \
312
+ constexpr ScalarType k##name = ScalarType::name;
313
+
314
+ // NOLINTNEXTLINE(clang-diagnostic-unused-const-variable)
315
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT)
316
+ #undef DEFINE_CONSTANT
317
+
318
+ inline const char* toString(ScalarType t) {
319
+ #define DEFINE_CASE(_, name) \
320
+ case ScalarType::name: \
321
+ return #name;
322
+
323
+ switch (t) {
324
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE)
325
+ default:
326
+ return "UNKNOWN_SCALAR";
327
+ }
328
+ #undef DEFINE_CASE
329
+ }
330
+
331
+ inline size_t elementSize(ScalarType t) {
332
+ #define CASE_ELEMENTSIZE_CASE(ctype, name) \
333
+ case ScalarType::name: \
334
+ return sizeof(ctype);
335
+
336
+ switch (t) {
337
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE)
338
+ default:
339
+ TORCH_CHECK(false, "Unknown ScalarType");
340
+ }
341
+ #undef CASE_ELEMENTSIZE_CASE
342
+ }
343
+
344
+ inline bool isIntegralType(ScalarType t, bool includeBool) {
345
+ bool isIntegral =
346
+ (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int ||
347
+ t == ScalarType::Long || t == ScalarType::Short ||
348
+ t == ScalarType::UInt16 || t == ScalarType::UInt32 ||
349
+ t == ScalarType::UInt64);
350
+
351
+ return isIntegral || (includeBool && t == ScalarType::Bool);
352
+ }
353
+
354
+ C10_DEPRECATED_MESSAGE(
355
+ "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
356
+ inline bool isIntegralType(ScalarType t) {
357
+ return isIntegralType(t, /*includeBool=*/false);
358
+ }
359
+
360
+ inline bool isFloat8Type(ScalarType t) {
361
+ return t == ScalarType::Float8_e5m2 || t == ScalarType::Float8_e5m2fnuz ||
362
+ t == ScalarType::Float8_e4m3fn || t == ScalarType::Float8_e4m3fnuz;
363
+ }
364
+
365
+ inline bool isReducedFloatingType(ScalarType t) {
366
+ return t == ScalarType::Half || t == ScalarType::BFloat16 || isFloat8Type(t);
367
+ }
368
+
369
+ inline bool isFloatingType(ScalarType t) {
370
+ return t == ScalarType::Double || t == ScalarType::Float ||
371
+ isReducedFloatingType(t);
372
+ }
373
+
374
+ inline bool isComplexType(ScalarType t) {
375
+ return (
376
+ t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat ||
377
+ t == ScalarType::ComplexDouble);
378
+ }
379
+
380
+ inline bool isQIntType(ScalarType t) {
381
+ // Don't forget to extend this when adding new QInt types
382
+ return t == ScalarType::QInt8 || t == ScalarType::QUInt8 ||
383
+ t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 ||
384
+ t == ScalarType::QUInt2x4;
385
+ }
386
+
387
+ inline bool isBitsType(ScalarType t) {
388
+ return t == ScalarType::Bits1x8 || t == ScalarType::Bits2x4 ||
389
+ t == ScalarType::Bits4x2 || t == ScalarType::Bits8 ||
390
+ t == ScalarType::Bits16;
391
+ }
392
+
393
+ inline bool isBarebonesUnsignedType(ScalarType t) {
394
+ return t == ScalarType::UInt1 || t == ScalarType::UInt2 ||
395
+ t == ScalarType::UInt3 || t == ScalarType::UInt4 ||
396
+ t == ScalarType::UInt5 || t == ScalarType::UInt6 ||
397
+ t == ScalarType::UInt7 || t == ScalarType::UInt16 ||
398
+ t == ScalarType::UInt32 || t == ScalarType::UInt64;
399
+ }
400
+
401
+ inline ScalarType toQIntType(ScalarType t) {
402
+ switch (t) {
403
+ case ScalarType::Byte:
404
+ return ScalarType::QUInt8;
405
+ case ScalarType::Char:
406
+ return ScalarType::QInt8;
407
+ case ScalarType::Int:
408
+ return ScalarType::QInt32;
409
+ default:
410
+ return t;
411
+ }
412
+ }
413
+
414
+ inline ScalarType toUnderlying(ScalarType t) {
415
+ switch (t) {
416
+ case ScalarType::QUInt8:
417
+ case ScalarType::QUInt4x2:
418
+ [[fallthrough]];
419
+ case ScalarType::QUInt2x4:
420
+ return ScalarType::Byte;
421
+ case ScalarType::QInt8:
422
+ return ScalarType::Char;
423
+ case ScalarType::QInt32:
424
+ return ScalarType::Int;
425
+ default:
426
+ return t;
427
+ }
428
+ }
429
+
430
+ inline bool isSignedType(ScalarType t) {
431
+ #define CASE_ISSIGNED(name) \
432
+ case ScalarType::name: \
433
+ return std::numeric_limits< \
434
+ ::c10::impl::ScalarTypeToCPPTypeT<ScalarType::name>>::is_signed;
435
+
436
+ switch (t) {
437
+ case ScalarType::QInt8:
438
+ case ScalarType::QUInt8:
439
+ case ScalarType::QInt32:
440
+ case ScalarType::QUInt4x2:
441
+ case ScalarType::QUInt2x4:
442
+ TORCH_CHECK(false, "isSignedType not supported for quantized types");
443
+ case ScalarType::Bits1x8:
444
+ case ScalarType::Bits2x4:
445
+ case ScalarType::Bits4x2:
446
+ case ScalarType::Bits8:
447
+ case ScalarType::Bits16:
448
+ TORCH_CHECK(false, "Bits types are undefined");
449
+ CASE_ISSIGNED(UInt16);
450
+ CASE_ISSIGNED(UInt32);
451
+ CASE_ISSIGNED(UInt64);
452
+ CASE_ISSIGNED(BFloat16);
453
+ CASE_ISSIGNED(Float8_e5m2);
454
+ CASE_ISSIGNED(Float8_e5m2fnuz);
455
+ CASE_ISSIGNED(Float8_e4m3fn);
456
+ CASE_ISSIGNED(Float8_e4m3fnuz);
457
+ CASE_ISSIGNED(Byte);
458
+ CASE_ISSIGNED(Char);
459
+ CASE_ISSIGNED(Short);
460
+ CASE_ISSIGNED(Int);
461
+ CASE_ISSIGNED(Long);
462
+ CASE_ISSIGNED(Half);
463
+ CASE_ISSIGNED(Float);
464
+ CASE_ISSIGNED(Double);
465
+ CASE_ISSIGNED(ComplexHalf);
466
+ CASE_ISSIGNED(ComplexFloat);
467
+ CASE_ISSIGNED(ComplexDouble);
468
+ CASE_ISSIGNED(Bool);
469
+ case ScalarType::UInt1:
470
+ case ScalarType::UInt2:
471
+ case ScalarType::UInt3:
472
+ case ScalarType::UInt4:
473
+ case ScalarType::UInt5:
474
+ case ScalarType::UInt6:
475
+ case ScalarType::UInt7:
476
+ return true;
477
+ case ScalarType::Undefined:
478
+ case ScalarType::NumOptions:
479
+ break;
480
+ // Do not add default here, but rather define behavior of every new entry
481
+ // here. `-Wswitch-enum` would raise a warning in those cases.
482
+ }
483
+ TORCH_CHECK(false, "Unknown ScalarType ", t);
484
+ #undef CASE_ISSIGNED
485
+ }
486
+
487
+ inline bool isUnderlying(ScalarType type, ScalarType qtype) {
488
+ return type == toUnderlying(qtype);
489
+ }
490
+
491
+ inline ScalarType toRealValueType(ScalarType t) {
492
+ switch (t) {
493
+ case ScalarType::ComplexHalf:
494
+ return ScalarType::Half;
495
+ case ScalarType::ComplexFloat:
496
+ return ScalarType::Float;
497
+ case ScalarType::ComplexDouble:
498
+ return ScalarType::Double;
499
+ default:
500
+ return t;
501
+ }
502
+ }
503
+
504
+ inline ScalarType toComplexType(ScalarType t) {
505
+ switch (t) {
506
+ case ScalarType::BFloat16:
507
+ // BFloat16 has range equivalent to Float,
508
+ // so we map it to ComplexFloat.
509
+ return ScalarType::ComplexFloat;
510
+ case ScalarType::Half:
511
+ return ScalarType::ComplexHalf;
512
+ case ScalarType::Float:
513
+ return ScalarType::ComplexFloat;
514
+ case ScalarType::Double:
515
+ return ScalarType::ComplexDouble;
516
+ case ScalarType::ComplexHalf:
517
+ return ScalarType::ComplexHalf;
518
+ case ScalarType::ComplexFloat:
519
+ return ScalarType::ComplexFloat;
520
+ case ScalarType::ComplexDouble:
521
+ return ScalarType::ComplexDouble;
522
+ default:
523
+ TORCH_CHECK(false, "Unknown Complex ScalarType for ", t);
524
+ }
525
+ }
526
+
527
+ // see tensor_attributes.rst for detailed explanation and examples
528
+ // of casting rules.
529
+ inline bool canCast(const ScalarType from, const ScalarType to) {
530
+ // We disallow complex -> non complex, e.g., float_tensor *= complex is
531
+ // disallowed.
532
+ if (isComplexType(from) && !isComplexType(to)) {
533
+ return false;
534
+ }
535
+ // We disallow float -> integral, e.g., int_tensor *= float is disallowed.
536
+ if (isFloatingType(from) && isIntegralType(to, false)) {
537
+ return false;
538
+ }
539
+
540
+ // Treat bool as a distinct "category," to be consistent with type promotion
541
+ // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same
542
+ // category as `bool_tensor`, we would not promote. Differing categories
543
+ // implies `bool_tensor += 5` is disallowed.
544
+ //
545
+ // NB: numpy distinguishes "unsigned" as a category to get the desired
546
+ // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because:
547
+ // * We don't want the performance hit of checking the runtime sign of
548
+ // Scalars.
549
+ // * `uint8_tensor + 5 -> int64_tensor` would be undesirable.
550
+ if (from != ScalarType::Bool && to == ScalarType::Bool) {
551
+ return false;
552
+ }
553
+ return true;
554
+ }
555
+
556
+ C10_API ScalarType promoteTypes(ScalarType a, ScalarType b);
557
+
558
+ inline std::ostream& operator<<(
559
+ std::ostream& stream,
560
+ at::ScalarType scalar_type) {
561
+ return stream << toString(scalar_type);
562
+ }
563
+
564
+ } // namespace c10
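A short sketch (not part of the diff) touching a few of the utilities above: the CppTypeToScalarType mapping, elementSize, and the canCast rules described in the comments.

```cpp
#include <c10/core/ScalarType.h>
#include <iostream>

int main() {
  constexpr auto f = c10::CppTypeToScalarType<float>::value;  // ScalarType::Float
  std::cout << c10::toString(f) << " occupies " << c10::elementSize(f) << " bytes\n";

  std::cout << std::boolalpha;
  // float -> integral in-place casts are disallowed (see canCast above)...
  std::cout << c10::canCast(c10::kFloat, c10::kInt) << "\n";   // false
  // ...while integral -> float is fine.
  std::cout << c10::canCast(c10::kInt, c10::kFloat) << "\n";   // true
}
```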
parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/typeid.h>
6
+
7
+ // these just expose TypeMeta/ScalarType bridge functions in c10
8
+ // TODO move to typeid.h (or codemod away) when TypeMeta et al
9
+ // are moved from caffe2 to c10 (see note at top of typeid.h)
10
+
11
+ namespace c10 {
12
+
13
+ /**
14
+ * convert ScalarType enum values to TypeMeta handles
15
+ */
16
+ inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) {
17
+ return caffe2::TypeMeta::fromScalarType(scalar_type);
18
+ }
19
+
20
+ /**
21
+ * convert TypeMeta handles to ScalarType enum values
22
+ */
23
+ inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
24
+ return dtype.toScalarType();
25
+ }
26
+
27
+ /**
28
+ * typeMetaToScalarType(), lifted to optional
29
+ */
30
+ inline optional<at::ScalarType> optTypeMetaToScalarType(
31
+ optional<caffe2::TypeMeta> type_meta) {
32
+ if (!type_meta.has_value()) {
33
+ return c10::nullopt;
34
+ }
35
+ return type_meta->toScalarType();
36
+ }
37
+
38
+ /**
39
+ * convenience: equality across TypeMeta/ScalarType conversion
40
+ */
41
+ inline bool operator==(ScalarType t, caffe2::TypeMeta m) {
42
+ return m.isScalarType(t);
43
+ }
44
+
45
+ inline bool operator==(caffe2::TypeMeta m, ScalarType t) {
46
+ return t == m;
47
+ }
48
+
49
+ inline bool operator!=(ScalarType t, caffe2::TypeMeta m) {
50
+ return !(t == m);
51
+ }
52
+
53
+ inline bool operator!=(caffe2::TypeMeta m, ScalarType t) {
54
+ return !(t == m);
55
+ }
56
+
57
+ } // namespace c10
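A sketch (not in the diff) of the round trip between ScalarType and caffe2::TypeMeta; it requires linking against c10 for the TypeMeta machinery.

```cpp
#include <c10/core/ScalarTypeToTypeMeta.h>
#include <iostream>

int main() {
  caffe2::TypeMeta meta = c10::scalarTypeToTypeMeta(c10::kHalf);
  std::cout << std::boolalpha;
  // The operators defined above let the two representations be compared directly.
  std::cout << (meta == c10::kHalf) << "\n";                                        // true
  std::cout << (c10::typeMetaToScalarType(meta) == c10::ScalarType::Half) << "\n";  // true
}
```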
parrot/lib/python3.10/site-packages/torch/include/c10/core/Storage.h ADDED
@@ -0,0 +1,272 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/core/DeviceType.h>
6
+ #include <c10/core/StorageImpl.h>
7
+ #include <c10/core/SymInt.h>
8
+ #include <c10/macros/Export.h>
9
+ #include <c10/util/Exception.h>
10
+ #include <c10/util/ExclusivelyOwned.h>
11
+ #include <c10/util/MaybeOwned.h>
12
+ #include <c10/util/UniqueVoidPtr.h>
13
+ #include <c10/util/intrusive_ptr.h>
14
+ #include <cstddef>
15
+ #include <utility>
16
+
17
+ namespace c10 {
18
+
19
+ struct Storage;
20
+
21
+ C10_API bool isSharedStorageAlias(
22
+ const Storage& storage0,
23
+ const Storage& storage1);
24
+
25
+ struct C10_API Storage {
26
+ public:
27
+ struct use_byte_size_t {};
28
+ struct unsafe_borrow_t {
29
+ explicit unsafe_borrow_t() = default;
30
+ };
31
+
32
+ Storage() = default;
33
+ Storage(c10::intrusive_ptr<StorageImpl> ptr)
34
+ : storage_impl_(std::move(ptr)) {}
35
+
36
+ // Allocates memory buffer using given allocator and creates a storage with it
37
+ Storage(
38
+ use_byte_size_t /*use_byte_size*/,
39
+ const SymInt& size_bytes,
40
+ Allocator* allocator = nullptr,
41
+ bool resizable = false)
42
+ : storage_impl_(c10::make_intrusive<StorageImpl>(
43
+ StorageImpl::use_byte_size_t(),
44
+ size_bytes,
45
+ allocator,
46
+ resizable)) {}
47
+
48
+ // Creates storage with pre-allocated memory buffer. Allocator is given for
49
+ // potential future reallocations, however it can be nullptr if the storage
50
+ // is non-resizable
51
+ Storage(
52
+ use_byte_size_t /*use_byte_size*/,
53
+ size_t size_bytes,
54
+ at::DataPtr data_ptr,
55
+ at::Allocator* allocator = nullptr,
56
+ bool resizable = false)
57
+ : storage_impl_(c10::make_intrusive<StorageImpl>(
58
+ StorageImpl::use_byte_size_t(),
59
+ size_bytes,
60
+ std::move(data_ptr),
61
+ allocator,
62
+ resizable)) {}
63
+
64
+ protected:
65
+ explicit Storage(unsafe_borrow_t, const Storage& rhs)
66
+ : storage_impl_(c10::intrusive_ptr<c10::StorageImpl>::reclaim(
67
+ rhs.storage_impl_.get())) {}
68
+
69
+ friend MaybeOwnedTraits<Storage>;
70
+
71
+ public:
72
+ // Legacy constructor for partially initialized (dtype or memory) storages
73
+ // that can be temporarily created with Caffe2 APIs. See the note on top of
74
+ // TensorImpl.h for details.
75
+ static Storage create_legacy(at::Device device) {
76
+ auto allocator = GetAllocator(device.type());
77
+ return Storage(c10::make_intrusive<StorageImpl>(
78
+ StorageImpl::use_byte_size_t(),
79
+ 0,
80
+ allocator->allocate(0), // materialize a non-default Device.
81
+ allocator,
82
+ true));
83
+ }
84
+
85
+ // Mimic create_legacy, but without requiring a newly-created StorageImpl.
86
+ void reset_legacy() {
87
+ TORCH_CHECK(resizable() && allocator());
88
+ set_nbytes(0);
89
+ set_data_ptr_noswap(allocator()->allocate(0));
90
+ }
91
+
92
+ // TODO: remove later
93
+ void set_nbytes(size_t size_bytes) const {
94
+ storage_impl_->set_nbytes(size_bytes);
95
+ }
96
+
97
+ void set_nbytes(c10::SymInt size_bytes) const {
98
+ storage_impl_->set_nbytes(std::move(size_bytes));
99
+ }
100
+
101
+ bool resizable() const {
102
+ return storage_impl_->resizable();
103
+ }
104
+
105
+ size_t nbytes() const {
106
+ return storage_impl_->nbytes();
107
+ }
108
+
109
+ SymInt sym_nbytes() const {
110
+ return storage_impl_->sym_nbytes();
111
+ }
112
+ // get() use here is to get const-correctness
113
+
114
+ const void* data() const {
115
+ return storage_impl_->data();
116
+ }
117
+
118
+ void* mutable_data() const {
119
+ return storage_impl_->mutable_data();
120
+ }
121
+
122
+ at::DataPtr& mutable_data_ptr() const {
123
+ return storage_impl_->mutable_data_ptr();
124
+ }
125
+
126
+ const at::DataPtr& data_ptr() const {
127
+ return storage_impl_->data_ptr();
128
+ }
129
+
130
+ // Returns the previous data_ptr
131
+ at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const {
132
+ return storage_impl_->set_data_ptr(std::move(data_ptr));
133
+ }
134
+
135
+ void set_data_ptr_noswap(at::DataPtr&& data_ptr) const {
136
+ return storage_impl_->set_data_ptr_noswap(std::move(data_ptr));
137
+ }
138
+
139
+ DeviceType device_type() const {
140
+ return storage_impl_->device_type();
141
+ }
142
+
143
+ at::Allocator* allocator() const {
144
+ return storage_impl_->allocator();
145
+ }
146
+
147
+ at::Device device() const {
148
+ return storage_impl_->device();
149
+ }
150
+
151
+ StorageImpl* unsafeReleaseStorageImpl() {
152
+ return storage_impl_.release();
153
+ }
154
+
155
+ StorageImpl* unsafeGetStorageImpl() const noexcept {
156
+ return storage_impl_.get();
157
+ }
158
+
159
+ c10::weak_intrusive_ptr<StorageImpl> getWeakStorageImpl() const {
160
+ return c10::weak_intrusive_ptr<StorageImpl>(storage_impl_);
161
+ }
162
+
163
+ operator bool() const {
164
+ return storage_impl_;
165
+ }
166
+
167
+ size_t use_count() const {
168
+ return storage_impl_.use_count();
169
+ }
170
+
171
+ inline bool unique() const {
172
+ return storage_impl_.unique();
173
+ }
174
+
175
+ bool is_alias_of(const Storage& other) const {
176
+ return (
177
+ storage_impl_ == other.storage_impl_ ||
178
+ isSharedStorageAlias(*this, other));
179
+ }
180
+
181
+ void UniqueStorageShareExternalPointer(
182
+ void* src,
183
+ size_t capacity,
184
+ DeleterFnPtr d = nullptr) {
185
+ if (!storage_impl_.unique()) {
186
+ TORCH_CHECK(
187
+ false,
188
+ "UniqueStorageShareExternalPointer can only be called when use_count == 1");
189
+ }
190
+ storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d);
191
+ }
192
+
193
+ void UniqueStorageShareExternalPointer(
194
+ at::DataPtr&& data_ptr,
195
+ size_t capacity) {
196
+ if (!storage_impl_.unique()) {
197
+ TORCH_CHECK(
198
+ false,
199
+ "UniqueStorageShareExternalPointer can only be called when use_count == 1");
200
+ }
201
+ storage_impl_->UniqueStorageShareExternalPointer(
202
+ std::move(data_ptr), capacity);
203
+ }
204
+
205
+ protected:
206
+ c10::intrusive_ptr<StorageImpl> storage_impl_;
207
+ };
208
+
209
+ template <>
210
+ struct MaybeOwnedTraits<c10::Storage> {
211
+ using owned_type = c10::Storage;
212
+ using borrow_type = c10::Storage;
213
+
214
+ static borrow_type createBorrow(const owned_type& from) {
215
+ return borrow_type(borrow_type::unsafe_borrow_t{}, from);
216
+ }
217
+
218
+ static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
219
+ lhs.unsafeReleaseStorageImpl();
220
+ lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
221
+ }
222
+
223
+ static void destroyBorrow(borrow_type& toDestroy) {
224
+ toDestroy.unsafeReleaseStorageImpl(); // "leak" it, but it was already +0.
225
+ }
226
+
227
+ static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
228
+ return borrow;
229
+ }
230
+
231
+ static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
232
+ return &borrow;
233
+ }
234
+
235
+ static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
236
+ return true;
237
+ }
238
+ };
239
+
240
+ template <>
241
+ struct ExclusivelyOwnedTraits<c10::Storage> {
242
+ using repr_type = c10::Storage;
243
+ using pointer_type = c10::Storage*;
244
+ using const_pointer_type = const c10::Storage*;
245
+
246
+ static repr_type nullRepr() {
247
+ return c10::Storage();
248
+ }
249
+
250
+ template <class... Args>
251
+ static repr_type createInPlace(Args&&... args) {
252
+ return c10::Storage(std::forward<Args>(args)...);
253
+ }
254
+
255
+ static repr_type moveToRepr(c10::Storage&& x) {
256
+ return std::move(x);
257
+ }
258
+
259
+ static c10::Storage take(c10::Storage& x) {
260
+ return std::move(x);
261
+ }
262
+
263
+ static pointer_type getImpl(repr_type& x) {
264
+ return &x;
265
+ }
266
+
267
+ static const_pointer_type getImpl(const repr_type& x) {
268
+ return &x;
269
+ }
270
+ };
271
+
272
+ } // namespace c10
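A minimal sketch (not part of the diff): allocating a small CPU storage through the value-type Storage wrapper and inspecting it. `GetCPUAllocator` comes from c10/core/CPUAllocator.h, which is also part of this upload; the program must link against c10.

```cpp
#include <c10/core/CPUAllocator.h>
#include <c10/core/Storage.h>
#include <iostream>

int main() {
  c10::Storage storage(
      c10::Storage::use_byte_size_t(),
      /*size_bytes=*/64,
      c10::GetCPUAllocator(),
      /*resizable=*/true);
  std::cout << storage.nbytes() << " bytes on device " << storage.device() << "\n";
  std::cout << "sole owner: " << std::boolalpha << storage.unique() << "\n";
}
```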
parrot/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h ADDED
@@ -0,0 +1,330 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/core/DeviceType.h>
6
+ #include <c10/core/SymInt.h>
7
+ #include <c10/core/impl/COW.h>
8
+ #include <c10/core/impl/COWDeleter.h>
9
+ #include <c10/core/impl/PyObjectSlot.h>
10
+ #include <c10/macros/Export.h>
11
+ #include <c10/util/Exception.h>
12
+ #include <c10/util/UniqueVoidPtr.h>
13
+ #include <c10/util/intrusive_ptr.h>
14
+ #include <cstddef>
15
+ #include <utility>
16
+
17
+ namespace c10 {
18
+
19
+ C10_API void throwNullDataPtrError();
20
+ C10_API void warnDeprecatedDataPtr();
21
+
22
+ // A storage represents the underlying backing data buffer for a
23
+ // tensor. This concept was inherited from the original Torch7
24
+ // codebase; we'd kind of like to get rid of the concept
25
+ // (see https://github.com/pytorch/pytorch/issues/14797) but
26
+ // it's hard work and no one has gotten around to doing it.
27
+ //
28
+ // NB: storage is supposed to uniquely own a data pointer; e.g.,
29
+ // two non-null data pointers alias if and only if they are from
30
+ // the same storage. Technically you can violate this invariant
31
+ // (e.g., you can create a non-owning StorageImpl with at::from_blob)
32
+ // but a lot of things won't work correctly, including:
33
+ //
34
+ // - An ordinary deleter on such a storage is wrong, because normal deleters
35
+ // assume unique ownership, but if you have two storages at the same data,
36
+ // that implies there is some sort of shared ownership. So your deleter would
37
+ // have to actually be internally doing some sort of refcount thing
38
+ // - Deepcopy in Python side relies on storage equality and not data pointer
39
+ // equality; so if there are two separate storages pointing to the same data,
40
+ // the data will actually get duplicated in that case (one data ptr before,
41
+ // two data ptrs after)
42
+ // - Version counts won't work correctly, because we do all VC tracking at the
43
+ // level of storages (unless you explicitly disconnect the VC with detach);
44
+ // mutation because data pointers are the same are totally untracked
45
+ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
46
+ public:
47
+ struct use_byte_size_t {};
48
+
49
+ StorageImpl(
50
+ use_byte_size_t /*use_byte_size*/,
51
+ SymInt size_bytes,
52
+ at::DataPtr data_ptr,
53
+ at::Allocator* allocator,
54
+ bool resizable)
55
+ : data_ptr_(std::move(data_ptr)),
56
+ size_bytes_(std::move(size_bytes)),
57
+ size_bytes_is_heap_allocated_(size_bytes_.is_heap_allocated()),
58
+ resizable_(resizable),
59
+ received_cuda_(false),
60
+ allocator_(allocator) {
61
+ if (resizable) {
62
+ TORCH_INTERNAL_ASSERT(
63
+ allocator_, "For resizable storage, allocator must be provided");
64
+ }
65
+ refresh_has_data_ptr_check();
66
+ }
67
+
68
+ StorageImpl(
69
+ use_byte_size_t /*use_byte_size*/,
70
+ const SymInt& size_bytes,
71
+ at::Allocator* allocator,
72
+ bool resizable)
73
+ : StorageImpl(
74
+ use_byte_size_t(),
75
+ size_bytes,
76
+ size_bytes.is_heap_allocated()
77
+ ? allocator->allocate(0)
78
+ : allocator->allocate(size_bytes.as_int_unchecked()),
79
+ allocator,
80
+ resizable) {}
81
+
82
+ StorageImpl& operator=(StorageImpl&& other) = delete;
83
+ StorageImpl& operator=(const StorageImpl&) = delete;
84
+ StorageImpl() = delete;
85
+ StorageImpl(StorageImpl&& other) = delete;
86
+ StorageImpl(const StorageImpl&) = delete;
87
+ ~StorageImpl() override = default;
88
+
89
+ void reset() {
90
+ data_ptr_.clear();
91
+ size_bytes_ = 0;
92
+ size_bytes_is_heap_allocated_ = false;
93
+ }
94
+
95
+ // Destructor doesn't call release_resources because it's
96
+ // unnecessary; don't forget to change that if needed!
97
+ void release_resources() override {
98
+ data_ptr_.clear();
99
+ }
100
+
101
+ size_t nbytes() const {
102
+ // OK to do this instead of maybe_as_int as nbytes is guaranteed positive
103
+ TORCH_CHECK(!size_bytes_is_heap_allocated_);
104
+ return size_bytes_.as_int_unchecked();
105
+ }
106
+
107
+ SymInt sym_nbytes() const {
108
+ return size_bytes_;
109
+ }
110
+
111
+ // TODO: remove later
112
+ void set_nbytes(size_t size_bytes) {
113
+ size_bytes_ = static_cast<int64_t>(size_bytes);
114
+ size_bytes_is_heap_allocated_ = false;
115
+ }
116
+
117
+ void set_nbytes(c10::SymInt size_bytes) {
118
+ size_bytes_ = std::move(size_bytes);
119
+ }
120
+
121
+ bool resizable() const {
122
+ return resizable_;
123
+ }
124
+
125
+ const at::DataPtr& data_ptr() const {
126
+ return data_ptr_;
127
+ }
128
+
129
+ at::DataPtr& mutable_data_ptr() {
130
+ if (C10_UNLIKELY(has_data_ptr_check_)) {
131
+ if (throw_on_mutable_data_ptr_) {
132
+ throwNullDataPtrError();
133
+ }
134
+ if (warn_deprecated_on_mutable_data_ptr_) {
135
+ warnDeprecatedDataPtr();
136
+ }
137
+ maybe_materialize_cow();
138
+ }
139
+ return data_ptr_;
140
+ }
141
+
142
+ // Returns the data_ptr. Bypasses all checks.
143
+ at::DataPtr& _mutable_data_ptr_no_checks() {
144
+ return data_ptr_;
145
+ }
146
+
147
+ // Returns the previous data_ptr
148
+ at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) {
149
+ // We need to materialize the old COW DataPtr because it is
150
+ // being returned as mutable.
151
+ maybe_materialize_cow();
152
+ return set_data_ptr_no_materialize_cow(std::move(data_ptr));
153
+ }
154
+
155
+ void set_data_ptr_noswap(at::DataPtr&& data_ptr) {
156
+ data_ptr_ = std::move(data_ptr);
157
+ refresh_has_data_ptr_check();
158
+ }
159
+
160
+ const void* data() const {
161
+ return data_ptr_.get();
162
+ }
163
+
164
+ void* mutable_data() {
165
+ if (C10_UNLIKELY(has_data_ptr_check_)) {
166
+ if (throw_on_mutable_data_ptr_) {
167
+ throwNullDataPtrError();
168
+ }
169
+ if (warn_deprecated_on_mutable_data_ptr_) {
170
+ warnDeprecatedDataPtr();
171
+ }
172
+ maybe_materialize_cow();
173
+ }
174
+ return data_ptr_.mutable_get();
175
+ }
176
+
177
+ at::DeviceType device_type() const {
178
+ return data_ptr_.device().type();
179
+ }
180
+
181
+ at::Allocator* allocator() {
182
+ return allocator_;
183
+ }
184
+
185
+ const at::Allocator* allocator() const {
186
+ return allocator_;
187
+ }
188
+
189
+ // You generally shouldn't use this method, but it is occasionally
190
+ // useful if you want to override how a tensor will be reallocated,
191
+ // after it was already allocated (and its initial allocator was
192
+ // set)
193
+ void set_allocator(at::Allocator* allocator) {
194
+ allocator_ = allocator;
195
+ }
196
+
197
+ Device device() const {
198
+ return data_ptr_.device();
199
+ }
200
+
201
+ void set_resizable(bool resizable) {
202
+ if (resizable) {
203
+ // We need an allocator to be resizable
204
+ AT_ASSERT(allocator_);
205
+ }
206
+ resizable_ = resizable;
207
+ }
208
+
209
+ /**
210
+ * Can only be called when use_count is 1
211
+ */
212
+ void UniqueStorageShareExternalPointer(
213
+ void* src,
214
+ size_t size_bytes,
215
+ DeleterFnPtr d = nullptr) {
216
+ UniqueStorageShareExternalPointer(
217
+ at::DataPtr(src, src, d, data_ptr_.device()), size_bytes);
218
+ }
219
+
220
+ /**
221
+ * Can only be called when use_count is 1
222
+ */
223
+ void UniqueStorageShareExternalPointer(
224
+ at::DataPtr&& data_ptr,
225
+ size_t size_bytes) {
226
+ data_ptr_ = std::move(data_ptr);
227
+ size_bytes_ = static_cast<int64_t>(size_bytes);
228
+ size_bytes_is_heap_allocated_ = false;
229
+ allocator_ = nullptr;
230
+ resizable_ = false;
231
+ }
232
+
233
+ // This method can be used only after storage construction and cannot be used
234
+ // to modify storage status
235
+ void set_received_cuda(bool received_cuda) {
236
+ received_cuda_ = received_cuda;
237
+ }
238
+
239
+ bool received_cuda() {
240
+ return received_cuda_;
241
+ }
242
+
243
+ impl::PyObjectSlot* pyobj_slot() {
244
+ return &pyobj_slot_;
245
+ }
246
+
247
+ const impl::PyObjectSlot* pyobj_slot() const {
248
+ return &pyobj_slot_;
249
+ }
250
+
251
+ void set_throw_on_mutable_data_ptr() {
252
+ throw_on_mutable_data_ptr_ = true;
253
+ refresh_has_data_ptr_check();
254
+ }
255
+
256
+ void set_warn_deprecated_on_mutable_data_ptr() {
257
+ warn_deprecated_on_mutable_data_ptr_ = true;
258
+ refresh_has_data_ptr_check();
259
+ }
260
+
261
+ protected:
262
+ // materialize_cow_storage needs to call set_data_ptr_no_materialize_cow
263
+ friend void c10::impl::cow::materialize_cow_storage(StorageImpl& storage);
264
+
265
+ // Returns the previous data_ptr. If the old data_ptr was COW,
266
+ // this avoids materializing it
267
+ at::DataPtr set_data_ptr_no_materialize_cow(at::DataPtr&& data_ptr) {
268
+ at::DataPtr old_data_ptr(std::move(data_ptr_));
269
+ data_ptr_ = std::move(data_ptr);
270
+ refresh_has_data_ptr_check();
271
+ return old_data_ptr;
272
+ }
273
+
274
+ private:
275
+ void refresh_has_data_ptr_check() {
276
+ has_data_ptr_check_ = is_cow() || throw_on_mutable_data_ptr_ ||
277
+ warn_deprecated_on_mutable_data_ptr_;
278
+ }
279
+
280
+ inline bool is_cow() const {
281
+ return c10::impl::cow::is_cow_data_ptr(data_ptr_);
282
+ }
283
+
284
+ // Triggers a copy if this is a copy-on-write tensor.
285
+ void maybe_materialize_cow() {
286
+ if (is_cow()) {
287
+ impl::cow::materialize_cow_storage(*this);
288
+ }
289
+ }
290
+
291
+ DataPtr data_ptr_;
292
+ SymInt size_bytes_;
293
+ bool size_bytes_is_heap_allocated_;
294
+ bool resizable_;
295
+ // Identifies that Storage was received from another process and doesn't have
296
+ // local to process cuda memory allocation
297
+ bool received_cuda_;
298
+ // All special checks in data/data_ptr calls are guarded behind this single
299
+ // boolean. This is for performance: .data/.data_ptr calls are commonly in the
300
+ // hot-path.
301
+ bool has_data_ptr_check_ = false;
302
+ // If we should throw when mutable_data_ptr() or mutable_data() is called.
303
+ bool throw_on_mutable_data_ptr_ = false;
304
+ // If we warn when mutable_data_ptr() or mutable_data() is called.
305
+ bool warn_deprecated_on_mutable_data_ptr_ = false;
306
+ Allocator* allocator_;
307
+ impl::PyObjectSlot pyobj_slot_;
308
+ };
309
+
310
+ // Declare StorageImpl create function pointer types.
311
+ using StorageImplCreateHelper = intrusive_ptr<StorageImpl> (*)(
312
+ StorageImpl::use_byte_size_t,
313
+ SymInt size_bytes,
314
+ DataPtr data_ptr,
315
+ Allocator* allocator,
316
+ bool resizable);
317
+
318
+ C10_API void SetStorageImplCreate(DeviceType t, StorageImplCreateHelper fptr);
319
+
320
+ C10_API StorageImplCreateHelper GetStorageImplCreate(DeviceType t);
321
+
322
+ C10_API c10::intrusive_ptr<c10::StorageImpl> make_storage_impl(
323
+ c10::StorageImpl::use_byte_size_t use_byte_size,
324
+ c10::SymInt size_bytes,
325
+ c10::DataPtr data_ptr,
326
+ c10::Allocator* allocator,
327
+ bool resizable,
328
+ std::optional<at::Device> device_opt);
329
+
330
+ } // namespace c10
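A sketch (not in the diff) of constructing a StorageImpl directly through `make_intrusive`, which is essentially what the Storage wrapper shown earlier does under the hood.

```cpp
#include <c10/core/CPUAllocator.h>
#include <c10/core/StorageImpl.h>
#include <iostream>

int main() {
  auto impl = c10::make_intrusive<c10::StorageImpl>(
      c10::StorageImpl::use_byte_size_t(),
      c10::SymInt(128),
      c10::GetCPUAllocator(),
      /*resizable=*/true);
  std::cout << impl->nbytes() << " bytes, resizable=" << std::boolalpha
            << impl->resizable() << "\n";
}
```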
parrot/lib/python3.10/site-packages/torch/include/c10/core/Stream.h ADDED
@@ -0,0 +1,176 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <cstddef>
8
+ #include <cstdint>
9
+ #include <functional>
10
+ #include <ostream>
11
+
12
+ namespace c10 {
13
+
14
+ /// An index representing a specific stream. A StreamId is not independently
15
+ /// meaningful without knowing the Device it is associated with; try to
16
+ /// use Stream rather than StreamId directly.
17
+ ///
18
+ /// StreamIds are opaque; they are assigned by some DeviceType-specific
19
+ /// numbering system which is not visible to the user. HOWEVER, we
20
+ /// guarantee that StreamId 0 is always a valid stream, and corresponds
21
+ /// to some sort of "default" stream.
22
+ using StreamId = int64_t;
23
+
24
+ struct C10_API StreamData3 {
25
+ StreamId stream_id;
26
+ DeviceIndex device_index;
27
+ DeviceType device_type;
28
+ };
29
+
30
+ // NB: I decided not to call the above StreamIndex to avoid confusion with
31
+ // DeviceIndex. This way, you access device index with index(), and stream id
32
+ // with id()
33
+
34
+ /**
35
+ * A stream is a software mechanism used to synchronize launched kernels
36
+ * without requiring explicit synchronizations between kernels. The basic
37
+ * model is that every kernel launch is associated with a stream: every
38
+ * kernel on the same stream is implicitly synchronized so that if I launch
39
+ * kernels A and B on the same stream, A is guaranteed to finish before B
40
+ * launches. If I want B to run concurrently with A, I must schedule
41
+ * it on a different stream.
42
+ *
43
+ * The Stream class is a backend agnostic value class representing a stream
44
+ * which I may schedule a kernel on. Every stream is associated with a device,
45
+ * which is recorded in stream, which is used to avoid confusion about which
46
+ * device a stream refers to.
47
+ *
48
+ * Streams are explicitly thread-safe, in the sense that it is OK to pass
49
+ * a Stream from one thread to another, and kernels queued from two different
50
+ * threads will still get serialized appropriately. (Of course, the
51
+ * time when the kernels get queued is undetermined unless you synchronize
52
+ * host side ;)
53
+ *
54
+ * Stream does NOT have a default constructor. Streams are for expert
55
+ * users; if you want to use Streams, we're going to assume you know
56
+ * how to deal with C++ template error messages if you try to
57
+ * resize() a vector of Streams.
58
+ *
59
+ * Known instances of streams in backends:
60
+ *
61
+ * - cudaStream_t (CUDA)
62
+ * - hipStream_t (HIP)
63
+ * - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration
64
+ * does NOT support command queues.)
65
+ *
66
+ * Because this class is device agnostic, it cannot provide backend-specific
67
+ * functionality (e.g., get the cudaStream_t of a CUDA stream.) There are
68
+ * wrapper classes which provide this functionality, e.g., CUDAStream.
69
+ */
70
+ class C10_API Stream final {
71
+ private:
72
+ Device device_;
73
+ StreamId id_;
74
+
75
+ public:
76
+ enum Unsafe { UNSAFE };
77
+ enum Default { DEFAULT };
78
+
79
+ /// Unsafely construct a stream from a Device and a StreamId. In
80
+ /// general, only specific implementations of streams for a
81
+ /// backend should manufacture Stream directly in this way; other users
82
+ /// should use the provided APIs to get a stream. In particular,
83
+ /// we don't require backends to give any guarantees about non-zero
84
+ /// StreamIds; they are welcome to allocate in whatever way they like.
85
+ explicit Stream(Unsafe, Device device, StreamId id)
86
+ : device_(device), id_(id) {}
87
+
88
+ /// Construct the default stream of a Device. The default stream is
89
+ /// NOT the same as the current stream; default stream is a fixed stream
90
+ /// that never changes, whereas the current stream may be changed by
91
+ /// StreamGuard.
92
+ explicit Stream(Default, Device device) : device_(device), id_(0) {}
93
+
94
+ bool operator==(const Stream& other) const noexcept {
95
+ return this->device_ == other.device_ && this->id_ == other.id_;
96
+ }
97
+ bool operator!=(const Stream& other) const noexcept {
98
+ return !(*this == other);
99
+ }
100
+
101
+ Device device() const noexcept {
102
+ return device_;
103
+ }
104
+ DeviceType device_type() const noexcept {
105
+ return device_.type();
106
+ }
107
+ DeviceIndex device_index() const noexcept {
108
+ return device_.index();
109
+ }
110
+ StreamId id() const noexcept {
111
+ return id_;
112
+ }
113
+
114
+ // Enqueues a wait instruction in the stream's work queue.
115
+ // This instruction is a no-op unless the event is marked
116
+ // for recording. In that case the stream stops processing
117
+ // until the event is recorded.
118
+ template <typename T>
119
+ void wait(const T& event) const {
120
+ event.block(*this);
121
+ }
122
+
123
+ // Return whether all asynchronous work previously enqueued on this stream
124
+ // has completed running on the device.
125
+ bool query() const;
126
+
127
+ // Wait (by blocking the calling thread) until all asynchronous work enqueued
128
+ // on this stream has completed running on the device.
129
+ void synchronize() const;
130
+
131
+ // The purpose of this function is to more conveniently permit binding
132
+ // of Stream to and from Python. Without packing, I have to setup a whole
133
+ // class with two fields (device and stream id); with packing I can just
134
+ // store a single uint64_t.
135
+ //
136
+ // The particular way we pack streams into a uint64_t is considered an
137
+ // implementation detail and should not be relied upon.
138
+ uint64_t hash() const noexcept {
139
+ // Concat these together into a 64-bit integer
140
+ uint64_t bits = static_cast<uint64_t>(device_type()) << 56 |
141
+ static_cast<uint64_t>(device_index()) << 48 |
142
+ // Remove the sign extension part of the 64-bit address because
143
+ // the id might be used to hold a pointer.
144
+ (static_cast<uint64_t>(id()) & ((1ull << 48) - 1));
145
+ return bits;
146
+ }
147
+
148
+ struct StreamData3 pack3() const {
149
+ return {id(), device_index(), device_type()};
150
+ }
151
+
152
+ static Stream unpack3(
153
+ StreamId stream_id,
154
+ DeviceIndex device_index,
155
+ DeviceType device_type) {
156
+ TORCH_CHECK(isValidDeviceType(device_type));
157
+ return Stream(UNSAFE, Device(device_type, device_index), stream_id);
158
+ }
159
+
160
+ // I decided NOT to provide setters on this class, because really,
161
+ // why would you change the device of a stream? Just construct
162
+ // it correctly from the beginning dude.
163
+ };
164
+
165
+ C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s);
166
+
167
+ } // namespace c10
168
+
169
+ namespace std {
170
+ template <>
171
+ struct hash<c10::Stream> {
172
+ size_t operator()(c10::Stream s) const noexcept {
173
+ return std::hash<uint64_t>{}(s.hash());
174
+ }
175
+ };
176
+ } // namespace std
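
Taken together, `pack3`/`unpack3` and `hash()` mean a `Stream` can round-trip through three plain integers and serve as a hash-map key. A minimal sketch of that round trip; the device type and index are illustrative, and since `Stream` is a pure value class this does not touch any device runtime:

```cpp
#include <c10/core/Stream.h>
#include <iostream>

int main() {
  using namespace c10;
  // Default stream (id 0) on CUDA device 1; values are illustrative.
  Stream s(Stream::DEFAULT, Device(DeviceType::CUDA, 1));

  // Pack into three plain integers (e.g., for a Python binding)...
  StreamData3 d = s.pack3();
  // ...and reconstruct an equal Stream on the other side.
  Stream t = Stream::unpack3(d.stream_id, d.device_index, d.device_type);

  std::cout << (s == t) << "\n";               // 1: round trip preserves identity
  std::cout << std::hash<Stream>{}(s) << "\n"; // usable as an unordered_map key
  return 0;
}
```
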
parrot/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h ADDED
@@ -0,0 +1,170 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/Stream.h>
+ #include <c10/core/impl/InlineStreamGuard.h>
+ #include <c10/core/impl/VirtualGuardImpl.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+
+ /**
+  * A StreamGuard is an RAII class that changes the current device
+  * to the device corresponding to some stream, and changes the
+  * current stream on that device to be this stream.
+  *
+  * Use of StreamGuard is HIGHLY discouraged in operator definitions. In
+  * a single operator, you probably don't know enough about the global
+  * state of the world to profitably decide how to set streams. Let
+  * the caller handle this appropriately, and just use the current stream
+  * in your operator code.
+  *
+  * This StreamGuard does NOT have an uninitialized state; it is guaranteed
+  * to reset the stream and device on exit. If you are in a situation
+  * where you *might* want to set up a stream guard, see OptionalStreamGuard.
+  */
+ struct StreamGuard {
+   /// No default constructor, see Note [Omitted default constructor from RAII]
+   explicit StreamGuard() = delete;
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   explicit StreamGuard(Stream stream) : guard_(stream) {}
+
+   /// Copy is disallowed
+   StreamGuard(const StreamGuard&) = delete;
+   StreamGuard& operator=(const StreamGuard&) = delete;
+
+   /// Move is disallowed, as StreamGuard does not have an uninitialized state,
+   /// which is required for moves on types with nontrivial destructors.
+   StreamGuard(StreamGuard&& other) = delete;
+   StreamGuard& operator=(StreamGuard&& other) = delete;
+
+   /// Resets the currently set stream to the original stream and
+   /// the currently set device to the original device. Then,
+   /// set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   ///
+   /// NOTE: this implementation may skip some stream/device setting if
+   /// it can prove that it is unnecessary.
+   ///
+   /// WARNING: reset_stream does NOT preserve previously set streams on
+   /// different devices. If you need to set streams on multiple devices,
+   /// use MultiStreamGuard instead.
+   void reset_stream(Stream stream) {
+     guard_.reset_stream(stream);
+   }
+
+   /// Returns the stream that was set at the time the guard was constructed.
+   Stream original_stream() const {
+     return guard_.original_stream();
+   }
+
+   /// Returns the most recent stream that was set using this device guard,
+   /// either from construction, or via set_stream.
+   Stream current_stream() const {
+     return guard_.current_stream();
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device/reset_device/set_index.
+   Device current_device() const {
+     return guard_.current_device();
+   }
+
+   /// Returns the device that was set at the most recent reset_stream(),
+   /// or otherwise the device at construction time.
+   Device original_device() const {
+     return guard_.original_device();
+   }
+
+  private:
+   c10::impl::InlineStreamGuard<impl::VirtualGuardImpl> guard_;
+ };
+
+ /**
+  * An OptionalStreamGuard is an RAII class that sets the current stream (and
+  * its device) to some value on initialization, and resets them to their
+  * original values on destruction.
+  * See OptionalDeviceGuard for more guidance on how to use this class.
+  */
+ struct OptionalStreamGuard {
+   /// Create an uninitialized guard.
+   explicit OptionalStreamGuard() = default;
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   explicit OptionalStreamGuard(Stream stream) : guard_(stream) {}
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream,
+   /// if the passed stream is not nullopt.
+   explicit OptionalStreamGuard(optional<Stream> stream_opt)
+       : guard_(stream_opt) {}
+
+   /// Copy is disallowed
+   OptionalStreamGuard(const OptionalStreamGuard&) = delete;
+   OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete;
+
+   // See Note [Move construction for RAII guards is tricky]
+   OptionalStreamGuard(OptionalStreamGuard&& other) = delete;
+
+   // See Note [Move assignment for RAII guards is tricky]
+   OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete;
+
+   /// Resets the currently set stream to the original stream and
+   /// the currently set device to the original device. Then,
+   /// set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   /// Initializes the guard if it was not previously initialized.
+   void reset_stream(Stream stream) {
+     guard_.reset_stream(stream);
+   }
+
+   /// Returns the stream that was set at the time the guard was most recently
+   /// initialized, or nullopt if the guard is uninitialized.
+   optional<Stream> original_stream() const {
+     return guard_.original_stream();
+   }
+
+   /// Returns the most recent stream that was set using this stream guard,
+   /// either from construction, or via reset_stream, if the guard is
+   /// initialized, or nullopt if the guard is uninitialized.
+   optional<Stream> current_stream() const {
+     return guard_.current_stream();
+   }
+
+   /// Restore the original device and stream, resetting this guard to
+   /// uninitialized state.
+   void reset() {
+     guard_.reset();
+   }
+
+  private:
+   c10::impl::InlineOptionalStreamGuard<impl::VirtualGuardImpl> guard_{};
+ };
+
+ /**
+  * A MultiStreamGuard is an RAII class that sets the current streams of a set
+  * of devices all at once, and resets them to their original values on
+  * destruction.
+  */
+ struct MultiStreamGuard {
+   /// Set the current streams to the passed streams on each of their respective
+   /// devices.
+   explicit MultiStreamGuard(ArrayRef<Stream> streams) : guard_(streams) {}
+
+   /// Copy is disallowed
+   MultiStreamGuard(const MultiStreamGuard&) = delete;
+   MultiStreamGuard& operator=(const MultiStreamGuard&) = delete;
+
+   // See Note [Move construction for RAII guards is tricky]
+   MultiStreamGuard(MultiStreamGuard&& other) = delete;
+
+   // See Note [Move assignment for RAII guards is tricky]
+   MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete;
+
+  private:
+   c10::impl::InlineMultiStreamGuard<impl::VirtualGuardImpl> guard_;
+ };
+
+ } // namespace c10
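
The RAII discipline above is easiest to see in use. A minimal sketch, assuming a CUDA build; the stream source (`c10::cuda::getStreamFromPool`, which returns a `CUDAStream` implicitly convertible to `c10::Stream`) is one concrete way to obtain a non-default stream:

```cpp
#include <c10/core/StreamGuard.h>
#include <c10/cuda/CUDAStream.h>  // assumes a CUDA build

void launch_on_side_stream() {
  // Grab a non-default stream from the pool.
  c10::Stream side = c10::cuda::getStreamFromPool();
  {
    // While the guard is alive, the current device and current stream
    // follow `side`; kernels launched here are queued on it.
    c10::StreamGuard guard(side);
    // ... enqueue work on the current stream ...
  } // guard destroyed: original device and stream are restored
}
```
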
parrot/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h ADDED
@@ -0,0 +1,113 @@
+ #pragma once
+
+ #include <c10/core/SymBool.h>
+ #include <c10/core/SymNodeImpl.h>
+ #include <c10/macros/Export.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ #include <cstdint>
+ #include <limits>
+ #include <ostream>
+ #include <utility>
+
+ namespace c10 {
+
+ // NB: this is actually double precision; we're using the Python naming here
+ class C10_API SymFloat {
+  public:
+   /*implicit*/ SymFloat(double d) : data_(d) {}
+   SymFloat(SymNode ptr)
+       : data_(std::numeric_limits<double>::quiet_NaN()), ptr_(std::move(ptr)) {
+     TORCH_CHECK(ptr_->is_float());
+   }
+   SymFloat() : data_(0.0) {}
+
+   SymNodeImpl* toSymNodeImplUnowned() const {
+     return ptr_.get();
+   }
+
+   SymNodeImpl* release() && {
+     return std::move(ptr_).release();
+   }
+
+   // Only valid if is_symbolic()
+   SymNode toSymNodeImpl() const;
+
+   // Guaranteed to return a SymNode, wrapping using base if necessary
+   SymNode wrap_node(const SymNode& base) const;
+
+   double expect_float() const {
+     TORCH_CHECK(!is_symbolic());
+     return data_;
+   }
+
+   SymFloat operator+(const SymFloat&) const;
+   SymFloat operator-(const SymFloat&) const;
+   SymFloat operator*(const SymFloat&) const;
+   SymFloat operator/(const SymFloat&) const;
+
+   SymBool sym_eq(const SymFloat&) const;
+   SymBool sym_ne(const SymFloat&) const;
+   SymBool sym_lt(const SymFloat&) const;
+   SymBool sym_le(const SymFloat&) const;
+   SymBool sym_gt(const SymFloat&) const;
+   SymBool sym_ge(const SymFloat&) const;
+
+   bool operator==(const SymFloat& o) const {
+     return sym_eq(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator!=(const SymFloat& o) const {
+     return sym_ne(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator<(const SymFloat& o) const {
+     return sym_lt(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator<=(const SymFloat& o) const {
+     return sym_le(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator>(const SymFloat& o) const {
+     return sym_gt(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator>=(const SymFloat& o) const {
+     return sym_ge(o).guard_bool(__FILE__, __LINE__);
+   }
+
+   SymFloat min(const SymFloat& sci) const;
+   SymFloat max(const SymFloat& sci) const;
+
+   // Need guidance on where to put this code
+   SymFloat sqrt() const;
+
+   // Insert a guard for the float to be its concrete value, and then return
+   // that value. This operation always works, even if the float is symbolic,
+   // so long as we know what the underlying value is. Don't blindly put this
+   // everywhere; you can cause overspecialization of PyTorch programs with
+   // this method.
+   //
+   // It should be called as guard_float(__FILE__, __LINE__). The file and line
+   // number can be used to diagnose overspecialization.
+   double guard_float(const char* file, int64_t line) const;
+
+   bool has_hint() const;
+
+   // N.B. It's important to keep this definition in the header
+   // as we expect if checks to be folded for mobile builds
+   // where `is_symbolic` is always false
+   C10_ALWAYS_INLINE bool is_symbolic() const {
+     return ptr_;
+   }
+
+   double as_float_unchecked() const {
+     return data_;
+   }
+
+  private:
+   // TODO: optimize to union
+   double data_;
+   SymNode ptr_;
+ };
+
+ C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s);
+ } // namespace c10
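
When no `SymNode` is attached, a `SymFloat` behaves like a plain `double`, and `guard_float` simply returns the stored value. A small sketch of that concrete path (linking against libc10 is assumed; the values are illustrative):

```cpp
#include <c10/core/SymFloat.h>
#include <iostream>

int main() {
  // With no SymNode attached, SymFloat behaves like a plain double.
  c10::SymFloat scale = 2.5; // implicit construction from double
  c10::SymFloat result = scale * c10::SymFloat(4.0);

  std::cout << result.is_symbolic() << "\n"; // 0: concrete value
  // guard_float records where the program specialized on the value;
  // for a concrete SymFloat it just returns the stored double.
  std::cout << result.guard_float(__FILE__, __LINE__) << "\n"; // 10
  return 0;
}
```
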
parrot/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h ADDED
@@ -0,0 +1,423 @@
+ #pragma once
+
+ #include <c10/core/SymBool.h>
+ #include <c10/core/SymNodeImpl.h>
+ #include <c10/macros/Export.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Optional.h>
+
+ #include <cstdint>
+ #include <iterator>
+ #include <numeric>
+ #include <ostream>
+ #include <type_traits>
+
+ namespace c10 {
+
+ class SymFloat;
+
+ // SymInt represents either a regular int64_t, or a symbolic integer
+ // (represented in a type erased way as SymNode). The intention is for SymInt
+ // to represent symbolic sizes that arise when doing shape computation in
+ // operator kernels. This allows for tracing through programs without baking
+ // concrete sizes into kernel calls.
+ //
+ // SymInt has an API equivalent to int64_t. In particular, it is a value type.
+ // Internally, SymInt is represented in a clever packed way, so that it only
+ // occupies one word of space; but morally, it is a union between an int64_t
+ // and an intrusive pointer to SymNodeImpl.
+ //
+ // Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where
+ // is_int() returns true
+
+ class C10_API SymInt {
+  public:
+   enum Unchecked {
+     UNCHECKED,
+   };
+
+   /*implicit*/ SymInt(int64_t d) : data_(d) {
+     if (is_heap_allocated()) {
+       // Large negative number, heap allocate it
+       promote_to_negative();
+     }
+   }
+   SymInt() : data_(0) {}
+   SymInt(SymNode n);
+
+   // unchecked c-tor accepting raw `data_`
+   // One appropriate use for this is when you are constructing a symint
+   // in a situation where you know it is non-negative (or, if it is negative,
+   // the negative value is -1; i.e., not user controlled)
+   SymInt(Unchecked, int64_t d) : data_(d) {}
+
+   // TODO: these implementations are not optimal because they allocate a
+   // temporary and then use the move constructor/assignment
+   SymInt(const SymInt& s) : data_(0) {
+     if (s.is_heap_allocated()) {
+       *this = SymInt(s.toSymNode());
+     } else {
+       data_ = s.data_;
+     }
+   }
+   SymInt(SymInt&& s) noexcept : data_(s.data_) {
+     s.data_ = 0;
+   }
+
+   SymInt& operator=(const SymInt& s) {
+     if (this != &s) {
+       if (s.is_heap_allocated()) {
+         *this = SymInt(s.toSymNode());
+       } else {
+         data_ = s.data_;
+       }
+     }
+     return *this;
+   }
+   SymInt& operator=(SymInt&& s) noexcept {
+     if (this != &s) {
+       release_(); // release the current SymNode if any
+       data_ = s.data_;
+       if (s.is_heap_allocated())
+         s.data_ = 0;
+     }
+     return *this;
+   }
+
+   SymNodeImpl* toSymNodeImplUnowned() const {
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated());
+     uint64_t unextended_bits = static_cast<uint64_t>(data_) & ~MASK;
+     uint64_t sign_bit_mask = 1ULL << (62 - 1);
+     // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c
+     uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask;
+     return static_cast<SymNodeImpl*>(
+         // NOLINTNEXTLINE(performance-no-int-to-ptr)
+         reinterpret_cast<void*>(static_cast<uintptr_t>(extended_bits)));
+   }
+
+   void release_() {
+     if (is_heap_allocated()) {
+       SymNode::reclaim(toSymNodeImplUnowned()); // steal
+     }
+   }
+
+   SymNodeImpl* release() && {
+ #ifndef C10_MOBILE
+     TORCH_INTERNAL_ASSERT(is_heap_allocated());
+     auto* r = toSymNodeImplUnowned();
+     data_ = 0; // transfer ownership
+     return r;
+ #else
+     TORCH_INTERNAL_ASSERT(false);
+ #endif
+   }
+
+   // Only valid if is_heap_allocated()
+   SymNode toSymNode() const;
+
+   // Guaranteed to return a SymNode, wrapping using base if necessary
+   SymNode wrap_node(const SymNode& base) const;
+
+   ~SymInt() {
+     release_();
+   }
+
+   // Require the int to be non-symbolic, and if it is symbolic raise an
+   // error. This is safe to use for C++ code that doesn't work for symbolic
+   // shapes, and you don't have time to fix it immediately, as if we
+   // try to trigger the path in C++ you'll appropriately get an error
+   int64_t expect_int() const {
+     if (auto r = maybe_as_int()) {
+       return *r;
+     }
+     TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(
+         false, "when unpacking SymInt, expected int but got ", *this);
+   }
+
+   // Test if we have a hint for this int (e.g., guard_int would work).
+   // Most of the time this is true; it is only false when you have
+   // an unbacked SymInt.
+   bool has_hint() const;
+
+   // Insert a guard for the int to be its concrete value, and then return
+   // that value. This operation always works, even if the int is symbolic,
+   // so long as we know what the underlying value is (e.g., this won't work
+   // if you call it on the size of nonzero output). Don't blindly put this
+   // everywhere; you can cause overspecialization of PyTorch programs with
+   // this method.
+   //
+   // It should be called as guard_int(__FILE__, __LINE__). The file and line
+   // number can be used to diagnose overspecialization.
+   int64_t guard_int(const char* file, int64_t line) const;
+
+   // Insert a guard that this SymInt must be size-like, returning true if
+   // the integer actually is >= 0. Unlike manually performing a >= 0 test,
+   // if the SymInt in question is an unbacked SymInt (or, potentially in the
+   // future, if it contains unbacked SymInts), we will also treat the
+   // unbacked SymInt as statically testing >= 2 (which will prevent us from
+   // choking on, e.g., contiguity checks.)
+   bool expect_size(const char* file, int64_t line) const;
+
+   // Distinguish actual symbolic values from constants stored on the heap
+   bool is_symbolic() const {
+     return is_heap_allocated() &&
+         !toSymNodeImplUnowned()->constant_int().has_value();
+   }
+
+   // N.B. It's important to keep this definition in the header
+   // as we expect if checks to be folded for mobile builds
+   // where `is_heap_allocated` is always false and optimize dead code paths
+   C10_ALWAYS_INLINE bool is_heap_allocated() const {
+ #ifdef C10_MOBILE
+     return false;
+ #else
+     return !check_range(data_);
+ #endif
+   }
+
+   SymInt operator+(const SymInt& sci) const;
+   SymInt operator-(const SymInt& sci) const;
+   SymInt operator*(const SymInt& sci) const;
+   SymInt operator/(const SymInt& sci) const;
+   SymInt operator%(const SymInt& sci) const;
+   void operator*=(const SymInt& sci);
+   void operator+=(const SymInt& sci);
+   void operator/=(const SymInt& sci);
+
+   SymInt clone() const;
+
+   SymBool sym_eq(const SymInt&) const;
+   SymBool sym_ne(const SymInt&) const;
+   SymBool sym_lt(const SymInt&) const;
+   SymBool sym_le(const SymInt&) const;
+   SymBool sym_gt(const SymInt&) const;
+   SymBool sym_ge(const SymInt&) const;
+
+   bool operator==(const SymInt& o) const {
+     return sym_eq(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator!=(const SymInt& o) const {
+     return sym_ne(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator<(const SymInt& o) const {
+     return sym_lt(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator<=(const SymInt& o) const {
+     return sym_le(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator>(const SymInt& o) const {
+     return sym_gt(o).guard_bool(__FILE__, __LINE__);
+   }
+   bool operator>=(const SymInt& o) const {
+     return sym_ge(o).guard_bool(__FILE__, __LINE__);
+   }
+
+   SymInt min(const SymInt& sci) const;
+   SymInt max(const SymInt& sci) const;
+
+   // If both are symbolic, this checks if
+   // they share the same node.
+   // If both are not symbolic this just checks normal equality.
+   bool is_same(const SymInt& other) const;
+
+   operator SymFloat() const;
+
+   // Don't use this. Prefer maybe_as_int instead
+   int64_t as_int_unchecked() const {
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
+     return data_;
+   }
+
+   std::optional<int64_t> maybe_as_int() const {
+     if (!is_heap_allocated()) {
+       return c10::make_optional(data_);
+     }
+     auto* node = toSymNodeImplUnowned();
+     if (auto c = node->constant_int()) {
+       return c;
+     }
+     return node->maybe_as_int();
+   }
+
+   // Return whether the integer is directly coercible to a SymInt
+   // without requiring heap allocation. You don't need to use this
+   // to check if you can pass an integer to SymInt; this is guaranteed
+   // to work (it just might heap allocate!)
+   static bool check_range(int64_t i) {
+     return i > MAX_UNREPRESENTABLE_INT;
+   }
+
+   // Return the min representable integer as a SymInt without
+   // heap allocation. For quantities that count bytes (or larger),
+   // this is still much larger than you need, so you may consider
+   // using this as a more efficient version of MIN_INT
+   static constexpr int64_t min_representable_int() {
+     return MAX_UNREPRESENTABLE_INT + 1;
+   }
+
+  private:
+   void promote_to_negative();
+
+   // Constraints on the internal representation:
+   //
+   // - Should represent positive and small negative ints
+   // - No conversion necessary for operations on ints
+   // - Must represent valid 64-bit pointers
+   // - Is symbolic test should be FAST (two arithmetic instructions is too
+   //   much).
+   //   This code being a hotpath is based on Strobelight profiles of
+   //   is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd
+   //   (you will need to change the time window).
+   //
+   // So, the scheme is to reserve large negative numbers (assuming
+   // two's complement):
+   //
+   // - 0b0.... means we are a positive int
+   // - 0b11... means we are a small negative int
+   // - 0b10... means we are a pointer. This means that
+   //   [-2^63, -2^62-1] are not representable as ints.
+   //   We don't actually need all of this space, as on x86_64
+   //   the top 16 bits aren't used for anything
+   static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61;
+   static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61;
+   // We must manually translate the bit pattern test into a greater
+   // than test because compiler doesn't figure it out:
+   // https://godbolt.org/z/356aferaW
+   static constexpr int64_t MAX_UNREPRESENTABLE_INT =
+       -1LL & static_cast<int64_t>(~(1ULL << 62));
+   int64_t data_;
+ };
+
+ /// Product of a list of SymInt; accumulates into the c10::SymInt expression
+ template <
+     typename C,
+     typename std::enable_if_t<
+         std::is_same_v<typename C::value_type, c10::SymInt>,
+         int> = 0>
+ inline c10::SymInt multiply_integers(const C& container) {
+   return std::accumulate(
+       container.begin(),
+       container.end(),
+       c10::SymInt(1),
+       [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
+ }
+
+ template <
+     typename Iter,
+     typename = std::enable_if_t<std::is_same_v<
+         typename std::iterator_traits<Iter>::value_type,
+         c10::SymInt>>>
+ inline c10::SymInt multiply_integers(Iter begin, Iter end) {
+   return std::accumulate(
+       begin,
+       end,
+       c10::SymInt(1),
+       [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
+ }
+
+ #define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy)      \
+   C10_API RetTy operator%(const SymInt& a, scalar_t b); \
+   C10_API RetTy operator%(scalar_t a, const SymInt& b);
+
+ #define DECLARE_SYMINT_OP(scalar_t, RetTy)               \
+   C10_API RetTy operator+(const SymInt& a, scalar_t b);  \
+   C10_API RetTy operator-(const SymInt& a, scalar_t b);  \
+   C10_API RetTy operator*(const SymInt& a, scalar_t b);  \
+   C10_API RetTy operator/(const SymInt& a, scalar_t b);  \
+   C10_API RetTy operator+(scalar_t a, const SymInt& b);  \
+   C10_API RetTy operator-(scalar_t a, const SymInt& b);  \
+   C10_API RetTy operator*(scalar_t a, const SymInt& b);  \
+   C10_API RetTy operator/(scalar_t a, const SymInt& b);  \
+   C10_API bool operator==(const SymInt& a, scalar_t b);  \
+   C10_API bool operator!=(const SymInt& a, scalar_t b);  \
+   C10_API bool operator<(const SymInt& a, scalar_t b);   \
+   C10_API bool operator<=(const SymInt& a, scalar_t b);  \
+   C10_API bool operator>(const SymInt& a, scalar_t b);   \
+   C10_API bool operator>=(const SymInt& a, scalar_t b);  \
+   C10_API bool operator==(scalar_t a, const SymInt& b);  \
+   C10_API bool operator!=(scalar_t a, const SymInt& b);  \
+   C10_API bool operator<(scalar_t a, const SymInt& b);   \
+   C10_API bool operator<=(scalar_t a, const SymInt& b);  \
+   C10_API bool operator>(scalar_t a, const SymInt& b);   \
+   C10_API bool operator>=(scalar_t a, const SymInt& b);
+
+ DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt)
+ DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt)
+ DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt)
+ DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt)
+ DECLARE_SYMINT_OP(int64_t, SymInt)
+ DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work
+ DECLARE_SYMINT_OP(uint64_t, SymInt)
+ DECLARE_SYMINT_OP(uint32_t, SymInt)
+ DECLARE_SYMINT_OP(double, SymFloat)
+ DECLARE_SYMINT_OP(float, SymFloat) // just for completeness
+
+ // On OSX size_t is different than uint64_t so we have to
+ // define it separately
+ #if defined(__APPLE__)
+ DECLARE_SYMINT_OP_INTONLY(size_t, SymInt)
+ DECLARE_SYMINT_OP(size_t, SymInt)
+ #endif
+
+ #undef DECLARE_SYMINT_OP
+
+ C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s);
+ C10_API SymInt operator-(const SymInt& s);
+
+ inline bool sym_eq(int64_t a, int64_t b) {
+   return a == b;
+ }
+
+ inline SymBool sym_eq(const SymInt& a, const SymInt& b) {
+   return a.sym_eq(b);
+ }
+
+ inline bool sym_ne(int64_t a, int64_t b) {
+   return a != b;
+ }
+
+ inline SymBool sym_ne(const SymInt& a, const SymInt& b) {
+   return a.sym_ne(b);
+ }
+
+ inline bool sym_lt(int64_t a, int64_t b) {
+   return a < b;
+ }
+
+ inline SymBool sym_lt(const SymInt& a, const SymInt& b) {
+   return a.sym_lt(b);
+ }
+
+ inline bool sym_le(int64_t a, int64_t b) {
+   return a <= b;
+ }
+
+ inline SymBool sym_le(const SymInt& a, const SymInt& b) {
+   return a.sym_le(b);
+ }
+
+ inline bool sym_gt(int64_t a, int64_t b) {
+   return a > b;
+ }
+
+ inline SymBool sym_gt(const SymInt& a, const SymInt& b) {
+   return a.sym_gt(b);
+ }
+
+ inline bool sym_ge(int64_t a, int64_t b) {
+   return a >= b;
+ }
+
+ inline SymBool sym_ge(const SymInt& a, const SymInt& b) {
+   return a.sym_ge(b);
+ }
+
+ inline bool definitely_true(
+     const c10::SymBool& b,
+     const char* file,
+     int64_t line) {
+   return b.has_hint() && b.guard_bool(file, line);
+ }
+
+ } // namespace c10
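
The packed representation means small concrete `SymInt`s never touch the heap, and `multiply_integers` composes them the way numel computation does. A small sketch of that value-type behavior (the shape vector is illustrative; linking against libc10 is assumed):

```cpp
#include <c10/core/SymInt.h>
#include <iostream>
#include <vector>

int main() {
  // Concrete SymInts: no heap allocation, value semantics like int64_t.
  std::vector<c10::SymInt> sizes = {2, 3, 4};

  // Product over the shape, the same pattern numel computation uses.
  c10::SymInt numel = c10::multiply_integers(sizes);

  // maybe_as_int is the safe way to recover the concrete value.
  if (auto n = numel.maybe_as_int()) {
    std::cout << *n << "\n"; // 24
  }
  return 0;
}
```
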
parrot/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h ADDED
@@ -0,0 +1,236 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <cstdint>
+ #include <ostream>
+ #include <string>
+
+ namespace c10 {
+
+ class SymNodeImpl;
+ using SymNode = c10::intrusive_ptr<SymNodeImpl>;
+
+ // When you add a method, you also need to edit
+ // torch/csrc/jit/python/init.cpp
+ // torch/csrc/utils/python_symnode.h
+ // c10/core/ConstantSymNodeImpl.h
+ class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
+  public:
+   ~SymNodeImpl() override = default;
+
+   template <typename T>
+   c10::intrusive_ptr<T> dyn_cast() const {
+     return c10::intrusive_ptr<T>::reclaim_copy(dynamic_cast<T*>(this));
+   }
+
+   // these could be pure virtual when we implement LTC versions
+   virtual bool is_int() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool is_bool() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool is_float() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool is_nested_int() const {
+     return false;
+   }
+   virtual SymNode add(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sub(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode mul(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   // NB: legacy, prefer float_truediv or int_truediv
+   virtual SymNode truediv(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode float_truediv(const SymNode& other) {
+     return truediv(other);
+   }
+   virtual SymNode int_truediv(const SymNode& other) {
+     return truediv(other);
+   }
+   // NB: legacy, prefer float_pow or pow_by_natural
+   virtual SymNode pow(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode float_pow(const SymNode& other) {
+     return pow(other);
+   }
+   virtual SymNode pow_by_natural(const SymNode& other) {
+     return pow(other);
+   }
+   // NB: legacy, prefer int_floordiv
+   virtual SymNode floordiv(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode int_floordiv(const SymNode& other) {
+     return floordiv(other);
+   }
+   virtual SymNode mod(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode eq(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode ne(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode gt(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode lt(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode le(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode ge(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode ceil() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode floor() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode neg() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_min(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_max(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_or(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_and(const SymNode& other) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_not() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) {
+     TORCH_CHECK(false, "NYI");
+   }
+   // NB: self is ignored here, only the arguments are used
+   virtual SymNode is_contiguous(
+       ArrayRef<SymNode> sizes,
+       ArrayRef<SymNode> strides) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode is_channels_last_contiguous_2d(
+       ArrayRef<SymNode> sizes,
+       ArrayRef<SymNode> strides) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode is_channels_last_contiguous_3d(
+       ArrayRef<SymNode> sizes,
+       ArrayRef<SymNode> strides) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode is_channels_last_strides_2d(
+       ArrayRef<SymNode> sizes,
+       ArrayRef<SymNode> strides) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode is_channels_last_strides_3d(
+       ArrayRef<SymNode> sizes,
+       ArrayRef<SymNode> strides) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode is_non_overlapping_and_dense(
+       ArrayRef<SymNode> sizes,
+       ArrayRef<SymNode> strides) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode clone() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode sym_float() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode wrap_int(int64_t num) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode wrap_float(double num) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual SymNode wrap_bool(bool num) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual int64_t guard_int(const char* file, int64_t line) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool guard_bool(const char* file, int64_t line) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual double guard_float(const char* file, int64_t line) {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool guard_size_oblivious(const char* file, int64_t line) {
+     // No improvement for unbacked SymBools by default, replace this
+     // with a better implementation!
+     return guard_bool(file, line);
+   }
+   virtual bool expect_true(const char* file, int64_t line) {
+     // No improvement for unbacked SymBools by default, replace this
+     // with a better implementation!
+     return guard_bool(file, line);
+   }
+   virtual bool expect_size(const char* file, int64_t line) {
+     // No improvement for unbacked SymInts by default, replace this
+     // with a better implementation!
+     return ge(wrap_int(0))->guard_bool(file, line);
+   }
+   virtual int64_t int_() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool bool_() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual bool has_hint() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual std::string str() {
+     TORCH_CHECK(false, "NYI");
+   }
+   virtual std::optional<int64_t> nested_int() {
+     return c10::nullopt;
+   }
+   virtual std::optional<int64_t> nested_int_coeff() {
+     return c10::nullopt;
+   }
+   virtual std::optional<int64_t> constant_int() {
+     return c10::nullopt;
+   }
+   virtual std::optional<bool> constant_bool() {
+     return c10::nullopt;
+   }
+   virtual std::optional<int64_t> maybe_as_int() {
+     return c10::nullopt;
+   }
+   virtual bool is_constant() {
+     return false;
+   }
+   virtual bool is_symbolic() {
+     return true;
+   }
+   std::ostream& operator<<(std::ostream& os) {
+     os << str();
+     return os;
+   }
+ };
+
+ } // namespace c10
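
Every virtual above either raises "NYI" or defers to a legacy method, so a subclass only overrides what it can actually answer. A minimal sketch of a constant integer node, a simplified stand-in for c10's own `ConstantSymNodeImpl`; the class name `MyConstantIntNode` is illustrative:

```cpp
#include <c10/core/SymNodeImpl.h>
#include <string>

// A node wrapping one known integer, overriding just the queries it
// can answer; everything else keeps the "NYI" base behavior.
class MyConstantIntNode : public c10::SymNodeImpl {
 public:
  explicit MyConstantIntNode(int64_t value) : value_(value) {}

  bool is_int() override {
    return true;
  }
  bool is_constant() override {
    return true;
  }
  bool is_symbolic() override {
    return false;
  }
  std::optional<int64_t> constant_int() override {
    return value_;
  }
  std::optional<int64_t> maybe_as_int() override {
    return value_;
  }
  int64_t guard_int(const char* file, int64_t line) override {
    return value_; // no guard needed: the value is already known
  }
  std::string str() override {
    return std::to_string(value_);
  }

 private:
  int64_t value_;
};
```

`c10::make_intrusive<MyConstantIntNode>(5)` then yields a `SymNode` that `SymInt` can adopt, which is exactly how constant nodes flow through the symbolic-shape machinery.
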
parrot/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h ADDED
@@ -0,0 +1,214 @@
+ #pragma once
+ #include <c10/core/SymBool.h>
+ #include <c10/core/SymInt.h>
+ #include <c10/macros/Export.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/DimVector.h>
+
+ #include <atomic>
+ #include <cstdint>
+ #include <mutex>
+ #include <utility>
+
+ namespace c10 {
+
+ class C10_API SymbolicShapeMeta {
+  public:
+   // Basic metadata from which other quantities are derived
+   SymDimVector sizes_ = {0};
+   SymDimVector strides_ = {1};
+   SymInt storage_offset_ = 0;
+
+   bool strides_valid_ = true; // e.g. for sparse where there are no strides
+
+   SymbolicShapeMeta() = default;
+   SymbolicShapeMeta(const SymbolicShapeMeta& other);
+
+   void refresh_numel() {
+     // Non-const, don't need to hold mutables_ lock
+     available_.fetch_and(~numel_avail);
+     numel_ = 1;
+   }
+
+   void refresh_contiguous() {
+     // Non-const, don't need to hold mutables_ lock
+     available_.fetch_and(numel_avail);
+     is_contiguous_ = false;
+     is_channels_last_contiguous_ = false;
+     is_channels_last_3d_contiguous_ = false;
+     is_channels_last_ = false;
+     is_channels_last_3d_ = false;
+     is_non_overlapping_and_dense_ = false;
+   }
+
+   int64_t dim() const {
+     return static_cast<int64_t>(sizes_.size());
+   }
+
+   // Accessors for derived quantities, computed lazily on first access
+
+   bool has_numel() const {
+     return available_.load() & numel_avail;
+   }
+   bool has_is_contiguous() const {
+     return available_.load() & is_contiguous_avail;
+   }
+   bool has_is_channels_last_contiguous() const {
+     return available_.load() & is_channels_last_contiguous_avail;
+   }
+   bool has_is_channels_last_3d_contiguous() const {
+     return available_.load() & is_channels_last_3d_contiguous_avail;
+   }
+   bool has_is_channels_last() const {
+     return available_.load() & is_channels_last_avail;
+   }
+   bool has_is_channels_last_3d() const {
+     return available_.load() & is_channels_last_3d_avail;
+   }
+   bool has_is_non_overlapping_and_dense() const {
+     return available_.load() & is_non_overlapping_and_dense_avail;
+   }
+
+   // Accessors to cached derived properties
+   // DO NOT call with mutables_ lock held
+   const SymInt& numel() const {
+     if (C10_UNLIKELY(!has_numel())) {
+       init_numel();
+     }
+     return numel_;
+   }
+
+   const SymBool& is_contiguous() const {
+     if (C10_UNLIKELY(!has_is_contiguous())) {
+       init_is_contiguous();
+     }
+     return is_contiguous_;
+   }
+
+   const SymBool& is_channels_last_contiguous() const {
+     if (C10_UNLIKELY(!has_is_channels_last_contiguous())) {
+       init_is_channels_last_contiguous();
+     }
+     return is_channels_last_contiguous_;
+   }
+
+   const SymBool& is_channels_last_3d_contiguous() const {
+     if (C10_UNLIKELY(!has_is_channels_last_3d_contiguous())) {
+       init_is_channels_last_3d_contiguous();
+     }
+     return is_channels_last_3d_contiguous_;
+   }
+
+   const SymBool& is_channels_last() const {
+     if (C10_UNLIKELY(!has_is_channels_last())) {
+       init_is_channels_last();
+     }
+     return is_channels_last_;
+   }
+
+   const SymBool& is_channels_last_3d() const {
+     if (C10_UNLIKELY(!has_is_channels_last_3d())) {
+       init_is_channels_last_3d();
+     }
+     return is_channels_last_3d_;
+   }
+
+   const SymBool& is_non_overlapping_and_dense() const {
+     if (C10_UNLIKELY(!has_is_non_overlapping_and_dense())) {
+       init_is_non_overlapping_and_dense();
+     }
+     return is_non_overlapping_and_dense_;
+   }
+
+   // Assumptions so we can short-circuit computation
+   // NOTE: Don't need to lock mutables_ since these aren't const
+   void assume_contiguous(SymBool val = true) {
+     is_contiguous_ = std::move(val);
+     available_.fetch_or(is_contiguous_avail);
+   }
+   void assume_channels_last_contiguous(SymBool val = true) {
+     is_channels_last_contiguous_ = std::move(val);
+     available_.fetch_or(is_channels_last_contiguous_avail);
+   }
+   void assume_channels_last_3d_contiguous(SymBool val = true) {
+     is_channels_last_3d_contiguous_ = std::move(val);
+     available_.fetch_or(is_channels_last_3d_contiguous_avail);
+   }
+   void assume_channels_last(SymBool val = true) {
+     is_channels_last_ = std::move(val);
+     available_.fetch_or(is_channels_last_avail);
+   }
+   void assume_channels_last_3d(SymBool val = true) {
+     is_channels_last_3d_ = std::move(val);
+     available_.fetch_or(is_channels_last_3d_avail);
+   }
+   void assume_non_overlapping_and_dense(SymBool val = true) {
+     is_non_overlapping_and_dense_ = std::move(val);
+     available_.fetch_or(is_non_overlapping_and_dense_avail);
+   }
+
+  private:
+   SymBool compute_contiguous() const;
+   SymBool compute_channels_last_contiguous_2d() const;
+   SymBool compute_channels_last_contiguous_3d() const;
+   SymBool compute_strides_like_channels_last_2d() const;
+   SymBool compute_strides_like_channels_last_3d() const;
+   SymBool compute_non_overlapping_and_dense() const;
+
+   // These are little wrappers over the real compute_ functions that
+   // can make use of other contiguity fields to short circuit.
+   // They need to be implemented separately for SymBool, as SymBool does
+   // not short circuit.
+   // TODO: should the SymBool cases avoid the short circuit? Need to reason
+   // if it's correct, and reason if the simpler expressions are better for
+   // analysis (maybe not!)
+
+   SymBool compute_channels_last_contiguous_3d_dim5() const;
+   SymBool compute_channels_last_2d_dim5() const;
+   SymBool compute_channels_last_3d_dim5() const;
+   SymBool compute_is_non_overlapping_and_dense_dim4() const;
+   SymBool compute_is_non_overlapping_and_dense_dim5() const;
+   SymBool compute_is_non_overlapping_and_dense_anydim() const;
+
+   void init_numel() const;
+   void init_is_contiguous() const;
+   void init_is_channels_last_contiguous() const;
+   void init_is_channels_last_3d_contiguous() const;
+   void init_is_channels_last() const;
+   void init_is_channels_last_3d() const;
+   void init_is_non_overlapping_and_dense() const;
+
+   // NOTE: These only set if !has_foo()
+   void set_numel(SymInt val) const;
+   void set_is_contiguous(SymBool val) const;
+   void set_is_channels_last_contiguous(SymBool val) const;
+   void set_is_channels_last_3d_contiguous(SymBool val) const;
+   void set_is_channels_last(SymBool val) const;
+   void set_is_channels_last_3d(SymBool val) const;
+   void set_is_non_overlapping_and_dense(SymBool val) const;
+
+   // Lazily initialized variables, with the corresponding available_ flag
+   // indicating whether the value has been initialized
+   mutable std::atomic<int> available_{0};
+   enum avail {
+     numel_avail = 1 << 0,
+     is_contiguous_avail = 1 << 1,
+     is_channels_last_contiguous_avail = 1 << 2,
+     is_channels_last_3d_contiguous_avail = 1 << 3,
+     is_channels_last_avail = 1 << 4,
+     is_channels_last_3d_avail = 1 << 5,
+     is_non_overlapping_and_dense_avail = 1 << 6,
+   };
+
+   // Mutex to prevent races when initializing the variable from const accessors
+   mutable std::mutex mutables_;
+   mutable SymInt numel_ = 1;
+   mutable SymBool is_contiguous_{true};
+   mutable SymBool is_channels_last_contiguous_{false};
+   mutable SymBool is_channels_last_3d_contiguous_{false};
+   mutable SymBool is_channels_last_{false};
+   mutable SymBool is_channels_last_3d_{false};
+   mutable SymBool is_non_overlapping_and_dense_{true};
+ };
+
+ } // namespace c10
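
The `available_` bitmask plus mutex is a general recipe for caching several lazily computed fields behind const accessors. A stripped-down sketch of the same idea, independent of the c10 types (the class and field names here are illustrative):

```cpp
#include <atomic>
#include <mutex>

// Stripped-down version of the SymbolicShapeMeta caching scheme:
// an atomic bitmask records which cached fields are valid, and a mutex
// serializes the one-time initialization done from const accessors.
class LazyProps {
 public:
  long numel() const {
    if (!(available_.load() & kNumel)) {
      std::lock_guard<std::mutex> lock(mutables_);
      if (!(available_.load() & kNumel)) { // re-check under the lock
        numel_ = compute_numel();
        available_.fetch_or(kNumel);
      }
    }
    return numel_;
  }

  void invalidate() {
    available_.fetch_and(~kNumel); // next access recomputes
  }

 private:
  enum avail { kNumel = 1 << 0 };
  long compute_numel() const { return 42; } // placeholder computation
  mutable std::atomic<int> available_{0};
  mutable std::mutex mutables_;
  mutable long numel_ = 1;
};
```

The flag is set only after the field is written, so readers that see the bit set always observe a fully initialized value, which is why the fast path can skip the lock entirely.
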
parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h ADDED
@@ -0,0 +1,787 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/core/DefaultDtype.h>
5
+ #include <c10/core/Device.h>
6
+ #include <c10/core/DeviceType.h>
7
+ #include <c10/core/DispatchKey.h>
8
+ #include <c10/core/Layout.h>
9
+ #include <c10/core/MemoryFormat.h>
10
+ #include <c10/core/ScalarType.h>
11
+ #include <c10/core/ScalarTypeToTypeMeta.h>
12
+
13
+ #include <c10/macros/Export.h>
14
+ #include <c10/macros/Macros.h>
15
+ #include <c10/util/Exception.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+ #include <cstdint>
19
+ #include <iosfwd>
20
+ #include <string>
21
+ #include <type_traits>
22
+ #include <utility>
23
+
24
+ namespace c10 {
25
+
26
+ DispatchKey computeDispatchKey(
27
+ std::optional<ScalarType> dtype,
28
+ std::optional<Layout> layout,
29
+ std::optional<Device> device);
30
+
31
+ inline ScalarType dtype_or_default(std::optional<ScalarType> dtype) {
32
+ return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); });
33
+ }
34
+
35
+ inline caffe2::TypeMeta dtype_or_default(
36
+ std::optional<caffe2::TypeMeta> dtype) {
37
+ return value_or_else(dtype, [] { return get_default_dtype(); });
38
+ }
39
+
40
+ inline Layout layout_or_default(std::optional<Layout> layout) {
41
+ return layout.value_or(kStrided);
42
+ }
43
+
44
+ inline Device device_or_default(std::optional<Device> device) {
45
+ return value_or_else(device, [] { return Device(kCPU); });
46
+ }
47
+
48
+ inline bool pinned_memory_or_default(std::optional<bool> pinned_memory) {
49
+ return pinned_memory.value_or(false);
50
+ }
51
+
52
+ /// A class to encapsulate construction axes of an Tensor. TensorOptions was
53
+ /// designed to support the Python style API for specifying construction options
54
+ /// on factory functions, e.g.,
55
+ ///
56
+ /// torch.zeros(2, 3, dtype=torch.int32)
57
+ ///
58
+ /// Because C++ doesn't natively support keyword arguments, there must be
59
+ /// another way of specifying keyword-like arguments. TensorOptions is a
60
+ /// builder class which can be used to construct this "dictionary" of keyword
61
+ /// arguments: functions which support TensorOptions conventionally take this
62
+ /// argument optionally as their last argument.
63
+ ///
64
+ /// WARNING: In PyTorch, there are `torch::` variants of factory functions,
65
+ /// e.g., torch::zeros for at::zeros. These return Variables (while the
66
+ /// stock ATen functions return plain Tensors). If you mix these functions
67
+ /// up, you WILL BE SAD.
68
+ ///
69
+ /// Rather than use the constructor of this class directly, you should prefer to
70
+ /// use the constructor functions, and then chain setter methods on top of them.
71
+ ///
72
+ /// at::device(at::kCUDA).dtype(kInt)
73
+ /// at::dtype(at::kInt)
74
+ ///
75
+ /// Additionally, anywhere a TensorOptions is expected, you can directly
76
+ /// pass at::kCUDA / at::kInt, and it will implicitly convert to a
77
+ /// TensorOptions.
78
+ ///
79
+ /// Here are some recommended ways to create a 2x2 tensor of zeros
80
+ /// with certain properties. These all *implicitly* make use of
81
+ /// TensorOptions, even if they don't mention the class explicitly:
82
+ ///
83
+ /// at::zeros({2,2}, at::kCUDA);
84
+ /// at::zeros({2,2}, at::kLong);
85
+ /// at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong()));
86
+ /// at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1
87
+ /// at::zeros({2,2}, at::requires_grad());
88
+ ///
89
+
90
+ /// NOTE [ TensorOptions Constructors ]
91
+ ///
92
+ /// TensorOptions is like a dictionary with entries from the set:
93
+ /// {requires_grad, device, dtype, layout}, where each entry may be
94
+ /// unspecified (i.e., is optional). It is used to specify the properties of
95
+ /// tensors in many places both in C++ internal and API, e.g., tensor factory
96
+ /// methods like `at::empty({10}, options)`, tensor conversions like
97
+ /// `tensor.to(...)`, etc.
98
+ ///
99
+ /// To provide a simple API that is consistent with Python, where one can do
100
+ /// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or a
101
+ /// `torch.layout`, we want TensorOptions to be implicitly convertible from
102
+ /// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we have
103
+ /// three implicit constructors from each of these three types.
104
+ ///
105
+ /// This is sufficient for `ScalarType` and `Layout` as they are simple Enum
106
+ /// classes. However, `Device` is an ordinary class with implicit constructors
107
+ /// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be
108
+ /// consistent with Python API, where strings are treated as equivalent with a
109
+ /// `torch.device` object (e.g., "cuda:1" can be passed to everywhere a
110
+ /// `torch.device("cuda:1")` is accepted). To support the syntax
111
+ /// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure
112
+ /// that `TensorOptions` is implicitly constructible with any arguments that a
113
+ /// `Device` can constructed from. So we have,
114
+ ///
115
+ /// /* implicit */ TensorOptions(T&& device) : TensorOptions() {
116
+ /// this->set_device(device);
117
+ /// }
118
+ ///
119
+ /// template <typename... Args,
120
+ /// typename = std::enable_if_t<std::is_constructible<Device,
121
+ /// Args&&...>::value>>
122
+ /// /* implicit */ TensorOptions(Args&&... args)
123
+ /// : TensorOptions(Device(std::forward<Args>(args)...)) {}
124
+ ///
125
+ ///
126
+ /// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`.
127
+ /// Compiler will complain about ambiguity between the copy constructor and the
128
+ /// `Device` constructor because `{kCUDA, 1}` can be converted to both a
129
+ /// `TensorOption` and a `Device`.
130
+ ///
131
+ /// To get around this, we templatize the `Device` constructor. Since overload
132
+ /// resolution is done before template resolution, our problem is solved.
133
+
134
+ DispatchKey computeDispatchKey(
135
+ optional<ScalarType> dtype,
136
+ optional<Layout> layout,
137
+ optional<Device> device);
138
+
139
+ struct C10_API TensorOptions {
140
+ TensorOptions()
141
+ : requires_grad_(false),
142
+ pinned_memory_(false),
143
+ has_device_(false),
144
+ has_dtype_(false),
145
+ has_layout_(false),
146
+ has_requires_grad_(false),
147
+ has_pinned_memory_(false),
148
+ has_memory_format_(false) {}
149
+
150
+ /// Constructs a `TensorOptions` object with the given layout.
151
+ /* implicit */ TensorOptions(Layout layout) : TensorOptions() {
152
+ this->set_layout(layout);
153
+ }
154
+
155
+ /// Constructs a `TensorOptions` object with the given device.
156
+ /// See NOTE [ TensorOptions Constructors ] on why this is templatized.
157
+ template <
158
+ typename T,
159
+ typename = std::enable_if_t<std::is_same_v<std::decay_t<T>, Device>>>
160
+ /* implicit */ TensorOptions(T&& device) : TensorOptions() {
161
+ this->set_device(std::forward<T>(device));
162
+ }
163
+
164
+ /// Constructs a `TensorOptions` object from arguments allowed in `Device`
165
+ /// constructors.
166
+ ///
167
+ /// See NOTE [ TensorOptions Constructors ].
168
+ ///
169
+ /// NB: Ideally we would only allow implicit constructors here. But there is no easy
170
+ /// way to detect them. So we have this one that allows explicit
171
+ /// constructors too.
172
+ template <
173
+ typename... Args,
174
+ typename = std::enable_if_t<std::is_constructible_v<Device, Args&&...>>>
175
+ /* implicit */ TensorOptions(Args&&... args)
176
+ : TensorOptions(Device(std::forward<Args>(args)...)) {}
177
+
178
+ /// Constructs a `TensorOptions` object with the given dtype.
179
+ /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() {
180
+ this->set_dtype(dtype);
181
+ }
182
+
183
+ /// legacy constructor to support ScalarType
184
+ /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() {
185
+ this->set_dtype(dtype);
186
+ }
187
+
188
+ /// Constructs a `TensorOptions` object with the given memory format.
189
+ /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() {
190
+ set_memory_format(memory_format);
191
+ }
192
+
193
+ /// Return a copy of `TensorOptions` with `device` set to the given one, or
194
+ /// cleared if `device` is `nullopt`.
195
+ C10_NODISCARD TensorOptions
196
+ device(std::optional<Device> device) const noexcept {
197
+ TensorOptions r = *this;
198
+ r.set_device(device);
199
+ return r;
200
+ }
201
+
202
+ /// Return a copy of `TensorOptions` with `device` set to the given one.
203
+ /// (This overload ensures that variadic template std::optional constructor
204
+ /// for Device works correctly.)
205
+ template <typename... Args>
206
+ C10_NODISCARD TensorOptions device(Args&&... args) const noexcept {
207
+ return device(
208
+ std::optional<Device>(std::in_place, std::forward<Args>(args)...));
209
+ }
210
+
211
+ /// Return a copy of `TensorOptions`, but with device set to CUDA, and the
212
+ /// device index set to the given one.
213
+ ///
214
+ /// TODO: This function encourages bad behavior (assuming CUDA is
215
+ /// the only device that matters). Get rid of it / rename it.
216
+ C10_NODISCARD TensorOptions
217
+ device_index(c10::DeviceIndex device_index) const noexcept {
218
+ return device(Device::Type::CUDA, device_index);
219
+ }
220
+
221
+ /// Return a copy of `TensorOptions` with `dtype` set to the given one.
222
+ C10_NODISCARD TensorOptions
223
+ dtype(std::optional<caffe2::TypeMeta> dtype) const noexcept {
224
+ TensorOptions r = *this;
225
+ r.set_dtype(dtype);
226
+ return r;
227
+ }
228
+
229
+ // legacy function to support ScalarType
230
+ C10_NODISCARD TensorOptions
231
+ dtype(std::optional<ScalarType> dtype) const noexcept {
232
+ TensorOptions r = *this;
233
+ r.set_dtype(dtype);
234
+ return r;
235
+ }
236
+
237
+ // Since dtype is taken...
238
+ template <typename T>
239
+ TensorOptions& dtype() {
240
+ dtype_ = caffe2::TypeMeta::Make<T>();
241
+ has_dtype_ = true;
242
+ return *this;
243
+ }
244
+
245
+ /// Sets the layout of the `TensorOptions`.
246
+ C10_NODISCARD TensorOptions
247
+ layout(std::optional<Layout> layout) const noexcept {
248
+ TensorOptions r = *this;
249
+ r.set_layout(layout);
250
+ return r;
251
+ }
252
+
253
+ /// Sets the `requires_grad` property of the `TensorOptions`.
254
+ C10_NODISCARD TensorOptions
255
+ requires_grad(std::optional<bool> requires_grad) const noexcept {
256
+ TensorOptions r = *this;
257
+ r.set_requires_grad(requires_grad);
258
+ return r;
259
+ }
260
+
261
+ /// Sets the `pinned_memory` property on the `TensorOptions`.
262
+ C10_NODISCARD TensorOptions
263
+ pinned_memory(std::optional<bool> pinned_memory) const noexcept {
264
+ TensorOptions r = *this;
265
+ r.set_pinned_memory(pinned_memory);
266
+ return r;
267
+ }
268
+
269
+ /// Sets the `memory_format` property on `TensorOptions`.
270
+ C10_NODISCARD TensorOptions
271
+ memory_format(std::optional<MemoryFormat> memory_format) const noexcept {
272
+ TensorOptions r = *this;
273
+ r.set_memory_format(memory_format);
274
+ return r;
275
+ }
276
+
277
+ /// Returns the device of the `TensorOptions`.
278
+ Device device() const noexcept {
279
+ return device_or_default(device_opt());
280
+ }
281
+
282
+ /// Returns whether the device is specified.
283
+ bool has_device() const noexcept {
284
+ return has_device_;
285
+ }
286
+
287
+ /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
288
+ /// device is not specified.
289
+ std::optional<Device> device_opt() const noexcept {
290
+ return has_device_ ? c10::make_optional(device_) : c10::nullopt;
291
+ }
292
+
293
+ /// Returns the device index of the `TensorOptions`.
294
+ c10::DeviceIndex device_index() const noexcept {
295
+ return device().index();
296
+ }
297
+
298
+ /// Returns the dtype of the `TensorOptions`.
299
+ caffe2::TypeMeta dtype() const noexcept {
300
+ return dtype_or_default(dtype_opt());
301
+ }
302
+
303
+ /// Returns whether the dtype is specified.
304
+ bool has_dtype() const noexcept {
305
+ return has_dtype_;
306
+ }
307
+
308
+ /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
309
+ /// dtype is not specified.
310
+ std::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
311
+ return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
312
+ }
313
+
314
+ /// Returns the layout of the `TensorOptions`.
315
+ Layout layout() const noexcept {
316
+ return layout_or_default(layout_opt());
317
+ }
318
+
319
+ /// Returns whether the layout is specified.
320
+ bool has_layout() const noexcept {
321
+ return has_layout_;
322
+ }
323
+
324
+ /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
325
+ /// layout is not specified.
326
+ std::optional<Layout> layout_opt() const noexcept {
327
+ return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
328
+ }
329
+
330
+ /// Returns the `requires_grad` property of the `TensorOptions`.
331
+ bool requires_grad() const noexcept {
332
+ return has_requires_grad_ ? requires_grad_ : false;
333
+ }
334
+
335
+ /// Returns whether the `requires_grad` is specified.
336
+ bool has_requires_grad() const noexcept {
337
+ return has_requires_grad_;
338
+ }
339
+
340
+ /// Returns the `requires_grad` property of the `TensorOptions`, or
341
+ /// `c10::nullopt` if `requires_grad` is not specified.
342
+ std::optional<bool> requires_grad_opt() const noexcept {
343
+ return has_requires_grad_ ? c10::make_optional(requires_grad_)
344
+ : c10::nullopt;
345
+ }
346
+
347
+ /// Returns the `pinned_memory` property of the `TensorOptions`.
348
+ bool pinned_memory() const noexcept {
349
+ return pinned_memory_or_default(pinned_memory_opt());
350
+ }
351
+
352
+ /// Returns whether the `pinned_memory` is specified.
353
+ bool has_pinned_memory() const noexcept {
354
+ return has_pinned_memory_;
355
+ }
356
+
357
+ /// Returns whether the layout is sparse.
358
+ bool is_sparse() const {
359
+ return layout_ == c10::Layout::Sparse;
360
+ }
361
+
362
+ /// Returns whether the layout is sparse CSR. Deprecated: use
363
+ /// is_sparse_compressed() instead.
364
+ bool is_sparse_csr() const {
365
+ return layout_ == c10::Layout::SparseCsr;
366
+ }
367
+
368
+ bool is_sparse_compressed() const {
369
+ return layout_ == c10::Layout::SparseCsr ||
370
+ layout_ == c10::Layout::SparseCsc ||
371
+ layout_ == c10::Layout::SparseBsr || layout_ == c10::Layout::SparseBsc;
372
+ }
373
+
374
+ // For compatibility with legacy tensor.type() comparisons
375
+ bool type_equal(const TensorOptions& other) const {
376
+ return computeDispatchKey() == other.computeDispatchKey() &&
377
+ typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype());
378
+ }
379
+
380
+ /// Returns the `pinned_memory` property of the `TensorOptions`, or
381
+ /// `c10::nullopt` if `pinned_memory` is not specified.
382
+ std::optional<bool> pinned_memory_opt() const noexcept {
383
+ return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
384
+ : c10::nullopt;
385
+ }
386
+
387
+ /// Returns whether the `memory_layout` is specified
388
+ bool has_memory_format() const noexcept {
389
+ return has_memory_format_;
390
+ }
391
+
392
+ // NB: memory_format() getter is PURPOSELY not defined, as the default
393
+ // behavior of memory_format varies from function to function.
394
+
395
+ /// Returns the `memory_layout` property of `TensorOptions, or
396
+ /// `c10::nullopt` if `memory_format` is not specified.
397
+ std::optional<MemoryFormat> memory_format_opt() const noexcept {
398
+ return has_memory_format_ ? c10::make_optional(memory_format_)
399
+ : c10::nullopt;
400
+ }
401
+
402
+ // Resolves the ATen backend specified by the current construction axes.
403
+ // TODO: Deprecate this
404
+ Backend backend() const {
405
+ return at::dispatchKeyToBackend(computeDispatchKey());
406
+ }
407
+
408
+ /// Return the right-biased merge of two TensorOptions. This has the
409
+ /// effect of overwriting the settings of `*this` with any settings that
410
+ /// are specified in `options`.
411
+ ///
412
+ /// NB: This merging operation does NOT respect device merges.
413
+ /// For example, if you call device({kCUDA, 1}).merge_in(kCUDA),
414
+ /// you will get kCUDA in the end! Functions like Tensor.new_empty
415
+ /// ensure the right device is selected anyway by way of a
416
+ /// device guard.
417
+ ///
418
+ TensorOptions merge_in(TensorOptions options) const noexcept {
419
+ TensorOptions merged = *this;
420
+ if (options.has_device())
421
+ merged.set_device(options.device_opt());
422
+ if (options.has_dtype())
423
+ merged.set_dtype(options.dtype_opt());
424
+ if (options.has_layout())
425
+ merged.set_layout(options.layout_opt());
426
+ // NB: requires grad is right biased; not a logical AND/OR!
427
+ if (options.has_requires_grad())
428
+ merged.set_requires_grad(options.requires_grad_opt());
429
+ if (options.has_pinned_memory())
430
+ merged.set_pinned_memory(options.pinned_memory_opt());
431
+ if (options.has_memory_format())
432
+ merged.set_memory_format(options.memory_format_opt());
433
+ return merged;
434
+ }
435
+
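+ // A small sketch of the right-biased behavior (values illustrative):
+ //
+ //   auto a = TensorOptions().dtype(at::kFloat).device(at::kCPU);
+ //   auto b = TensorOptions().dtype(at::kLong);
+ //   auto m = a.merge_in(b);
+ //   // m.dtype()  -> long  (b is right-biased, so it wins)
+ //   // m.device() -> CPU   (kept from a; b left it unspecified)
+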
436
+ // TODO remove after TensorOptions rationalization
437
+ TensorOptions merge_memory_format(
438
+ std::optional<MemoryFormat> optional_memory_format) const noexcept {
439
+ TensorOptions merged = *this;
440
+ if (optional_memory_format.has_value()) {
441
+ merged.set_memory_format(*optional_memory_format);
442
+ }
443
+ return merged;
444
+ }
445
+
446
+ // INVARIANT: computeDispatchKey returns only the subset of dispatch keys for
447
+ // which dispatchKeyToBackend is injective, if it is defined at all (for
448
+ // the most part, this just means that this function never returns an
449
+ // Autograd key)
450
+ DispatchKey computeDispatchKey() const {
451
+ return c10::computeDispatchKey(
452
+ optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt());
453
+ }
454
+
455
+ private:
456
+ // These methods are currently private because I'm not sure if it's wise
457
+ // to actually publish them. They are methods because I need them in
458
+ // the constructor and the functional API implementation.
459
+ //
460
+ // If you really, really need it, you can make these public, but check if you
461
+ // couldn't just do what you need with the functional API. Similarly, these
462
+ // methods are not chainable, because if you wanted chaining, you probably
463
+ // want to use the functional API instead. (It's probably OK to make
464
+ // these chainable, because these functions are all explicitly annotated
465
+ // with a ref-qualifier, the trailing &, which makes them illegal to call
466
+ // on temporaries.)
467
+
468
+ /// Mutably set the device of `TensorOptions`.
469
+ void set_device(std::optional<Device> device) & noexcept {
470
+ if (device) {
471
+ device_ = *device;
472
+ has_device_ = true;
473
+ } else {
474
+ has_device_ = false;
475
+ }
476
+ }
477
+
478
+ /// Mutably set the dtype of `TensorOptions`.
479
+ void set_dtype(std::optional<caffe2::TypeMeta> dtype) & noexcept {
480
+ if (dtype) {
481
+ dtype_ = *dtype;
482
+ has_dtype_ = true;
483
+ } else {
484
+ has_dtype_ = false;
485
+ }
486
+ }
487
+
488
+ // legacy function to support ScalarType
489
+ void set_dtype(std::optional<ScalarType> dtype) & noexcept {
490
+ if (dtype) {
491
+ dtype_ = scalarTypeToTypeMeta(*dtype);
492
+ has_dtype_ = true;
493
+ } else {
494
+ has_dtype_ = false;
495
+ }
496
+ }
497
+
498
+ /// Mutably set the layout of `TensorOptions`.
499
+ void set_layout(std::optional<Layout> layout) & noexcept {
500
+ if (layout) {
501
+ layout_ = *layout;
502
+ has_layout_ = true;
503
+ } else {
504
+ has_layout_ = false;
505
+ }
506
+ }
507
+
508
+ /// Mutably set the `requires_grad` property of `TensorOptions`.
509
+ void set_requires_grad(std::optional<bool> requires_grad) & noexcept {
510
+ if (requires_grad) {
511
+ requires_grad_ = *requires_grad;
512
+ has_requires_grad_ = true;
513
+ } else {
514
+ has_requires_grad_ = false;
515
+ }
516
+ }
517
+
518
+ /// Mutably set the `pinned_memory` property of `TensorOptions`.
519
+ void set_pinned_memory(std::optional<bool> pinned_memory) & noexcept {
520
+ if (pinned_memory) {
521
+ pinned_memory_ = *pinned_memory;
522
+ has_pinned_memory_ = true;
523
+ } else {
524
+ has_pinned_memory_ = false;
525
+ }
526
+ }
527
+
528
+ /// Mutably set the `memory_format` property of `TensorOptions`.
529
+ void set_memory_format(std::optional<MemoryFormat> memory_format) & noexcept {
530
+ if (memory_format) {
531
+ memory_format_ = *memory_format;
532
+ has_memory_format_ = true;
533
+ } else {
534
+ has_memory_format_ = false;
535
+ }
536
+ }
537
+
538
+ // WARNING: If you edit TensorOptions to add more options, you
539
+ // may need to adjust the implementation of Tensor::options.
540
+ // The criteria for whether or not Tensor::options must be adjusted
541
+ // is whether or not the new option you added should be preserved
542
+ // by functions such as empty_like(); if it should be preserved,
543
+ // you must adjust options().
544
+ //
545
+ // TODO: MemoryFormat is not implemented in this way
546
+
547
+ // NB: We didn't use std::optional here, because then we can't pack
548
+ // the has_***_ boolean fields.
549
+
550
+ Device device_ = at::kCPU; // 16-bit
551
+ caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 16-bit
552
+ Layout layout_ = at::kStrided; // 8-bit
553
+ MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit
554
+
555
+ // Bitmask required here to get this to fit inside 32 bits (or even 64 bits,
556
+ // for that matter)
557
+
558
+ bool requires_grad_ : 1;
559
+ bool pinned_memory_ : 1;
560
+
561
+ bool has_device_ : 1;
562
+ bool has_dtype_ : 1;
563
+ bool has_layout_ : 1;
564
+ bool has_requires_grad_ : 1;
565
+ bool has_pinned_memory_ : 1;
566
+ bool has_memory_format_ : 1;
567
+ };
568
+
569
+ // We should aspire to fit in one machine-size word; but a size greater than two
570
+ // words is too much. (We are doing terribly on 32-bit archs, where we require
571
+ // three machine size words to store tensor options. Eek!)
572
+ static_assert(
573
+ sizeof(TensorOptions) <= sizeof(int64_t) * 2,
574
+ "TensorOptions must fit in 128-bits");
575
+
576
+ /// Convenience function that returns a `TensorOptions` object with the `dtype`
577
+ /// set to the given one.
578
+ inline TensorOptions dtype(caffe2::TypeMeta dtype) {
579
+ return TensorOptions().dtype(dtype);
580
+ }
581
+
582
+ // legacy function to support ScalarType
583
+ inline TensorOptions dtype(ScalarType dtype) {
584
+ return TensorOptions().dtype(scalarTypeToTypeMeta(dtype));
585
+ }
586
+
587
+ /// Convenience function that returns a `TensorOptions` object with the `layout`
588
+ /// set to the given one.
589
+ inline TensorOptions layout(Layout layout) {
590
+ return TensorOptions().layout(layout);
591
+ }
592
+
593
+ /// Convenience function that returns a `TensorOptions` object with the `device`
594
+ /// set to the given one.
595
+ inline TensorOptions device(Device device) {
596
+ return TensorOptions().device(device);
597
+ }
598
+
599
+ /// Convenience function that returns a `TensorOptions` object with the
600
+ /// `device` set to CUDA and the `device_index` set to the given one.
601
+ inline TensorOptions device_index(c10::DeviceIndex device_index) {
602
+ return TensorOptions().device_index(device_index);
603
+ }
604
+
605
+ /// Convenience function that returns a `TensorOptions` object with the
606
+ /// `requires_grad` set to the given one.
607
+ inline TensorOptions requires_grad(bool requires_grad = true) {
608
+ return TensorOptions().requires_grad(requires_grad);
609
+ }
610
+
611
+ /// Convenience function that returns a `TensorOptions` object with the
612
+ /// `memory_format` set to the given one.
613
+ inline TensorOptions memory_format(MemoryFormat memory_format) {
614
+ return TensorOptions().memory_format(memory_format);
615
+ }
616
+
617
+ C10_API std::ostream& operator<<(
618
+ std::ostream& stream,
619
+ const TensorOptions& options);
620
+
621
+ template <typename T>
622
+ inline TensorOptions dtype() {
623
+ return dtype(caffe2::TypeMeta::Make<T>());
624
+ }
625
+
626
+ inline std::string toString(const TensorOptions& options) {
627
+ std::ostringstream stream;
628
+ stream << options;
629
+ return stream.str();
630
+ }
631
+
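+ // Usage sketch for the convenience functions above (hedged; `opts` and `t`
+ // are hypothetical locals). Each free function returns a TensorOptions, so
+ // they compose through the value-returning setters:
+ //
+ //   auto opts = dtype(at::kHalf).device(kCUDA, 0).requires_grad(true);
+ //   // same as TensorOptions().dtype(at::kHalf).device(kCUDA, 0)
+ //   //                        .requires_grad(true)
+ //   auto t = at::empty({2, 2}, opts);
+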
632
+ // This is intended to be a centralized location by which we can determine
633
+ // what an appropriate DispatchKey for a tensor is.
634
+ inline DispatchKey computeDispatchKey(
635
+ std::optional<ScalarType> dtype,
636
+ std::optional<Layout> layout,
637
+ std::optional<Device> device) {
638
+ const auto layout_ = layout_or_default(layout);
639
+ const auto device_ = device_or_default(device);
640
+ switch (layout_) {
641
+ case Layout::Jagged:
642
+ case Layout::Strided: {
643
+ const auto dtype_ = dtype_or_default(dtype);
644
+ switch (device_.type()) {
645
+ #define DO_CASE(device, _) \
646
+ case c10::DeviceType::device: { \
647
+ if (isQIntType(dtype_)) { \
648
+ return DispatchKey::Quantized##device; \
649
+ } \
650
+ return DispatchKey::device; \
651
+ }
652
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
653
+ #undef DO_CASE
654
+ case c10::DeviceType::FPGA:
655
+ return DispatchKey::FPGA;
656
+ case c10::DeviceType::MAIA:
657
+ return DispatchKey::MAIA;
658
+ case c10::DeviceType::Vulkan:
659
+ return DispatchKey::Vulkan;
660
+ case c10::DeviceType::Metal:
661
+ return DispatchKey::Metal;
662
+ case c10::DeviceType::MKLDNN:
663
+ case c10::DeviceType::OPENGL:
664
+ case c10::DeviceType::OPENCL:
665
+ case c10::DeviceType::IDEEP:
666
+ TORCH_INTERNAL_ASSERT(
667
+ 0,
668
+ "This is a grandfathered Caffe2 device type ",
669
+ device_.type(),
670
+ ", it shouldn't ever convert to a DispatchKey. File a bug describing what you were doing if you think this is in error.");
671
+ default:
672
+ TORCH_CHECK_NOT_IMPLEMENTED(
673
+ false,
674
+ "Unsupported device type for dense layout: ",
675
+ device_.type());
676
+ }
677
+ }
678
+ case Layout::Sparse:
679
+ switch (device_.type()) {
680
+ #define DO_CASE(device, _) \
681
+ case c10::DeviceType::device: { \
682
+ return DispatchKey::Sparse##device; \
683
+ }
684
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
685
+ #undef DO_CASE
686
+ default:
687
+ TORCH_CHECK_NOT_IMPLEMENTED(
688
+ false,
689
+ "Unsupported device type for sparse layout: ",
690
+ device_.type());
691
+ }
692
+ case Layout::Mkldnn:
693
+ switch (device_.type()) {
694
+ case c10::DeviceType::CPU:
695
+ return DispatchKey::MkldnnCPU;
696
+ default:
697
+ TORCH_CHECK_NOT_IMPLEMENTED(
698
+ false,
699
+ "Unsupported device type for mkldnn layout: ",
700
+ device_.type());
701
+ }
702
+ case Layout::SparseCsr:
703
+ case Layout::SparseCsc:
704
+ case Layout::SparseBsr:
705
+ case Layout::SparseBsc:
706
+ switch (device_.type()) {
707
+ #define DO_CASE(device, _) \
708
+ case c10::DeviceType::device: { \
709
+ return DispatchKey::SparseCsr##device; \
710
+ }
711
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
712
+ #undef DO_CASE
713
+ default:
714
+ TORCH_CHECK_NOT_IMPLEMENTED(
715
+ false,
716
+ "Unsupported device type for ",
717
+ layout_,
718
+ " layout: ",
719
+ device_.type());
720
+ }
721
+ default:
722
+ TORCH_CHECK(false, "Unsupported layout: ", layout_);
723
+ }
724
+ }
725
+
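+ // A few concrete mappings implied by the switch above (derivable by
+ // reading the cases; listed here for orientation):
+ //
+ //   (Float, Strided, CPU)  -> DispatchKey::CPU
+ //   (QInt8, Strided, CPU)  -> DispatchKey::QuantizedCPU
+ //   (any,   Sparse,  CUDA) -> DispatchKey::SparseCUDA
+ //   (any,   Mkldnn,  CPU)  -> DispatchKey::MkldnnCPU
+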
726
+ inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) {
727
+ switch (dispatch_key) {
728
+ #define DO_CASE(bc, _) case DispatchKey::Sparse##bc:
729
+ C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
730
+ #undef DO_CASE
731
+ return Layout::Sparse;
732
+ #define DO_CASE(bc, _) case DispatchKey::SparseCsr##bc:
733
+ C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
734
+ #undef DO_CASE
735
+ TORCH_CHECK(
736
+ false, "Cannot map DispatchKey ", dispatch_key, " to a unique layout.");
737
+ case DispatchKey::MkldnnCPU:
738
+ return Layout::Mkldnn;
739
+ default:
740
+ return Layout::Strided;
741
+ }
742
+ }
743
+
744
+ inline c10::DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) {
745
+ switch (dispatch_key) {
746
+ // stuff that's real
747
+ #define DO_CASE(suffix, prefix) \
748
+ case DispatchKey::prefix##suffix: \
749
+ return c10::DeviceType::suffix;
750
+ #define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix)
751
+ C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES)
752
+ #undef DO_CASES
753
+ #undef DO_CASE
754
+
755
+ case DispatchKey::MkldnnCPU:
756
+ return c10::DeviceType::CPU;
757
+ case DispatchKey::Vulkan:
758
+ return c10::DeviceType::Vulkan;
759
+
760
+ case DispatchKey::MAIA:
761
+ return c10::DeviceType::MAIA;
762
+ default:
763
+ TORCH_CHECK(
764
+ false,
765
+ "DispatchKey ",
766
+ dispatch_key,
767
+ " doesn't correspond to a device");
768
+ }
769
+ }
770
+
771
+ inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) {
772
+ return TensorOptions()
773
+ .layout(dispatchKeyToLayout(dispatch_key))
774
+ .device(dispatchKeyToDeviceType(dispatch_key));
775
+ }
776
+
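+ // Sketch: recovering coarse options from a key via the two helpers above:
+ //
+ //   dispatchKeyToTensorOptions(DispatchKey::SparseCPU)
+ //     // -> layout() == Layout::Sparse, device() is a CPU device
+ //   dispatchKeyToTensorOptions(DispatchKey::MkldnnCPU)
+ //     // -> layout() == Layout::Mkldnn, device() is a CPU device
+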
777
+ namespace detail {
778
+ inline bool backend_supports_empty_operator(const TensorOptions& options) {
779
+ // Quantized backends don't support at::empty().
780
+ // They have separate operators like at::empty_quantized() that take in
781
+ // extra information about how to quantize the tensor.
782
+ return !isQIntType(typeMetaToScalarType(options.dtype()));
783
+ }
784
+
785
+ } // namespace detail
786
+
787
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h ADDED
@@ -0,0 +1,42 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/MemoryFormat.h>
4
+ #include <c10/core/SymIntArrayRef.h>
5
+ #include <c10/core/TensorImpl.h>
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/util/ArrayRef.h>
8
+ #include <cstdint>
9
+
10
+ namespace c10 {
11
+
12
+ struct C10_API UndefinedTensorImpl final : public TensorImpl {
13
+ public:
14
+ // Without this, we get:
15
+ // error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in
16
+ // device code
17
+ // (ostensibly because the constexpr tricks MSVC into trying to compile this
18
+ // function for device as well).
19
+ #ifdef _WIN32
20
+ static inline TensorImpl* singleton() {
21
+ #else
22
+ static constexpr inline TensorImpl* singleton() {
23
+ #endif
24
+ return &_singleton;
25
+ }
26
+ #ifdef DEBUG
27
+ bool has_storage() const override;
28
+ #endif
29
+ void set_storage_offset(int64_t offset) override;
30
+
31
+ protected:
32
+ bool is_contiguous_custom(MemoryFormat format) const override;
33
+ IntArrayRef strides_custom() const override;
34
+ SymIntArrayRef sym_strides_custom() const override;
35
+
36
+ private:
37
+ UndefinedTensorImpl();
38
+ static UndefinedTensorImpl _singleton;
39
+ const char* tensorimpl_type_name() const override;
40
+ };
41
+
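+ // Usage sketch (hedged): an undefined at::Tensor points at this singleton,
+ // so a definedness check reduces to a pointer comparison, roughly:
+ //
+ //   bool defined = impl != UndefinedTensorImpl::singleton();
+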
42
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h ADDED
@@ -0,0 +1,48 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <cstdint>
7
+ #include <utility>
8
+
9
+ namespace c10 {
10
+
11
+ namespace detail {
12
+ // This template can only be specialized at int64_t and c10::SymInt;
13
+ // you'll get linker errors otherwise
14
+ template <typename T>
15
+ C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar);
16
+ } // namespace detail
17
+
18
+ template <typename T>
19
+ T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) {
20
+ // Inline the fast paths
21
+ if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) {
22
+ // For SymInts, we want an explicit control flow to trigger a guard, so we
23
+ // may as well branch too.
24
+ if (dim < 0) {
25
+ return dim + dim_post_expr;
26
+ }
27
+ return dim;
28
+ }
29
+ // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors)
30
+ return c10::detail::maybe_wrap_dim_slow<T>(
31
+ std::move(dim), std::move(dim_post_expr), wrap_scalar);
32
+ }
33
+
34
+ inline int64_t maybe_wrap_dim(
35
+ int64_t dim,
36
+ int64_t dim_post_expr,
37
+ bool wrap_scalar = true) {
38
+ return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar);
39
+ }
40
+
41
+ inline c10::SymInt maybe_wrap_dim(
42
+ c10::SymInt dim,
43
+ c10::SymInt dim_post_expr,
44
+ bool wrap_scalar = true) {
45
+ return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar);
46
+ }
47
+
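+ // A few values the fast and slow paths above produce (illustrative):
+ //
+ //   maybe_wrap_dim(-1, /*dim_post_expr=*/4) == 3   // wrapped
+ //   maybe_wrap_dim( 2, /*dim_post_expr=*/4) == 2   // already in range
+ //   maybe_wrap_dim(-1, /*dim_post_expr=*/0) == 0   // scalar wrapped as 1-dim
+ //   // out-of-range dims (e.g. 5 with dim_post_expr 4) error in the slow path
+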
48
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/alignment.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+
5
+ namespace c10 {
6
+
7
+ #ifdef C10_MOBILE
8
+ // Use 16-byte alignment on mobile
9
+ // - ARM NEON AArch32 and AArch64
10
+ // - x86[-64] < AVX
11
+ constexpr size_t gAlignment = 16;
12
+ #else
13
+ // 64-byte alignment should be enough for computation up to AVX512.
14
+ constexpr size_t gAlignment = 64;
15
+ #endif
16
+
17
+ constexpr size_t gPagesize = 4096;
18
+ // Since the default THP page size is 2MB, enable THP only
19
+ // for buffers of size 2MB or larger to avoid memory bloat.
20
+ constexpr size_t gAlloc_threshold_thp = static_cast<size_t>(2) * 1024 * 1024;
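+ // A minimal sketch of honoring gAlignment with std::aligned_alloc (C++17;
+ // the size passed must be a multiple of the alignment):
+ //
+ //   void* p = std::aligned_alloc(c10::gAlignment, /*size=*/4096);
+ //   // ... use p ...
+ //   std::free(p);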
21
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h ADDED
@@ -0,0 +1,365 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/core/Stream.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+ // Just for C10_ANONYMOUS_VARIABLE
9
+ #include <c10/util/Registry.h>
10
+
11
+ #include <atomic>
12
+
13
+ namespace c10 {
14
+
15
+ // Forward declaration
16
+ class DataPtr;
17
+
18
+ /**
19
+ * Note [Flags defining the behavior of events]
20
+ *
21
+ * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The
22
+ * BACKEND_DEFAULT is what a particular backend would select if no
23
+ * flags were given. PYTORCH_DEFAULT is the PyTorch's framework default
24
+ * choice for events on that backend, which may not be the same.
25
+ *
26
+ * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each
27
+ * backend implementation.
28
+ */
29
+ enum class EventFlag {
30
+ // Disable timing
31
+ PYTORCH_DEFAULT,
32
+ // Enable timing
33
+ BACKEND_DEFAULT,
34
+ // FOR TESTING ONLY
35
+ INVALID
36
+ };
37
+
38
+ namespace impl {
39
+
40
+ /**
41
+ * DeviceGuardImplInterface represents the virtual interface which provides
42
+ * functionality to provide an RAII class for device and stream switching,
43
+ * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is
44
+ * expected to implement and register an implementation of this interface.
45
+ * All classes which inherit from DeviceGuardImplInterface should be declared
46
+ * 'final'.
47
+ *
48
+ * This class exists because we provide a unified interface for performing
49
+ * device guards via DeviceGuard, but we cannot assume that we have actually
50
+ * compiled against the, e.g., CUDA library, which actually implements
51
+ * this guard functionality. In this case, a dynamic dispatch is required
52
+ * to cross the library boundary.
53
+ *
54
+ * If possible, you should directly use implementations of this interface;
55
+ * those uses will be devirtualized.
56
+ */
57
+ struct C10_API DeviceGuardImplInterface {
58
+ DeviceGuardImplInterface() = default;
59
+ DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
60
+ DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
61
+ default;
62
+ DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
63
+ DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
64
+ default;
65
+
66
+ /**
67
+ * Return the type of device managed by this guard implementation.
68
+ */
69
+ virtual DeviceType type() const = 0;
70
+
71
+ /**
72
+ * Set the current device to Device, and return the previous Device.
73
+ */
74
+ virtual Device exchangeDevice(Device) const = 0;
75
+ // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might
76
+ // consider replacing exchangeDevice with a non-virtual function with a baked
77
+ // in implementation; however, note that this will triple the number of
78
+ // virtual calls (when you implement exchangeDevice in a final subclass,
79
+ // the compiler gets to devirtualize everything; it won't do that if you don't
80
+ // define it in the subclass!) A common way to solve this problem is to use
81
+ // some sort of CRTP; however, we can't template DeviceGuardImplInterface since
82
+ // we really *do* need it to be virtual. A little boilerplate seems easiest
83
+ // to explain. (Another way around this problem is to provide inline
84
+ // functions that provide the default implementations, but this seems a little
85
+ // hard to explain. In any case, we're only going to have on the order of ten
86
+ // implementations of this anyway.)
87
+
88
+ /**
89
+ * Get the current device.
90
+ */
91
+ virtual Device getDevice() const = 0;
92
+
93
+ /**
94
+ * Set the current device to Device.
95
+ */
96
+ virtual void setDevice(Device) const = 0;
97
+
98
+ /**
99
+ * Set the current device to Device, without checking for errors
100
+ * (so, e.g., this can be called from a destructor).
101
+ */
102
+ virtual void uncheckedSetDevice(Device) const noexcept = 0;
103
+
104
+ /**
105
+ * Get the current stream for a given device.
106
+ */
107
+ virtual Stream getStream(Device) const noexcept = 0;
108
+
109
+ /**
110
+ * Get the default stream for a given device.
111
+ */
112
+ virtual Stream getDefaultStream(Device) const {
113
+ TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
114
+ }
115
+
116
+ /**
117
+ * Get a stream from the global pool for a given device.
118
+ */
119
+ virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
120
+ const {
121
+ (void)isHighPriority; // Suppress unused variable warning
122
+ TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
123
+ }
124
+
125
+ /**
126
+ * Return a new stream for a given device and priority. The stream will be
127
+ * copied and shared around; the device backend should be able to correctly handle
128
+ * the lifetime of the stream.
129
+ */
130
+ virtual Stream getNewStream(Device, int priority = 0) const {
131
+ (void)priority;
132
+ TORCH_CHECK(false, "Backend doesn't support creating a new Stream.")
133
+ }
134
+
135
+ /**
136
+ * Set a stream to be the thread local current stream for its device.
137
+ * Return the previous stream for that device. You are NOT required
138
+ * to set the current device to match the device of this stream.
139
+ */
140
+ virtual Stream exchangeStream(Stream) const noexcept = 0;
141
+
142
+ /**
143
+ * Destroys the given event.
144
+ */
145
+ virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
146
+ const noexcept {}
147
+
148
+ /**
149
+ * Increments the event's version and enqueues a job with this version
150
+ * in the stream's work queue. When the stream processes that job
151
+ * it notifies all streams waiting on / blocked by that version of the
152
+ * event to continue and marks that version as recorded.
153
+ */
154
+ virtual void record(
155
+ void** /*event*/,
156
+ const Stream& /*stream*/,
157
+ const DeviceIndex /*device_index*/,
158
+ const c10::EventFlag /*flag*/) const {
159
+ TORCH_CHECK(false, "Backend doesn't support events.");
160
+ }
161
+
162
+ /**
163
+ * Does nothing if the event has not been scheduled to be recorded.
164
+ * If the event was previously enqueued to be recorded, a command
165
+ * to wait for the version of the event that exists at the time of this call
166
+ * is inserted in the stream's work queue.
167
+ * When the stream reaches this command it will stop processing
168
+ * additional commands until that version of the event is marked as recorded.
169
+ */
170
+ virtual void block(void* /*event*/, const Stream& /*stream*/) const {
171
+ TORCH_CHECK(false, "Backend doesn't support events.");
172
+ }
173
+
174
+ /**
175
+ * Returns true if (and only if)
176
+ * (1) the event has never been scheduled to be recorded
177
+ * (2) the current version is marked as recorded.
178
+ * Returns false otherwise.
179
+ */
180
+ virtual bool queryEvent(void* /*event*/) const {
181
+ TORCH_CHECK(false, "Backend doesn't support events.");
182
+ }
183
+
184
+ /**
185
+ * Get the number of devices. WARNING: This is REQUIRED to not raise
186
+ * an exception. If there is some sort of problem, e.g., driver error,
187
+ * you should report that there are zero available devices.
188
+ */
189
+ virtual DeviceIndex deviceCount() const noexcept = 0;
190
+
191
+ /**
192
+ * Return true if all the work previously enqueued on the stream for
193
+ * asynchronous execution has completed running on the device.
194
+ */
195
+ virtual bool queryStream(const Stream& /*stream*/) const {
196
+ TORCH_CHECK(false, "Backend doesn't support querying streams.");
197
+ }
198
+
199
+ /**
200
+ * Wait (by blocking the calling thread) until all the work previously
201
+ * enqueued on the stream has completed running on the device.
202
+ */
203
+ virtual void synchronizeStream(const Stream& /*stream*/) const {
204
+ TORCH_CHECK(false, "Backend doesn't support synchronizing streams.");
205
+ }
206
+
207
+ /**
208
+ * Wait (by blocking the calling thread) until all the work previously
209
+ * recorded on the event has completed running on the device.
210
+ */
211
+ virtual void synchronizeEvent(void* /*event*/) const {
212
+ TORCH_CHECK(false, "Backend doesn't support synchronizing events.");
213
+ }
214
+
215
+ /**
216
+ * Ensure the caching allocator (if any) is aware that the given DataPtr is
217
+ * being used on the given stream, and that it should thus avoid recycling the
218
+ * DataPtr until all work on that stream is done.
219
+ */
220
+ virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
221
+ }
222
+
223
+ /**
224
+ * Fetch the elapsed time between two recorded events.
225
+ */
226
+ virtual double elapsedTime(
227
+ void* /*event1*/,
228
+ void* /*event2*/,
229
+ const DeviceIndex /*device_index*/) const {
230
+ TORCH_CHECK(false, "Backend doesn't support elapsedTime.");
231
+ }
232
+
233
+ /**
234
+ * Intended use of this class is to leak the DeviceGuardImpl at program end.
235
+ * So you better not call the destructor, buster!
236
+ */
237
+ virtual ~DeviceGuardImplInterface() = default;
238
+ };
239
+
240
+ // A no-op device guard impl that doesn't do anything interesting. Useful
241
+ // for devices that don't actually have a concept of device index. Prominent
242
+ // examples are CPU and Meta.
243
+ template <DeviceType D>
244
+ struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
245
+ NoOpDeviceGuardImpl() = default;
246
+ DeviceType type() const override {
247
+ return D;
248
+ }
249
+ Device exchangeDevice(Device) const override {
250
+ return Device(D, -1); // no-op
251
+ }
252
+ Device getDevice() const override {
253
+ return Device(D, -1);
254
+ }
255
+ void setDevice(Device) const override {
256
+ // no-op
257
+ }
258
+ void uncheckedSetDevice(Device) const noexcept override {
259
+ // no-op
260
+ }
261
+ Stream getStream(Device) const noexcept override {
262
+ // no-op
263
+ return Stream(Stream::DEFAULT, Device(D, -1));
264
+ }
265
+
266
+ Stream getNewStream(Device, int priority = 0) const override {
267
+ // no-op
268
+ (void)priority;
269
+ return Stream(Stream::DEFAULT, Device(D, -1));
270
+ }
271
+
272
+ // NB: These do NOT set the current device
273
+ Stream exchangeStream(Stream) const noexcept override {
274
+ // no-op
275
+ return Stream(Stream::DEFAULT, Device(D, -1));
276
+ }
277
+ DeviceIndex deviceCount() const noexcept override {
278
+ return 1;
279
+ }
280
+
281
+ // Event-related functions
282
+ void record(
283
+ void** /*event*/,
284
+ const Stream& /*stream*/,
285
+ const DeviceIndex /*device_index*/,
286
+ const EventFlag /*flag*/) const override {
287
+ TORCH_CHECK(false, D, " backend doesn't support events.");
288
+ }
289
+ void block(void* /*event*/, const Stream& /*stream*/) const override {
290
+ TORCH_CHECK(false, D, " backend doesn't support events.")
291
+ }
292
+ bool queryEvent(void* /*event*/) const override {
293
+ TORCH_CHECK(false, D, " backend doesn't support events.")
294
+ }
295
+ void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
296
+ const noexcept override {}
297
+
298
+ // Stream-related functions
299
+ bool queryStream(const Stream& /*stream*/) const override {
300
+ return true;
301
+ }
302
+ void synchronizeStream(const Stream& /*stream*/) const override {
303
+ // Don't wait for anything.
304
+ }
305
+ };
306
+
307
+ // The registry is NON-owning. Each stored pointer is std::atomic so
308
+ // that under all interleavings of registry calls the structure is
309
+ // race-free. This doesn't cost us anything on reads on x86. (An
310
+ // unsynchronized implementation probably is OK too, but I didn't want
311
+ // to prove that we never read from device_guard_impl_registry at the
312
+ // same time some registration is occurring. Shiver.)
313
+ //
314
+ // I'd like this registry to be valid even at program destruction time
315
+ // (in case someone uses a DeviceGuard in a destructor to do some cleanup
316
+ // in the CUDA API.) Since there are no direct accesses of the underlying
317
+ // owning objects which I can use to enforce initialization order (unlike
318
+ // in a Meyer singleton), it implies that you must *leak* objects when
319
+ // putting them in the registry. This is done by deleting the destructor
320
+ // on DeviceGuardImplInterface.
321
+ // NOLINTNEXTLINE(*c-arrays*)
322
+ extern C10_API std::atomic<const DeviceGuardImplInterface*>
323
+ device_guard_impl_registry[static_cast<size_t>(
324
+ DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];
325
+
326
+ // I can't conveniently use c10/util/Registry.h for the following reason:
327
+ // c10/util/Registry.h gives me a slow way of Create'ing a object of some
328
+ // interface from the registry, but no way of quickly accessing an already
329
+ // created object. I'll be banging on getDeviceGuardImpl every time we do a
330
+ // DeviceGuard, so I really don't want to be doing an unordered_map lookup.
331
+ // Better if the registration mechanism directly drops its implementation
332
+ // into device_guard_impl_registry.
333
+
334
+ class C10_API DeviceGuardImplRegistrar {
335
+ public:
336
+ DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
337
+ };
338
+
339
+ #define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \
340
+ static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \
341
+ g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl());
342
+
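+ // Usage sketch (hedged; a backend does this once in one translation unit):
+ //
+ //   C10_REGISTER_GUARD_IMPL(Meta, NoOpDeviceGuardImpl<DeviceType::Meta>);
+ //
+ // The registrar's constructor drops the (intentionally leaked) impl into
+ // device_guard_impl_registry under the given DeviceType.
+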
343
+ inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) {
344
+ // Two adjacent int16_t fields, DeviceType and DeviceIndex, have field access
345
+ // miscompiled on NVCC. To work around this issue, we apply a mask to the
346
+ // DeviceType; the static_assert below first checks that DeviceType is 8-bit.
347
+ // FB employees can see
348
+ // https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/
349
+ // for more details
350
+ static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit");
351
+ auto p = device_guard_impl_registry[static_cast<size_t>(type) & 0xFF].load();
352
+
353
+ // This seems to be the first place where you make use of a device
354
+ // when you pass devices to factory functions. Give a nicer error
355
+ // message in this case.
356
+ TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices");
357
+ return p;
358
+ }
359
+
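+ // Sketch: fetching and using a registered impl (illustrative):
+ //
+ //   const auto* guard = c10::impl::getDeviceGuardImpl(DeviceType::CPU);
+ //   Device cur = guard->getDevice();
+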
360
+ inline bool hasDeviceGuardImpl(DeviceType type) {
361
+ return device_guard_impl_registry[static_cast<size_t>(type)].load();
362
+ }
363
+
364
+ } // namespace impl
365
+ } // namespace c10
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h ADDED
@@ -0,0 +1,59 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <atomic>
5
+
6
+ namespace c10::impl {
7
+
8
+ // This TLS controls whether or not we permanently associate PyObject
9
+ // with Tensor the first time it is allocated. When hermetic PyObject
10
+ // TLS is enabled (state is true), we DO NOT save PyObjects to Tensor,
11
+ // meaning you get a distinct PyObject whenever you execute the code in
12
+ // question.
13
+ struct C10_API HermeticPyObjectTLS {
14
+ static void set_state(bool state);
15
+ static bool get_state() {
16
+ // Hypothetical fastpath if torchdeploy/multipy isn't used. Per
17
+ // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
18
+ // this qualifies relaxed access because it is a single-location data
19
+ // structure (only the boolean here).
20
+ //
21
+ // Forgetting about data races for a moment, is there a logical race?
22
+ //
23
+ // - Boolean only ever transitions from false to true. So the
24
+ // critical situation is when one interpreter is already running
25
+ // when a second interpreter switches haveState from false to true.
26
+ //
27
+ // - The first interpreter is indifferent whether or not it sees
28
+ // hasState true/false; obviously false works (this is what the
29
+ // interpreter was previously using; more directly, the interpreter
30
+ // calls into itself as the handler, so being hermetic is not
31
+ // required), and true simply means serviced python operator calls will
32
+ // be hermetic; in these cases it is expected to be functionally
33
+ // equivalent.
34
+ //
35
+ // - The second interpreter MUST see hasState true (as its requests will
36
+ // be forwarded to the first interpreter), but it is assumed that there
37
+ // is a synchronization between the interpreter initialization, and
38
+ // when we actually perform operations, so it is guaranteed to see
39
+ // hasState true.
40
+ //
41
+ // QED.
42
+ //
43
+ // This fastpath is currently disabled so that we can more easily test that
44
+ // hermetic mode works correctly even on stock build of PyTorch.
45
+ if (false && !haveState_.load(std::memory_order_relaxed))
46
+ return false;
47
+ return get_tls_state();
48
+ }
49
+ // Call this from the multipy/torchdeploy top level
50
+ static void init_state();
51
+
52
+ private:
53
+ // This only flipped once from false to true during torchdeploy/multipy
54
+ // initialization, and never again.
55
+ static std::atomic<bool> haveState_;
56
+ static bool get_tls_state();
57
+ };
58
+
59
+ } // namespace c10::impl
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h ADDED
@@ -0,0 +1,428 @@
1
+ #pragma once
2
+
3
+ // This file provides implementations of InlineDeviceGuard and
4
+ // InlineOptionalDeviceGuard.
5
+
6
+ #include <c10/core/Device.h>
7
+ #include <c10/core/DeviceType.h>
8
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
9
+ #include <c10/core/impl/VirtualGuardImpl.h>
10
+ #include <c10/util/Exception.h>
11
+ #include <c10/util/Optional.h>
12
+ #include <type_traits>
13
+ #include <utility>
14
+
15
+ namespace c10::impl {
16
+
17
+ /**
18
+ * A DeviceGuard is an RAII class that sets a device to some value
19
+ * on construction, and resets the device to its original value on
20
+ * destruction.
21
+ *
22
+ * InlineDeviceGuard is a helper class for implementing DeviceGuards.
23
+ * It is templated over a DeviceGuardImpl (anything that implements
24
+ * DeviceGuardImplInterface). There are two primary ways to instantiate
25
+ * InlineDeviceGuard:
26
+ *
27
+ * - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl.
28
+ * This is the best way to use InlineDeviceGuard, as all calls are
29
+ * devirtualized, giving you code as efficient as straight line
30
+ * calls to cudaGetDevice/cudaSetDevice.
31
+ *
32
+ * - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl
33
+ * retrieved from a DeviceType registry. We have explicitly instantiated
34
+ * InlineDeviceGuard this way as c10::DeviceGuard.
35
+ *
36
+ * If you are in a hurry, you can use InlineDeviceGuard directly:
37
+ *
38
+ * using CUDAGuard = impl::InlineDeviceGuard<CUDAGuardImpl>;
39
+ *
40
+ * However, you can provide a better user experience if you explicitly write a
41
+ * wrapper class that itself contains the template instantiation:
42
+ *
43
+ * class CUDAGuard {
44
+ * public:
45
+ * // ... the API ...
46
+ * private:
47
+ * impl::InlineDeviceGuard<CUDAGuardImpl> guard_;
48
+ * }
49
+ *
50
+ * The wrapper class provides a good place to write documentation, and helps
51
+ * avoid weird template instantiation errors when a user incorrectly uses the
52
+ * class.
53
+ *
54
+ * If you need to test this class, consider instantiating it with FakeGuardImpl.
55
+ */
56
+ template <typename T>
57
+ class InlineDeviceGuard {
58
+ public:
59
+ // Note [Omitted default constructor from RAII]
60
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
+ // In principle, we could add a default constructor to
62
+ // DeviceGuard which reads the current device and promises to
63
+ // restore to that device on exit. However, most cases where you
64
+ // would have written this, you probably meant to actually just
65
+ // use OptionalDeviceGuard (since you don't actually need the
66
+ // restore to happen if you don't ever actually set the device).
67
+ // We remove the constructor here to encourage you to think about
68
+ // what you actually want to happen.
69
+ explicit InlineDeviceGuard() = delete;
70
+
71
+ /// Set the current device to the passed Device.
72
+ explicit InlineDeviceGuard(Device device)
73
+ : impl_(device.type()),
74
+ original_device_(
75
+ device.index() == -1 ? impl_.getDevice()
76
+ : impl_.exchangeDevice(device)),
77
+ current_device_(device.index() == -1 ? original_device_ : device) {}
78
+
79
+ /// Set the current device index to the passed DeviceIndex. (The
80
+ /// device type is inferred from the template parameter T).
81
+ template <
82
+ typename U = T,
83
+ typename =
84
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
85
+ explicit InlineDeviceGuard(DeviceIndex device_index)
86
+ : InlineDeviceGuard(Device(U::static_type, device_index)) {}
87
+
88
+ /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit
89
+ /// DeviceGuardImplInterface pointer.
90
+ template <
91
+ typename U = T,
92
+ typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
93
+ explicit InlineDeviceGuard(
94
+ Device device,
95
+ const DeviceGuardImplInterface* impl)
96
+ : impl_(
97
+ VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))),
98
+ original_device_(
99
+ device.index() == -1 ? impl_.getDevice()
100
+ : impl_.exchangeDevice(device)),
101
+ current_device_(device.index() == -1 ? original_device_ : device) {}
102
+
103
+ /// Copy is disallowed
104
+ InlineDeviceGuard(const InlineDeviceGuard<T>&) = delete;
105
+ InlineDeviceGuard<T>& operator=(const InlineDeviceGuard<T>&) = delete;
106
+
107
+ /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
108
+ /// which is required for moves on types with nontrivial destructors.
109
+ InlineDeviceGuard(InlineDeviceGuard<T>&& other) = delete;
110
+ InlineDeviceGuard& operator=(InlineDeviceGuard<T>&& other) = delete;
111
+
112
+ ~InlineDeviceGuard() {
113
+ impl_.uncheckedSetDevice(original_device_);
114
+ }
115
+
116
+ /// Sets the device to the given one.
117
+ template <
118
+ typename U = T,
119
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>, int> = 0>
120
+ void set_device(at::Device device) {
121
+ AT_ASSERT(
122
+ (U::static_type == DeviceType::HIP && device.is_cuda()) ||
123
+ device.type() == U::static_type);
124
+ auto index = device.index();
125
+ if (index == -1)
126
+ return;
127
+ impl_.setDevice(device);
128
+ current_device_ = device;
129
+ }
130
+
131
+ /// Resets the currently set device to its original device, and then sets the
132
+ /// current device to the passed device. This is effectively equivalent to
133
+ /// set_device when a guard supports only a single device type.
134
+ template <typename U = T>
135
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>> reset_device(
136
+ at::Device device) {
137
+ set_device(device);
138
+ }
139
+
140
+ /// Resets the currently set device to its original device, and then sets the
141
+ /// current device to the passed device (for a possibly different device
142
+ /// type).
143
+ ///
144
+ /// This method is named reset_device to highlight the fact that previous
145
+ /// device settings from this guard are NOT preserved, even if the device
146
+ /// has a different device type. For example:
147
+ ///
148
+ /// // CUDA device is 0
149
+ /// DeviceGuard g(Device(kCUDA, 1));
150
+ /// g.reset_device(Device(kHIP, 2));
151
+ /// // CUDA device is 0 (!!)
152
+ ///
153
+ /// NOTE: this implementation may skip some device setting if it can prove
154
+ /// that it is unnecessary.
155
+ ///
156
+ /// Optional argument is for testing only.
157
+ template <typename U = T>
158
+ typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>> reset_device(
159
+ at::Device device,
160
+ const impl::DeviceGuardImplInterface* impl = nullptr) {
161
+ auto index = device.index();
162
+ if (index == -1)
163
+ return;
164
+ if (device.type() == original_device_.type()) {
165
+ AT_ASSERT(impl == nullptr || impl->type() == device.type());
166
+ impl_.setDevice(device);
167
+ current_device_ = device;
168
+ } else {
169
+ // Destruct and reconstruct the DeviceGuard in place
170
+ impl_.setDevice(original_device_);
171
+ impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl);
172
+ original_device_ = impl_.exchangeDevice(device);
173
+ current_device_ = device;
174
+ }
175
+ }
176
+
177
+ /// Sets the device index to the given one. The device type is inferred
178
+ /// from the original device type.
179
+ void set_index(DeviceIndex index) {
180
+ reset_device(Device(original_device_.type(), index));
181
+ }
182
+
183
+ /// Returns the device that was set at the time the most recent
184
+ /// reset_device(), or otherwise the device at construction time.
185
+ Device original_device() const {
186
+ return original_device_;
187
+ }
188
+
189
+ /// Returns the most recent device that was set using this device guard,
190
+ /// either from construction, or via set_device/reset_device/set_index.
191
+ Device current_device() const {
192
+ return current_device_;
193
+ }
194
+
195
+ protected:
196
+ T impl_;
197
+
198
+ private:
199
+ Device original_device_;
200
+ Device current_device_;
201
+ };
202
+
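+ // Usage sketch (hedged; assumes a concrete CUDAGuardImpl for the backend):
+ //
+ //   {
+ //     impl::InlineDeviceGuard<CUDAGuardImpl> g(Device(kCUDA, 1));
+ //     // ... code here runs with CUDA device 1 current ...
+ //   } // destructor restores whichever device was current on entry
+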
203
+ /**
204
+ * An OptionalDeviceGuard is an RAII class that sets a device to some value on
205
+ * initialization, and resets the device to its original value on destruction.
206
+ *
207
+ * InlineOptionalDeviceGuard is a helper class for implementing
208
+ * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to
209
+ * use this. See OptionalDeviceGuard for user-oriented usage notes.
210
+ */
211
+ template <typename T>
212
+ class InlineOptionalDeviceGuard {
213
+ public:
214
+ // Note [Explicit initialization of optional fields]
215
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
216
+ // Explicit initialization of optional fields
217
+ // required to workaround an nvcc bug; see
218
+ // https://github.com/pytorch/pytorch/issues/12117
219
+
220
+ /// Creates an uninitialized OptionalDeviceGuard.
221
+ explicit InlineOptionalDeviceGuard()
222
+ : guard_() // See Note [Explicit initialization of optional fields]
223
+ {}
224
+
225
+ /// Set the current device to the passed Device, if it is not nullopt.
226
+ explicit InlineOptionalDeviceGuard(optional<Device> device_opt)
227
+ : guard_() { // See Note [Explicit initialization of optional fields]
228
+ if (device_opt.has_value()) {
229
+ guard_.emplace(device_opt.value());
230
+ }
231
+ }
232
+
233
+ /// Set the current device to the passed DeviceIndex, if it is not nullopt.
234
+ template <
235
+ typename U = T,
236
+ typename =
237
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
238
+ explicit InlineOptionalDeviceGuard(optional<DeviceIndex> device_index_opt)
239
+ : guard_() { // See Note [Explicit initialization of optional fields]
240
+ if (device_index_opt.has_value()) {
241
+ guard_.emplace(device_index_opt.value());
242
+ }
243
+ }
244
+
245
+ /// All constructors of DeviceGuard are valid for OptionalDeviceGuard
246
+ /// and result in initialized OptionalDeviceGuard.
247
+ template <typename... Args>
248
+ explicit InlineOptionalDeviceGuard(Args&&... args)
249
+ : guard_(std::in_place, std::forward<Args>(args)...) {}
250
+
251
+ // TODO: Consider reading Tensor and TensorList constructors here, when
252
+ // Tensor moves to c10. (These are only valid on OptionalDeviceGuard,
253
+ // because a Tensor may be undefined, in which case we need an uninitialized
254
+ // tensor guard.)
255
+
256
+ // Note [Move construction for RAII guards is tricky]
257
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
258
+ // In principle, move construction is useful for terminating
259
+ // the lifetime of an `OptionalDeviceGuard` early; for example:
260
+ //
261
+ // // current device is d0
262
+ // OptionalDeviceGuard g1(d1);
263
+ // // current device is d1
264
+ // {
265
+ // OptionalDeviceGuard g2(std::move(g1));
266
+ // }
267
+ // // current device is d0!!
268
+ //
269
+ // However, it's difficult to implement the move constructor
270
+ // in a way that works in all situations. For example, consider
271
+ // the following example:
272
+ //
273
+ // OptionalDeviceGuard g1(d1);
274
+ // {
275
+ // OptionalDeviceGuard g2(d2);
276
+ // {
277
+ // OptionalDeviceGuard g3(std::move(g1)); // !!!
278
+ // }
279
+ // }
280
+ //
281
+ // What should the current device be while g3 is in scope... and what
282
+ // should it be after it goes out of scope? What about g2?
283
+ // There don't seem to be satisfactory answers for these questions.
284
+ //
285
+ // It's in principle possible to raise an error when this occurs
286
+ // by doing some extra thread-local bookkeeping. But why bother?
287
+ // Just don't provide the constructor.
288
+ InlineOptionalDeviceGuard(InlineOptionalDeviceGuard<T>&& other) = delete;
289
+
290
+ // Note [Move assignment for RAII guards is tricky]
291
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
292
+ // Move assignment is deleted, because you need to know which guard was
293
+ // defined "first", as that guard's original_device_ wins--with the current
294
+ // representation, we have no way of telling which is the case. (Move
295
+ // construction does not have this problem, as one guard is always
296
+ // uninitialized.)
297
+ //
298
+ // We can make this clear by way of a pair of examples:
299
+ //
300
+ // Example 1:
301
+ //
302
+ // // initial device is n0
303
+ // {
304
+ // CUDAGuard g1(n1);
305
+ // {
306
+ // CUDAGuard g2(n2);
307
+ // // current device should be n2
308
+ // g1 = std::move(g2);
309
+ // // current device should still be n2
310
+ // }
311
+ // // current device should still be n2
312
+ // }
313
+ // // current device should be n0
314
+ //
315
+ // Example 2 (flip the order of the two guards):
316
+ //
317
+ // // initial device is n0
318
+ // {
319
+ // CUDAGuard g2(n2);
320
+ // {
321
+ // CUDAGuard g1(n1);
322
+ // // current device should be n1
323
+ // g1 = std::move(g2);
324
+ // // current device should be n2
325
+ // }
326
+ // // current device should be n0 (since g2 has been vacated)
327
+ // }
328
+ //
329
+ // In both examples, we need g1 to restore to n0 after move assignment.
330
+ // However, in example 1, this is determined by the restore value of g1
331
+ // (prior to the move). In example 2, however, it is determined by the the
332
+ // restore value of g2(!!). We don't know which one should win, without having
333
+ // a way of telling which guard was allocated first.
334
+ //
335
+ // We could solve this with an extra thread-local variable. But no one is
336
+ // actually using move-assignment. So just get rid of it.
337
+ InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) =
338
+ delete;
339
+
340
+ /// Sets the device to the given one. Initializes OptionalDeviceGuard if it
341
+ /// is not already initialized.
342
+ template <
343
+ typename U = T,
344
+ typename =
345
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
346
+ void set_device(at::Device device) {
347
+ if (!guard_.has_value()) {
348
+ guard_.emplace(device);
349
+ } else {
350
+ guard_->set_device(device);
351
+ }
352
+ }
353
+
354
+ /// Resets the currently set device to its original device, and then sets the
355
+ /// current device to the passed device (for a possibly different device
356
+ /// type). Initializes OptionalDeviceGuard if it is not already initialized.
357
+ ///
358
+ /// See notes on why this is called reset_device on InlineDeviceGuard.
359
+ ///
360
+ /// Optional argument is for testing only.
361
+ template <
362
+ typename U = T,
363
+ typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
364
+ void reset_device(
365
+ at::Device device,
366
+ const DeviceGuardImplInterface* impl = nullptr) {
367
+ if (!guard_.has_value()) {
368
+ guard_.emplace(device, impl);
369
+ } else {
370
+ guard_->reset_device(device, impl);
371
+ }
372
+ }
373
+
374
+ /// Resets the currently set device to its original device, and then sets the
375
+ /// current device to the passed device. Initializes the guard if it is
376
+ /// not already initialized. This is effectively equivalent to set_device
377
+ /// when a guard supports only a single device type.
378
+ template <
379
+ typename U = T,
380
+ typename =
381
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
382
+ void reset_device(at::Device device) {
383
+ if (!guard_.has_value()) {
384
+ guard_.emplace(device);
385
+ } else {
386
+ guard_->reset_device(device);
387
+ }
388
+ }
389
+
390
+ /// Sets the device index to the given one. The device type is statically
391
+ /// known.
392
+ template <
393
+ typename U = T,
394
+ typename =
395
+ typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
396
+ void set_index(DeviceIndex index) {
397
+ if (!guard_.has_value()) {
398
+ guard_.emplace(index);
399
+ } else {
400
+ guard_->set_index(index);
401
+ }
402
+ }
403
+
404
+ /// Returns the device that was set immediately prior to initialization of
405
+ /// the guard, or nullopt if the guard is uninitialized.
406
+ optional<Device> original_device() const {
407
+ return guard_.has_value() ? make_optional(guard_->original_device())
408
+ : nullopt;
409
+ }
410
+
411
+ /// Returns the most recent device that was set using this device guard,
412
+ /// either from construction, or via set_device, if the guard is initialized,
413
+ /// or nullopt if the guard is uninitialized.
414
+ optional<Device> current_device() const {
415
+ return guard_.has_value() ? make_optional(guard_->current_device())
416
+ : nullopt;
417
+ }
418
+
419
+ /// Restore the original device, resetting this guard to uninitialized state.
420
+ void reset() {
421
+ guard_.reset();
422
+ }
423
+
424
+ private:
425
+ optional<InlineDeviceGuard<T>> guard_;
426
+ };
427
+
428
+ } // namespace c10::impl
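For orientation, here is a minimal usage sketch of the optional-guard pattern documented above (not part of this diff). It assumes the user-facing c10::OptionalDeviceGuard wrapper from c10/core/DeviceGuard.h, which is built on InlineOptionalDeviceGuard; `run_maybe_on` is an illustrative helper, not a c10 API.

    #include <c10/core/Device.h>
    #include <c10/core/DeviceGuard.h>
    #include <c10/util/Optional.h>

    void run_maybe_on(c10::optional<c10::Device> device) {
      // Stays uninitialized when `device` is nullopt, leaving the ambient device alone.
      c10::OptionalDeviceGuard guard(device);
      // ... do work; if a device was set, the original one is restored at scope exit.
    }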
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h ADDED
@@ -0,0 +1,255 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/InlineDeviceGuard.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ namespace c10::impl {
8
+
9
+ /**
10
+ * A StreamGuard is an RAII class that changes the current device
11
+ * to the device corresponding to some stream, and changes the
12
+ * default stream on that device to be this stream.
13
+ *
14
+ * InlineStreamGuard is a helper class for implementing StreamGuards.
15
+ * See InlineDeviceGuard for guidance on how to use this class.
16
+ */
17
+ template <typename T>
18
+ class InlineStreamGuard : private InlineDeviceGuard<T> {
19
+ public:
20
+ /// No default constructor, see Note [Omitted default constructor from RAII]
21
+ explicit InlineStreamGuard() = delete;
22
+
23
+ /// Set the current device to the device associated with the passed stream,
24
+ /// and set the current stream on that device to the passed stream.
25
+ explicit InlineStreamGuard(Stream stream)
26
+ : InlineDeviceGuard<T>(stream.device()),
27
+ original_stream_of_original_device_(
28
+ this->impl_.getStream(original_device())),
29
+ original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
30
+ current_stream_(stream) {}
31
+
32
+ /// This constructor exists purely for testing
33
+ template <
34
+ typename U = T,
35
+ typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
36
+ explicit InlineStreamGuard(
37
+ Stream stream,
38
+ const DeviceGuardImplInterface* impl)
39
+ : InlineDeviceGuard<T>(
40
+ stream.device(),
41
+ impl ? impl : getDeviceGuardImpl(stream.device_type())),
42
+ original_stream_of_original_device_(
43
+ this->impl_.getStream(original_device())),
44
+ original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
45
+ current_stream_(stream) {}
46
+
47
+ /// Copy is disallowed
48
+ InlineStreamGuard(const InlineStreamGuard<T>&) = delete;
49
+ InlineStreamGuard<T>& operator=(const InlineStreamGuard<T>&) = delete;
50
+
51
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
52
+ /// which is required for moves on types with nontrivial destructors.
53
+ InlineStreamGuard(InlineStreamGuard<T>&& other) = delete;
54
+ InlineStreamGuard& operator=(InlineStreamGuard<T>&& other) = delete;
55
+
56
+ ~InlineStreamGuard() {
57
+ this->impl_.exchangeStream(original_stream_of_current_device_);
58
+ }
59
+
60
+ /// Resets the currently set stream to the original stream and
61
+ /// the currently set device to the original device. Then,
62
+ /// set the current device to the device associated with the passed stream,
63
+ /// and set the current stream on that device to the passed stream.
64
+ ///
65
+ /// NOTE: this implementation may skip some stream/device setting if
66
+ /// it can prove that it is unnecessary.
67
+ ///
68
+ /// WARNING: reset_stream does NOT preserve previously set streams on
69
+ /// different devices. If you need to set streams on multiple devices
70
+ /// use MultiStreamGuard instead.
71
+ void reset_stream(Stream stream) {
72
+ // TODO: make a version that takes an impl argument. Unfortunately,
73
+ // that will require SFINAE because impl is only valid for the
74
+ // VirtualGuardImpl specialization.
75
+ if (stream.device() == this->current_device()) {
76
+ this->impl_.exchangeStream(stream);
77
+ current_stream_ = stream;
78
+ } else {
79
+ // Destruct and reconstruct the StreamGuard in-place
80
+ this->impl_.exchangeStream(original_stream_of_current_device_);
81
+ this->reset_device(stream.device());
82
+ original_stream_of_current_device_ = this->impl_.exchangeStream(stream);
83
+ current_stream_ = stream;
84
+ }
85
+ }
86
+
87
+ // It's not clear if set_device should also reset the current stream
88
+ // if the device is unchanged; therefore, we don't provide it.
89
+ // The situation is somewhat clearer with reset_device, but it's still
90
+ // a pretty weird thing to do, so we haven't added it either.
91
+
92
+ /// Returns the stream of the original device prior to this guard. Subtly,
93
+ /// the stream returned here is the original stream of the *original*
94
+ /// device; i.e., it's the stream that your computation *would* have
95
+ /// been put on, if it hadn't been for this meddling stream guard.
96
+ /// This is usually what you want.
97
+ Stream original_stream() const {
98
+ return original_stream_of_original_device_;
99
+ }
100
+
101
+ /// Returns the most recent stream that was set using this device guard,
102
+ /// either from construction, or via set_stream.
103
+ Stream current_stream() const {
104
+ return current_stream_;
105
+ }
106
+
107
+ /// Returns the most recent device that was set using this device guard,
108
+ /// either from construction, or via set_device/reset_device/set_index.
109
+ Device current_device() const {
110
+ return InlineDeviceGuard<T>::current_device();
111
+ }
112
+
113
+ /// Returns the device that was set at the most recent reset_stream(),
114
+ /// or otherwise the device at construction time.
115
+ Device original_device() const {
116
+ return InlineDeviceGuard<T>::original_device();
117
+ }
118
+
119
+ private:
120
+ Stream
121
+ original_stream_of_original_device_; // what the user probably cares about
122
+ Stream original_stream_of_current_device_; // what we need to restore
123
+ Stream current_stream_;
124
+ };
125
+
126
+ /**
127
+ * An OptionalStreamGuard is an RAII class that sets the current device and stream
128
+ * to some value on initialization, and resets them to their original values on destruction.
129
+ * See InlineOptionalDeviceGuard for more guidance on how to use this class.
130
+ */
131
+ template <typename T>
132
+ class InlineOptionalStreamGuard {
133
+ public:
134
+ /// Creates an uninitialized stream guard.
135
+ explicit InlineOptionalStreamGuard()
136
+ : guard_() // See Note [Explicit initialization of optional fields]
137
+ {}
138
+
139
+ /// Set the current device to the device associated with the passed stream,
140
+ /// and set the current stream on that device to the passed stream,
141
+ /// if the passed stream is not nullopt.
142
+ explicit InlineOptionalStreamGuard(optional<Stream> stream_opt) : guard_() {
143
+ if (stream_opt.has_value()) {
144
+ guard_.emplace(stream_opt.value());
145
+ }
146
+ }
147
+
148
+ /// All constructors of StreamGuard are valid for OptionalStreamGuard
149
+ template <typename... Args>
150
+ explicit InlineOptionalStreamGuard(Args&&... args)
151
+ : guard_(std::in_place, std::forward<Args>(args)...) {}
152
+
153
+ // See Note [Move construction for RAII guards is tricky]
154
+ InlineOptionalStreamGuard(InlineOptionalStreamGuard<T>&& other) = delete;
155
+
156
+ // See Note [Move assignment for RAII guards is tricky]
157
+ InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) =
158
+ delete;
159
+
160
+ /// Resets the currently set stream to the original stream and
161
+ /// the currently set device to the original device. Then,
162
+ /// set the current device to the device associated with the passed stream,
163
+ /// and set the current stream on that device to the passed stream.
164
+ /// Initializes the OptionalStreamGuard if it was not previously initialized.
165
+ void reset_stream(Stream stream) {
166
+ if (guard_.has_value()) {
167
+ guard_->reset_stream(stream);
168
+ } else {
169
+ guard_.emplace(stream);
170
+ }
171
+ }
172
+
173
+ /// Returns the stream that was set at the time the guard was most recently
174
+ /// initialized, or nullopt if the guard is uninitialized.
175
+ optional<Stream> original_stream() const {
176
+ return guard_.has_value() ? make_optional(guard_->original_stream())
177
+ : nullopt;
178
+ }
179
+
180
+ /// Returns the most recent stream that was set using this stream guard,
181
+ /// either from construction, or via reset_stream, if the guard is
182
+ /// initialized, or nullopt if the guard is uninitialized.
183
+ optional<Stream> current_stream() const {
184
+ return guard_.has_value() ? make_optional(guard_->current_stream())
185
+ : nullopt;
186
+ }
187
+
188
+ /// Restore the original device and stream, resetting this guard to
189
+ /// uninitialized state.
190
+ void reset() {
191
+ guard_.reset();
192
+ }
193
+
194
+ private:
195
+ optional<InlineStreamGuard<T>> guard_;
196
+ };
197
+
198
+ template <typename T>
199
+ class InlineMultiStreamGuard {
200
+ public:
201
+ /// Calls `set_stream` on each of the streams in the list.
202
+ /// This may be useful if you need to set different streams
203
+ /// for different devices.
204
+ explicit InlineMultiStreamGuard(ArrayRef<Stream> streams) {
205
+ if (!streams.empty()) {
206
+ impl_.emplace(getDeviceTypeOfStreams(streams));
207
+ original_streams_.reserve(streams.size());
208
+ for (const Stream& s : streams) {
209
+ original_streams_.emplace_back(this->impl_->exchangeStream(s));
210
+ }
211
+ }
212
+ }
213
+
214
+ /// Copy is disallowed
215
+ InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete;
216
+ InlineMultiStreamGuard<T>& operator=(const InlineMultiStreamGuard&) = delete;
217
+
218
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
219
+ /// which is required for moves on types with nontrivial destructors.
220
+ InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete;
221
+ InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete;
222
+
223
+ ~InlineMultiStreamGuard() noexcept {
224
+ if (this->impl_.has_value()) {
225
+ for (const Stream& s : original_streams_) {
226
+ this->impl_->exchangeStream(s);
227
+ }
228
+ }
229
+ }
230
+
231
+ protected:
232
+ optional<T> impl_;
233
+
234
+ private:
235
+ /// The original streams that were active on all devices.
236
+ std::vector<Stream> original_streams_;
237
+
238
+ static DeviceType getDeviceTypeOfStreams(ArrayRef<Stream> streams) {
239
+ TORCH_INTERNAL_ASSERT(!streams.empty());
240
+ DeviceType type = streams[0].device_type();
241
+ for (const auto idx : c10::irange(1, streams.size())) {
242
+ TORCH_CHECK_VALUE(
243
+ streams[idx].device_type() == type,
244
+ "Streams have a mix of device types: stream 0 is on ",
245
+ streams[0].device(),
246
+ " while stream ",
247
+ idx,
248
+ " is on device ",
249
+ streams[idx].device());
250
+ }
251
+ return type;
252
+ }
253
+ };
254
+
255
+ } // namespace c10::impl
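A minimal usage sketch of the stream-guard pattern above (not part of this diff), assuming the user-facing c10::StreamGuard wrapper from c10/core/StreamGuard.h; `enqueue_on` is an illustrative helper.

    #include <c10/core/Stream.h>
    #include <c10/core/StreamGuard.h>

    void enqueue_on(c10::Stream stream) {
      // Switches the current device to stream.device() and makes `stream` current.
      c10::StreamGuard guard(stream);
      // ... launch work that picks up the current stream ...
    }  // original stream and device are restored here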
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h ADDED
@@ -0,0 +1,164 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DispatchKeySet.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ // TLS management for DispatchKeySet (the "local" DispatchKeySet(s))
7
+ //
8
+ // This manages two thread-local DispatchKeySets:
9
+ //
10
+ // - The included type set, which adds a tensor type for consideration
11
+ // in dispatch. (For example, you might add Profiling to
12
+ // the included type set to turn on profiling on all tensor operations.)
13
+ //
14
+ // - The excluded type set, which disqualifies a tensor type from dispatch.
15
+ // (For example, after redispatching on variable, we disqualify
16
+ // Autograd so we don't attempt to handle variable again.)
17
+ // (Exclusion wins over inclusion.)
18
+ //
19
+ // NB: Originally, I implemented the excluded type set as storing the inverted
20
+ // set, but TLS is defined to be zero-initialized, so this doesn't actually work
21
+ // (if it's inverted, you want the set to be -1 initialized).
22
+
23
+ namespace c10::impl {
24
+
25
+ // POD version of LocalDispatchKeySet. Declared here just so that
26
+ // we can put it in the guards.
27
+ // This struct encapsulates special handling for TLS initialization
28
+ // in set_included()/included() API so that they reflect the truth.
29
+ // If you want to create PODLocalDispatchKeySet with non-zero state,
30
+ // use set_included() instead of default constructor.
31
+ struct C10_API PODLocalDispatchKeySet {
32
+ uint64_t included_;
33
+ uint64_t excluded_;
34
+
35
+ // See Note [TLS Initialization]
36
+ DispatchKeySet included() const {
37
+ return DispatchKeySet(DispatchKeySet::RAW, included_) ^
38
+ c10::default_included_set;
39
+ }
40
+ DispatchKeySet excluded() const {
41
+ return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^
42
+ c10::default_excluded_set;
43
+ }
44
+
45
+ void set_included(DispatchKeySet x) {
46
+ included_ = (x ^ c10::default_included_set).raw_repr();
47
+ }
48
+ void set_excluded(DispatchKeySet x) {
49
+ excluded_ = (x ^ c10::default_excluded_set).raw_repr();
50
+ }
51
+ };
52
+ static_assert(
53
+ std::is_trivial_v<PODLocalDispatchKeySet>,
54
+ "PODLocalDispatchKeySet must be a POD type.");
55
+
56
+ struct C10_API LocalDispatchKeySet {
57
+ /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x)
58
+ : included_(x.included()), excluded_(x.excluded()) {}
59
+ DispatchKeySet included_;
60
+ DispatchKeySet excluded_;
61
+ };
62
+
63
+ // thread_local variables cannot be C10_API on Windows.
64
+ // Inlining this seems to break AutoDispatchBelowAutograd on Android.
65
+ #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
66
+ C10_API LocalDispatchKeySet tls_local_dispatch_key_set();
67
+ #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
68
+ extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set;
69
+
70
+ inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() {
71
+ // Don't let people fiddle with the thread_local directly just
72
+ // because they include this header.
73
+ return raw_local_dispatch_key_set;
74
+ }
75
+ #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
76
+
77
+ // Internal, use ThreadLocalStateGuard
78
+ C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);
79
+
80
+ // RAII API for manipulating the thread-local dispatch state.
81
+
82
+ class C10_API IncludeDispatchKeyGuard {
83
+ public:
84
+ IncludeDispatchKeyGuard(DispatchKeySet);
85
+ IncludeDispatchKeyGuard(DispatchKey k)
86
+ : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
87
+ IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
88
+ IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete;
89
+ IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete;
90
+ IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete;
91
+ ~IncludeDispatchKeyGuard();
92
+
93
+ private:
94
+ // A little micro-optimization to save us from tls_get_addr call
95
+ // on destruction
96
+ PODLocalDispatchKeySet* tls_;
97
+ DispatchKeySet include_;
98
+ };
99
+
100
+ class C10_API ExcludeDispatchKeyGuard {
101
+ public:
102
+ ExcludeDispatchKeyGuard(DispatchKeySet);
103
+ ExcludeDispatchKeyGuard(DispatchKey k)
104
+ : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
105
+ ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
106
+ ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete;
107
+ ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete;
108
+ ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete;
109
+ ~ExcludeDispatchKeyGuard();
110
+
111
+ private:
112
+ // A little micro-optimization to save us from tls_get_addr call
113
+ // on destruction
114
+ PODLocalDispatchKeySet* tls_;
115
+ DispatchKeySet exclude_;
116
+ };
117
+
118
+ struct C10_API ForceDispatchKeyGuard {
119
+ public:
120
+ ForceDispatchKeyGuard()
121
+ : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {}
122
+ ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set)
123
+ : ForceDispatchKeyGuard() {
124
+ c10::impl::_force_tls_local_dispatch_key_set(key_set);
125
+ }
126
+ ForceDispatchKeyGuard(
127
+ c10::DispatchKeySet include,
128
+ c10::DispatchKeySet exclude)
129
+ : ForceDispatchKeyGuard() {
130
+ auto updated_set = saved_keyset_;
131
+ updated_set.included_ = include;
132
+ updated_set.excluded_ = exclude;
133
+ c10::impl::_force_tls_local_dispatch_key_set(updated_set);
134
+ }
135
+ ~ForceDispatchKeyGuard() {
136
+ c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_);
137
+ }
138
+
139
+ private:
140
+ c10::impl::LocalDispatchKeySet saved_keyset_;
141
+ };
142
+
143
+ // Non-RAII API for manipulating the thread-local dispatch state.
144
+ // Please prefer the RAII API. The non-RAII API may be useful when
145
+ // the included/excluded state of a given DispatchKey must span
146
+ // many calls from the Python to the C++, so you cannot conveniently
147
+ // use an RAII guard.
148
+ //
149
+ // Example use case: a Python context manager that includes a certain
150
+ // DispatchKey, to ensure ops running under the context manager dispatch
151
+ // through that DispatchKey's registered overrides.
152
+ //
153
+ // The non-RAII API is less efficient than the RAII guards because both the
154
+ // getter and setter will do a tls_getaddr lookup (the RAII struct only needs
155
+ // one!)
156
+
157
+ C10_API bool tls_is_dispatch_key_excluded(DispatchKey x);
158
+ C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state);
159
+ C10_API bool tls_is_dispatch_key_included(DispatchKey x);
160
+ C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state);
161
+ C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks);
162
+ C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks);
163
+
164
+ } // namespace c10::impl
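A hedged sketch of the RAII API above (not part of this diff); `below_autograd` is an illustrative name, and DispatchKey::AutogradCPU is used purely as an example key.

    #include <c10/core/DispatchKey.h>
    #include <c10/core/impl/LocalDispatchKeySet.h>

    void below_autograd() {
      // While the guard is alive, the excluded key is skipped during dispatch on
      // this thread, so the op falls through to the backend kernel.
      c10::impl::ExcludeDispatchKeyGuard guard(c10::DispatchKey::AutogradCPU);
      // ... redispatch / run the op ...
    }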
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h ADDED
@@ -0,0 +1,190 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/HermeticPyObjectTLS.h>
4
+ #include <c10/core/impl/PyInterpreter.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <c10/util/python_stub.h>
7
+
8
+ #include <atomic>
9
+
10
+ namespace c10::impl {
11
+
12
+ struct C10_API PyObjectSlot {
13
+ public:
14
+ PyObjectSlot();
15
+
16
+ ~PyObjectSlot();
17
+
18
+ void maybe_destroy_pyobj();
19
+
20
+ // Associate the TensorImpl with the specified PyObject, and, if necessary,
21
+ // also tag the interpreter.
22
+ //
23
+ // NB: This lives in a header so that we can inline away the switch on status
24
+ //
25
+ // NB: THIS FUNCTION CAN RAISE AN EXCEPTION. Make sure to clean up after
26
+ // PyObject if necessary!
27
+ void init_pyobj(
28
+ PyInterpreter* self_interpreter,
29
+ PyObject* pyobj,
30
+ PyInterpreterStatus status) {
31
+ impl::PyInterpreter* expected = nullptr;
32
+ switch (status) {
33
+ case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED:
34
+ // caller guarantees there is no multithreaded access; if there is
35
+ // no data race OK to do a relaxed store
36
+ pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed);
37
+ break;
38
+ case impl::PyInterpreterStatus::TAGGED_BY_US:
39
+ // no tagging is necessary, the tag is already correct
40
+ break;
41
+ case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED:
42
+ // attempt to claim this TensorImpl with the specified interpreter
43
+ // tag
44
+ if (pyobj_interpreter_.compare_exchange_strong(
45
+ expected, self_interpreter, std::memory_order_acq_rel)) {
46
+ break;
47
+ }
48
+ // test if, actually, it was already tagged by us! this situation can't
49
+ // be caused by a race, but it could be caused by a situation
50
+ // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED
51
+ // (because they didn't pre-check the tag) when actually it was
52
+ // owned by the interpreter
53
+ if (expected == self_interpreter) {
54
+ break;
55
+ }
56
+ // fallthrough, we lost the race. We are guaranteed not to lose the
57
+ // race with ourself, as calls to init_pyobj with the same interpreter
58
+ // ID must be sequentialized by the GIL
59
+ [[fallthrough]];
60
+ case impl::PyInterpreterStatus::TAGGED_BY_OTHER:
61
+ TORCH_CHECK(
62
+ false,
63
+ "cannot allocate PyObject for Tensor on interpreter ",
64
+ self_interpreter,
65
+ " that has already been used by another torch deploy interpreter ",
66
+ pyobj_interpreter_.load());
67
+ }
68
+
69
+ // we are the ONLY thread that can have gotten to this point. It is not
70
+ // possible to conflict with another zero interpreter as access is protected
71
+ // by GIL
72
+ // NB: owns_pyobj tag is initially false
73
+ pyobj_ = pyobj;
74
+ }
75
+
76
+ // Query the PyObject interpreter. This may return null if there is no
77
+ // interpreter. This is racy!
78
+ PyInterpreter* pyobj_interpreter();
79
+
80
+ PyObject* _unchecked_untagged_pyobj() const;
81
+
82
+ // Test the interpreter tag. If tagged for the current interpreter, return
83
+ // a non-nullopt (but possibly null) PyObject. If (possibly) untagged,
84
+ // returns a nullopt. If it is definitely invalid, raises an error.
85
+ //
86
+ // If `ignore_hermetic_tls` is false and this function is called from a
87
+ // hermetic context (ie, `HermeticPyObjectTLS::get_state()` is true), then
88
+ // nullopt is returned. If `ignore_hermetic_tls` is true, then the hermetic
89
+ // context is ignored, allowing you to check the interpreter tag of a
90
+ // nonhermetic PyObject from within a hermetic context. This is necessary
91
+ // because there are some cases where the deallocator function of a
92
+ // nonhermetic PyObject is called from within a hermetic context, so it must
93
+ // be properly treated as a nonhermetic PyObject.
94
+ //
95
+ // NB: this lives in header so that we can avoid actually creating the
96
+ // std::optional
97
+ std::optional<PyObject*> check_pyobj(
98
+ PyInterpreter* self_interpreter,
99
+ bool ignore_hermetic_tls = false) const {
100
+ // Note [Memory ordering on Python interpreter tag]
101
+ impl::PyInterpreter* interpreter =
102
+ pyobj_interpreter_.load(std::memory_order_acquire);
103
+ if (interpreter == nullptr) {
104
+ // NB: This never returns DEFINITELY_UNINITIALIZED because there is
105
+ // always the possibility that another thread races to initialize
106
+ // after we query here. The only time when we can conclude a tensor
107
+ // is definitely uninitialized is when we have just allocated it and
108
+ // it cannot have escaped to other threads yet
109
+ return c10::nullopt;
110
+ } else if (interpreter == self_interpreter) {
111
+ // NB: pyobj_ could still be null!
112
+ if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) {
113
+ return c10::nullopt;
114
+ } else {
115
+ return c10::make_optional(_unchecked_untagged_pyobj());
116
+ }
117
+ } else {
118
+ TORCH_CHECK(
119
+ false,
120
+ "cannot access PyObject for Tensor on interpreter ",
121
+ (*self_interpreter)->name(),
122
+ " that has already been used by another torch deploy interpreter ",
123
+ (*pyobj_interpreter_.load())->name());
124
+ }
125
+ }
126
+
127
+ // Clear the PyObject field for an interpreter, in situations where we
128
+ // statically know the tensor is tagged with our interpreter.
129
+ void unchecked_clear_pyobj(PyInterpreter* interpreter);
130
+
131
+ PyInterpreter& load_pyobj_interpreter() const;
132
+
133
+ // Check if the PyObjectSlot's interpreter is the same as the specified
134
+ // interpreter
135
+ bool check_interpreter(PyInterpreter* interpreter);
136
+
137
+ // Check if the PyObjectSlot is holding a PyObject, owned or non-owned
138
+ bool has_pyobj_nonhermetic();
139
+
140
+ bool owns_pyobj();
141
+
142
+ void set_owns_pyobj(bool b);
143
+
144
+ private:
145
+ // This field contains the interpreter tag for this object. See
146
+ // Note [Python interpreter tag] for general context
147
+ //
148
+ // Note [Memory ordering on Python interpreter tag]
149
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
150
+ // What memory_order do we need when accessing this atomic? We don't
151
+ // need a single total modification order (as provided by
152
+ // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only
153
+ // transition from nullptr to a non-null pointer and never changes afterwards.
154
+ // Because there is only one modification, it trivially already has a total
155
+ // modification order (e.g., we don't need fences or locked instructions on
156
+ // x86)
157
+ //
158
+ // In fact, one could make a reasonable argument that relaxed reads are OK,
159
+ // due to the presence of external locking (GIL) to ensure that interactions
160
+ // with other data structures are still correctly synchronized, so that
161
+ // we fall in the "Single-Location Data Structures" case as described in
162
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
163
+ // However, on x86, it doesn't matter if I use acquire or relaxed on the load
164
+ // as I get the same assembly in both cases. So I just use the more
165
+ // conservative acquire (which will impede compiler optimizations but I don't
166
+ // care)
167
+ std::atomic<PyInterpreter*> pyobj_interpreter_;
168
+
169
+ // This field contains a reference to a PyObject representing this Tensor.
170
+ // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new
171
+ // PyObject for it and set this field. This field does not have to be
172
+ // protected by an atomic as it is only allowed to be accessed when you hold
173
+ // the GIL, or during destruction of the tensor.
174
+ //
175
+ // When a PyObject dies, you are obligated to clear this field
176
+ // (otherwise, you will try to use-after-free the pyobj); this currently
177
+ // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp
178
+ //
179
+ // NB: Ordinarily, this should not be a strong reference, as if the
180
+ // PyObject owns the Tensor, this would create a reference cycle.
181
+ // However, sometimes this ownership flips. To track who owns
182
+ // who, this has a single pointer tag indicating whether or not the
183
+ // C++ object owns the PyObject (the common case, zero, means PyObject
184
+ // owns the C++ object); see _unchecked_untagged_pyobj for raw access
185
+ // or check_pyobj for checked access. See references to PyObject
186
+ // resurrection in torch/csrc/autograd/python_variable.cpp
187
+ PyObject* pyobj_;
188
+ };
189
+
190
+ } // namespace c10::impl
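A hedged sketch of how a binding layer might consult the slot before allocating a fresh wrapper (not part of this diff); `reuse_or_null` is illustrative, and the slot/interpreter arguments are assumed to come from the surrounding TensorImpl machinery.

    #include <c10/core/impl/PyObjectSlot.h>

    PyObject* reuse_or_null(c10::impl::PyObjectSlot& slot,
                            c10::impl::PyInterpreter* interp) {
      auto maybe = slot.check_pyobj(interp);
      if (maybe.has_value() && *maybe != nullptr) {
        return *maybe;  // an existing PyObject is already associated with this tensor
      }
      return nullptr;   // caller would allocate one and register it via init_pyobj()
    }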
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace c10::impl {
7
+
8
+ struct C10_API PythonDispatcherTLS {
9
+ static void set_state(PyInterpreter* state);
10
+ static PyInterpreter* get_state();
11
+ static void reset_state();
12
+ };
13
+
14
+ struct C10_API DisablePythonDispatcher {
15
+ DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) {
16
+ PythonDispatcherTLS::set_state({});
17
+ }
18
+ ~DisablePythonDispatcher() {
19
+ PythonDispatcherTLS::set_state(old_);
20
+ }
21
+ PyInterpreter* old_;
22
+ };
23
+
24
+ } // namespace c10::impl
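A minimal sketch of the RAII helper above (not part of this diff); `run_without_python_dispatcher` is an illustrative name.

    #include <c10/core/impl/PythonDispatcherTLS.h>

    void run_without_python_dispatcher() {
      // Clears the TLS interpreter on entry and restores the previous one on exit.
      c10::impl::DisablePythonDispatcher guard;
      // ... ops dispatched in this scope bypass the Python dispatcher ...
    }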
parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ #include <cstddef>
6
+
7
+ namespace c10 {
8
+
9
+ C10_API void* alloc_cpu(size_t nbytes);
10
+ C10_API void free_cpu(void* data);
11
+
12
+ } // namespace c10
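A minimal sketch pairing the two calls above (not part of this diff); the buffer size and the `scratch_demo` name are illustrative.

    #include <c10/core/impl/alloc_cpu.h>

    void scratch_demo() {
      void* buf = c10::alloc_cpu(4096);  // raw CPU allocation
      // ... fill / use buf ...
      c10::free_cpu(buf);                // must be released with the matching free
    }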
parrot/lib/python3.10/site-packages/torch/include/c10/macros/Export.h ADDED
@@ -0,0 +1,160 @@
1
+ #ifndef C10_MACROS_EXPORT_H_
2
+ #define C10_MACROS_EXPORT_H_
3
+
4
+ /* Header file to define the common scaffolding for exported symbols.
5
+ *
6
+ * Export is by itself a quite tricky situation to deal with, and if you are
7
+ * hitting this file, make sure you start with the background here:
8
+ * - Linux: https://gcc.gnu.org/wiki/Visibility
9
+ * - Windows:
10
+ * https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
11
+ *
12
+ * Do NOT include this file directly. Instead, use c10/macros/Macros.h
13
+ */
14
+
15
+ // You do not need to edit this part of file unless you are changing the core
16
+ // pytorch export abstractions.
17
+ //
18
+ // This part defines the C10 core export and import macros. This is controlled
19
+ // by whether we are building shared libraries or not, which is determined
20
+ // during build time and codified in c10/core/cmake_macros.h.
21
+ // When the library is built as a shared lib, EXPORT and IMPORT will contain
22
+ // visibility attributes. If it is being built as a static lib, then EXPORT
23
+ // and IMPORT basically have no effect.
24
+
25
+ // As a rule of thumb, you should almost NEVER mix static and shared builds for
26
+ // libraries that depend on c10. AKA, if c10 is built as a static library, we
27
+ // recommend everything dependent on c10 to be built statically. If c10 is built
28
+ // as a shared library, everything dependent on it should be built as shared. In
29
+ // the PyTorch project, all native libraries shall use the macro
30
+ // C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static
31
+ // libraries.
32
+
33
+ // For build systems that do not directly depend on CMake and directly build
34
+ // from the source directory (such as Buck), one may not have a cmake_macros.h
35
+ // file at all. In this case, the build system is responsible for providing
36
+ // correct macro definitions corresponding to the cmake_macros.h.in file.
37
+ //
38
+ // In such scenarios, one should define the macro
39
+ // C10_USING_CUSTOM_GENERATED_MACROS
40
+ // to inform this header that it does not need to include the cmake_macros.h
41
+ // file.
42
+
43
+ #ifndef C10_USING_CUSTOM_GENERATED_MACROS
44
+ #include <c10/macros/cmake_macros.h>
45
+ #endif // C10_USING_CUSTOM_GENERATED_MACROS
46
+
47
+ #ifdef _WIN32
48
+ #define C10_HIDDEN
49
+ #if defined(C10_BUILD_SHARED_LIBS)
50
+ #define C10_EXPORT __declspec(dllexport)
51
+ #define C10_IMPORT __declspec(dllimport)
52
+ #else
53
+ #define C10_EXPORT
54
+ #define C10_IMPORT
55
+ #endif
56
+ #else // _WIN32
57
+ #if defined(__GNUC__)
58
+ #define C10_EXPORT __attribute__((__visibility__("default")))
59
+ #define C10_HIDDEN __attribute__((__visibility__("hidden")))
60
+ #else // defined(__GNUC__)
61
+ #define C10_EXPORT
62
+ #define C10_HIDDEN
63
+ #endif // defined(__GNUC__)
64
+ #define C10_IMPORT C10_EXPORT
65
+ #endif // _WIN32
66
+
67
+ #ifdef NO_EXPORT
68
+ #undef C10_EXPORT
69
+ #define C10_EXPORT
70
+ #endif
71
+
72
+ // Definition of an adaptive XX_API macro, that depends on whether you are
73
+ // building the library itself or not, routes to XX_EXPORT and XX_IMPORT.
74
+ // Basically, you will need to do this for each shared library that you are
75
+ // building, and the instruction is as follows: assuming that you are building
76
+ // a library called libawesome.so. You should:
77
+ // (1) for your cmake target (usually done by "add_library(awesome, ...)"),
78
+ // define a macro called AWESOME_BUILD_MAIN_LIB using
79
+ // target_compile_options.
80
+ // (2) define the AWESOME_API macro similar to the one below.
81
+ // And in the source file of your awesome library, use AWESOME_API to
82
+ // annotate public symbols.
83
+
84
+ // Here, for the C10 library, we will define the macro C10_API for both import
85
+ // and export.
86
+
87
+ // This one is being used by libc10.so
88
+ #ifdef C10_BUILD_MAIN_LIB
89
+ #define C10_API C10_EXPORT
90
+ #else
91
+ #define C10_API C10_IMPORT
92
+ #endif
93
+
94
+ // This one is being used by libtorch.so
95
+ #ifdef CAFFE2_BUILD_MAIN_LIB
96
+ #define TORCH_API C10_EXPORT
97
+ #else
98
+ #define TORCH_API C10_IMPORT
99
+ #endif
100
+
101
+ // You may be wondering: Whose brilliant idea was it to split torch_cuda into
102
+ // two pieces with confusing names?
103
+ // Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we
104
+ // tried to compile PyTorch for CUDA 11.1, which ran into relocation marker
105
+ // issues when linking big binaries.
106
+ // (https://github.com/pytorch/pytorch/issues/39968) We had two choices:
107
+ // (1) Stop supporting so many GPU architectures
108
+ // (2) Do something else
109
+ // We chose #2 and decided to split the behemoth that was torch_cuda into two
110
+ // smaller libraries, one with most of the core kernel functions (torch_cuda_cu)
111
+ // and the other that had..well..everything else (torch_cuda_cpp). The idea was
112
+ // this: instead of linking our static libraries (like the hefty
113
+ // libcudnn_static.a) with another huge library, torch_cuda, and run into pesky
114
+ // relocation marker issues, we could link our static libraries to a smaller
115
+ // part of torch_cuda (torch_cuda_cpp) and avoid the issues.
116
+
117
+ // libtorch_cuda_cu.so
118
+ #ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB
119
+ #define TORCH_CUDA_CU_API C10_EXPORT
120
+ #elif defined(BUILD_SPLIT_CUDA)
121
+ #define TORCH_CUDA_CU_API C10_IMPORT
122
+ #endif
123
+
124
+ // libtorch_cuda_cpp.so
125
+ #ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB
126
+ #define TORCH_CUDA_CPP_API C10_EXPORT
127
+ #elif defined(BUILD_SPLIT_CUDA)
128
+ #define TORCH_CUDA_CPP_API C10_IMPORT
129
+ #endif
130
+
131
+ // libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the
132
+ // same api)
133
+ #ifdef TORCH_CUDA_BUILD_MAIN_LIB
134
+ #define TORCH_CUDA_CPP_API C10_EXPORT
135
+ #define TORCH_CUDA_CU_API C10_EXPORT
136
+ #elif !defined(BUILD_SPLIT_CUDA)
137
+ #define TORCH_CUDA_CPP_API C10_IMPORT
138
+ #define TORCH_CUDA_CU_API C10_IMPORT
139
+ #endif
140
+
141
+ #if defined(TORCH_HIP_BUILD_MAIN_LIB)
142
+ #define TORCH_HIP_API C10_EXPORT
143
+ #else
144
+ #define TORCH_HIP_API C10_IMPORT
145
+ #endif
146
+
147
+ #if defined(TORCH_XPU_BUILD_MAIN_LIB)
148
+ #define TORCH_XPU_API C10_EXPORT
149
+ #else
150
+ #define TORCH_XPU_API C10_IMPORT
151
+ #endif
152
+
153
+ // Enums only need to be exported on windows for non-CUDA files
154
+ #if defined(_WIN32) && defined(__CUDACC__)
155
+ #define C10_API_ENUM C10_API
156
+ #else
157
+ #define C10_API_ENUM
158
+ #endif
159
+
160
+ #endif // C10_MACROS_EXPORT_H_
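A hypothetical instantiation of the recipe above for a library called "awesome" (not part of this diff); AWESOME_BUILD_MAIN_LIB would be defined only when compiling libawesome itself, e.g. via target_compile_definitions in CMake.

    #include <c10/macros/Macros.h>

    #ifdef AWESOME_BUILD_MAIN_LIB
    #define AWESOME_API C10_EXPORT
    #else
    #define AWESOME_API C10_IMPORT
    #endif

    AWESOME_API int awesome_answer();  // exported by libawesome, imported by its users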