ZTWHHH commited on
Commit
a36e569
·
verified ·
1 Parent(s): 66030b8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 +3 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp +63 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp +32 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp +473 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp +721 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +438 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +271 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp +181 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp +101 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp +729 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h +23 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h +56 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h +119 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h +140 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h +65 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h +41 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h +54 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h +11 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h +34 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h +18 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h +10 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h +58 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h +11 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h +16 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h +241 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h +16 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h +15 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h +54 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h +12 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h +16 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h +21 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h +576 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h +57 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h +87 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h +7 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h +68 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h +70 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h +55 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h +459 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h +47 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h +12 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h +857 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h +414 -0
  45. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h +220 -0
  46. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h +1275 -0
  47. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h +21 -0
  48. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h +39 -0
  49. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h +57 -0
  50. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h +196 -0
.gitattributes CHANGED
@@ -1704,3 +1704,4 @@ parrot/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-
1704
  parrot/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1705
  parrot/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1706
  vllm/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1704
  parrot/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1705
  parrot/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1706
  vllm/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1707
+ valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8278dcc6632df94762737b1c930050075738affba25e73cb1cac1b448472dc06
3
+ size 232685936
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <sys/types.h>
4
+
5
+ #include <mutex>
6
+ #include <unordered_map>
7
+
8
+ #include <torch/csrc/distributed/c10d/Store.hpp>
9
+
10
+ namespace c10d {
11
+
12
+ class TORCH_API FileStore : public Store {
13
+ public:
14
+ explicit FileStore(std::string path, int numWorkers);
15
+
16
+ ~FileStore() override;
17
+
18
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
19
+
20
+ std::vector<uint8_t> compareSet(
21
+ const std::string& key,
22
+ const std::vector<uint8_t>& expectedValue,
23
+ const std::vector<uint8_t>& desiredValue) override;
24
+
25
+ std::vector<uint8_t> get(const std::string& key) override;
26
+
27
+ int64_t add(const std::string& key, int64_t value) override;
28
+
29
+ int64_t getNumKeys() override;
30
+
31
+ bool deleteKey(const std::string& key) override;
32
+
33
+ bool check(const std::vector<std::string>& keys) override;
34
+
35
+ void wait(const std::vector<std::string>& keys) override;
36
+
37
+ void wait(
38
+ const std::vector<std::string>& keys,
39
+ const std::chrono::milliseconds& timeout) override;
40
+
41
+ // Returns the path used by the FileStore.
42
+ const std::string& getPath() const noexcept {
43
+ return path_;
44
+ }
45
+
46
+ protected:
47
+ int64_t addHelper(const std::string& key, int64_t i);
48
+
49
+ std::string path_;
50
+ off_t pos_{0};
51
+
52
+ int numWorkers_;
53
+ const std::string cleanupKey_;
54
+ const std::string refCountKey_;
55
+ const std::string regularPrefix_;
56
+ const std::string deletePrefix_;
57
+
58
+ std::unordered_map<std::string, std::vector<uint8_t>> cache_;
59
+
60
+ std::mutex activeFileOpLock_;
61
+ };
62
+
63
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <string>
6
+
7
+ #include <c10/util/Registry.h>
8
+ #include <gloo/config.h>
9
+ #include <gloo/transport/device.h>
10
+
11
+ namespace c10d {
12
+
13
+ class TORCH_API GlooDeviceFactory {
14
+ public:
15
+ // Create new device instance for specific interface.
16
+ static std::shared_ptr<::gloo::transport::Device> makeDeviceForInterface(
17
+ const std::string& interface);
18
+
19
+ // Create new device instance for specific hostname or address.
20
+ static std::shared_ptr<::gloo::transport::Device> makeDeviceForHostname(
21
+ const std::string& hostname);
22
+ };
23
+
24
+ TORCH_DECLARE_SHARED_REGISTRY(
25
+ GlooDeviceRegistry,
26
+ ::gloo::transport::Device,
27
+ const std::string&, /* interface */
28
+ const std::string& /* hostname */);
29
+
30
+ } // namespace c10d
31
+
32
+ #endif // USE_C10D_GLOO
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_NCCL
4
+
5
+ #include <stdio.h>
6
+ #include <stdlib.h>
7
+
8
+ #include <memory>
9
+ #include <mutex>
10
+
11
+ #include <c10/util/Exception.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <nccl.h>
14
+
15
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
16
+ (NCCL_MINOR >= 14)
17
+ #define NCCL_HAS_COMM_NONBLOCKING
18
+ #endif
19
+
20
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
21
+ (NCCL_MINOR >= 18)
22
+ #define NCCL_HAS_COMM_SPLIT
23
+ #endif
24
+
25
+ // ncclGetLastError() is enabled only for NCCL versions 2.13+
26
+ // ncclRemoteError only exists in NCCL versions 2.13+
27
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
28
+ (NCCL_MINOR >= 13)
29
+ #define ENABLE_NCCL_GET_LAST_ERROR
30
+ #define NCCL_REMOTE_ERROR
31
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
32
+ #define ENABLE_NCCL_GET_LAST_ERROR
33
+ #define NCCL_REMOTE_ERROR
34
+ #endif
35
+
36
+ // Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort()
37
+ // and ncclCommGetAsyncError() are not supported in earlier versions.
38
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
39
+ (NCCL_MINOR >= 4)
40
+ #define ENABLE_NCCL_ERROR_CHECKING
41
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
42
+ #define ENABLE_NCCL_ERROR_CHECKING
43
+ #endif
44
+
45
+ // P2P is enabled only for NCCL versions 2.7+ since ncclSend()
46
+ // and ncclRecv() are not supported in earlier versions.
47
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
48
+ (NCCL_MINOR >= 7)
49
+ #define ENABLE_NCCL_P2P_SUPPORT
50
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
51
+ #define ENABLE_NCCL_P2P_SUPPORT
52
+ #endif
53
+
54
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
55
+ (NCCL_MINOR >= 11)
56
+ #define ENABLE_NCCL_PREMUL_SUM_SUPPORT
57
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
58
+ #define ENABLE_NCCL_PREMUL_SUM_SUPPORT
59
+ #endif
60
+
61
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
62
+ (NCCL_MINOR >= 17)
63
+ #define NCCL_HAS_COMM_CTA_CGA
64
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
65
+ #define NCCL_HAS_COMM_CTA_CGA
66
+ #endif
67
+
68
+ #if defined(NCCL_REGISTRATION_SUPPORTED) || \
69
+ ((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
70
+ (NCCL_MINOR >= 19)))
71
+ #define NCCL_HAS_COMM_REGISTER
72
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
73
+ #define NCCL_HAS_COMM_REGISTER
74
+ #endif
75
+
76
+ // Macro to throw on a non-successful NCCL return value.
77
+ #define C10D_NCCL_CHECK(cmd, failureReason) \
78
+ do { \
79
+ ncclResult_t result = cmd; \
80
+ if (result != ncclSuccess) { \
81
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
82
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
83
+ "\n" + getNcclErrorDetailStr(result, failureReason); \
84
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
85
+ } \
86
+ } while (0)
87
+
88
+ // Macro to throw on a non-successful NCCL return value, non-blocking.
89
+ #define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \
90
+ ncclResult_t result = cmd; \
91
+ auto startTimepoint = std::chrono::steady_clock::now(); \
92
+ while (result == ncclInProgress) { \
93
+ if (nccl_nonblocking_timeout() > 0) { \
94
+ auto currentTimepoint = std::chrono::steady_clock::now(); \
95
+ auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
96
+ currentTimepoint - startTimepoint) \
97
+ .count(); \
98
+ if (timeElapsed > nccl_nonblocking_timeout()) { \
99
+ std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \
100
+ std::to_string(__LINE__) + ", " + \
101
+ ncclGetErrorWithVersion(result) + "\n" + \
102
+ getNcclErrorDetailStr(result, failureReason); \
103
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
104
+ } \
105
+ } \
106
+ ncclCommGetAsyncError(comm, &result); \
107
+ } \
108
+ if (result != ncclSuccess) { \
109
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
110
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
111
+ "\n" + getNcclErrorDetailStr(result, failureReason); \
112
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
113
+ }
114
+
115
+ #define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comms_, failureReason) \
116
+ ncclResult_t state = cmd; \
117
+ auto startTimepoint = std::chrono::steady_clock::now(); \
118
+ if (state == ncclInProgress) { \
119
+ for (const auto i : c10::irange(comms_.size())) { \
120
+ do { \
121
+ if (nccl_nonblocking_timeout() > 0) { \
122
+ auto currentTimepoint = std::chrono::steady_clock::now(); \
123
+ auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
124
+ currentTimepoint - startTimepoint) \
125
+ .count(); \
126
+ if (timeElapsed > nccl_nonblocking_timeout()) { \
127
+ std::string err = "NCCL timeout in: " + std::string(__FILE__) + \
128
+ ":" + std::to_string(__LINE__) + ", " + \
129
+ ncclGetErrorWithVersion(state) + "\n" + \
130
+ getNcclErrorDetailStr(state, failureReason); \
131
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
132
+ } \
133
+ } \
134
+ ncclCommGetAsyncError(comms_[i]->getNcclComm(), &state); \
135
+ } while (state == ncclInProgress); \
136
+ if (state != ncclSuccess) { \
137
+ break; /* fall through to failed case */ \
138
+ } \
139
+ } \
140
+ } \
141
+ if (state != ncclSuccess) { \
142
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
143
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \
144
+ "\n" + getNcclErrorDetailStr(state, failureReason); \
145
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
146
+ }
147
+
148
+ // Macro to print and abort on a non-successful NCCL return value.
149
+ #define C10D_NCCL_ASSERT(cmd) \
150
+ do { \
151
+ ncclResult_t result = cmd; \
152
+ if (result != ncclSuccess) { \
153
+ std::string err = ncclGetErrorWithVersion(result); \
154
+ fprintf( \
155
+ stderr, \
156
+ "NCCL error in: %s:%d, %s\n", \
157
+ __FILE__, \
158
+ __LINE__, \
159
+ err.c_str()); \
160
+ abort(); \
161
+ } \
162
+ } while (0)
163
+
164
+ namespace c10d {
165
+
166
+ std::string getNcclVersion();
167
+ std::string ncclGetErrorWithVersion(ncclResult_t error);
168
+ bool nccl_use_nonblocking();
169
+ int nccl_nonblocking_timeout();
170
+
171
+ // Provides additional detail into NCCL error codes based on when these are
172
+ // thrown in the NCCL codebase.
173
+ std::string getNcclErrorDetailStr(
174
+ ncclResult_t error,
175
+ c10::optional<std::string> processGroupFailureReason = c10::nullopt);
176
+
177
+ // Write NCCL debug info to local disk or any storage users define.
178
+ class TORCH_API DebugInfoWriter {
179
+ public:
180
+ DebugInfoWriter(int rank);
181
+ virtual ~DebugInfoWriter();
182
+ virtual void write(const std::string& ncclTrace);
183
+
184
+ protected:
185
+ std::string filename_;
186
+ };
187
+
188
+ // RAII wrapper for NCCL communicator
189
+ class NCCLComm {
190
+ public:
191
+ explicit NCCLComm(ncclComm_t ncclComm)
192
+ : ncclComm_(ncclComm),
193
+ aborted_(false),
194
+ ncclAsyncErr_(ncclSuccess),
195
+ commFailureReason_(c10::nullopt) {}
196
+
197
+ NCCLComm() : NCCLComm(nullptr) {}
198
+
199
+ ~NCCLComm() noexcept {
200
+ // Add lock in this destructor, as aborted_ needs to be read after memory
201
+ // barrier here.
202
+ std::unique_lock<std::mutex> lock(mutex_);
203
+ if (ncclComm_ && !aborted_) {
204
+ #ifdef ENABLE_NCCL_ERROR_CHECKING
205
+ // Use ncclCommAbort instead of ncclCommDestroy here since
206
+ // ncclCommDestroy could block forever waiting for work to complete on
207
+ // the communicator.
208
+ C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_));
209
+ #else
210
+ C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_));
211
+ #endif
212
+ }
213
+ }
214
+
215
+ static std::shared_ptr<NCCLComm> create(
216
+ int numRanks,
217
+ int rank,
218
+ ncclUniqueId commId) {
219
+ auto comm = std::make_shared<NCCLComm>();
220
+ C10D_NCCL_CHECK(
221
+ ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank),
222
+ c10::nullopt);
223
+ comm->ncclId_ = commId;
224
+ comm->rank_ = rank;
225
+ return comm;
226
+ }
227
+
228
+ #ifdef NCCL_HAS_COMM_NONBLOCKING
229
+ static std::shared_ptr<NCCLComm> create(
230
+ int numRanks,
231
+ int rank,
232
+ ncclUniqueId commId,
233
+ ncclConfig_t& config) {
234
+ auto comm = std::make_shared<NCCLComm>();
235
+ if (nccl_use_nonblocking()) {
236
+ config.blocking = 0;
237
+ C10D_NCCL_CHECK_TIMEOUT(
238
+ ncclCommInitRankConfig(
239
+ &(comm->ncclComm_), numRanks, commId, rank, &config),
240
+ comm->ncclComm_,
241
+ c10::nullopt);
242
+ } else {
243
+ C10D_NCCL_CHECK(
244
+ ncclCommInitRankConfig(
245
+ &(comm->ncclComm_), numRanks, commId, rank, &config),
246
+ c10::nullopt);
247
+ }
248
+ comm->ncclId_ = commId;
249
+ comm->rank_ = rank;
250
+ return comm;
251
+ }
252
+ #endif
253
+
254
+ #ifdef NCCL_HAS_COMM_SPLIT
255
+ static std::shared_ptr<NCCLComm> split(
256
+ NCCLComm* source,
257
+ int color_id,
258
+ int rank,
259
+ ncclConfig_t& config) {
260
+ auto comm = std::make_shared<NCCLComm>();
261
+ C10D_NCCL_CHECK(
262
+ ncclCommSplit(
263
+ source->ncclComm_, color_id, rank, &(comm->ncclComm_), &config),
264
+ c10::nullopt);
265
+ ++source->ncclCommSplitCounter_;
266
+ return comm;
267
+ }
268
+ #endif
269
+
270
+ ncclUniqueId getNcclId() {
271
+ return ncclId_;
272
+ }
273
+
274
+ // Must not be copyable
275
+ NCCLComm(const NCCLComm&) = delete;
276
+ NCCLComm& operator=(const NCCLComm&) = delete;
277
+
278
+ // Do not support move assignment as there is no valid use case
279
+ NCCLComm& operator=(NCCLComm&& other) = delete;
280
+
281
+ // Move constructable
282
+ NCCLComm(NCCLComm&& other) {
283
+ // Using other's lock, as it reads other's states
284
+ // Can not use this.mutex_, as this object is being constructed.
285
+ std::unique_lock<std::mutex> lock(other.mutex_);
286
+ std::swap(ncclComm_, other.ncclComm_);
287
+ std::swap(aborted_, other.aborted_);
288
+ std::swap(ncclAsyncErr_, other.ncclAsyncErr_);
289
+ }
290
+
291
+ ncclComm_t getNcclComm();
292
+
293
+ c10::optional<std::string> getNcclCommFailureReason() const {
294
+ std::unique_lock<std::mutex> lock(mutex_);
295
+ return commFailureReason_;
296
+ }
297
+
298
+ void ncclCommAbort(
299
+ c10::optional<std::string> commFailureReason = c10::nullopt) {
300
+ std::unique_lock<std::mutex> lock(mutex_);
301
+ #ifdef ENABLE_NCCL_ERROR_CHECKING
302
+ if (aborted_) {
303
+ // Should not abort twice.
304
+ return;
305
+ }
306
+
307
+ #ifdef NCCL_HAS_COMM_REGISTER
308
+ // Deregister all registered segments before aborting.
309
+ for (auto& it : registeredSegmentHandles_) {
310
+ void* handle = it.second;
311
+ C10D_NCCL_CHECK(
312
+ ::ncclCommDeregister(ncclComm_, handle),
313
+ c10::str(
314
+ "Failed to deregister segment handle ",
315
+ handle,
316
+ " on ncclComm_ ",
317
+ ncclComm_));
318
+ }
319
+ registeredSegmentHandles_.clear();
320
+ #endif
321
+
322
+ // Set true failure reason if provided by ProcessGroupNCCL (e.g. work
323
+ // timeout)
324
+ commFailureReason_ = commFailureReason;
325
+ #ifndef NCCL_HAS_COMM_NONBLOCKING
326
+ C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_);
327
+ #else
328
+ C10D_NCCL_CHECK_TIMEOUT(
329
+ ::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_);
330
+ #endif
331
+ aborted_ = true;
332
+ ncclComm_ = nullptr;
333
+
334
+ // Set an appropriate error so that we avoid using the communicator.
335
+ if (ncclAsyncErr_ == ncclSuccess) {
336
+ ncclAsyncErr_ = ncclSystemError;
337
+ }
338
+ #else
339
+ // This is a NOOP, if error checks are disabled.
340
+ return;
341
+ #endif
342
+ }
343
+
344
+ bool isAborted() const {
345
+ std::unique_lock<std::mutex> lock(mutex_);
346
+ return aborted_;
347
+ }
348
+
349
+ uint64_t getCommSplitCounter() const {
350
+ return ncclCommSplitCounter_;
351
+ }
352
+
353
+ ncclResult_t checkForNcclError() {
354
+ std::unique_lock<std::mutex> lock(mutex_);
355
+ #ifdef ENABLE_NCCL_ERROR_CHECKING
356
+ if (ncclAsyncErr_ != ncclSuccess) {
357
+ return ncclAsyncErr_;
358
+ }
359
+ C10D_NCCL_CHECK(
360
+ ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_);
361
+ return ncclAsyncErr_;
362
+ #else
363
+ // Always return success, if error checks are disabled.
364
+ return ncclSuccess;
365
+ #endif
366
+ }
367
+
368
+ ncclResult_t registerSegment(void* ptr, size_t size) {
369
+ std::unique_lock<std::mutex> lock(mutex_);
370
+ #ifdef NCCL_HAS_COMM_REGISTER
371
+ // We register only segments from cache allocator
372
+ // which are guaranteed to be with disjoint addr ranges. Thus, a ptr always
373
+ // maps to a unique handle and should not be registered before the current
374
+ // ptr is deregistered and freed.
375
+ TORCH_CHECK(
376
+ registeredSegmentHandles_.count(ptr) == 0,
377
+ "Segment with ptr ",
378
+ ptr,
379
+ " has already been registered on ncclComm_ ",
380
+ ncclComm_);
381
+
382
+ void* handle;
383
+ C10D_NCCL_CHECK(
384
+ ncclCommRegister(ncclComm_, ptr, size, &handle),
385
+ c10::str(
386
+ "Failed to register segment with ptr ",
387
+ ptr,
388
+ ", size ",
389
+ size,
390
+ " on ncclComm_ ",
391
+ ncclComm_));
392
+ registeredSegmentHandles_[ptr] = handle;
393
+ return ncclSuccess;
394
+ #else
395
+ return ncclInvalidUsage;
396
+ #endif
397
+ }
398
+
399
+ ncclResult_t deregisterSegment(void* ptr) {
400
+ std::unique_lock<std::mutex> lock(mutex_);
401
+ #ifdef NCCL_HAS_COMM_REGISTER
402
+ TORCH_CHECK(
403
+ registeredSegmentHandles_.count(ptr) == 1,
404
+ "Segment with ptr ",
405
+ ptr,
406
+ " is not registered on ncclComm_ ",
407
+ ncclComm_);
408
+
409
+ void* handle = registeredSegmentHandles_[ptr];
410
+ C10D_NCCL_CHECK(
411
+ ncclCommDeregister(ncclComm_, handle),
412
+ c10::str(
413
+ "Failed to deregister segment handle ",
414
+ handle,
415
+ " on ncclComm_ ",
416
+ ncclComm_));
417
+ registeredSegmentHandles_.erase(ptr);
418
+ return ncclSuccess;
419
+ #else
420
+ return ncclInvalidUsage;
421
+ #endif
422
+ }
423
+
424
+ protected:
425
+ ncclComm_t ncclComm_;
426
+ // Unique nccl_id for this communicator.
427
+ ncclUniqueId ncclId_;
428
+ bool aborted_;
429
+ uint64_t ncclCommSplitCounter_{0};
430
+ ncclResult_t ncclAsyncErr_;
431
+ mutable std::mutex mutex_;
432
+ // Rank that this communicator corresponds to.
433
+ int rank_;
434
+ // Optional reason for communicator failure, provided by ProcessGroupNCCL for
435
+ // better error messaging.
436
+ c10::optional<std::string> commFailureReason_;
437
+ #ifdef NCCL_HAS_COMM_REGISTER
438
+ // Stores handlers for tensors registered by NCCL
439
+ std::unordered_map<void*, void*> registeredSegmentHandles_;
440
+ #endif
441
+ };
442
+
443
+ // Helper that automatically cleans up premul sums.
444
+ struct ncclRedOpRAII {
445
+ ncclRedOpRAII() = default;
446
+ ncclRedOpRAII(ncclRedOp_t op) : op_(op) {}
447
+ ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm)
448
+ : op_(op), comm_(comm), premul_sum_(true) {}
449
+ ncclRedOpRAII(const ncclRedOpRAII&) = delete;
450
+ ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete;
451
+ ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() {
452
+ std::swap(tmp.op_, this->op_);
453
+ std::swap(tmp.comm_, this->comm_);
454
+ std::swap(tmp.premul_sum_, this->premul_sum_);
455
+ }
456
+ #if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT)
457
+ ~ncclRedOpRAII() {
458
+ if (premul_sum_) {
459
+ ncclRedOpDestroy(op_, comm_);
460
+ }
461
+ }
462
+ #endif
463
+ operator ncclRedOp_t() const {
464
+ return op_;
465
+ }
466
+ ncclRedOp_t op_;
467
+ ncclComm_t comm_;
468
+ bool premul_sum_ = false;
469
+ };
470
+
471
+ } // namespace c10d
472
+
473
+ #endif // USE_C10D_NCCL
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp ADDED
@@ -0,0 +1,721 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
4
+ #include <condition_variable>
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <stdexcept>
8
+ #include <unordered_map>
9
+ #include <utility>
10
+ #include <vector>
11
+
12
+ #include <ATen/ATen.h>
13
+ #include <ATen/core/dispatch/Dispatcher.h>
14
+ #include <c10/macros/Macros.h>
15
+
16
+ #include <torch/csrc/distributed/c10d/Work.hpp>
17
+ // *************************************************************************
18
+ // PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN
19
+ // versions 1.7 and 1.8.
20
+ // PLEASE DO NOT ADD ANY DEPENDENCIES.
21
+ // SEE RFC: https://github.com/pytorch/pytorch/issues/39662
22
+ // *************************************************************************
23
+
24
+ constexpr auto kProcessGroupDefaultTimeout =
25
+ std::chrono::milliseconds(30 * 60 * 1000);
26
+
27
+ namespace c10d {
28
+
29
+ // ProcessGroup is a base class that captures collective and point to
30
+ // point communication in a fixed set of processes.
31
+ //
32
+ // The functions specified in the class below describe the API alone;
33
+ // implementations are provided in subclasses.
34
+ //
35
+ // Every function that performs I/O is executed asynchronously by a
36
+ // thread pool owned by the ProcessGroup (by default). They return an
37
+ // object that can be used to wait for completion or error.
38
+ //
39
+ // The ProcessGroup can instantiate subgroups with fewer or an equal
40
+ // number of members. Implementations must take care that multiple
41
+ // process groups can be used in parallel and synchronize accordingly.
42
+ //
43
+ // The ProcessGroup assumes a fixed set of processes. If the set
44
+ // changes, existing instances must be destructed and instantiation
45
+ // and initialization must start from scratch. For members of the
46
+ // process group to find each other (referred to as rendezvous from
47
+ // hereon)
48
+ //
49
+ class TORCH_API ProcessGroup : public torch::CustomClassHolder {
50
+ public:
51
+ // ProcessGroup Options is a base struct that defines the basic options
52
+ // when constructing a ProcessGroup. Each ProcessGroup subclass should
53
+ // extend this struct and define its options if it wants to provide more
54
+ // config options (beyond basic ones defined here) to end user.
55
+ struct TORCH_API Options : torch::CustomClassHolder {
56
+ explicit Options(
57
+ std::string backend,
58
+ std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout)
59
+ : timeout(timeout), backend(std::move(backend)) {}
60
+ ~Options() override = default;
61
+
62
+ std::chrono::milliseconds timeout;
63
+
64
+ // backend name
65
+ const std::string backend;
66
+ };
67
+
68
+ enum BackendType {
69
+ UNDEFINED = 0,
70
+ GLOO = 1,
71
+ NCCL = 2,
72
+ UCC = 3,
73
+ MPI = 4,
74
+ CUSTOM = 5,
75
+ };
76
+
77
+ // Not used, set for backwards compatibility and only used for TypeDef in
78
+ // Ops.cpp
79
+ explicit ProcessGroup(int rank, int size);
80
+
81
+ explicit ProcessGroup(
82
+ const c10::intrusive_ptr<::c10d::Store>& store,
83
+ int rank,
84
+ int size,
85
+ c10::intrusive_ptr<Options> options);
86
+ ~ProcessGroup() override;
87
+
88
+ int getRank() const {
89
+ return rank_;
90
+ }
91
+
92
+ int getSize() const {
93
+ return size_;
94
+ }
95
+
96
+ // Returns an unique opaque ID of this process group object.
97
+ int64_t getID() const {
98
+ return reinterpret_cast<std::intptr_t>(this);
99
+ }
100
+
101
+ // Returns an unique opaque ID of a backend for the specific backend type
102
+ // that can correlate with this process group's collectives.
103
+ int64_t getBackendID(BackendType backend_type) const {
104
+ return reinterpret_cast<std::intptr_t>(getBackend(backend_type).get());
105
+ }
106
+
107
+ virtual const std::string getBackendName() const {
108
+ return options_->backend;
109
+ };
110
+
111
+ BackendType getBackendType() const {
112
+ return backendType_;
113
+ };
114
+
115
+ virtual void startCoalescing(c10::DeviceType deviceType) {
116
+ // only nccl has implemented startCoalescing so only execute for nccl
117
+ // backends
118
+ auto backend = getBackend(deviceType);
119
+ backend->startCoalescing();
120
+ }
121
+
122
+ virtual c10::intrusive_ptr<Work> endCoalescing(c10::DeviceType deviceType) {
123
+ // only nccl has implemented endCoalescing so only execute for nccl
124
+ // backends
125
+ auto backend = getBackend(deviceType);
126
+ auto work = backend->endCoalescing();
127
+ return work;
128
+ }
129
+
130
+ virtual c10::intrusive_ptr<Work> broadcast(
131
+ std::vector<at::Tensor>& tensors,
132
+ const BroadcastOptions& opts = BroadcastOptions()) {
133
+ static auto op =
134
+ c10::Dispatcher::singleton()
135
+ .findSchemaOrThrow("c10d::broadcast_", "")
136
+ .typed<
137
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
138
+ at::TensorList,
139
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
140
+ int64_t,
141
+ int64_t,
142
+ bool,
143
+ int64_t)>();
144
+ // It's awakward to unbox the opts here and box them again in the custom C++
145
+ // op. But it's also complicated to make opts as a CustomClassHolder. Leave
146
+ // it as it is now.
147
+ return std::get<1>(op.call(
148
+ tensors,
149
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
150
+ opts.rootRank,
151
+ opts.rootTensor,
152
+ opts.asyncOp,
153
+ opts.timeout.count()));
154
+ }
155
+
156
+ virtual c10::intrusive_ptr<Work> allreduce(
157
+ std::vector<at::Tensor>& tensors,
158
+ const AllreduceOptions& opts = AllreduceOptions()) {
159
+ static auto op =
160
+ c10::Dispatcher::singleton()
161
+ .findSchemaOrThrow("c10d::allreduce_", "")
162
+ .typed<
163
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
164
+ at::TensorList,
165
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
166
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
167
+ const c10::optional<at::Tensor>& sparse_indices,
168
+ int64_t)>();
169
+
170
+ return std::get<1>(op.call(
171
+ tensors,
172
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
173
+ c10::make_intrusive<ReduceOp>(opts.reduceOp),
174
+ opts.sparseIndices,
175
+ opts.timeout.count()));
176
+ }
177
+
178
+ virtual c10::intrusive_ptr<Work> allreduce_coalesced(
179
+ std::vector<at::Tensor>& tensors,
180
+ const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) {
181
+ static auto op = c10::Dispatcher::singleton()
182
+ .findSchemaOrThrow("c10d::allreduce_coalesced_", "")
183
+ .typed<c10::intrusive_ptr<::c10d::Work>(
184
+ at::TensorList,
185
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
186
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
187
+ int64_t)>();
188
+
189
+ return op.call(
190
+ tensors,
191
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
192
+ c10::make_intrusive<ReduceOp>(opts.reduceOp),
193
+ opts.timeout.count());
194
+ }
195
+
196
+ virtual c10::intrusive_ptr<Work> reduce(
197
+ std::vector<at::Tensor>& tensors,
198
+ const ReduceOptions& opts = ReduceOptions()) {
199
+ static auto op = c10::Dispatcher::singleton()
200
+ .findSchemaOrThrow("c10d::reduce_", "")
201
+ .typed<c10::intrusive_ptr<::c10d::Work>(
202
+ at::TensorList,
203
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
204
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
205
+ int64_t,
206
+ int64_t,
207
+ int64_t)>();
208
+ return op.call(
209
+ tensors,
210
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
211
+ c10::make_intrusive<ReduceOp>(opts.reduceOp),
212
+ opts.rootRank,
213
+ opts.rootTensor,
214
+ opts.timeout.count());
215
+ }
216
+
217
+ virtual c10::intrusive_ptr<Work> allgather(
218
+ std::vector<std::vector<at::Tensor>>& outputTensors,
219
+ std::vector<at::Tensor>& inputTensors,
220
+ const AllgatherOptions& opts = AllgatherOptions()) {
221
+ static auto op = c10::Dispatcher::singleton()
222
+ .findSchemaOrThrow("c10d::allgather_", "")
223
+ .typed<std::tuple<
224
+ std::vector<std::vector<at::Tensor>>,
225
+ c10::intrusive_ptr<Work>>(
226
+ const std::vector<std::vector<at::Tensor>>&,
227
+ at::TensorList,
228
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
229
+ int64_t)>();
230
+
231
+ return std::get<1>(op.call(
232
+ outputTensors,
233
+ inputTensors,
234
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
235
+ opts.timeout.count()));
236
+ }
237
+
238
+ // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
239
+ // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
240
+ // For implementers of ProcessGroup API and advanced users only.
241
+ // Note: this function will be deprecated in near future.
242
+ virtual c10::intrusive_ptr<Work> _allgather_base(
243
+ at::Tensor& outputBuffer,
244
+ at::Tensor& inputBuffer,
245
+ const AllgatherOptions& opts = AllgatherOptions()) {
246
+ static auto op =
247
+ c10::Dispatcher::singleton()
248
+ .findSchemaOrThrow("c10d::_allgather_base_", "")
249
+ .typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
250
+ at::Tensor&,
251
+ at::Tensor&,
252
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
253
+ bool,
254
+ int64_t)>();
255
+
256
+ return std::get<1>(op.call(
257
+ outputBuffer,
258
+ inputBuffer,
259
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
260
+ opts.asyncOp,
261
+ opts.timeout.count()));
262
+ }
263
+
264
+ // This function is deprecated and will be moved out of ProcessGroup to comms:
265
+ // * do not add dependencies on this function,
266
+ // * do not implement it in your ProcessGroup, implement _allgather_base
267
+ // instead.
268
+ virtual c10::intrusive_ptr<Work> allgather_coalesced(
269
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
270
+ std::vector<at::Tensor>& inputTensors,
271
+ const AllgatherOptions& opts = AllgatherOptions()) {
272
+ static auto op =
273
+ c10::Dispatcher::singleton()
274
+ .findSchemaOrThrow("c10d::allgather_coalesced_", "")
275
+ .typed<c10::intrusive_ptr<Work>(
276
+ const std::vector<std::vector<at::Tensor>>&,
277
+ const at::TensorList&,
278
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();
279
+
280
+ return op.call(
281
+ outputTensorLists,
282
+ inputTensors,
283
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
284
+ }
285
+
286
+ // This function is a coalesced version of `allgather_into_tensor` (currently
287
+ // still named as `_allgather_base`). Each tensor in the vector corresponds to
288
+ // an input/output of one `allgather_into_tensor` operation.
289
+ virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
290
+ std::vector<at::Tensor>& outputTensors,
291
+ std::vector<at::Tensor>& inputTensors,
292
+ const AllgatherOptions& opts = AllgatherOptions()) {
293
+ static auto op =
294
+ c10::Dispatcher::singleton()
295
+ .findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "")
296
+ .typed<c10::intrusive_ptr<Work>(
297
+ const at::TensorList,
298
+ const at::TensorList,
299
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();
300
+
301
+ return op.call(
302
+ outputTensors,
303
+ inputTensors,
304
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
305
+ }
306
+
307
+ virtual c10::intrusive_ptr<Work> gather(
308
+ std::vector<std::vector<at::Tensor>>& outputTensors,
309
+ std::vector<at::Tensor>& inputTensors,
310
+ const GatherOptions& opts = GatherOptions()) {
311
+ static auto op = c10::Dispatcher::singleton()
312
+ .findSchemaOrThrow("c10d::gather_", "")
313
+ .typed<c10::intrusive_ptr<::c10d::Work>(
314
+ const std::vector<std::vector<at::Tensor>>&,
315
+ const at::TensorList&,
316
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
317
+ int64_t,
318
+ int64_t)>();
319
+ return op.call(
320
+ outputTensors,
321
+ inputTensors,
322
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
323
+ opts.rootRank,
324
+ opts.timeout.count());
325
+ }
326
+
327
+ virtual c10::intrusive_ptr<Work> scatter(
328
+ std::vector<at::Tensor>& outputTensors,
329
+ std::vector<std::vector<at::Tensor>>& inputTensors,
330
+ const ScatterOptions& opts = ScatterOptions()) {
331
+ static auto op =
332
+ c10::Dispatcher::singleton()
333
+ .findSchemaOrThrow("c10d::scatter_", "")
334
+ .typed<
335
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
336
+ const at::TensorList&,
337
+ const std::vector<std::vector<at::Tensor>>&,
338
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
339
+ int64_t,
340
+ bool,
341
+ int64_t)>();
342
+ return std::get<1>(op.call(
343
+ outputTensors,
344
+ inputTensors,
345
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
346
+ opts.rootRank,
347
+ opts.asyncOp,
348
+ opts.timeout.count()));
349
+ }
350
+
351
+ virtual c10::intrusive_ptr<Work> reduce_scatter(
352
+ std::vector<at::Tensor>& outputTensors,
353
+ std::vector<std::vector<at::Tensor>>& inputTensors,
354
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) {
355
+ static auto op =
356
+ c10::Dispatcher::singleton()
357
+ .findSchemaOrThrow("c10d::reduce_scatter_", "")
358
+ .typed<
359
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
360
+ const at::TensorList&,
361
+ const std::vector<std::vector<at::Tensor>>&,
362
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
363
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
364
+ int64_t)>();
365
+ return std::get<1>(op.call(
366
+ outputTensors,
367
+ inputTensors,
368
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
369
+ c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
370
+ opts.timeout.count()));
371
+ }
372
+
373
+ virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
374
+ at::Tensor& outputBuffer,
375
+ at::Tensor& inputBuffer,
376
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) {
377
+ static auto op =
378
+ c10::Dispatcher::singleton()
379
+ .findSchemaOrThrow("c10d::_reduce_scatter_base_", "")
380
+ .typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
381
+ at::Tensor&,
382
+ at::Tensor&,
383
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
384
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
385
+ bool,
386
+ int64_t)>();
387
+ return std::get<1>(op.call(
388
+ outputBuffer,
389
+ inputBuffer,
390
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
391
+ c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
392
+ opts.asyncOp,
393
+ opts.timeout.count()));
394
+ }
395
+
396
+ // This function is a coalesced version of `reduce_scatter_tensor` (currently
397
+ // still named as `_reduce_scatter_base`). Each tensor in the vector
398
+ // corresponds to an input/output of one `reduce_scatter_tensor` operation.
399
+ virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
400
+ std::vector<at::Tensor>& outputTensors,
401
+ std::vector<at::Tensor>& inputTensors,
402
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) {
403
+ static auto op =
404
+ c10::Dispatcher::singleton()
405
+ .findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "")
406
+ .typed<c10::intrusive_ptr<Work>(
407
+ const at::TensorList,
408
+ const at::TensorList,
409
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
410
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
411
+ int64_t)>();
412
+
413
+ return op.call(
414
+ outputTensors,
415
+ inputTensors,
416
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
417
+ c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
418
+ opts.timeout.count());
419
+ }
420
+
421
+ virtual c10::intrusive_ptr<Work> alltoall_base(
422
+ at::Tensor& outputBuffer,
423
+ at::Tensor& inputBuffer,
424
+ std::vector<int64_t>& outputSplitSizes,
425
+ std::vector<int64_t>& inputSplitSizes,
426
+ const AllToAllOptions& opts = AllToAllOptions()) {
427
+ static auto op = c10::Dispatcher::singleton()
428
+ .findSchemaOrThrow("c10d::alltoall_base_", "")
429
+ .typed<c10::intrusive_ptr<::c10d::Work>(
430
+ at::Tensor&,
431
+ at::Tensor&,
432
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
433
+ std::vector<int64_t>,
434
+ std::vector<int64_t>,
435
+ int64_t)>();
436
+ return op.call(
437
+ outputBuffer,
438
+ inputBuffer,
439
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
440
+ outputSplitSizes,
441
+ inputSplitSizes,
442
+ opts.timeout.count());
443
+ }
444
+
445
+ virtual c10::intrusive_ptr<Work> alltoall(
446
+ std::vector<at::Tensor>& outputTensors,
447
+ std::vector<at::Tensor>& inputTensors,
448
+ const AllToAllOptions& opts = AllToAllOptions()) {
449
+ static auto op =
450
+ c10::Dispatcher::singleton()
451
+ .findSchemaOrThrow("c10d::alltoall_", "")
452
+ .typed<
453
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
454
+ const at::TensorList&,
455
+ const at::TensorList&,
456
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
457
+ int64_t)>();
458
+ return std::get<1>(op.call(
459
+ outputTensors,
460
+ inputTensors,
461
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
462
+ opts.timeout.count()));
463
+ }
464
+
465
+ virtual void monitoredBarrier(
466
+ const BarrierOptions& opts,
467
+ bool wait_all_ranks = false) {
468
+ static auto op = c10::Dispatcher::singleton()
469
+ .findSchemaOrThrow("c10d::monitored_barrier_", "")
470
+ .typed<void(
471
+ at::Tensor,
472
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
473
+ const std::vector<int64_t>&,
474
+ int64_t,
475
+ bool)>();
476
+ // Default to using cpu implementation, monitored barrier is only for GLOO
477
+ at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU));
478
+ op.call(
479
+ tensor,
480
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
481
+ opts.device_ids,
482
+ opts.timeout.count(),
483
+ wait_all_ranks);
484
+ }
485
+
486
+ // Agrees on an initial sequence number for the whole group by having rank 0
487
+ // create it and broadcast it to other ranks using the store. Only implemented
488
+ // for GLOO and NCCL backends currently.
489
+ virtual void setSequenceNumberForGroup() {
490
+ auto backendType = getBackendType();
491
+ // TODO: HACK for backend name to get sequence number for that backend.
492
+ if (backendType == ProcessGroup::BackendType::GLOO ||
493
+ backendType == ProcessGroup::BackendType::NCCL ||
494
+ backendType == ProcessGroup::BackendType::UCC) {
495
+ getDefaultBackend()->setSequenceNumberForGroup();
496
+ } else {
497
+ TORCH_CHECK(
498
+ false,
499
+ c10::str(
500
+ "ProcessGroup ",
501
+ getBackendName(),
502
+ " does not yet support sequence numbers."));
503
+ }
504
+ }
505
+
506
+ // Retrieves the current sequence number for the whole group, which should be
507
+ // in sync. If the returned number is not consistent across the group, it
508
+ // may indicate that there is some sort of collective desynchronization.
509
+ virtual uint64_t getSequenceNumberForGroup() {
510
+ auto backendType = getBackendType();
511
+
512
+ // TODO: HACK for backend name to get sequence number for that backend.
513
+ if (backendType == ProcessGroup::BackendType::GLOO ||
514
+ backendType == ProcessGroup::BackendType::NCCL ||
515
+ backendType == ProcessGroup::BackendType::UCC) {
516
+ return getDefaultBackend()->getSequenceNumberForGroup();
517
+ } else {
518
+ TORCH_CHECK(
519
+ false,
520
+ c10::str(
521
+ "ProcessGroup ",
522
+ getBackendName(),
523
+ " does not yet support sequence numbers."));
524
+ }
525
+ }
526
+
527
+ virtual c10::intrusive_ptr<Work> send(
528
+ std::vector<at::Tensor>& tensors,
529
+ int dstRank,
530
+ int tag) {
531
+ static auto op = c10::Dispatcher::singleton()
532
+ .findSchemaOrThrow("c10d::send", "")
533
+ .typed<c10::intrusive_ptr<::c10d::Work>(
534
+ at::TensorList,
535
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
536
+ int64_t,
537
+ int64_t)>();
538
+ return op.call(
539
+ tensors,
540
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
541
+ dstRank,
542
+ tag);
543
+ }
544
+
545
+ virtual c10::intrusive_ptr<Work> recv(
546
+ std::vector<at::Tensor>& tensors,
547
+ int srcRank,
548
+ int tag) {
549
+ static auto op = c10::Dispatcher::singleton()
550
+ .findSchemaOrThrow("c10d::recv_", "")
551
+ .typed<c10::intrusive_ptr<::c10d::Work>(
552
+ at::TensorList,
553
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
554
+ int64_t,
555
+ int64_t)>();
556
+ return op.call(
557
+ tensors,
558
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
559
+ srcRank,
560
+ tag);
561
+ }
562
+
563
+ virtual c10::intrusive_ptr<Work> recvAnysource(
564
+ std::vector<at::Tensor>& tensors,
565
+ int tag) {
566
+ static auto op = c10::Dispatcher::singleton()
567
+ .findSchemaOrThrow("c10d::recv_any_source_", "")
568
+ .typed<c10::intrusive_ptr<::c10d::Work>(
569
+ at::TensorList,
570
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
571
+ int64_t)>();
572
+ return op.call(
573
+ tensors,
574
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
575
+ tag);
576
+ }
577
+
578
+ virtual c10::intrusive_ptr<Work> barrier(
579
+ const BarrierOptions& opts = BarrierOptions()) {
580
+ static at::Tensor tensor;
581
+ // TODO: if nccl was specified then use it
582
+ auto device = opts.device;
583
+ if (device.has_value()) {
584
+ // set device tensor from argument
585
+ tensor = at::empty(
586
+ {1}, at::TensorOptions().device(device.value()).dtype(at::kByte));
587
+ } else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) {
588
+ // set cuda tensor
589
+ tensor = at::empty(
590
+ {1},
591
+ at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte));
592
+ } else {
593
+ // Default to using cpu implementation
594
+ tensor = at::empty(
595
+ {1},
596
+ at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte));
597
+ }
598
+
599
+ static auto op = c10::Dispatcher::singleton()
600
+ .findSchemaOrThrow("c10d::barrier", "")
601
+ .typed<c10::intrusive_ptr<::c10d::Work>(
602
+ at::Tensor,
603
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
604
+ const std::vector<int64_t>&,
605
+ int64_t)>();
606
+
607
+ return op.call(
608
+ tensor,
609
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
610
+ opts.device_ids,
611
+ opts.timeout.count());
612
+ }
613
+
614
+ c10::intrusive_ptr<Options> getOptions() {
615
+ return options_;
616
+ }
617
+
618
+ bool hasBackends() {
619
+ return !deviceTypeToBackendType_.empty();
620
+ }
621
+
622
+ void setBackend(
623
+ c10::DeviceType deviceType,
624
+ BackendType backendType,
625
+ const c10::optional<c10::intrusive_ptr<Backend>>& backend) {
626
+ // TODO: should we add these entries after the backend setting succeeds?
627
+ deviceTypeToBackendType_[deviceType] = backendType;
628
+ deviceTypes_.insert(deviceType);
629
+ // if the backendType is already set then reuse it for this device
630
+ if (backendTypeToBackend_.find(backendType) !=
631
+ backendTypeToBackend_.end()) {
632
+ auto existingBackend = backendTypeToBackend_.at(backendType);
633
+ deviceTypeToBackend_[deviceType] = existingBackend;
634
+ } else {
635
+ // check if backend has value
636
+ if (backend.has_value()) {
637
+ deviceTypeToBackend_[deviceType] = backend.value();
638
+ backendTypeToBackend_[backendType] = backend.value();
639
+ }
640
+ }
641
+ }
642
+
643
+ c10::intrusive_ptr<Backend> getDefaultBackend() const {
644
+ TORCH_CHECK(
645
+ backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(),
646
+ "Could not find the default backend type ",
647
+ backendType_,
648
+ " for Process Group with name ",
649
+ getBackendName(),
650
+ ".");
651
+ return backendTypeToBackend_.at(backendType_);
652
+ }
653
+
654
+ c10::intrusive_ptr<Backend> getBackend(c10::DeviceType deviceType);
655
+
656
+ c10::intrusive_ptr<Backend> getBackend(BackendType backendType) const {
657
+ TORCH_CHECK(
658
+ backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(),
659
+ "Could not find backend type ",
660
+ backendType,
661
+ ".");
662
+ return backendTypeToBackend_.at(backendType);
663
+ }
664
+
665
+ // Return device types supported by this ProcessGroup.
666
+ // Note: the return type is `Device` rather than `DeviceType` for the purpose
667
+ // of easy comparison at Python level. The `Device` will have default index
668
+ // (-1).
669
+ std::vector<c10::Device> getDeviceTypes() const {
670
+ std::vector<c10::Device> devices;
671
+ devices.reserve(deviceTypes_.size());
672
+ for (auto& dt : deviceTypes_) {
673
+ devices.push_back(c10::Device(dt));
674
+ }
675
+ return devices;
676
+ }
677
+
678
+ void registerOnCompletionHook(
679
+ std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
680
+ getDefaultBackend()->registerOnCompletionHook(std::move(hook));
681
+ }
682
+
683
+ void waitForPendingWorks() {
684
+ getDefaultBackend()->waitForPendingWorks();
685
+ }
686
+
687
+ bool hasHooks() const {
688
+ return getDefaultBackend()->hasHooks();
689
+ }
690
+
691
+ const std::string& getGroupName() const;
692
+ void setGroupName(const std::string& name);
693
+ void enableCollectivesTiming();
694
+
695
+ void release_resources() override;
696
+
697
+ protected:
698
+ // Implementations of this interface need to call this to setup
699
+ // appropriate logging etc.
700
+ void init();
701
+
702
+ c10::intrusive_ptr<c10d::Store> store_;
703
+ const int rank_;
704
+ const int size_;
705
+ const c10::intrusive_ptr<Options> options_;
706
+ const BackendType backendType_;
707
+
708
+ // Debug level setting. It is parsed once when ProcessGroup is constructed and
709
+ // remains the same across use of this process group.
710
+ DebugLevel dist_debug_level_;
711
+
712
+ // Backend classes for this ProcessGroup
713
+ std::unordered_set<c10::DeviceType> deviceTypes_;
714
+ std::unordered_map<c10::DeviceType, BackendType> deviceTypeToBackendType_;
715
+ std::unordered_map<c10::DeviceType, c10::intrusive_ptr<Backend>>
716
+ deviceTypeToBackend_;
717
+ std::unordered_map<BackendType, c10::intrusive_ptr<Backend>>
718
+ backendTypeToBackend_;
719
+ };
720
+
721
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp ADDED
@@ -0,0 +1,438 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <condition_variable>
6
+ #include <deque>
7
+ #include <mutex>
8
+ #include <thread>
9
+ #include <unordered_map>
10
+ #include <vector>
11
+
12
+ #include <gloo/algorithm.h>
13
+ #include <gloo/common/error.h>
14
+ #include <gloo/context.h>
15
+ #include <gloo/rendezvous/store.h>
16
+ #include <gloo/transport/device.h>
17
+
18
+ #include <c10/util/hash.h>
19
+
20
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
21
+ #include <torch/csrc/distributed/c10d/Store.hpp>
22
+ #include <torch/csrc/distributed/c10d/Types.hpp>
23
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
24
+
25
+ namespace c10d {
26
+
27
+ constexpr const char* GLOO_BACKEND_NAME = "gloo";
28
+
29
+ // ProcessGroupGloo implements Gloo bindings for c10d.
30
+ //
31
+ // All functions on this class are expected to be called in the same
32
+ // order across processes in the group. This is the only way that we
33
+ // can guarantee to match up the same calls across processes. For
34
+ // multi-threaded usage of process groups, you can use consider using
35
+ // multiple process group instances.
36
+ //
37
+ // The Gloo algorithms that this class calls into are cached by their
38
+ // signature (see description of AlgorithmKey above). This cache works
39
+ // as follows: every function call instantiates an AlgorithmKey and
40
+ // looks in the cache for existing entries. If there is one, it is
41
+ // removed from the cache and returned to the caller. If there are
42
+ // none, a new entry is created and returned. If an entry was created
43
+ // before, but is still in use, the call will block and wait until the
44
+ // entry is returned to the cache.
45
+ //
46
+ // In the future, we hope to extend this to allow multiple entries per
47
+ // key, to enable parallelism for a single key. The number of entries
48
+ // per key must always be identical for all processes. This maximum
49
+ // number can be automatically tuned, but only if we let a single
50
+ // process take charge, and have it broadcast the limits.
51
+ //
52
+ class TORCH_API ProcessGroupGloo : public Backend {
53
+ public:
54
+ // AsyncWork is the Gloo specific superclass for asynchronous work items.
55
+ // We can split asynchronous work into 3 phases:
56
+ // 1) Sanity checks and prepare input (e.g. memcpy)
57
+ // 2) Run operation on background thread
58
+ // 3) Synchronize with completion on foreground thread
59
+ //
60
+ // There is state to be shared between these 3 phases and all of this state
61
+ // is captured in the AsyncWork class and its derivatives.
62
+ //
63
+ // Note: while we are porting operations to use new style collectives, there
64
+ // is a split between operations using the existing caching approach and
65
+ // operations using the new AsyncWork base class. Over time we will port
66
+ // all operations and perform needed cleanup.
67
+ //
68
+ // FIXME: This probably should be called WorkGloo since the work is executed
69
+ // in sync mode by a background thread.
70
+ class TORCH_API AsyncWork : public Work {
71
+ public:
72
+ explicit AsyncWork(
73
+ std::vector<std::vector<at::Tensor>> outputTensors,
74
+ OpType opType,
75
+ uint64_t seq,
76
+ const char* profilingTitle = nullptr,
77
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
78
+ c10::nullopt);
79
+
80
+ ~AsyncWork() override = default;
81
+
82
+ static void execute(c10::intrusive_ptr<AsyncWork> work);
83
+
84
+ virtual void run() = 0;
85
+
86
+ std::vector<at::Tensor> result() override;
87
+
88
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
89
+ uint64_t getSequencenumber() const override;
90
+
91
+ protected:
92
+ friend class ProcessGroupGloo;
93
+
94
+ private:
95
+ void finishWorkGloo();
96
+ void finishWorkGlooError(std::exception_ptr eptr);
97
+ inline void recordAsyncWorkProfilingInfo(
98
+ const char* profilingTitle,
99
+ const c10::optional<std::vector<at::Tensor>>& inputTensors);
100
+
101
+ const std::vector<std::vector<at::Tensor>> outputTensors_;
102
+ c10::intrusive_ptr<at::ivalue::Future> future_;
103
+ std::function<void()> recordFunctionBeforeCallback_;
104
+ const uint64_t seq_;
105
+ };
106
+
107
+ // Wrap c10d store as Gloo store
108
+ class TORCH_API GlooStore : public ::gloo::rendezvous::Store {
109
+ public:
110
+ GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {}
111
+
112
+ void setUint(const std::string& key, const std::vector<uint8_t>& value) {
113
+ store_->set(key, value);
114
+ }
115
+
116
+ void set(const std::string& key, const std::vector<char>& value) override {
117
+ std::vector<uint8_t> tmp(value.begin(), value.end());
118
+ store_->set(key, tmp);
119
+ }
120
+
121
+ std::vector<uint8_t> getUint(const std::string& key) {
122
+ auto value = store_->get(key);
123
+ return value;
124
+ }
125
+
126
+ std::vector<char> get(const std::string& key) override {
127
+ auto value = store_->get(key);
128
+ return std::vector<char>(value.begin(), value.end());
129
+ }
130
+
131
+ void wait(const std::vector<std::string>& keys) override {
132
+ store_->wait(keys, ::c10d::Store::kDefaultTimeout);
133
+ }
134
+
135
+ void wait(
136
+ const std::vector<std::string>& keys,
137
+ const std::chrono::milliseconds& timeout) override {
138
+ store_->wait(keys, timeout);
139
+ }
140
+
141
+ #ifdef GLOO_STORE_HAS_STORE_V2
142
+ bool has_v2_support() override {
143
+ return store_->hasExtendedApi();
144
+ }
145
+
146
+ std::vector<std::vector<char>> multi_get(
147
+ const std::vector<std::string>& keys) override {
148
+ std::vector<std::vector<char>> res;
149
+ for (auto& value : store_->multiGet(keys)) {
150
+ res.emplace_back(std::vector<char>(value.begin(), value.end()));
151
+ }
152
+ return res;
153
+ }
154
+
155
+ void multi_set(
156
+ const std::vector<std::string>& keys,
157
+ const std::vector<std::vector<char>>& values) override {
158
+ std::vector<std::vector<uint8_t>> u_values;
159
+ for (auto& value : values) {
160
+ u_values.emplace_back(std::vector<uint8_t>(value.begin(), value.end()));
161
+ }
162
+ store_->multiSet(keys, u_values);
163
+ }
164
+
165
+ void append(const std::string& key, const std::vector<char>& value)
166
+ override {
167
+ std::vector<uint8_t> tmp(value.begin(), value.end());
168
+ return store_->append(key, tmp);
169
+ }
170
+
171
+ int64_t add(const std::string& key, int64_t value) override {
172
+ return store_->add(key, value);
173
+ }
174
+ #endif
175
+
176
+ protected:
177
+ c10::intrusive_ptr<::c10d::Store> store_;
178
+ };
179
+
180
+ // For send and recv operations there is no need to pass them to the
181
+ // thread pool as they are entirely completed by the device thread.
182
+ // This work object is used to synchronize completion of the send or
183
+ // recv operation. It keeps a reference to the tensor it is
184
+ // operating on to prevent it from being deallocated while the
185
+ // operation is still in flight.
186
+ class TORCH_API SendWork : public Work {
187
+ public:
188
+ explicit SendWork(
189
+ at::Tensor& tensor,
190
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
191
+ uint64_t seq);
192
+
193
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
194
+
195
+ void abort() override;
196
+
197
+ uint64_t getSequencenumber() const override;
198
+
199
+ protected:
200
+ at::Tensor tensor_;
201
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
202
+ const uint64_t seq_;
203
+ };
204
+
205
+ class TORCH_API RecvWork : public Work {
206
+ public:
207
+ explicit RecvWork(
208
+ at::Tensor& tensor,
209
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
210
+ OpType opType,
211
+ uint64_t seq,
212
+ const char* profilingTitle = nullptr);
213
+
214
+ int sourceRank() const override;
215
+
216
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
217
+
218
+ void abort() override;
219
+
220
+ uint64_t getSequencenumber() const override;
221
+
222
+ protected:
223
+ at::Tensor tensor_;
224
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
225
+ int srcRank_;
226
+ const uint64_t seq_;
227
+ };
228
+
229
+ struct TORCH_API Options : public Backend::Options {
230
+ explicit Options(
231
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout);
232
+
233
+ // return intrusive_ptr of the object
234
+ static c10::intrusive_ptr<Options> create(
235
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout) {
236
+ return c10::make_intrusive<Options>(timeout);
237
+ }
238
+
239
+ std::vector<std::shared_ptr<::gloo::transport::Device>> devices;
240
+ int threads;
241
+ };
242
+
243
// Backend name constant for this process group (GLOO_BACKEND_NAME).
const std::string getBackendName() const override {
  return std::string(GLOO_BACKEND_NAME);
}

// Helper functions to create a new device object.
// They are static functions on this class to keep them logically
// separate from the rest of the code base (e.g. torch/csrc/distributed).

// Create new device instance for specific interface.
static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
    const std::string& interface);

// Create new device instance for specific hostname or address.
static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
    const std::string& hostname);

// Create new device instance.
// It tries to resolve this machine's hostname and bind to that address.
// If that fails (i.e. the hostname doesn't resolve to an address), it
// falls back to binding to the loopback address.
static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();

// Create ProcessGroupGloo instance.
static c10::intrusive_ptr<ProcessGroupGloo> createProcessGroupGloo(
    const c10::intrusive_ptr<Store>& store,
    int rank,
    int size,
    std::chrono::milliseconds timeout);

explicit ProcessGroupGloo(
    const c10::intrusive_ptr<Store>& store,
    int rank,
    int size,
    c10::intrusive_ptr<Options> options = Options::create());

~ProcessGroupGloo() override;

// Options (devices, thread count, timeout) this group was constructed with.
c10::intrusive_ptr<Options> getOptions() {
  return options_;
}

// Collective and point-to-point entry points. All ranks must call these
// in the same order; see the Backend base class for their semantics.

c10::intrusive_ptr<Work> broadcast(
    std::vector<at::Tensor>& tensors,
    const BroadcastOptions& opts = BroadcastOptions()) override;

c10::intrusive_ptr<Work> allreduce(
    std::vector<at::Tensor>& tensors,
    const AllreduceOptions& opts = AllreduceOptions()) override;

c10::intrusive_ptr<Work> allreduce_sparse(
    std::vector<at::Tensor>& tensors,
    const AllreduceOptions& opts = AllreduceOptions()) override;

c10::intrusive_ptr<Work> allreduce_coalesced(
    std::vector<at::Tensor>& tensors,
    const AllreduceCoalescedOptions& opts =
        AllreduceCoalescedOptions()) override;

c10::intrusive_ptr<Work> reduce(
    std::vector<at::Tensor>& tensors,
    const ReduceOptions& opts = ReduceOptions()) override;

c10::intrusive_ptr<Work> _reduce_scatter_base(
    at::Tensor& outputTensor,
    at::Tensor& inputTensor,
    const ReduceScatterOptions& opts = ReduceScatterOptions()) override;

c10::intrusive_ptr<Work> _allgather_base(
    at::Tensor& output_tensor,
    at::Tensor& input_tensor,
    const AllgatherOptions& opts = AllgatherOptions()) override;

c10::intrusive_ptr<Work> allgather(
    std::vector<std::vector<at::Tensor>>& outputs,
    std::vector<at::Tensor>& inputs,
    const AllgatherOptions& opts = AllgatherOptions()) override;

c10::intrusive_ptr<Work> allgather_coalesced(
    std::vector<std::vector<at::Tensor>>& output_lists,
    std::vector<at::Tensor>& input_list,
    const AllgatherOptions& opts = AllgatherOptions()) override;

c10::intrusive_ptr<Work> gather(
    std::vector<std::vector<at::Tensor>>& outputs,
    std::vector<at::Tensor>& inputs,
    const GatherOptions& opts = GatherOptions()) override;

c10::intrusive_ptr<Work> scatter(
    std::vector<at::Tensor>& outputs,
    std::vector<std::vector<at::Tensor>>& inputs,
    const ScatterOptions& opts = ScatterOptions()) override;

c10::intrusive_ptr<Work> reduce_scatter(
    std::vector<at::Tensor>& outputs,
    std::vector<std::vector<at::Tensor>>& inputs,
    const ReduceScatterOptions& opts = ReduceScatterOptions()) override;

c10::intrusive_ptr<Work> alltoall_base(
    at::Tensor& outputTensor,
    at::Tensor& inputTensor,
    std::vector<int64_t>& outputCounts,
    std::vector<int64_t>& inputCounts,
    const AllToAllOptions& opts = AllToAllOptions()) override;

c10::intrusive_ptr<Work> send(
    std::vector<at::Tensor>& tensors,
    int dstRank,
    int tag) override;

c10::intrusive_ptr<Work> recv(
    std::vector<at::Tensor>& tensors,
    int srcRank,
    int tag) override;

c10::intrusive_ptr<Work> recvAnysource(
    std::vector<at::Tensor>& tensors,
    int tag) override;

c10::intrusive_ptr<Work> barrier(
    const BarrierOptions& opts = BarrierOptions()) override;

void enableCollectivesTiming() override;

// Accessor for the underlying rendezvous store. NOTE(review): leading
// underscore suggests internal/test use — confirm before relying on it.
const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const {
  return store_;
}

// Similar to barrier(), but blocks rank 0 until all other ranks have
// acknowledged that they are alive (through send/recv from rank 0). Rank 0
// is able to report all failed ranks if waitAllRanks = true, otherwise
// reports the first rank it detected as failed.
void monitoredBarrier(
    const BarrierOptions& opts = BarrierOptions(),
    bool waitAllRanks = false) override;

// Agrees on an initial sequence number for the whole group by having rank 0
// create it and broadcast it to other ranks using the store.
void setSequenceNumberForGroup() override;

// Retrieves the current sequence number for the whole group, which should be
// in sync. If the returned number is not consistent across the group, it
// may indicate that there is some sort of collective desynchronization.
uint64_t getSequenceNumberForGroup() override;

// Number of worker threads configured via Options::threads.
int getNumThreads() {
  return options_->threads;
}

protected:
// Rendezvous store used to bootstrap Gloo contexts.
std::unique_ptr<::gloo::rendezvous::Store> store_;
const c10::intrusive_ptr<Options> options_;

// Every Gloo context represents a set of connections to its peers.
// In order to use more than one device (or allow for parallelism on
// a single device), you need multiple contexts.
std::vector<std::shared_ptr<::gloo::Context>> contexts_;
std::vector<std::thread> threads_;
// Signals worker threads to exit their run loop.
bool stop_;

// Incremented for every collective we kick off.
// The value is used as tag for collective operations. Collectives are kicked
// off in identical order across processes. Therefore the tag can be used
// to match up operations during concurrent execution.
uint32_t collectiveCounter_;

// Returns next collective tag to use (uses collectiveCounter_).
uint32_t nextTag();

// Returns the context to use for the specified tag.
// With `nextTag` returning an increasing number, this should lead
// to contexts being used in a round-robin fashion.
std::shared_ptr<::gloo::Context> getContext(uint32_t tag);

// Entrypoint for worker threads.
void runLoop(int workerIndex);

// Queue work to run on worker thread.
void enqueue(c10::intrusive_ptr<AsyncWork> work);

// Keep both a queue of pending work, and a vector with in progress work.
// Both of these can only be mutated when holding the queue lock.
// We keep both around instead of just the queue, so we can grab a weak_ptr
// to all in progress and pending work when executing a barrier.
// When executing a barrier, we need to ensure that all prior work
// has completed before completing itself.
std::deque<c10::intrusive_ptr<AsyncWork>> workQueue_;
std::vector<c10::intrusive_ptr<AsyncWork>> workInProgress_;
std::mutex workMutex_;
std::condition_variable workProduceCV_;
std::condition_variable workConsumeCV_;
// Running sequence number handed to work objects created by this group.
uint64_t seq_{0};
};
435
+
436
+ } // namespace c10d
437
+
438
+ #endif // USE_C10D_GLOO
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_MPI
4
+
5
+ #include <condition_variable>
6
+ #include <deque>
7
+ #include <exception>
8
+ #include <memory>
9
+ #include <mutex>
10
+ #include <thread>
11
+ #include <vector>
12
+
13
+ #include <ATen/core/ivalue.h>
14
+ #include <ATen/core/ivalue_inl.h>
15
+
16
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
17
+ #include <torch/csrc/distributed/c10d/Types.hpp>
18
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
19
+
20
+ #include <c10/util/CallOnce.h>
21
+
22
+ #include <mpi.h>
23
+
24
+ namespace c10d {
25
+
26
+ constexpr const char* MPI_BACKEND_NAME = "mpi";
27
+
28
// WorkEntry is the state associated with a single MPI run instance.
// It includes the source Tensor list and destination Tensor list, as well
// as the actual run function that will operate either on src or dst or
// both.
struct WorkEntry {
  // `srcPtr`/`dstPtr` may be null; when non-null their contents are copied
  // into `src`/`dst`. `run` is the callback executed by the worker thread.
  explicit WorkEntry(
      std::vector<at::Tensor>* srcPtr,
      std::vector<at::Tensor>* dstPtr,
      std::function<void(std::unique_ptr<WorkEntry>&)> run)
      : dst(dstPtr ? *dstPtr : std::vector<at::Tensor>()), run(std::move(run)) {
    if (srcPtr) {
      src = *srcPtr;
    }
  }

  // Not copyable
  WorkEntry(const WorkEntry&) = delete;
  // Not copy assignable
  WorkEntry& operator=(const WorkEntry&) = delete;

  // For input and output tensors (in-place), we will always use src
  std::vector<at::Tensor> src;

  // Copy of user provided outputs.
  const std::vector<at::Tensor> dst;

  // src rank returned, for recv only
  int* srcRank = nullptr;
  // Function that performs the actual MPI work for this entry.
  std::function<void(std::unique_ptr<WorkEntry>&)> run;
};
57
+
58
// ProcessGroupMPI implements MPI bindings for c10d.
//
// All functions on this class are expected to be called in the same
// order across processes in the group. This is the only way that we
// can guarantee to match up the same calls across processes.
//
// All MPI functions provided by this class are asynchronously scheduled on
// a worker thread. Therefore, ProcessGroupMPI requires the MPI
// implementation that is used to have a minimum thread support value of
// MPI_THREAD_SERIALIZED. That is, the process may be multi-threaded, and
// multiple threads may make MPI calls, but only one at a time: MPI calls
// are not made concurrently from two distinct threads (all MPI calls are
// serialized). However, with MPI_THREAD_SERIALIZED, ProcessGroupMPI will
// only support a single process group. In other words, no more than 1
// process group can be created globally.
//
// If you would like to use multiple ProcessGroupMPI, it requires your MPI
// implementation to have a thread support value of MPI_THREAD_MULTIPLE, that
// is, multiple threads may call MPI, with no restriction.
//
// Also note that ProcessGroupMPI only supports a single Tensor operation. In
// other words, the size of the input Tensor vector should always be 1.
//
// CUDA tensors can be supported if the MPI used is CUDA-aware MPI, and
// ProcessGroupMPI will automatically detect this support.
class TORCH_API ProcessGroupMPI : public Backend {
 public:
  // Work handle completed by the worker thread once the corresponding
  // WorkEntry has run; exposes results through an ivalue Future.
  class WorkMPI : public Work {
   public:
    explicit WorkMPI(
        std::vector<at::Tensor> outputTensors,
        const char* profilingTitle = nullptr,
        const c10::optional<std::vector<at::Tensor>>& inputTensors =
            c10::nullopt)
        : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors),
          outputTensors_(std::move(outputTensors)),
          future_(c10::make_intrusive<at::ivalue::Future>(
              c10::ListType::create(c10::TensorType::get()))) {}

    std::vector<at::Tensor> result() override;

    c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;

   protected:
    friend class ProcessGroupMPI;

   private:
    // Marks the future complete with outputTensors_.
    void finishWorkMPI();
    // Marks the future failed with the given exception.
    void finishWorkMPIError(std::exception_ptr eptr);

    std::vector<at::Tensor> outputTensors_;
    c10::intrusive_ptr<at::ivalue::Future> future_;
  };

  // Work handle wrapping a non-blocking MPI_Request / MPI_Status pair.
  class AsyncWork : public Work {
   public:
    AsyncWork(
        MPI_Request request,
        std::vector<at::Tensor> outputTensors,
        const char* profilingTitle = nullptr,
        const c10::optional<std::vector<at::Tensor>>& inputTensors =
            c10::nullopt);

    ~AsyncWork() override;

    bool isCompleted() override;

    bool isSuccess() const override;

    int sourceRank() const override;

    bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;

    void abort() override;

    std::vector<at::Tensor> result() override;

   protected:
    void populateException();

   private:
    const std::vector<at::Tensor> outputTensors_;
    MPI_Request request_;
    MPI_Status status_;
  };

  // Constructor will spawn up the worker thread loop
  explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm);

  ~ProcessGroupMPI() override;

  // Abort the MPI program, needs to be called when exception is detected
  void abort();

  const std::string getBackendName() const override {
    return std::string(MPI_BACKEND_NAME);
  }

  c10::intrusive_ptr<Work> broadcast(
      std::vector<at::Tensor>& data,
      const BroadcastOptions& opts = BroadcastOptions()) override;

  c10::intrusive_ptr<Work> allreduce(
      std::vector<at::Tensor>& tensors,
      const AllreduceOptions& opts = AllreduceOptions()) override;

  c10::intrusive_ptr<Work> allreduce_coalesced(
      std::vector<at::Tensor>& tensors,
      const AllreduceCoalescedOptions& opts =
          AllreduceCoalescedOptions()) override;

  c10::intrusive_ptr<Work> reduce(
      std::vector<at::Tensor>& tensors,
      const ReduceOptions& opts = ReduceOptions()) override;

  c10::intrusive_ptr<Work> allgather(
      std::vector<std::vector<at::Tensor>>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) override;

  c10::intrusive_ptr<Work> _allgather_base(
      at::Tensor& outputbuffer,
      at::Tensor& inputbuffer,
      const AllgatherOptions& opts = AllgatherOptions()) override;

  c10::intrusive_ptr<Work> allgather_coalesced(
      std::vector<std::vector<at::Tensor>>& outputTensorLists,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) override;

  c10::intrusive_ptr<Work> gather(
      std::vector<std::vector<at::Tensor>>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const GatherOptions& opts = GatherOptions()) override;

  c10::intrusive_ptr<Work> scatter(
      std::vector<at::Tensor>& outputTensors,
      std::vector<std::vector<at::Tensor>>& inputTensors,
      const ScatterOptions& opts = ScatterOptions()) override;

  c10::intrusive_ptr<Work> reduce_scatter(
      std::vector<at::Tensor>& outputTensors,
      std::vector<std::vector<at::Tensor>>& inputTensors,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) override;

  c10::intrusive_ptr<Work> alltoall_base(
      at::Tensor& outputTensor,
      at::Tensor& inputTensor,
      std::vector<int64_t>& outputSplitSizes,
      std::vector<int64_t>& inputSplitSizes,
      const AllToAllOptions& opts = AllToAllOptions()) override;

  c10::intrusive_ptr<Work> alltoall(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllToAllOptions& opts = AllToAllOptions()) override;

  c10::intrusive_ptr<Work> send(
      std::vector<at::Tensor>& tensors,
      int dstRank,
      int tag) override;

  c10::intrusive_ptr<Work> recv(
      std::vector<at::Tensor>& tensors,
      int srcRank,
      int tag) override;

  c10::intrusive_ptr<Work> recvAnysource(
      std::vector<at::Tensor>& tensor,
      int tag) override;

  c10::intrusive_ptr<Work> barrier(
      const BarrierOptions& opts = BarrierOptions()) override;

  // Creating a new ProcessGroupMPI, will initialize MPI if not initialized
  static c10::intrusive_ptr<ProcessGroupMPI> createProcessGroupMPI(
      std::vector<int> ranks = {});

 protected:
  using WorkType =
      std::tuple<std::unique_ptr<WorkEntry>, c10::intrusive_ptr<WorkMPI>>;
  // Worker thread loop
  void runLoop();
  // Helper function that is called by the destructor
  void destroy();

  // Enqueues a WorkEntry for the worker thread and returns its WorkMPI.
  c10::intrusive_ptr<Work> enqueue(
      std::unique_ptr<WorkEntry> entry,
      const char* profilingTitle = nullptr,
      const c10::optional<std::vector<at::Tensor>>& inputTensors =
          c10::nullopt);

  // Signals the worker thread to exit its loop.
  bool stop_;

  std::mutex pgMutex_;
  std::thread workerThread_;

  // Pending work consumed by workerThread_.
  std::deque<WorkType> queue_;
  std::condition_variable queueProduceCV_;
  std::condition_variable queueConsumeCV_;

  // Global states
  static void initMPIOnce();
  static void mpiExit();
  static c10::once_flag onceFlagInitMPI;

  static std::mutex pgGlobalMutex_;
  static int mpiThreadSupport_;

  // MPI communicator this process group operates on.
  MPI_Comm pgComm_;
};
268
+
269
+ } // namespace c10d
270
+
271
+ #endif // USE_C10D_MPI
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+ #include <torch/csrc/jit/python/pybind_utils.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ namespace c10d {
8
+
9
// PyProcessGroup is a pybind11 trampoline class to allow a Python
// class to inherit from torch.distributed.ProcessGroup
class PyProcessGroup : public ProcessGroup {
 public:
  // PyWork is a pybind11 trampoline class to allow a Python
  // class to inherit from torch.distributed.Work
  class PyWork : public Work {
   public:
    PyWork() = default;

    // Dispatches to a Python `wait` override if present, otherwise to
    // the C++ base implementation.
    bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
      PYBIND11_OVERRIDE(
          bool, /* Return type */
          Work, /* Parent class */
          wait, /* Name of function in C++ */
          timeout);
    }

    c10::intrusive_ptr<c10::ivalue::Future> getFuture() override {
      // We cannot use PYBIND11_OVERRIDE because:
      // 1. We have to >MANUALLY< unwrap the PyFutureWrapper and
      // 2. The python name is get_future
      pybind11::gil_scoped_acquire gil;
      auto override =
          pybind11::get_override(static_cast<const Work*>(this), "get_future");

      if (override) {
        py::object o = override();
        auto futWrapper =
            o.cast<std::shared_ptr<torch::jit::PythonFutureWrapper>>();
        return futWrapper->fut;
      }

      // No Python override registered; fall back to the C++ default.
      return Work::getFuture();
    }
  };

  // Inherit all ProcessGroup constructors.
  using ProcessGroup::ProcessGroup;

  // Pure virtual in ProcessGroup: the Python subclass must implement it.
  const std::string getBackendName() const override {
    PYBIND11_OVERRIDE_PURE(
        std::string, /* Return type */
        ProcessGroup, /* Parent class */
        getBackendName, /* Name of function in C++ */
    );
  }

  c10::intrusive_ptr<Work> allgather(
      std::vector<std::vector<at::Tensor>>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        allgather, /* Name of function in C++ */
        outputTensors,
        inputTensors,
        opts);
  }

  c10::intrusive_ptr<Work> allreduce(
      std::vector<at::Tensor>& tensors,
      const AllreduceOptions& opts = AllreduceOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        allreduce, /* Name of function in C++ */
        tensors,
        opts);
  }

  c10::intrusive_ptr<Work> barrier(
      const BarrierOptions& opts = BarrierOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        barrier, /* Name of function in C++ */
        opts);
  }

  c10::intrusive_ptr<Work> broadcast(
      std::vector<at::Tensor>& tensors,
      const BroadcastOptions& opts = BroadcastOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        broadcast, /* Name of function in C++ */
        tensors,
        opts);
  }

  c10::intrusive_ptr<Work> reduce_scatter(
      std::vector<at::Tensor>& outputTensors,
      std::vector<std::vector<at::Tensor>>& inputTensors,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        reduce_scatter, /* Name of function in C++ */
        outputTensors,
        inputTensors,
        opts);
  }

  c10::intrusive_ptr<Work> send(
      std::vector<at::Tensor>& tensors,
      int dstRank,
      int tag) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        send, /* Name of function in C++ */
        tensors,
        dstRank,
        tag);
  }

  c10::intrusive_ptr<Work> recv(
      std::vector<at::Tensor>& tensors,
      int srcRank,
      int tag) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        recv, /* Name of function in C++ */
        tensors,
        srcRank,
        tag);
  }
};
139
+
140
// Wraps a Python completion hook so it can be stored and invoked from
// non-Python threads; all Python interaction happens under the GIL.
class TORCH_PYTHON_API PythonOnCompletionHook {
 public:
  // Wraps a py::object hook and acquires Python GIL in dtor before
  // destructing the hook object.
  PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {}

  ~PythonOnCompletionHook() {
    py::gil_scoped_acquire ag;
    hook_.dec_ref();
    // Explicitly set hook_ to nullptr to prevent py::object's dtor
    // to decref on the PyObject again.
    // See Note [Destructing py::object] in python_ivalue.h
    hook_.ptr() = nullptr;
  }

  // Invokes the hook with `workInfo` under the GIL. Python errors are
  // converted to std::runtime_error; any exception is rethrown only after
  // the GIL is released so the backend can handle it safely.
  void operator()(std::shared_ptr<WorkInfo> workInfo) const {
    std::exception_ptr eptr;
    {
      py::gil_scoped_acquire acquire;
      try {
        hook_(workInfo);
      } catch (py::error_already_set& e) {
        // py::error_already_set requires GIL to destruct, take
        // special care.
        eptr = std::make_exception_ptr(std::runtime_error(e.what()));
        e.restore();
        PyErr_Clear();
      } catch (std::exception& e) {
        eptr = std::current_exception();
      }
    }
    // No more Python-related stuff at this point, i.e., this
    // exception can be captured and handled by PG backend.
    if (eptr)
      std::rethrow_exception(eptr);
  }

 private:
  py::object hook_;
};
180
+
181
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <chrono>
4
+ #include <cstdint>
5
+ #include <stdexcept>
6
+ #include <string>
7
+ #include <vector>
8
+
9
+ #include <c10/macros/Macros.h>
10
+ #include <torch/custom_class.h>
11
+
12
+ namespace c10d {
13
+
14
// callback function will be given arguments (optional<string> oldValue,
// optional<string> newValue)
using WatchKeyCallback =
    std::function<void(c10::optional<std::string>, c10::optional<std::string>)>;

// Abstract key/value store used by process groups for rendezvous and
// coordination. Concrete implementations supply the pure virtual
// accessors below.
class TORCH_API Store : public torch::CustomClassHolder {
 public:
  // Default timeout applied to blocking operations.
  static constexpr std::chrono::milliseconds kDefaultTimeout =
      std::chrono::seconds(300);
  // Zero duration used as the "no timeout" sentinel.
  static constexpr std::chrono::milliseconds kNoTimeout =
      std::chrono::milliseconds::zero();

  Store() : timeout_(kDefaultTimeout) {}

  explicit Store(const std::chrono::milliseconds& timeout)
      : timeout_(timeout) {}

  Store(const Store&) = default;
  Store(Store&&) noexcept = default;

  ~Store() override = default;

  // String-valued convenience overload of set() (non-virtual).
  void set(const std::string& key, const std::string& value);

  // Stores `value` under `key`.
  virtual void set(
      const std::string& key,
      const std::vector<uint8_t>& value) = 0;

  // String-valued convenience overload of compareSet() (non-virtual).
  std::string compareSet(
      const std::string& key,
      const std::string& currentValue,
      const std::string& newValue);

  // Optional compare-and-set; the default implementation asserts, so only
  // stores that override it support this operation.
  virtual std::vector<uint8_t> compareSet(
      const std::string& key,
      const std::vector<uint8_t>& currentValue,
      const std::vector<uint8_t>& newValue) {
    TORCH_INTERNAL_ASSERT(false, "Not implemented.");
  }

  // get() variant returning the value as a std::string.
  std::string get_to_str(const std::string& key);

  // Retrieves the value stored under `key`.
  virtual std::vector<uint8_t> get(const std::string& key) = 0;

  // Adds `value` to the counter stored at `key`, returning the new value.
  virtual int64_t add(const std::string& key, int64_t value) = 0;

  virtual bool deleteKey(const std::string& key) = 0;

  // Checks for the presence of all `keys`.
  virtual bool check(const std::vector<std::string>& keys) = 0;

  virtual int64_t getNumKeys() = 0;

  // Waits (up to the store timeout) for all `keys` to be set.
  virtual void wait(const std::vector<std::string>& keys) = 0;

  // Waits up to `timeout` for all `keys` to be set.
  virtual void wait(
      const std::vector<std::string>& keys,
      const std::chrono::milliseconds& timeout) = 0;

  virtual const std::chrono::milliseconds& getTimeout() const noexcept;

  virtual void setTimeout(const std::chrono::milliseconds& timeout);

  // watchKey() is deprecated and no longer supported.
  virtual void watchKey(
      const std::string& /* unused */,
      WatchKeyCallback /* unused */) {
    TORCH_CHECK(false, "watchKey is deprecated, no implementation support it.");
  }

  // Extended API below; availability is reported by hasExtendedApi().
  virtual void append(
      const std::string& key,
      const std::vector<uint8_t>& value);

  virtual std::vector<std::vector<uint8_t>> multiGet(
      const std::vector<std::string>& keys);

  virtual void multiSet(
      const std::vector<std::string>& keys,
      const std::vector<std::vector<uint8_t>>& values);

  // Returns true if this store support append, multiGet and multiSet
  virtual bool hasExtendedApi() const;

 protected:
  // Timeout applied to blocking operations on this store.
  std::chrono::milliseconds timeout_;
};
100
+
101
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp ADDED
@@ -0,0 +1,729 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/accumulate.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/distributed/c10d/Types.hpp>
8
+
9
+ #ifdef _WIN32
10
+ #include <winsock2.h>
11
+ #include <ws2tcpip.h>
12
+ typedef SSIZE_T ssize_t;
13
+ #pragma comment(lib, "Ws2_32.lib")
14
+ #else
15
+ #include <fcntl.h>
16
+ #include <netdb.h>
17
+ #include <sys/poll.h>
18
+ #include <sys/socket.h>
19
+ #include <unistd.h>
20
+ #endif
21
+
22
+ #include <sys/types.h>
23
+
24
+ #include <chrono>
25
+ #include <cstdint>
26
+ #include <cstdlib>
27
+ #include <functional>
28
+ #include <limits>
29
+ #include <string>
30
+ #include <system_error>
31
+ #include <tuple>
32
+ #include <vector>
33
+
34
+ namespace c10d {
35
+
36
+ // Retrieve tensor shapes from a given tensor.
37
+ TORCH_API std::vector<at::Tensor> getTensorShapes(
38
+ const std::vector<at::Tensor>& tensors);
39
+
40
+ // Use -2 to represent unset state of env vars
41
+ #define C10D_ENV_NOT_SET -2
42
+
43
+ // Turns at::IntArrayRef into "(1, 2, 3, 4)".
44
+ inline std::string toString(at::IntArrayRef l) {
45
+ std::stringstream ss;
46
+ ss << "(";
47
+ for (const auto i : c10::irange(l.size())) {
48
+ if (i > 0) {
49
+ ss << ", ";
50
+ }
51
+ ss << l[i];
52
+ }
53
+ ss << ")";
54
+ return ss.str();
55
+ }
56
+
57
+ inline std::string toString(const c10::Layout& layout) {
58
+ std::stringstream ss;
59
+ ss << layout;
60
+ return ss.str();
61
+ }
62
+
63
// Throws std::invalid_argument if any tensor in `tensors` does not match
// the options of `type` (i.e. tensors of mixed types were passed).
inline void assertSameType(
    const at::DeprecatedTypeProperties& type,
    const std::vector<at::Tensor>& tensors) {
  for (const auto i : c10::irange(tensors.size())) {
    if (!tensors[i].options().type_equal(type.options())) {
      const std::string expected = type.toString();
      const std::string actual = tensors[i].toString();
      throw std::invalid_argument(
          "mixed types (" + expected + " and " + actual + ")");
    }
  }
}
75
+
76
// Splits `string` on `separator` with std::getline semantics: consecutive
// separators yield empty pieces, a trailing separator yields no extra
// piece, and an empty input yields an empty result.
inline std::vector<std::string> split(
    char separator,
    const std::string& string) {
  std::vector<std::string> pieces;
  std::stringstream stream(string);
  for (std::string piece; std::getline(stream, piece, separator);) {
    pieces.push_back(std::move(piece));
  }
  return pieces;
}
87
+
88
// Reads a string config value from the environment. `env` lists alias
// variable names for one knob, env[0] being the canonical name; later
// entries are deprecated aliases. Returns `def` when none is set.
inline std::string getCvarString(
    const std::vector<std::string>& env,
    const char* def) {
  const char* ret = def;

  if (env.empty()) {
    TORCH_CHECK(false, "No environment variables passed");
    return ret;
  }

  /* parse environment variable in reverse order, so the early
   * versions of a variable get higher priority than the latter
   * versions of the same variable */
  for (int i = env.size() - 1; i >= 0; i--) {
    const char* val = std::getenv(env[i].c_str());
    if (val == nullptr) {
      continue;
    } else if (i) {
      // Any name other than env[0] is a deprecated alias.
      TORCH_WARN(
          "Environment variable " + env[i] + " is deprecated; use " + env[0] +
          " instead");
    }

    ret = val;
  }

  return ret;
}
116
+
117
// Reads an integer config value from the environment. `env` lists alias
// variable names for one knob, env[0] being the canonical name; later
// entries are deprecated aliases. Returns `def` when none is set; fails
// (TORCH_CHECK) on a value std::stoi cannot parse.
inline int getCvarInt(const std::vector<std::string>& env, int def) {
  int ret = def;

  if (env.empty()) {
    TORCH_CHECK(false, "No environment variables passed");
    return ret;
  }

  /* parse environment variable in reverse order, so the early
   * versions of a variable get higher priority than the latter
   * versions of the same variable */
  for (int i = env.size() - 1; i >= 0; i--) {
    char* val = std::getenv(env[i].c_str());
    if (val == nullptr) {
      continue;
    } else if (i) {
      // Any name other than env[0] is a deprecated alias.
      TORCH_WARN(
          "Environment variable " + env[i] + " is deprecated; use " + env[0] +
          " instead");
    }

    try {
      ret = std::stoi(val);
    } catch (std::exception& e) {
      // Covers both std::invalid_argument and std::out_of_range.
      TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
    }
  }

  return ret;
}
147
+
148
+ inline bool getCvarBool(const std::vector<std::string>& env, bool def) {
149
+ bool ret = def;
150
+
151
+ if (env.empty()) {
152
+ TORCH_CHECK(false, "No environment variables passed");
153
+ return ret;
154
+ }
155
+
156
+ /* parse environment variable in reverse order, so the early
157
+ * versions of a variable get higher priority than the latter
158
+ * versions of the same variable */
159
+ for (int i = env.size() - 1; i >= 0; i--) {
160
+ char* val_ = std::getenv(env[i].c_str());
161
+ if (val_ == nullptr) {
162
+ continue;
163
+ } else if (i) {
164
+ TORCH_WARN(
165
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
166
+ " instead");
167
+ }
168
+
169
+ std::string val = std::string(val_);
170
+ for (auto& x : val) {
171
+ x = std::tolower(x);
172
+ }
173
+
174
+ if (val == "y" || val == "yes" || val == "1" || val == "t" ||
175
+ val == "true") {
176
+ ret = true;
177
+ } else if (
178
+ val == "n" || val == "no" || val == "0" || val == "f" ||
179
+ val == "false") {
180
+ ret = false;
181
+ } else {
182
+ TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
183
+ return ret;
184
+ }
185
+ }
186
+
187
+ return ret;
188
+ }
189
+
190
+ inline void assertSameSizes(
191
+ const at::IntArrayRef& sizes,
192
+ const std::vector<at::Tensor>& tensors) {
193
+ for (const auto i : c10::irange(tensors.size())) {
194
+ if (!tensors[i].sizes().equals(sizes)) {
195
+ const auto expected = toString(sizes);
196
+ const auto actual = toString(tensors[i].sizes());
197
+ throw std::invalid_argument(
198
+ "mixed sizes (" + expected + " and " + actual + ")");
199
+ }
200
+ }
201
+ }
202
+
203
+ inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
204
+ // Ensure we have at least one tensor
205
+ if (tensors.empty()) {
206
+ throw std::invalid_argument("argument is empty");
207
+ }
208
+
209
+ // Ensure all tensors have identical type and shape
210
+ auto options = tensors[0].options();
211
+ auto sizes = tensors[0].sizes();
212
+ for (const auto i : c10::irange(1, tensors.size())) {
213
+ if (!tensors[i].options().type_equal(options)) {
214
+ const auto expected = toString(options);
215
+ const auto actual = toString(tensors[i].options());
216
+ throw std::invalid_argument(
217
+ "argument contains mixed types (" + expected + " and " + actual +
218
+ ")");
219
+ }
220
+ if (!tensors[i].sizes().equals(sizes)) {
221
+ const auto expected = toString(sizes);
222
+ const auto actual = toString(tensors[i].sizes());
223
+ throw std::invalid_argument(
224
+ "argument contains mixed sizes (" + expected + " and " + actual +
225
+ ")");
226
+ }
227
+ }
228
+ }
229
+
230
+ inline void assertTypeMatch(
231
+ std::function<void(const std::string&)> fn,
232
+ const at::DeprecatedTypeProperties& type,
233
+ const at::ArrayRef<at::Tensor> tensors,
234
+ size_t index) {
235
+ if (!tensors[index].options().type_equal(type.options())) {
236
+ fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
237
+ type.toString() + ", got " + tensors[index].toString() + ")");
238
+ }
239
+ }
240
+
241
+ inline void assertTypeMatch(
242
+ std::function<void(const std::string&)> fn,
243
+ const at::TensorOptions& options,
244
+ const at::ArrayRef<at::Tensor> tensors,
245
+ size_t index) {
246
+ if (!tensors[index].options().type_equal(options)) {
247
+ fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
248
+ toString(options) + ", got " + toString(tensors[index].options()) + ")");
249
+ }
250
+ }
251
+
252
+ inline void assertSizesMatch(
253
+ std::function<void(const std::string&)> fn,
254
+ const at::IntArrayRef& sizes,
255
+ const at::ArrayRef<at::Tensor> tensors,
256
+ size_t index) {
257
+ if (tensors[index].sizes() != sizes) {
258
+ fn("invalid tensor size at index " + std::to_string(index) + " (expected " +
259
+ toString(sizes) + ", got " + toString(tensors[index].sizes()) + ")");
260
+ }
261
+ }
262
+
263
+ inline void assertLayoutMatch(
264
+ std::function<void(const std::string&)> fn,
265
+ const c10::Layout& expected,
266
+ const at::ArrayRef<at::Tensor> tensors,
267
+ size_t index) {
268
+ const auto& actual = tensors[index].layout();
269
+ if (actual != expected) {
270
+ fn("invalid tensor layout at index " + std::to_string(index) +
271
+ " (expected " + toString(expected) + ", got " + toString(actual) + ")");
272
+ }
273
+ }
274
+
275
+ inline void assertLayoutMatch(
276
+ std::function<void(const std::string&)> fn,
277
+ const at::ArrayRef<at::Tensor> tensors) {
278
+ const auto& layout = tensors[0].layout();
279
+ for (const auto i : c10::irange(1, tensors.size())) {
280
+ assertLayoutMatch(fn, layout, tensors, i);
281
+ }
282
+ }
283
+
284
+ inline void assertNonEmpty(
285
+ std::function<void(const std::string&)> fn,
286
+ const at::ArrayRef<at::Tensor> tensors) {
287
+ if (tensors.empty()) {
288
+ fn("requires non-empty tensor list");
289
+ }
290
+ }
291
+
292
+ inline void assertSingleElement(
293
+ std::function<void(const std::string&)> fn,
294
+ const at::ArrayRef<at::Tensor> tensors) {
295
+ if (tensors.size() != 1) {
296
+ fn("requires a single-element tensor list");
297
+ }
298
+ }
299
+
300
+ inline void assertSingleElementInput(
301
+ std::function<void(const std::string&)> fn,
302
+ const at::ArrayRef<at::Tensor> tensors) {
303
+ if (tensors.size() != 1) {
304
+ fn("requires a single-element input tensor list");
305
+ }
306
+ }
307
+
308
+ inline void assertSingleElementOutput(
309
+ std::function<void(const std::string&)> fn,
310
+ const at::ArrayRef<at::Tensor> tensors) {
311
+ if (tensors.size() != 1) {
312
+ fn("requires a single-element output tensor list");
313
+ }
314
+ }
315
+
316
// Invokes `fn` with a diagnostic if `rank` is outside [0, size).
inline void assertRootRank(
    std::function<void(const std::string&)> fn,
    int rank,
    int size) {
  const bool valid = rank >= 0 && rank < size;
  if (!valid) {
    fn("invalid root rank: " + std::to_string(rank));
  }
}

// Invokes `fn` with a diagnostic if the root tensor index `rank` is outside
// [0, size).
inline void assertRootTensor(
    std::function<void(const std::string&)> fn,
    int rank,
    int size) {
  const bool valid = rank >= 0 && rank < size;
  if (!valid) {
    fn("invalid root tensor: " + std::to_string(rank));
  }
}
333
+
334
+ inline void assertDense(
335
+ std::function<void(const std::string&)> fn,
336
+ const at::ArrayRef<at::Tensor> tensors) {
337
+ const auto& layout = tensors[0].layout();
338
+ if (layout != at::kStrided) {
339
+ fn("only supports dense tensors");
340
+ }
341
+ }
342
+
343
+ inline void assertCPU(
344
+ std::function<void(const std::string&)> fn,
345
+ const at::ArrayRef<at::Tensor> tensors) {
346
+ const auto& device = tensors[0].device();
347
+ if (device.type() != at::kCPU) {
348
+ fn("only supports CPU tensors");
349
+ }
350
+ }
351
+
352
+ inline void assertSameDevice(
353
+ std::function<void(const std::string&)> fn,
354
+ const at::ArrayRef<at::Tensor> tensors) {
355
+ if (tensors.size() < 2) {
356
+ return;
357
+ }
358
+ const auto& device = tensors[0].device();
359
+ for (const auto i : c10::irange(1, tensors.size())) {
360
+ if (tensors[i].device() != device) {
361
+ fn("tensors should be on the same device");
362
+ }
363
+ }
364
+ }
365
+
366
+ inline void assertTypeAndSizesMatch(
367
+ std::function<void(const std::string&)> fn,
368
+ const at::ArrayRef<at::Tensor> tensors,
369
+ const at::DeprecatedTypeProperties& type,
370
+ const at::IntArrayRef& sizes) {
371
+ for (const auto i : c10::irange(tensors.size())) {
372
+ assertTypeMatch(fn, type, tensors, i);
373
+ assertSizesMatch(fn, sizes, tensors, i);
374
+ }
375
+ }
376
+
377
+ inline void assertTypeAndSizesMatch(
378
+ std::function<void(const std::string&)> fn,
379
+ const at::ArrayRef<at::Tensor> tensors,
380
+ const at::TensorOptions& options,
381
+ const at::IntArrayRef& sizes) {
382
+ for (const auto i : c10::irange(tensors.size())) {
383
+ assertTypeMatch(fn, options, tensors, i);
384
+ assertSizesMatch(fn, sizes, tensors, i);
385
+ }
386
+ }
387
+
388
+ inline void assertTypeAndSizesMatch(
389
+ std::function<void(const std::string&)> fn,
390
+ const at::ArrayRef<at::Tensor> tensors) {
391
+ const auto& options = tensors[0].options();
392
+ const auto sizes = tensors[0].sizes();
393
+ assertTypeAndSizesMatch(fn, tensors.slice(1), options, sizes);
394
+ }
395
+
396
+ // Copied from ATen/core/functional.h.
397
+ template <typename F, typename T>
398
+ inline auto fmap(T& inputs, const F& fn)
399
+ -> std::vector<decltype(fn(*inputs.begin()))> {
400
+ std::vector<decltype(fn(*inputs.begin()))> r;
401
+ r.reserve(inputs.size());
402
+ for (auto& input : inputs) {
403
+ r.push_back(fn(input));
404
+ }
405
+ return r;
406
+ }
407
+
408
+ // Copied from torch/csrc/utils/tensor_flatten.h.
409
+ inline at::Tensor flattenDenseTensors(at::TensorList tensors) {
410
+ static const auto flatten = [](const at::Tensor& t) {
411
+ return t.contiguous().view({-1});
412
+ };
413
+ if (tensors.size() == 1) {
414
+ return flatten(tensors[0]);
415
+ }
416
+ return at::cat(::c10d::fmap(tensors, flatten));
417
+ }
418
+
419
+ inline at::Tensor newLikeFlat(
420
+ std::vector<std::vector<at::Tensor>>& tensors,
421
+ size_t deviceIdx) {
422
+ if (tensors.empty() || tensors[0].empty()) {
423
+ TORCH_CHECK(false, "Received an empty list");
424
+ }
425
+ if (deviceIdx >= tensors.size()) {
426
+ TORCH_CHECK(false, "Invalid device index");
427
+ }
428
+ auto& t = tensors[deviceIdx][0];
429
+ auto device = t.device();
430
+ for (const auto i : c10::irange(1, tensors[deviceIdx].size())) {
431
+ if (tensors[deviceIdx][i].device() != device) {
432
+ TORCH_CHECK(false, "Expecting all tensors on the same device");
433
+ }
434
+ }
435
+ at::DeviceGuard gpuGuard(device);
436
+ std::vector<int64_t> sizes{static_cast<int64_t>(tensors[deviceIdx].size())};
437
+ std::vector<int64_t> strides{static_cast<int64_t>(t.numel())};
438
+ sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
439
+ strides.insert(strides.end(), t.strides().begin(), t.strides().end());
440
+ return at::empty_strided(
441
+ sizes, strides, t.options().memory_format(c10::nullopt));
442
+ }
443
+
444
+ inline at::Tensor newLikeFlat(std::vector<at::Tensor>& tensors) {
445
+ if (tensors.empty()) {
446
+ TORCH_CHECK(false, "Received an empty list");
447
+ }
448
+ auto& t = tensors[0];
449
+ at::DeviceGuard gpuGuard(t.device());
450
+ std::vector<int64_t> sizes{static_cast<int64_t>(tensors.size())};
451
+ sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
452
+ return at::empty(sizes, t.options());
453
+ }
454
+
455
+ inline std::vector<std::vector<int64_t>> getSizes(
456
+ const std::vector<at::Tensor>& tensors) {
457
+ std::vector<std::vector<int64_t>> sizes(tensors.size());
458
+ for (const auto i : c10::irange(tensors.size())) {
459
+ sizes[i] = tensors[i].sizes().vec();
460
+ }
461
+ return sizes;
462
+ }
463
+
464
+ inline std::vector<int> getDevices(const std::vector<at::Tensor>& tensors) {
465
+ std::vector<int> devices(tensors.size(), -1);
466
+ if (tensors[0].device().is_cuda()) {
467
+ for (const auto i : c10::irange(tensors.size())) {
468
+ devices[i] = tensors[i].storage().device().index();
469
+ }
470
+ }
471
+ return devices;
472
+ }
473
+
474
+ template <typename T>
475
+ inline T* getDataPointer(const at::Tensor& tensor) {
476
+ // This method is only used in ProcessGroupGloo for now. Call sites must make
477
+ // sure that the input tensor is contiguous. It is OK if the tensor does not
478
+ // start from the beginning of the storage. For example, it could come from
479
+ // chunk(..., dim=0)[1]. Hence, we need to use data_ptr() instead of
480
+ // tensor.storage().data()
481
+ // NB: not using tensor.data<T>() because tensor is not aware of gloo::TYPE
482
+ return static_cast<T*>(tensor.data_ptr());
483
+ }
484
+
485
+ template <typename T>
486
+ std::vector<T*> getDataPointers(const std::vector<at::Tensor>& tensors) {
487
+ std::vector<T*> ptrs(tensors.size());
488
+ for (const auto i : c10::irange(tensors.size())) {
489
+ ptrs[i] = getDataPointer<T>(tensors[i]);
490
+ }
491
+ return ptrs;
492
+ }
493
+
494
+ // For alltoall split size sanity check
495
+ inline void checkSplitSizes(
496
+ const std::vector<int64_t>& split_sizes,
497
+ const at::Tensor& tensor,
498
+ int group_size) {
499
+ if (split_sizes.empty()) {
500
+ TORCH_CHECK(
501
+ tensor.size(0) % group_size == 0,
502
+ "Tensor's dim 0 does not divide equally across group size");
503
+ } else {
504
+ TORCH_CHECK(
505
+ split_sizes.size() == static_cast<size_t>(group_size),
506
+ "Number of tensor splits not equal to group size");
507
+ const auto sum = c10::sum_integers(split_sizes);
508
+ TORCH_CHECK(
509
+ sum == tensor.size(0), "Split sizes doesn't match total dim 0 size");
510
+ }
511
+ }
512
+
513
+ // Compute alltoall lengths and offsets, handling multi-dimension tensors
514
+ template <typename T>
515
+ size_t computeLengthsAndOffsets(
516
+ const std::vector<int64_t>& split_sizes,
517
+ const at::Tensor& tensor,
518
+ std::vector<T>* lengths,
519
+ std::vector<T>* offsets) {
520
+ size_t group_size = lengths->size();
521
+ bool equal_splits = false;
522
+ size_t dim0_size = tensor.size(0);
523
+ size_t row_size = (dim0_size ? tensor.numel() / dim0_size : 1);
524
+ size_t split_size = 0;
525
+ size_t offset = 0;
526
+
527
+ if (split_sizes.empty()) {
528
+ equal_splits = true;
529
+ split_size = tensor.size(0) / group_size;
530
+ }
531
+ for (const auto i : c10::irange(group_size)) {
532
+ size_t length = row_size * (equal_splits ? split_size : split_sizes[i]);
533
+ (*lengths)[i] = length;
534
+ (*offsets)[i] = offset;
535
+ // TODO: see if we should add overflow protection for offset
536
+ offset += length;
537
+ }
538
+ return offset;
539
+ }
540
+
541
+ template <typename T>
542
+ size_t computeLengthsAndOffsets(
543
+ const std::vector<at::Tensor>& tensors,
544
+ std::vector<T>* lengths,
545
+ std::vector<T>* offsets) {
546
+ size_t group_size = lengths->size();
547
+ size_t offset = 0;
548
+ for (const auto i : c10::irange(group_size)) {
549
+ size_t length = tensors[i].numel();
550
+ (*lengths)[i] = length;
551
+ (*offsets)[i] = offset;
552
+ offset += length;
553
+ }
554
+ return offset;
555
+ }
556
+
557
// Integer types used for ranks and on-the-wire sizes in c10d.
using RankType = uint32_t;
using SizeType = uint64_t;

// `errno` is only meaningful when it fails. E.g., a successful `fork()` sets
// `errno` to `EINVAL` in child process on some macos
// (https://stackoverflow.com/a/20295079), and thus `errno` should really only
// be inspected if an error occurred.
//
// `success_cond` is an expression used to check if an error has happened. So
// for `fork()`, we can use `SYSCHECK(pid = fork(), pid != -1)`. The function
// output is stored in variable `__output` and may be used in `success_cond`.
#ifdef _WIN32
// NOTE(review): the retry branch below tests the CRT `errno` for EINTR while
// the timeout/would-block branches use WSAGetLastError() — confirm this mix
// is intentional for the Windows build.
#define SYSCHECK(expr, success_cond)                                      \
  while (true) {                                                          \
    auto __output = (expr);                                               \
    auto errno_local = WSAGetLastError();                                 \
    (void)__output;                                                       \
    if (!(success_cond)) {                                                \
      if (errno == EINTR) {                                               \
        continue;                                                         \
      } else if (                                                         \
          errno_local == WSAETIMEDOUT || errno_local == WSAEWOULDBLOCK) { \
        C10_THROW_ERROR(DistNetworkError, "Socket Timeout");              \
      } else {                                                            \
        C10_THROW_ERROR(DistNetworkError, std::strerror(errno_local));    \
      }                                                                   \
    } else {                                                              \
      break;                                                              \
    }                                                                     \
  }
#else
// POSIX variant: retry on EINTR, map EAGAIN/EWOULDBLOCK to a timeout error,
// and surface anything else as a DistNetworkError with the errno text.
#define SYSCHECK(expr, success_cond)                               \
  while (true) {                                                   \
    auto __output = (expr);                                        \
    (void)__output;                                                \
    if (!(success_cond)) {                                         \
      if (errno == EINTR) {                                        \
        continue;                                                  \
      } else if (errno == EAGAIN || errno == EWOULDBLOCK) {        \
        C10_THROW_ERROR(DistNetworkError, "Socket Timeout");       \
      } else {                                                     \
        C10_THROW_ERROR(DistNetworkError, std::strerror(errno));   \
      }                                                            \
    } else {                                                       \
      break;                                                       \
    }                                                              \
  }
#endif

// Most functions indicate error by returning `-1`. This is a helper macro for
// this common case with `SYSCHECK`.
// Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1
#define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1)
610
+
611
+ namespace tcputil {
612
+
613
// Send and receive

// Writes `length` elements of `buffer` to `socket`, looping until every byte
// has been sent; throws DistNetworkError (via SYSCHECK) on timeout or error.
// `moreData` hints (via MSG_MORE, where available) that another send will
// follow, allowing the kernel to coalesce packets.
template <typename T>
void sendBytes(
    int socket,
    const T* buffer,
    size_t length,
    bool moreData = false) {
  size_t bytesToSend = sizeof(T) * length;
  if (bytesToSend == 0) {
    return;
  }

  auto bytes = reinterpret_cast<const uint8_t*>(buffer);
  // NOTE(review): the buffer is only read below, so this const_cast looks
  // unnecessary; kept as-is to avoid any behavior change.
  uint8_t* currentBytes = const_cast<uint8_t*>(bytes);

  int flags = 0;

#ifdef MSG_MORE
  if (moreData) { // there is more data to send
    flags |= MSG_MORE;
  }
#endif

  // Ignore SIGPIPE as the send() return value is always checked for error
#ifdef MSG_NOSIGNAL
  flags |= MSG_NOSIGNAL;
#endif

  while (bytesToSend > 0) {
    ssize_t bytesSent;
    SYSCHECK_ERR_RETURN_NEG1(
        bytesSent =
            ::send(socket, (const char*)currentBytes, bytesToSend, flags))
    // A zero return from send() indicates the peer closed the connection.
    if (bytesSent == 0) {
      C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
    }

    bytesToSend -= bytesSent;
    currentBytes += bytesSent;
  }
}

// Reads exactly `length` elements into `buffer` from `socket`, looping over
// short reads; throws DistNetworkError if the peer closes early or on error.
template <typename T>
void recvBytes(int socket, T* buffer, size_t length) {
  size_t bytesToReceive = sizeof(T) * length;
  if (bytesToReceive == 0) {
    return;
  }

  auto bytes = reinterpret_cast<uint8_t*>(buffer);
  uint8_t* currentBytes = bytes;

  while (bytesToReceive > 0) {
    ssize_t bytesReceived;
    SYSCHECK_ERR_RETURN_NEG1(
        bytesReceived = recv(socket, (char*)currentBytes, bytesToReceive, 0))
    // recv() returning zero means an orderly shutdown by the peer.
    if (bytesReceived == 0) {
      C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
    }

    bytesToReceive -= bytesReceived;
    currentBytes += bytesReceived;
  }
}
677
+
678
+ // send a vector's length and data
679
+ template <typename T>
680
+ void sendVector(int socket, const std::vector<T>& vec, bool moreData = false) {
681
+ SizeType size = vec.size();
682
+ sendBytes<SizeType>(socket, &size, 1, true);
683
+ sendBytes<T>(socket, vec.data(), size, moreData);
684
+ }
685
+
686
+ // receive a vector as sent in sendVector
687
+ template <typename T>
688
+ std::vector<T> recvVector(int socket) {
689
+ SizeType valueSize;
690
+ recvBytes<SizeType>(socket, &valueSize, 1);
691
+ std::vector<T> value(valueSize);
692
+ recvBytes<T>(socket, value.data(), value.size());
693
+ return value;
694
+ }
695
+
696
+ // this is only for convenience when sending rvalues
697
+ template <typename T>
698
+ void sendValue(int socket, const T& value, bool moreData = false) {
699
+ sendBytes<T>(socket, &value, 1, moreData);
700
+ }
701
+
702
+ template <typename T>
703
+ T recvValue(int socket) {
704
+ T value;
705
+ recvBytes<T>(socket, &value, 1);
706
+ return value;
707
+ }
708
+
709
+ // send a string's length and data
710
+ inline void sendString(
711
+ int socket,
712
+ const std::string& str,
713
+ bool moreData = false) {
714
+ SizeType size = str.size();
715
+ sendBytes<SizeType>(socket, &size, 1, true);
716
+ sendBytes<char>(socket, str.data(), size, moreData);
717
+ }
718
+
719
+ // receive a string as sent in sendString
720
+ inline std::string recvString(int socket) {
721
+ SizeType valueSize;
722
+ recvBytes<SizeType>(socket, &valueSize, 1);
723
+ std::vector<char> value(valueSize);
724
+ recvBytes<char>(socket, value.data(), value.size());
725
+ return std::string(value.data(), value.size());
726
+ }
727
+
728
+ } // namespace tcputil
729
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <c10/macros/Macros.h>
10
+
11
+ namespace c10d {
12
+
13
// Debug verbosity levels for torch.distributed (c10d).
enum class DebugLevel { Off = 0, Info = 1, Detail = 2 };

// Sets the current debug level.
TORCH_API void setDebugLevel(DebugLevel level);

// Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG`
// environment variable.
TORCH_API void setDebugLevelFromEnvironment();

// Returns the current debug level.
TORCH_API DebugLevel debug_level() noexcept;
22
+
23
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <cstring>
10
+ #include <system_error>
11
+
12
+ #include <fmt/format.h>
13
+
14
+ namespace fmt {
15
+
16
+ template <>
17
+ struct formatter<std::error_category> {
18
+ constexpr decltype(auto) parse(format_parse_context& ctx) const {
19
+ return ctx.begin();
20
+ }
21
+
22
+ template <typename FormatContext>
23
+ decltype(auto) format(const std::error_category& cat, FormatContext& ctx)
24
+ const {
25
+ if (std::strcmp(cat.name(), "generic") == 0) {
26
+ return fmt::format_to(ctx.out(), "errno");
27
+ } else {
28
+ return fmt::format_to(ctx.out(), "{} error", cat.name());
29
+ }
30
+ }
31
+ };
32
+
33
+ template <>
34
+ struct formatter<std::error_code> {
35
+ constexpr decltype(auto) parse(format_parse_context& ctx) const {
36
+ return ctx.begin();
37
+ }
38
+
39
+ template <typename FormatContext>
40
+ decltype(auto) format(const std::error_code& err, FormatContext& ctx) const {
41
+ return fmt::format_to(
42
+ ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message());
43
+ }
44
+ };
45
+
46
+ } // namespace fmt
47
+
48
+ namespace c10d {
49
+ namespace detail {
50
+
51
// Snapshots the current `errno` as a std::error_code in the generic
// category.
inline std::error_code lastError() noexcept {
  const int saved_errno = errno;
  return std::error_code{saved_errno, std::generic_category()};
}
54
+
55
+ } // namespace detail
56
+ } // namespace c10d
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/builtin_function.h>
4
+ #include <ATen/core/stack.h>
5
+ #include <torch/csrc/jit/backends/backend_interface.h>
6
+ #include <torch/custom_class.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace {
11
+ // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
12
+ inline c10::FunctionSchema getIsAvailableSchema() {
13
+ c10::Argument self("self", c10::AnyType::get());
14
+ c10::Argument available("available", c10::BoolType::get());
15
+ c10::FunctionSchema preprocessor_schema(
16
+ "is_available",
17
+ /*overload_name=*/"",
18
+ /*arguments=*/{self},
19
+ /*returns=*/{available});
20
+ return preprocessor_schema;
21
+ }
22
+
23
+ constexpr static auto kBackendsNamespace = "__backends__";
24
+
25
+ // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
26
+ inline c10::FunctionSchema getCompileSchema() {
27
+ c10::Argument self("self", c10::AnyType::get());
28
+ c10::Argument mod("processed", c10::AnyType::get());
29
+ auto any_dict_ty =
30
+ c10::DictType::create(c10::StringType::get(), c10::AnyType::get());
31
+ c10::Argument method_compile_spec("method_compile_spec", any_dict_ty);
32
+ c10::Argument handles("handles", any_dict_ty);
33
+
34
+ c10::FunctionSchema compile_schema(
35
+ "compile",
36
+ /*overload_name=*/"",
37
+ /*arguments=*/{self, mod, method_compile_spec},
38
+ /*returns=*/{handles});
39
+ return compile_schema;
40
+ }
41
+
42
+ // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
43
+ inline c10::FunctionSchema getExecuteSchema() {
44
+ auto any_list_ty = c10::ListType::create(c10::AnyType::get());
45
+ c10::Argument self("self", c10::AnyType::get());
46
+ c10::Argument handle("handle", c10::AnyType::get());
47
+ c10::Argument input("input", any_list_ty);
48
+ c10::Argument output("output", any_list_ty);
49
+ return c10::FunctionSchema(
50
+ "execute",
51
+ /*overload_name=*/"",
52
+ /*arguments=*/{self, handle, input},
53
+ /*returns=*/{output});
54
+ }
55
+
56
+ template <typename TBackendInterface>
57
+ std::function<void(Stack&)> getIsAvailableFunc() {
58
+ return [](Stack& stack) {
59
+ auto self = pop(stack).toCustomClass<TBackendInterface>();
60
+ auto ret = self->is_available();
61
+ push(stack, ret);
62
+ };
63
+ }
64
+
65
+ template <typename TBackendInterface>
66
+ std::function<void(Stack&)> getCompileFunc() {
67
+ return [](Stack& stack) {
68
+ auto method_compile_spec = pop(stack).toGenericDict();
69
+ auto processed = pop(stack);
70
+ auto self = pop(stack).toCustomClass<TBackendInterface>();
71
+ auto ret = self->compile(processed, method_compile_spec);
72
+ push(stack, ret);
73
+ };
74
+ }
75
+
76
+ template <typename TBackendInterface>
77
+ std::function<void(Stack&)> getExecuteFunc() {
78
+ return [](Stack& stack) {
79
+ auto args = pop(stack);
80
+ auto handle = pop(stack);
81
+ auto self = pop(stack);
82
+ auto backend = self.toCustomClass<TBackendInterface>();
83
+ auto res = backend->execute(handle, args.toList());
84
+ push(stack, res);
85
+ };
86
+ }
87
+ } // namespace
88
+
89
+ // Static registration API for backends.
90
template <class TBackendInterface>
class backend {
  static_assert(
      std::is_base_of<PyTorchBackendInterface, TBackendInterface>::value,
      "torch::jit::backend<T> requires T to inherit from PyTorchBackendInterface");
  // Name this backend was registered under (stored but not otherwise read
  // in this class).
  std::string backend_name_;

 public:
  // Registers a new backend with /p name, and the given /p preprocess
  // function.
  // The function-local static ensures registration runs at most once per
  // TBackendInterface instantiation: it defines the torchbind class
  // `__backends__.<name>` with a default constructor and the unboxed
  // is_available/compile/execute methods built from the schemas above.
  backend(const std::string& name) : backend_name_(name) {
    static auto cls = torch::class_<TBackendInterface>(kBackendsNamespace, name)
                          .def(torch::init<>())
                          ._def_unboxed(
                              "is_available",
                              getIsAvailableFunc<TBackendInterface>(),
                              getIsAvailableSchema())
                          ._def_unboxed(
                              "compile",
                              getCompileFunc<TBackendInterface>(),
                              getCompileSchema())
                          ._def_unboxed(
                              "execute",
                              getExecuteFunc<TBackendInterface>(),
                              getExecuteSchema());
  }
};
117
+
118
+ } // namespace jit
119
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+
4
+ #include <torch/csrc/jit/backends/backend_detail.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+
8
+ #include <atomic>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /*
14
+ * BackendDebugHandleManager is responsible for issuing debug handles to
15
+ * backends. Debug handles are associated with nodes of a graph.
16
+ * BackendDebugHandleManager also maintains a map
17
+ * [debug-handle, DebugInfoTuple = {source range, inlined callstack ptr]} that
18
+ * will help generate a callstack for exception raised using debug handles.
19
+ * Effectively debug handles are something that is given to backend and later
20
+ * when an exception occurs in the backend, backend can tell, using debug
21
+ * handle, that an exception occurred here. Then the runtime can generate
22
+ * callstack corresponding to the exception.
23
+ * There are two parts to BackendDebugHandleManager:
24
+ * 1. static std::atomic debug_handle
25
+ * 2. Map of [debug-handle, DebugInfoTuple]
26
+ *
27
+ * About 1:
28
+ * Why do they have to be unique. The reason is that by ensuring
29
+ * uniqueness of debug handles, we remove the burden of another layer of
30
+ * mapping where we need to say this set of debug handles were generated for
31
+ * this lowered module or this bytecode function. This simplifies the API for
32
+ * serialization since debug handles can uniquely identify DebugInfoTuple.
33
+ * Thus simplifies the runtime API for throwing exception. Exception throwing
34
+ * only needs to know debug_handle and not which module or method threw it.
35
+ * There are 2 issues to keep in mind, though,for static std::atomic
36
+ * debug_handle: A. Performance implications of using atomic variable. However
37
+ * this is only used for compilation so we assume to absorb some of that
38
+ * penalty. Plus if there is no contention then we should have less to worry
39
+ * about. B. If repeated compilation is part of a long running process then we
40
+ * may overflow int64_t. We may detect and fail on this. For now this is not
41
+ * done.
42
+ *
43
+ * Now about 2:
44
+ * There are two use cases for [debug-handle, DebugInfoTuple]
45
+ * A. During bytecode generation the DebugInfoTuple corresponding to the nodes
46
+ * of the inlined graph being serialized, are stored in this object and a
47
+ * unique debug handle is returned. This unique debug handle is stored in
48
+ * mobile_debug info for pytorch lite models. It will be used for raising
49
+ * exceptions as well as profiling. B. During backend lowering, each backend's
50
+ * preprocess/compile method can compile method's graph and serialize those
51
+ * methods. Once the method is lowered to backend, graph is essentially lost.
52
+ * Without access to graph it is hard to generate model level debug info. Thus
53
+ * the debug handles provide a way to map nodes of the graph to the model level
54
+ * debug info.
55
+ *
56
+ * During byte-code model serialization, [debug-handle, DebugInfoTuple] is
57
+ * serialized. Now we know a. debug handles and b. how to map debug handles to
58
+ * model source code. Thus we can either do eager symbolication by converting
59
+ * debug handles to corresponding source code at runtime, or do lazy
60
+ * symbolication offline.
61
+ *
62
+ * Note that it is not necessary to serialize [debug-handle, DebugInfoTuple]
63
+ * corresponding to lowered backend if the lowering process, that is
64
+ * preprocess/compile, and execution happens in the same session, then eager
65
+ * symbolication can be employed.
66
+ *
67
+ * Now how does BackendDebugHandleManager capture all of the above?
68
+ * By providing two API.
69
+ * 1. getNextDebugHandle which given a Node* returns a unique debug handle,
70
+ * that will uniquely identify DebugInfoTuple.
71
+ * and
72
+ * 2. getCallStackPtrMap which returns the map
73
+ * [debug-handle, DebugInfoTuple]
74
+ *
75
+ * 1 provides debug handles to backends and 2 provides runtime a way to map
76
+ * debug handles to source level debug info.
77
+ *
78
+ * So why does debug handle map to DebugInfoTuple = {source range and inlined
79
+ * cs}? {debug_handle, source_range_tag, serialized_callstack} Take this
80
+ * example: class L(nn.Module): def __init__(self):
81
+ * ...
82
+ * def forward(self, x):
83
+ * return x * 5
84
+ * class M(nn.Module):
85
+ * def __init__(self):
86
+ * ...
87
+ * def forward(self, x):
88
+ * return x - 2
89
+ * class N(nn.Module):
90
+ * def __init__(self):
91
+ * self.m = M()
92
+ * def forward(self, x):
93
+ * return self.m(x) + 3
94
+ * m = torch.jit.script(N())
95
+ * Once you inline m's forward method, m.forward.graph will look something
96
+ * like this
97
+ * graph(%self...):
98
+ * %x = aten::mul(..)
99
+ * %x = aten::sub(x, ..)
100
+ * %y = aten::add(x, ..)
101
+ * ..
102
+ * Inlined callstack ptr for these two nodes will look like:
103
+ * aten::mul's inlined CS (callstack): [N.forward, source range] -> [M.forward,
104
+ * source range] aten::sub's inlined CS (callstack): [N.forward, source range]
105
+ * aten::add's inlined CS: null
106
+ * mul node's inlined CS contains only information about the callsites' source
107
+ * range The information about mul node's source range ('return x * 5') is not
108
+ * available in its inlined CS. It is rather part of node's source range
109
+ * instead of inlined CS. Thus to get full stack: [N.forward, source range] ->
110
+ * [M.forward, source range] -> [aten::mul's source range] We need to track
111
+ * mul's source range and inlined CS both.
112
+ */
113
+
114
+ using BackendDebugInfoMapType =
115
+ std::unordered_map<torch::jit::DebugHandleType, DebugInfoTuple>;
116
+
117
+ /*
118
+ * This class is used to generate debug info map.
119
+ * backend's preprocess will call generate_debug_handles (see
120
+ * backend_detail.cpp), which uses debug_handle_manager to generate debug
121
+ * handles. When lowering process finishes, calling stopRecording will
122
+ * return debug info map from debug_handle_manager
123
+ */
124
class TORCH_API BackendDebugInfoRecorder {
 public:
  BackendDebugInfoRecorder() = default;
  // Returns a fresh debug handle (unique across recorders via the static
  // atomic counter) and records the mapping from that handle to `node`'s
  // debug info.
  int64_t getNextDebugHandle(const Node* node);
  // Reason this is not done as RAII is that work done in stopRecording
  // can throw, and throwing with dtor will call terminate and thus voids any
  // exception catching at a higher level.
  BackendDebugInfoMapType stopRecording();
  // Assigns a debug handle to every node of `graph` and returns the
  // node -> handle mapping.
  NodeToDebugHandle generate_debug_handles(const std::shared_ptr<Graph>& graph);

 private:
  // Shared across all recorders so handles stay globally unique (see the
  // class comment above about the atomic counter).
  static std::atomic<DebugHandleType> unique_debug_handle_;
  // Accumulated handle -> DebugInfoTuple map, returned by stopRecording().
  BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_;
};
138
+
139
+ } // namespace jit
140
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifndef BUILD_LITE_INTERPRETER
4
+ #include <torch/csrc/jit/backends/backend_debug_handler.h>
5
+ #endif
6
+ #include <torch/custom_class.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ constexpr static auto kBackendUtilsNamespace = "backendutils";
12
+ constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";
13
+
14
+ #ifndef BUILD_LITE_INTERPRETER
15
+ /*
16
+ * Custom class for holding debug information in lowered modules, intended
17
+ * purely for keeping this information to be later serialized outside of the
18
+ * lowered module itself.
19
+ * Its usage pattern is:
20
+ * 1. LoweredModule declares an instance of this class in __backend_debug_info
21
+ * 2. During serialization, __backend_debug_info is used to obtain the debug
22
+ * information.
23
+ * 3. The contents of LoweredModule.__backend_debug_info are not serialized
24
+ * within the LoweredModule itself.
25
+ */
26
+ class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder {
27
+ public:
28
+ PyTorchBackendDebugInfo() = default;
29
+
30
+ c10::optional<BackendDebugInfoMapType>& getDebugInfoMap() {
31
+ return debug_info_map_;
32
+ }
33
+
34
+ void setDebugInfoMap(BackendDebugInfoMapType&& debug_info_map) {
35
+ debug_info_map_ = std::move(debug_info_map);
36
+ }
37
+
38
+ private:
39
+ c10::optional<BackendDebugInfoMapType> debug_info_map_;
40
+ };
41
+
42
+ #else
43
+
44
+ /*
45
+ * Dummy instance exists for the following reason:
46
+ * __backend_debug_info is of type BackendDebugInfo which is a torchbind'
47
+ * class backed by cpp class PyTorchBackendDebugInfo.
48
+ * PyTorchBackendDebugInfo, depends on ir.h., scope.h, source_range etc.
49
+ * We dont include this on lite interpreter side. Thus on lite interpreter side
50
+ * we cannot have valid definition of PyTorchBackendDebugInfo. However we do not
51
+ * need valid instance of __backend_debug_info in lite interpreter anyway as we
52
+ * dont serialize this info as part of LowerdModule as mentioned ealrier.
53
+ * However since LoweredModule has registered attribute of __backend_debug_info
54
+ * we still need to make sure that BackendDebugInfo is registered with
55
+ * TorchScript. However in this instance it does not have to be backed by
56
+ * PyTorchBackendDebugInfo, so we create a dummy PyTorchBackendDebugInfoDummy
57
+ * just for this purpose.
58
+ */
59
+ class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
60
+ public:
61
+ PyTorchBackendDebugInfoDummy() = default;
62
+ };
63
+ #endif
64
+ } // namespace jit
65
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ #include <ATen/core/jit_type.h>
6
+
7
+ #include <functional>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ using DebugHandleType = int64_t;
13
+
14
+ using NodeToDebugHandle = std::unordered_map<Node*, DebugHandleType>;
15
+
16
+ using BackendDebugHandleGenerator =
17
+ std::function<NodeToDebugHandle(const std::shared_ptr<Graph>&)>;
18
+
19
+ namespace detail {
20
+
21
+ using BackendPreprocessFunction = std::function<c10::IValue(
22
+ const Module&,
23
+ const c10::Dict<IValue, IValue>&,
24
+ const BackendDebugHandleGenerator& generate_debug_handles)>;
25
+
26
+ TORCH_API void registerBackendPreprocessFunction(
27
+ const std::string& name,
28
+ const BackendPreprocessFunction& preprocess);
29
+
30
+ bool hasBackendPreprocessFunction(const std::string& name);
31
+
32
+ BackendPreprocessFunction getBackendPreprocessFunction(const std::string& name);
33
+
34
+ TORCH_API Module codegen_backend_module(
35
+ const std::string& backend_name,
36
+ const Module& orig_module,
37
+ const c10::Dict<IValue, IValue>& method_compile_spec,
38
+ const c10::DictTypePtr& any_dict_ty);
39
+ } // namespace detail
40
+ } // namespace jit
41
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+
4
+ namespace c10 {
5
+ class TORCH_API BackendRuntimeException : public c10::Error {
6
+ public:
7
+ // Use debug_handle to throw exception
8
+ BackendRuntimeException(
9
+ SourceLocation loc,
10
+ std::string msg,
11
+ int64_t debug_handle)
12
+ : c10::Error(loc, msg) {
13
+ debug_handles.push_back(debug_handle);
14
+ }
15
+ // If rethrowing, can push another debug_handle
16
+ // This is useful in couple of scenarios.
17
+ // 1. A submodule is lowered and lite interperter has CallMethod
18
+ // to lowered module's method. In this case lowered module will throw with
19
+ // a handle, plus there will be another debug handle corresponding
20
+ // to the CallMethod node in lite interpreter. Both together give complete
21
+ // trace. This function allows lite interpreter to rethrow with debug
22
+ // handle it has for CallMethod.
23
+ // 2. Another scenarios is when lite interperter can make function calls or
24
+ // the lowered backend also has function call ability. Thus we have
25
+ // multiple function frames. Now we need a stack of handles to symbolicate
26
+ // entire stack trace.
27
+ void pushDebugHandle(int64_t debug_handle) {
28
+ debug_handles.push_back(debug_handle);
29
+ }
30
+ const std::vector<int64_t>& getDebugHandles() {
31
+ return debug_handles;
32
+ }
33
+
34
+ private:
35
+ // Stores stack of debug handles.
36
+ std::vector<int64_t> debug_handles;
37
+ };
38
+
39
+ } // namespace c10
40
+ #define TORCH_DELEGATED_BACKEND_THROW(cond, msg, debug_handle) \
41
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
42
+ throw ::c10::BackendRuntimeException( \
43
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
44
+ msg, \
45
+ debug_handle); \
46
+ }
47
+
48
+ #define TORCH_DELEGATED_BACKEND_RETHROW(e, debug_handle) \
49
+ do { \
50
+ e.pushDebugHandle(debug_handle); \
51
+ throw; \
52
+ } while (false)
53
+
54
+ #define DEBUG_HANDLE_UNKNOWN -1
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/python/pybind.h>
4
+ #include <torch/csrc/utils/pybind.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ // Initialize Python bindings for JIT to_<backend> functions.
9
+ void initJitBackendBindings(PyObject* module);
10
+ } // namespace jit
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/custom_class.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Interface for a JIT backend.
9
+ class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
10
+ public:
11
+ PyTorchBackendInterface() noexcept;
12
+ ~PyTorchBackendInterface() override;
13
+
14
+ // Returns true if the backend is available to process delegation calls.
15
+ virtual bool is_available() = 0;
16
+
17
+ // Compile the module contained in \p processed using the details provided in
18
+ // \p method_compile_spec for each module method that should be compiled for
19
+ // the backend. \p method_compile_spec should be of type Dict<string, Any>.
20
+ // \returns a dictionary of type Dict<string, Any> that contains a backend
21
+ // handle each method that can run on the backend (i.e. each key in \p
22
+ // method_compile_spec).
23
+ virtual c10::impl::GenericDict compile(
24
+ c10::IValue processed,
25
+ c10::impl::GenericDict method_compile_spec) = 0;
26
+
27
+ // Execute the method specified by \p handle using \p inputs. \returns the
28
+ // outputs as a tuple.
29
+ virtual c10::impl::GenericList execute(
30
+ c10::IValue handle,
31
+ c10::impl::GenericList inputs) = 0;
32
+ };
33
+ } // namespace jit
34
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/backends/backend_detail.h>
4
+ namespace torch {
5
+ namespace jit {
6
+ class backend_preprocess_register {
7
+ std::string backend_name_;
8
+
9
+ public:
10
+ backend_preprocess_register(
11
+ const std::string& name,
12
+ const detail::BackendPreprocessFunction& preprocess)
13
+ : backend_name_(name) {
14
+ detail::registerBackendPreprocessFunction(name, preprocess);
15
+ }
16
+ };
17
+ } // namespace jit
18
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/frontend/resolver.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ // Create a Resolver for use in generating LoweredModules for specific backends.
8
+ TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
9
+ } // namespace jit
10
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/pass_manager.h>
6
+ #include <torch/csrc/jit/runtime/profiling_record.h>
7
+
8
+ /*
9
+ * This file contains APIs for cuda fuser;
10
+ *
11
+ * We use an empty static struct to hold the function pointers, which are
12
+ * registered separately. This is to support cpu-only compilation.
13
+ * Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp
14
+ */
15
+
16
+ namespace torch {
17
+ namespace jit {
18
+ namespace fuser {
19
+ namespace cuda {
20
+
21
+ TORCH_API std::atomic<bool>& getCudaFusionGuardMode();
22
+
23
+ TORCH_API bool getSingletonFusion();
24
+ TORCH_API bool setSingletonFusion(bool value);
25
+ TORCH_API bool getHorizontalFusion();
26
+ TORCH_API bool setHorizontalFusion(bool value);
27
+
28
+ // dummy struct to allow API registration
29
+ struct CudaFuserInterface {
30
+ void (*fn_compile_n)(Node*) = nullptr;
31
+ void (*fn_run_n_s)(const Node*, Stack&) = nullptr;
32
+ void (*fn_fuse_graph)(std::shared_ptr<Graph>&) = nullptr;
33
+ bool (*fn_can_fuse_n)(const Node*) = nullptr;
34
+ void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr;
35
+ bool (*fn_profile_n)(const Node*) = nullptr;
36
+ bool (*fn_skip_n)(const std::string&, bool flip) = nullptr;
37
+ };
38
+
39
+ // Get interface, this is used by registration and user facing API internally
40
+ TORCH_API CudaFuserInterface* getFuserInterface();
41
+
42
+ TORCH_API void compileFusionGroup(Node* fusion_node);
43
+ TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack);
44
+ TORCH_API void fuseGraph(std::shared_ptr<Graph>&);
45
+ TORCH_API bool canFuseNode(const Node* node);
46
+ TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr);
47
+ TORCH_API bool profileNode(const Node* node);
48
+
49
+ TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true);
50
+
51
+ TORCH_API bool isEnabled();
52
+ TORCH_API bool setEnabled(bool is_enabled);
53
+ TORCH_API bool canBeEnabled();
54
+
55
+ } // namespace cuda
56
+ } // namespace fuser
57
+ } // namespace jit
58
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API const std::vector<Function*>& getAllBuiltinFunctionsFor(Symbol name);
10
+ } // namespace jit
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <memory>
3
+
4
+ #include <torch/csrc/Export.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct Graph;
10
+
11
+ // Transforms loops so that they can be represented as python
12
+ // for or while loops
13
+ TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/jit/python/pybind_utils.h>
6
+ #include <memory>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT };
14
+ class ConcreteModuleType;
15
+
16
+ // You can think of an nn.Module as a template that corresponds to a family of
17
+ // JIT types. The template "arguments" are things like the constant values.
18
+ // e.g.
19
+ // class M(nn.Module):
20
+ // __constants__ = ["const"]
21
+ // ...
22
+ //
23
+ // Is similar to writing the following in C++:
24
+ //
25
+ // template<TConst>
26
+ // class M {
27
+ // ...
28
+ // }
29
+ //
30
+ // We need to consider each different member of the type family a different JIT
31
+ // type because, e.g. different constant values lead to different versions of
32
+ // the same method.
33
+ //
34
+ // ConcreteModuleType corresponds to a single member of the type family, with
35
+ // all template arguments fully specified. Two Modules that share a
36
+ // ConcreteModuleType can share a JIT type, and vice versa.
37
+ //
38
+ // Why not just use a JIT type to represent concrete types? Because constants,
39
+ // function attributes, etc. are currently not representable in the type system,
40
+ // so this acts a non-first-class way of tracking concrete types.
41
+ //
42
+ // ConcreteModuleType is also the source of truth for servicing all
43
+ // ModuleValue::attr calls. This is so we can guarantee that if two Module's
44
+ // share a JIT type (and thus a ConcreteModuleType), then they behave the same
45
+ // way when you access attributes on them.
46
+
47
+ // ConcreteModuleType has two phases.
48
+ // 1. Creation: First we build it up, during the ScriptModule conversion
49
+ // process. This is represented by ConcreteModuleTypeBuilder.
50
+ // ...then the converter calls ConcreteModuleTypeBuilder::build(), producing
51
+ // a
52
+ // ConcreteModuleType ready for querying.
53
+ // 2. Querying: We use ConcreteModuleType as a source of truth for
54
+ // ModuleValue::attr calls during method compilation.
55
+
56
+ // Represents a concrete type during in the process for construction. We use
57
+ // this to decide whether we can share types between modules.
58
+ class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder {
59
+ public:
60
+ explicit ConcreteModuleTypeBuilder(py::object pyClass) {
61
+ TORCH_INTERNAL_ASSERT(pyClass);
62
+ pyClass_ = std::move(pyClass);
63
+ }
64
+
65
+ void addConstant(std::string name, py::object value);
66
+ void addConstant(std::string name, IValue value);
67
+ void addAttribute(
68
+ std::string name,
69
+ const TypePtr& type,
70
+ bool isParameter,
71
+ bool isBuffer);
72
+ void addFunctionAttribute(
73
+ std::string name,
74
+ const TypePtr& type,
75
+ py::object pyFunction);
76
+
77
+ void addModule(std::string name, std::shared_ptr<ConcreteModuleType> meta);
78
+
79
+ void addForwardHook(py::object hook);
80
+ void addForwardPreHook(py::object pre_hook);
81
+
82
+ void addOverload(
83
+ std::string methodName,
84
+ std::vector<std::string> overloadedMethodNames);
85
+ void addBuiltinFunction(std::string name, const std::string& symbol_name);
86
+ void addFailedAttribute(std::string name, std::string failureReason);
87
+ void addIgnoredAttribute(std::string name);
88
+ void setIterableModuleKind(IterableModuleKind kind);
89
+
90
+ // If a ConcreteModuleType is poisoned, it will never compare equal to any
91
+ // other concrete type
92
+ void setPoisoned();
93
+
94
+ std::shared_ptr<ConcreteModuleType> build() const {
95
+ return std::make_shared<ConcreteModuleType>(*this);
96
+ }
97
+
98
+ // This determines whether two modules can share a type. The container structs
99
+ // used by ConcreteModuleType have been defined such that operator==
100
+ // implements a meaningful comparison in that context.
101
+ bool equals(const ConcreteModuleTypeBuilder& other) const;
102
+
103
+ struct FunctionAttribute {
104
+ FunctionTypePtr function_;
105
+ py::object pyFunction_;
106
+
107
+ friend bool operator==(
108
+ const FunctionAttribute& lhs,
109
+ const FunctionAttribute& rhs) {
110
+ // Functions are not first class, so we can't do type comparison like a
111
+ // regular attribute. So we do a pointer equality check on the actual
112
+ // Python function object.
113
+ return lhs.pyFunction_.is(rhs.pyFunction_);
114
+ }
115
+ };
116
+
117
+ struct Attribute {
118
+ Attribute(TypePtr type, bool isParam, bool isBuffer)
119
+ : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {}
120
+
121
+ friend bool operator==(const Attribute& lhs, const Attribute& rhs) {
122
+ return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_;
123
+ }
124
+ TypePtr type_;
125
+ bool isParam_;
126
+ bool isBuffer_;
127
+ };
128
+
129
+ struct ModuleInfo {
130
+ ModuleInfo(std::string name, std::shared_ptr<ConcreteModuleType> meta)
131
+ : name_(std::move(name)), meta_(std::move(meta)) {}
132
+
133
+ friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs);
134
+
135
+ std::string name_;
136
+ std::shared_ptr<ConcreteModuleType> meta_;
137
+ };
138
+
139
+ private:
140
+ ConcreteModuleTypeBuilder() = default;
141
+ ClassTypePtr createTypeFromThis() const;
142
+
143
+ // If true, this type will never compare equally to anything else. This is
144
+ // used if we want to ensure that this type is not shared (for example, if it
145
+ // came from a traced module)
146
+ bool isPoisoned_ = false;
147
+
148
+ // The value of any constants defined by the module.
149
+ std::unordered_map<std::string, IValue> constants_;
150
+ // The types of any attributes
151
+ OrderedDict<std::string, Attribute> attributes_;
152
+ // Overloads, in the same format as `__overloads__` in Python
153
+ std::unordered_map<std::string, std::vector<std::string>> overloads_;
154
+ // Any attributes we failed to convert to TorchScript, along with a hint as to
155
+ // why
156
+ std::unordered_map<std::string, std::string> failedAttributes_;
157
+ // Any attributes that were marked as ignored. They cannot be used in
158
+ // TorchScript but can still be used in ignored function in Python.
159
+ std::unordered_set<std::string> ignoredAttributes_;
160
+ // Any function attributes. These are special right now because functions are
161
+ // not first-class in the type system.
162
+ std::unordered_map<std::string, FunctionAttribute> functionAttributes_;
163
+ // Function attributes that are calls to builtin functions. These get
164
+ // de-sugared directly into the corresponding aten:: call. The map is
165
+ // attribute name -> aten symbol name
166
+ std::unordered_map<std::string, c10::Symbol> builtinFunctions_;
167
+ // The concrete types of any submodules
168
+ std::vector<ModuleInfo> modules_;
169
+ // Hooks to be called before/after forward when the module
170
+ // is called directly. Used to ensure modules have different types
171
+ // when they have different python hooks
172
+ // Actual hooks are added to ClassType directly during compilation
173
+ std::vector<py::object> forwardHooks_;
174
+ std::vector<py::object> forwardPreHooks_;
175
+
176
+ // If something is a ModuleDict/ModuleList, it means:
177
+ // 1. The order of the submodules matters for comparing the type
178
+ // 2. The compiler is allowed to treat it like a dict/tuple
179
+ IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE;
180
+
181
+ // The original `nn.Module` class that we derived this ScriptModule from.
182
+ py::object pyClass_;
183
+
184
+ // NOTE: If you ever add any more state to this struct, you need to make sure
185
+ // operator== still makes sense!
186
+ friend ConcreteModuleType;
187
+ };
188
+
189
+ // Represents a finalized concrete type, used to service ModuleValue::attr calls
190
+ // during method compilation.
191
+ class VISIBILITY_HIDDEN ConcreteModuleType {
192
+ public:
193
+ explicit ConcreteModuleType(ConcreteModuleTypeBuilder data);
194
+
195
+ static std::shared_ptr<ConcreteModuleType> fromJitType(TypePtr type);
196
+
197
+ TypePtr getJitType() const;
198
+ c10::optional<py::object> getPyClass() const;
199
+ IterableModuleKind getIterableModuleKind() const;
200
+ c10::optional<std::vector<std::string>> findOverloads(
201
+ const std::string& name) const;
202
+ c10::optional<Function*> findFunctionAttribute(const std::string& name) const;
203
+ c10::optional<c10::Symbol> findBuiltinFunction(const std::string& name) const;
204
+ std::shared_ptr<ConcreteModuleType> findSubmoduleConcreteType(
205
+ const std::string& name) const;
206
+ c10::optional<std::string> findFailedAttribute(const std::string& name) const;
207
+ bool isIgnoredAttribute(const std::string& name) const;
208
+
209
+ // These getters are only here to return things as types that can be
210
+ // automatically converted by pybind.
211
+ std::unordered_map<std::string, py::object> getConstantsPy() const;
212
+ std::unordered_map<std::string, std::pair<TypePtr, bool>> getAttributesPy()
213
+ const;
214
+ std::vector<std::pair<std::string, std::shared_ptr<ConcreteModuleType>>>
215
+ getModulesPy() const;
216
+
217
+ bool equals(const ConcreteModuleType& other) const {
218
+ if (jitType_ == other.jitType_) {
219
+ // If the computed types are the same, these modules can (obviously) share
220
+ // a type.
221
+ return true;
222
+ }
223
+
224
+ return data_.equals(other.data_);
225
+ }
226
+ bool equals(const ConcreteModuleTypeBuilder& other) const {
227
+ return data_.equals(other);
228
+ }
229
+
230
+ void dump() const;
231
+
232
+ private:
233
+ ConcreteModuleType() = default;
234
+
235
+ // The JIT type derived from this ConcreteModuleType.
236
+ ConcreteModuleTypeBuilder data_;
237
+ TypePtr jitType_;
238
+ };
239
+
240
+ } // namespace jit
241
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ // Convert a graph with Loads & Stores into SSA form
13
+ TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <cstddef>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API size_t ComputeEditDistance(
10
+ const char* word1,
11
+ const char* word2,
12
+ size_t maxEditDistance);
13
+
14
+ } // namespace jit
15
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/Optional.h>
4
+ #include <torch/csrc/jit/frontend/tree.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct Call {
10
+ std::string fn_name;
11
+ SourceRange caller_range;
12
+ };
13
+
14
+ struct TORCH_API ErrorReport : public std::exception {
15
+ ErrorReport(const ErrorReport& e);
16
+
17
+ explicit ErrorReport(SourceRange r);
18
+ explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
19
+ explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}
20
+
21
+ const char* what() const noexcept override;
22
+
23
+ struct TORCH_API CallStack {
24
+ // These functions are used to report why a function was being compiled
25
+ // (i.e. what was the call stack of user functions at compilation time that
26
+ // led to this error)
27
+ CallStack(const std::string& name, const SourceRange& range);
28
+ ~CallStack();
29
+
30
+ // Change the range that is relevant for the current function (i.e. after
31
+ // each successful expression compilation, change it to the next expression)
32
+ static void update_pending_range(const SourceRange& range);
33
+ };
34
+
35
+ static std::string current_call_stack();
36
+
37
+ private:
38
+ template <typename T>
39
+ friend const ErrorReport& operator<<(const ErrorReport& e, const T& t);
40
+
41
+ mutable std::stringstream ss;
42
+ OwnedSourceRange context;
43
+ mutable std::string the_message;
44
+ std::vector<Call> error_stack;
45
+ };
46
+
47
+ template <typename T>
48
+ const ErrorReport& operator<<(const ErrorReport& e, const T& t) {
49
+ e.ss << t;
50
+ return e;
51
+ }
52
+
53
+ } // namespace jit
54
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
13
+ TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/jit/api/module.h>
7
+ #include <torch/csrc/jit/frontend/error_report.h>
8
+ #include <torch/csrc/jit/frontend/resolver.h>
9
+ #include <torch/csrc/jit/frontend/sugared_value.h>
10
+ #include <torch/csrc/jit/frontend/tree_views.h>
11
+ #include <torch/csrc/jit/ir/ir.h>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ TORCH_API void runCleanupPasses(std::shared_ptr<Graph>& to_clean);
17
+
18
+ TORCH_API bool meaningfulName(const std::string& name);
19
+
20
+ } // namespace jit
21
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h ADDED
@@ -0,0 +1,576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <c10/util/C++17.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/jit/frontend/parser_constants.h>
7
+ #include <torch/csrc/jit/frontend/source_range.h>
8
+ #include <torch/csrc/jit/frontend/strtod.h>
9
+ #include <algorithm>
10
+ #include <clocale>
11
+ #include <cstdlib>
12
+ #include <memory>
13
+ #include <sstream>
14
+ #include <string>
15
+ #include <vector>
16
+
17
+ C10_CLANG_DIAGNOSTIC_PUSH()
18
+ #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
19
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
20
+ #endif
21
+
22
+ namespace torch {
23
+ namespace jit {
24
+
25
+ // single character tokens are just the character itself '+'
26
// multi-character tokens need an entry here
// if the third entry is not the empty string, it is used
// in the lexer to match this token.

// These kinds are also used in Tree.h as the kind of the AST node.
// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the
// lexer.

// X-macro table of every token kind: _(enum name, human-readable name,
// literal spelling). The spelling is inserted into the lexer trie when
// non-empty; kinds with an empty third column are produced only by the
// parser/AST, never matched directly from source text.
#define TC_FORALL_TOKEN_KINDS(_)                 \
  _(TK_EOF, "eof", "")                           \
  _(TK_WHITESPACE, "whitespace", "")             \
  _(TK_WHITESPACE_EOF, "whitespace_eof", "")     \
  _(TK_NUMBER, "number", "")                     \
  _(TK_NEWLINE, "newline", "")                   \
  _(TK_INDENT, "indent", "")                     \
  _(TK_DEDENT, "dedent", "")                     \
  _(TK_DEF, "def", "def")                        \
  _(TK_EQUIVALENT, "equivalent", "<=>")          \
  _(TK_IDENT, "ident", "")                       \
  _(TK_STRING, "string", "")                     \
  _(TK_STRINGLITERAL, "string_literal", "")      \
  _(TK_CONST, "const", "")                       \
  _(TK_LIST, "list", "")                         \
  _(TK_DICT, "dict", "")                         \
  _(TK_OPTION, "option", "")                     \
  _(TK_APPLY, "apply", "")                       \
  _(TK_COMPREHENSION, "comprehension", "")       \
  _(TK_RANGE_CONSTRAINT, "range_constraint", "") \
  _(TK_PARAM, "param", "")                       \
  _(TK_INFERRED, "inferred", "")                 \
  _(TK_ACCESS, "access", "")                     \
  _(TK_ASSIGN, "assign", "")                     \
  _(TK_AUG_ASSIGN, "aug_assign", "")             \
  _(TK_ATTRIBUTE, "attribute", "")               \
  _(TK_IF, "if", "if")                           \
  _(TK_ELSE, "else", "else")                     \
  _(TK_ELIF, "elif", "elif")                     \
  _(TK_WHILE, "while", "while")                  \
  _(TK_EXPR_STMT, "expression statement", "")    \
  _(TK_RETURN, "return", "return")               \
  _(TK_IS, "is", "is")                           \
  _(TK_ISNOT, "is not", "is not")                \
  _(TK_NE, "ne", "!=")                           \
  _(TK_EQ, "eq", "==")                           \
  _(TK_LE, "le", "<=")                           \
  _(TK_GE, "ge", ">=")                           \
  _(TK_FLOOR_DIV, "floordiv", "//")              \
  _(TK_IF_EXPR, "if", "")                        \
  _(TK_TRUE, "True", "True")                     \
  _(TK_FALSE, "False", "False")                  \
  _(TK_NONE, "None", "None")                     \
  _(TK_AND, "and", "and")                        \
  _(TK_OR, "or", "or")                           \
  _(TK_NOT, "not", "not")                        \
  _(TK_LSHIFT, "<<", "<<")                       \
  _(TK_RSHIFT, ">>", ">>")                       \
  _(TK_CAST, "cast", "")                         \
  _(TK_PLUS_EQ, "+=", "+=")                      \
  _(TK_MINUS_EQ, "-=", "-=")                     \
  _(TK_TIMES_EQ, "*=", "*=")                     \
  _(TK_DIV_EQ, "/=", "/=")                       \
  _(TK_MOD_EQ, "%=", "%=")                       \
  _(TK_BIT_OR_EQ, "|=", "|=")                    \
  _(TK_BIT_AND_EQ, "&=", "&=")                   \
  _(TK_BIT_XOR_EQ, "^=", "^=")                   \
  _(TK_LSHIFT_EQ, "<<=", "<<=")                  \
  _(TK_RSHIFT_EQ, ">>=", ">>=")                  \
  _(TK_POW_EQ, "**=", "**=")                     \
  _(TK_GLOBAL, "global", "global")               \
  _(TK_BUILT_IN, "built-in", "")                 \
  _(TK_SUBSCRIPT, "subscript", "")               \
  _(TK_VAR, "variable", "")                      \
  _(TK_NOTHING, "nothing", "")                   \
  _(TK_DICT_LITERAL, "dict-literal", "")         \
  _(TK_LIST_LITERAL, "list-literal", "")         \
  _(TK_TUPLE_LITERAL, "tuple-literal", "")       \
  _(TK_FOR, "for", "for")                        \
  _(TK_IN, "in", "in")                           \
  _(TK_NOTIN, "not in", "not in")                \
  _(TK_STARRED, "starred", "")                   \
  _(TK_UNARY_MINUS, "unary minus", "")           \
  _(TK_POW, "pow operator", "**")                \
  _(TK_ARROW, "arrow", "->")                     \
  _(TK_DECL, "decl", "")                         \
  _(TK_SLICE_EXPR, "slice expr", "")             \
  _(TK_TYPE_COMMENT, "type comment", "# type:")  \
  _(TK_RAISE, "raise", "raise")                  \
  _(TK_ASSERT, "assert", "assert")               \
  _(TK_DOTS, "dots", "...")                      \
  _(TK_LIST_COMP, "list comprehension", "")      \
  _(TK_DICT_COMP, "dict comprehension", "")      \
  _(TK_BREAK, "break", "break")                  \
  _(TK_CONTINUE, "continue", "continue")         \
  _(TK_DELETE, "del", "del")                     \
  _(TK_PASS, "pass", "pass")                     \
  _(TK_CLASS_DEF, "class", "class")              \
  _(TK_IMPORT, "import", "import")               \
  _(TK_WITH, "with", "with")                     \
  _(TK_WITH_ITEM, "withitem", "")                \
  _(TK_AS, "as", "as")                           \
  _(TK_PROP, "property", "")                     \
  _(TK_ELLIPSIS, "Ellipsis", "Ellipsis")         \
  _(TK_NONE_TYPE, "NoneType", "NoneType")
129
+
130
// Integer identifiers for token kinds. Single-character tokens use the
// character's own value as the kind (e.g. '(' has kind '('), so the
// multi-character kinds start past the valid character range.
enum TokenKind {
  // we use characters to represent themselves so skip all valid characters
  // before
  // assigning enum values to multi-char tokens.
  TK_DUMMY_START = 256,
#define DEFINE_TOKEN(tok, _, _2) tok,
  TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN)
#undef DEFINE_TOKEN
};
139
+
140
+ TORCH_API std::string kindToString(int kind);
141
+ TORCH_API int stringToKind(const std::string& str);
142
+
143
+ // nested hash tables that indicate char-by-char what is a valid token.
144
+ struct TokenTrie;
145
+ using TokenTrieRef = std::unique_ptr<TokenTrie>;
146
+ struct TokenTrie {
147
+ TokenTrie() : kind(0) {}
148
+ void insert(const char* str, int tok) {
149
+ if (*str == '\0') {
150
+ AT_ASSERT(kind == 0);
151
+ kind = tok;
152
+ return;
153
+ }
154
+
155
+ for (size_t i = 0, e = child_chars.size(); i < e; ++i) {
156
+ if (child_chars[i] == *str) {
157
+ child_tries[i]->insert(str + 1, tok);
158
+ return;
159
+ }
160
+ }
161
+
162
+ child_chars.emplace_back(*str);
163
+ child_tries.emplace_back(std::make_unique<TokenTrie>());
164
+ child_tries.back()->insert(str + 1, tok);
165
+ }
166
+ int kind; // 0 == invalid token
167
+
168
+ std::vector<char> child_chars;
169
+ std::vector<TokenTrieRef> child_tries;
170
+ };
171
+
172
// stuff that is shared against all TC lexers/parsers and is initialized only
// once.
// Holds the token trie built from TC_FORALL_TOKEN_KINDS plus the raw
// character-level matching routines used by Lexer.
struct TORCH_API SharedParserData {
  // Builds the trie: first every single-character token, then every
  // multi-character token with a non-empty spelling.
  SharedParserData() : head(new TokenTrie()) {
    std::stringstream ss; // NOTE(review): unused in this constructor
    for (const char* c = valid_single_char_tokens; *c; c++) {
      std::string str(1, *c);
      head->insert(str.c_str(), *c);
    }

#define ADD_CASE(tok, _, tokstring) \
  if (*(tokstring) != '\0') { \
    head->insert((tokstring), (tok)); \
  }
    TC_FORALL_TOKEN_KINDS(ADD_CASE)
#undef ADD_CASE
  }

  // Scan one token starting at `pos`. On success sets *kind and the
  // [*start, *end) iterator range of the token's text and returns true.
  // Skips blanks and (non-type) comments first; recursion handles runs of
  // comments/continuations. When `whitespace_token` is set, leading
  // whitespace itself is reported as TK_WHITESPACE / TK_WHITESPACE_EOF.
  bool match(
      StringCordView::Iterator pos,
      bool continuation, // are we inside a scope where newlines don't count
                         // (e.g. inside parens)
      bool whitespace_token, // should we treat whitespace as a token
      int* kind,
      StringCordView::Iterator* start,
      StringCordView::Iterator* end) {
    *start = pos;
    // skip whitespace
    while (pos.has_next() && isblank(*pos)) {
      ++pos;
    }

    // special handling
    if (pos.has_next()) {
      if (*pos == '#' && !isTypeComment(pos)) {
        // skip comments
        while (pos.has_next() && *pos != '\n')
          ++pos;
        // tail call, handle whitespace and more comments
        return match(pos, continuation, whitespace_token, kind, start, end);
      }
      if (*pos == '\\') {
        // backslash-newline is a line continuation: restart after it
        auto newiter = pos;
        ++newiter;
        if (newiter.has_next() && *newiter == '\n' && !whitespace_token) {
          ++newiter;
          return match(newiter, continuation, false, kind, start, end);
        }
      }
      if (*pos == '\n') {
        // newlines become whitespace tokens only outside of brackets
        return match(++pos, continuation, !continuation, kind, start, end);
      }
    }
    // we handle white space before EOF because in the case we have something
    // like the following where we need to generate the dedent token if foo:
    //   ...
    // else:
    //   pass
    if (whitespace_token) {
      *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE;
      *end = pos;
      return true;
    }
    if (!pos.has_next()) {
      *kind = TK_EOF;
      *start = pos;
      *end = *start;
      return true;
    }
    // invariant: the next token is not whitespace or newline
    *start = pos;
    // check for a valid number
    size_t len;
    if (isNumber(pos.rest_line(), 0, &len)) {
      *end = *start;
      *end += len;
      *kind = TK_NUMBER;
      return true;
    }
    // check for string
    if (isString(pos.rest_line(), 0, &len)) {
      *kind = TK_STRINGLITERAL;
      *end = *start;
      *end += len;
      return true;
    }

    // check for either an ident or a token
    // ident tracks whether what we have scanned so far could be an identifier
    // matched indicates if we have found any match.
    bool matched = false;
    bool ident = true;
    TokenTrie* cur = head.get();
    // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr);
    // i++)
    for (size_t i = 0; pos.has_next() && (ident || cur != nullptr);
         ++pos, ++i) {
      ident = ident && validIdent(i, *pos);
      if (ident) {
        matched = true;
        *end = pos.next_iter();
        *kind = TK_IDENT;
      }
      // check for token second, so that e.g. 'max' matches the token TK_MAX
      // rather the
      // identifier 'max'
      if (cur) {
        const auto begin_it = cur->child_chars.begin();
        const auto end_it = cur->child_chars.end();
        const auto ch_it = std::find(begin_it, end_it, *pos);

        cur = (ch_it == end_it) ? nullptr
                                : cur->child_tries[ch_it - begin_it].get();

        if (cur && cur->kind != 0) {
          // a complete token spelling ends here; longest match wins since
          // later iterations may overwrite *end/*kind
          matched = true;
          *end = pos.next_iter();
          *kind = cur->kind;
        }
      }
    }
    return matched;
  }

  bool isUnary(int kind, int* prec);
  bool isBinary(int kind, int* prec);
  // Right-associative operators: ternary '?', power, and conditional
  // expressions.
  bool isRightAssociative(int kind) {
    switch (kind) {
      case '?':
      case TK_POW:
      case TK_IF:
        return true;
      default:
        return false;
    }
  }

 private:
  // A character may continue an identifier; digits are allowed except at
  // position 0.
  bool validIdent(size_t i, char n) {
    return isalpha(n) || n == '_' || (i > 0 && isdigit(n));
  }

  // 1. skip whitespace
  // 2. handle comment or newline
  //
  // Returns true (and sets *len) when str[start...] begins a numeric literal,
  // including a trailing 'j' for complex numbers.
  bool isNumber(c10::string_view str, size_t start, size_t* len) {
    char first = str[start];
    // strtod allows numbers to start with + or - or nan or inf
    // http://en.cppreference.com/w/cpp/string/byte/strtof
    // but we want only the number part, otherwise 1+3 will turn into two
    // adjacent numbers in the lexer
    if (first == '-' || first == '+' || isalpha(first))
      return false;
    const char* startptr = str.data() + start;
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    char* endptr;
    torch::jit::strtod_c(startptr, &endptr);
    *len = endptr - startptr;
    // check if the number is complex valued
    // access is safe because string is assumed to be null terminated
    if (endptr != nullptr && *endptr == 'j') {
      *len += 1;
    }
    return *len > 0;
  }

  // True iff the `len` characters starting at `start` are all `c` and lie
  // within the string.
  bool isCharCount(char c, c10::string_view str, size_t start, int len) {
    // count checks from [start, start + len)
    return start + len <= str.size() &&
        std::count(str.begin() + start, str.begin() + start + len, c) == len;
  }

  // python concatenates all adjacent strings "a" "b" == "ab"
  // strings can be enclosed with 1 or 3 single or double quotes
  // if enclosed with 3 quotes newlines are valid
  // as elsewhere, backslash and new line should be ignored
  bool isString(c10::string_view str, size_t start, size_t* len) {
    char quote = str[start];
    if (quote != '\"' && quote != '\'')
      return false;
    int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1;

    // end is now set past the opening quotation marks
    size_t end = start + quote_len;
    while (end < str.size() && !isCharCount(quote, str, end, quote_len)) {
      if (str[end] == '\n' && quote_len != 3) {
        return false;
      }
      // handle escaped characters. advances past escaped quotation marks,
      // escaped newlines and escaped backslashes
      // multi-char escapes like \x1A are handled fine here because the
      // remainder of the escape are valid string characters anyway
      if (str[end] == '\\') {
        end++;
      }
      end++;
    }
    // set length equal to the complete string including quotations
    *len = end - start + quote_len;
    // if end finished without going past the last character of the string than
    // there is a match
    return end < str.size();
  }

  // Whitespace that is not a newline (newlines have lexical significance).
  bool isblank(int n) {
    return isspace(n) && n != '\n';
  }

  // True when the rest of the current line starts with "# type:" — such
  // comments are lexed as TK_TYPE_COMMENT instead of being skipped.
  bool isTypeComment(StringCordView::Iterator str_iter) {
    c10::string_view rest_line = str_iter.rest_line();
    const std::string type_string = "# type:";
    if (rest_line.size() < type_string.length()) {
      return false;
    }
    auto match_string = rest_line.substr(0, type_string.size());
    return match_string == type_string;
  }

  // Make an exception ignoring comments for type annotation comments
  bool isTypeComment(StringCordView str, size_t pos) {
    const std::string type_string = "# type:";
    if (str.size() < pos + type_string.length()) {
      return false;
    }
    auto match_string = str.substr(pos, type_string.size());
    return match_string == type_string;
  }

  // Root of the token-spelling trie built in the constructor.
  TokenTrieRef head;
};
402
+
403
+ TORCH_API SharedParserData& sharedParserData();
404
+
405
// A single lexed token: its kind (a TokenKind or a raw character value)
// plus the source range it was lexed from.
struct Token {
  int kind;
  SourceRange range;
  Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {}
  // The token's text exactly as it appears in the source.
  std::string text() {
    return std::string(range.token_text());
  }
  // Human-readable name for the token kind (see kindToString).
  std::string kindString() const {
    return kindToString(kind);
  }
};
416
+
417
// Python-style lexer over a Source. Produces a stream of Tokens, synthesizing
// TK_INDENT/TK_DEDENT/TK_NEWLINE from whitespace and suppressing newlines
// inside bracket nesting. Errors are reported by throwing std::runtime_error.
struct Lexer {
  explicit Lexer(std::shared_ptr<Source> source)
      : source(std::move(source)),
        pos(0),
        nesting(0),
        indent_stack(),
        next_tokens(),
        shared(sharedParserData()) {
    // the initial indentation level of the text establishes the baseline
    auto first_indent = lexRaw(true);
    indent_stack.push_back(first_indent.range.size());
    lex();
  }
  // Return the current token, and then move to the next one
  Token next() {
    if (next_tokens.empty())
      reportError("Lexer invariant violated: empty token queue");
    Token r = std::move(next_tokens.front());
    next_tokens.erase(next_tokens.begin());
    if (next_tokens.empty()) {
      lex();
    }
    return r;
  }
  // Skip the current token if it matches the given kind
  bool nextIf(int kind) {
    if (cur().kind != kind)
      return false;
    next();
    return true;
  }

  // Raise an error highlighting the current token's source range.
  [[noreturn]] void reportError(const std::string& what) {
    reportError(what, cur());
  }
  [[noreturn]] void reportError(const std::string& what, const Token& t) {
    std::stringstream ss;
    ss << what << ":\n";
    t.range.highlight(ss);
    throw std::runtime_error(ss.str());
  }
  // Raise an "expected X but found Y" error highlighting token `t`.
  [[noreturn]] void expected(const std::string& what, const Token& t) {
    std::stringstream ss;
    ss << "expected " << what << " but found '" << t.kindString()
       << "' here:\n";
    t.range.highlight(ss);
    throw std::runtime_error(ss.str());
  }
  [[noreturn]] void expected(const std::string& what) {
    expected(what, cur());
  }
  // Check that the current token has a given kind, return the current token,
  // and advance to the next one.
  Token expect(int kind) {
    if (cur().kind != kind) {
      expected(kindToString(kind));
    }
    return next();
  }
  // One-token lookahead past the current token.
  Token& lookahead() {
    if (next_tokens.size() < 2) {
      lex();
    }
    return next_tokens[1];
  }
  // The current (front-of-queue) token.
  Token& cur() {
    return next_tokens.front();
  }

 private:
  // Lex one raw token, then post-process it: track bracket nesting and turn
  // whitespace tokens into INDENT/NEWLINE/DEDENT(s) using indent_stack.
  void lex() {
    auto r = lexRaw();
    switch (r.kind) {
      case '(':
      case '[':
      case '{':
        nesting++;
        break;
      case ')':
      case ']':
      case '}':
        nesting--;
        break;
      case TK_WHITESPACE:
      case TK_WHITESPACE_EOF: {
        const auto depth = static_cast<int64_t>(
            r.kind == TK_WHITESPACE_EOF ? indent_stack.front()
                                        : r.range.size());
        // note: TK_WHITESPACE_EOF is whitespace right before the EOF token
        // just like we allow the code to be indented to a particular initial
        // indent level, we allow the final indent to be anything and set
        // it back to the initial indent level. This allows the code to be
        // put into string literals inside code without worrying about final
        // whitespace
        if (depth > indent_stack.back()) {
          indent_stack.push_back(depth);
          r.kind = TK_INDENT;
        } else if (depth == indent_stack.back()) {
          r.kind = TK_NEWLINE;
        } else {
          // dedenting may pop several levels; emit one DEDENT per level
          next_tokens.emplace_back(TK_NEWLINE, r.range);
          while (indent_stack.back() != depth) {
            indent_stack.pop_back();
            next_tokens.emplace_back(TK_DEDENT, r.range);
            if (indent_stack.empty()) {
              // depth did not match any enclosing indentation level
              reportError("invalid indent level " + std::to_string(depth), r);
            }
          }
          return; // We've already queued the tokens
        }
      } break;
      default:
        break;
    }
    next_tokens.push_back(std::move(r));
  }
  // Lex a single token directly from the source text via
  // SharedParserData::match, advancing the cursor. With `whitespace_token`
  // set, leading whitespace itself is returned as a token.
  Token lexRaw(bool whitespace_token = false) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    int kind;
    AT_ASSERT(source);
    if (current == nullptr) {
      // lazily create the cursor on first use
      AT_ASSERT(pos == 0);
      current = std::make_unique<StringCordView::Iterator>(
          source->text_str().begin());
    }

    StringCordView::Iterator start_iter = *current;
    StringCordView::Iterator end_iter = *current;
    if (!shared.match(
            *current,
            nesting > 0,
            whitespace_token,
            &kind,
            &start_iter,
            &end_iter)) {
      expected(
          "a valid token",
          Token(
              **current,
              SourceRange(source, start_iter, start_iter.pos() + 1)));
    }

    auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos()));
    pos = end_iter.pos();
    *current = end_iter;
    return t;
  }

  std::shared_ptr<Source> source;
  // cursor into the source text; nullptr until the first lexRaw call
  std::unique_ptr<StringCordView::Iterator> current;
  size_t pos; // absolute offset of the cursor within the source
  size_t nesting; // depth of ( [ { nesting...
  std::vector<int> indent_stack; // stack of indentation level of blocks
  // Invariant: this should always contain at least a single element
  std::vector<Token> next_tokens;
  SharedParserData& shared;
};
573
+ } // namespace jit
574
+ } // namespace torch
575
+
576
+ C10_CLANG_DIAGNOSTIC_POP()
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ // Simple data structure for containing a type T in nested control blocks
10
+ // Should only be used after initial compilation where type checking and
11
+ // loads and stores are emitted
12
+
13
+ template <typename T>
14
+ struct MiniEnvironment {
15
+ MiniEnvironment(Block* b, std::shared_ptr<MiniEnvironment> next = nullptr)
16
+ : next(std::move(next)) {}
17
+
18
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
19
+ std::shared_ptr<MiniEnvironment<T>> next;
20
+
21
+ T findInThisFrame(const std::string& name) {
22
+ auto it = table.find(name);
23
+ if (it != table.end()) {
24
+ return it->second;
25
+ }
26
+ return nullptr;
27
+ }
28
+
29
+ T findInAnyFrame(const std::string& name) {
30
+ for (auto runner = this; runner; runner = runner->next.get()) {
31
+ if (auto r = runner->findInThisFrame(name)) {
32
+ return r;
33
+ }
34
+ }
35
+ return nullptr;
36
+ }
37
+
38
+ void setVar(const std::string& name, T value) {
39
+ table[name] = value;
40
+ }
41
+
42
+ std::vector<std::string> definedVariables() {
43
+ std::vector<std::string> result;
44
+ result.reserve(table.size());
45
+ for (auto& kv : table) {
46
+ result.push_back(kv.first);
47
+ }
48
+ std::sort(result.begin(), result.end());
49
+ return result;
50
+ }
51
+
52
+ private:
53
+ std::unordered_map<std::string, T> table;
54
+ };
55
+
56
+ } // namespace jit
57
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/lexer.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
// True iff the range [start, start + len) lies within `str` and every
// character in it equals `c` (e.g. detecting a run of three quotes).
inline bool isCharCount(char c, const std::string& str, size_t start, int len) {
  if (start + len > str.size()) {
    return false;
  }
  const auto first = str.begin() + start;
  return std::all_of(first, first + len, [c](char ch) { return ch == c; });
}
14
+
15
+ inline c10::optional<char> parseOctal(const std::string& str, size_t pos) {
16
+ //\xxx where x are 0-7
17
+ if (pos + 3 >= str.size())
18
+ return c10::nullopt;
19
+ size_t c = 0;
20
+ for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) {
21
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
22
+ int d = str[pos + i];
23
+ if (d < '0' || d > '7')
24
+ return c10::nullopt;
25
+ c += b * (d - '0');
26
+ }
27
+ if (c >= 256)
28
+ return c10::nullopt;
29
+ return c;
30
+ }
31
+
32
// Decode a quoted (single- or triple-quoted) string literal into its runtime
// value, processing backslash escapes. `str` must include the surrounding
// quotes; `range` is used only for error reporting. Throws ErrorReport for
// hex/unicode escapes (unsupported) and malformed octal escapes.
inline std::string parseStringLiteral(
    const SourceRange& range,
    const std::string& str) {
  // strip the 1 or 3 quote characters from each end
  int quote_len = isCharCount(str[0], str, 0, 3) ? 3 : 1;
  auto ret_str = str.substr(quote_len, str.size() - quote_len * 2);
  size_t pos = ret_str.find('\\');
  while (pos != std::string::npos) {
    // invariant: pos has to escape a character because it is a valid string
    char c = ret_str[pos + 1];
    size_t to_erase = 2;
    switch (ret_str[pos + 1]) {
      case '\\':
      case '\'':
      case '\"':
      case '\n':
        // the escaped character stands for itself
        break;
      case 'a':
        c = '\a';
        break;
      case 'b':
        c = '\b';
        break;
      case 'f':
        c = '\f';
        break;
      case 'n':
        c = '\n';
        break;
      case 'v':
        c = '\v';
        break;
      case 't':
        c = '\t';
        break;
      case 'x':
        throw ErrorReport(range) << "unsupported hex specifier";
      case 'u':
      case 'U':
        throw ErrorReport(range) << "unsupported unicode specifier";
      default:
        // octal value in format \nnn, n is [0-7]
        if (auto v = parseOctal(ret_str, pos)) {
          to_erase = 4; // backslash plus three octal digits
          c = *v;
        } else {
          throw ErrorReport(range) << " ill formed octal specifier";
        }
    }
    // replace the whole escape sequence with the single decoded character
    ret_str.replace(pos, to_erase, /* num copies */ 1, c);
    pos = ret_str.find('\\', pos + 1);
  }
  return ret_str;
}
85
+
86
+ } // namespace jit
87
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
// Characters that form valid one-character tokens on their own; `constexpr`
// makes the pointer a compile-time constant (it was a runtime-initialized
// `const` before).
static constexpr const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
6
+ } // namespace jit
7
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <ATen/core/qualified_name.h>
5
+ #include <torch/csrc/jit/frontend/sugared_value.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Resolver;
11
+ using ResolverPtr = std::shared_ptr<Resolver>;
12
+
13
+ /**
14
+ * class Resolver
15
+ *
16
+ * Represents an "outer environment" in which we an look up names and return
17
+ * a corresponding SugaredValue. This is used during compilation to resolve
18
+ * references to names which are not defined internal to the graph.
19
+ *
20
+ * Example: PythonResolver looks at the enclosing Python scope for `name`.
21
+ *
22
+ * NOTE: When adding methods, keep this an abstract class (i.e. all new methods
23
+ * should be purely virtual). Resist the urge to provide a default
24
+ * implementation; you should explicitly think about how each resolver would
25
+ * handle the method.
26
+ */
27
+ struct Resolver {
28
+ virtual ~Resolver() = default;
29
+
30
+ // Resolve a given name to a SugaredValue. This takes the method `m` that the
31
+ // caller is currently constructing, since we may need to insert nodes into
32
+ // the graph to create a value.
33
+ virtual std::shared_ptr<SugaredValue> resolveValue(
34
+ const std::string& name,
35
+ GraphFunction& m,
36
+ const SourceRange& loc) {
37
+ return nullptr;
38
+ }
39
+
40
+ // Resolve `name` to a TypePtr.
41
+ virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
42
+ return nullptr;
43
+ }
44
+ };
45
+
46
// A resolver that only understands "torch.foo()" lookups.
struct NativeResolver : public Resolver {
  // Maps the single name "torch" to the builtin "aten" operator namespace;
  // every other name is unresolved (nullptr).
  std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) override {
    if (name == "torch") {
      return std::make_shared<BuiltinModule>("aten");
    }
    return nullptr;
  }

  // Native resolution never produces types.
  TypePtr resolveType(const std::string& name, const SourceRange& loc)
      override {
    return nullptr;
  }
};
63
+
64
// Convenience factory returning a fresh NativeResolver.
inline std::shared_ptr<NativeResolver> nativeResolver() {
  return std::make_shared<NativeResolver>();
}
67
+ } // namespace jit
68
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/ir/named_value.h>
5
+
6
+ #include <ATen/core/function_schema.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ // Try to match a list of inputs and keyword 'attributes' to this
12
+ // schema. Return the flat list of positional inputs to the call or
13
+ // `c10::nullopt` on failure (`failure_messages` contains a good error
14
+ // report in this case)
15
+
16
// Result of successfully matching a call against a FunctionSchema
// (see matchSchema below).
struct MatchedSchema {
  // Flat list of positional inputs to the call, in schema order.
  std::vector<Value*> inputs;
  // Types of the schema's return values.
  std::vector<TypePtr> return_types;
  // Optional per-return field names — presumably set for named-tuple
  // returns; confirm against matchSchema's implementation.
  c10::OptNameList return_field_names;
  // Name of the matched schema.
  std::string schema_name;
};
22
+
23
+ TORCH_API bool isBlockListedSchema(const FunctionSchema& schema);
24
+
25
+ TORCH_API MatchedSchema matchSchema(
26
+ const ::c10::FunctionSchema& schema,
27
+ const SourceRange& loc,
28
+ Graph& graph,
29
+ at::ArrayRef<NamedValue> args,
30
+ at::ArrayRef<NamedValue> kwargs,
31
+ const c10::optional<NamedValue>& self = c10::nullopt);
32
+
33
+ TORCH_API std::pair<size_t, MatchedSchema> matchSchemas(
34
+ const std::vector<const ::c10::FunctionSchema*>& schemas,
35
+ const SourceRange& loc,
36
+ Graph& graph,
37
+ at::ArrayRef<NamedValue> args,
38
+ at::ArrayRef<NamedValue> kwargs,
39
+ const c10::optional<NamedValue>& self = c10::nullopt,
40
+ bool render_errors = false);
41
+
42
+ TORCH_API bool convertibleToList(
43
+ const TypePtr& type,
44
+ const TypePtr& list_type_);
45
+
46
+ TORCH_API std::string getFullSchemaName(const ::c10::FunctionSchema& schema);
47
+
48
+ TORCH_API Value* emitBuiltinCall(
49
+ const SourceRange& loc,
50
+ Graph& graph,
51
+ Symbol name,
52
+ at::ArrayRef<NamedValue> args,
53
+ at::ArrayRef<NamedValue> kwargs,
54
+ const c10::optional<NamedValue>& self = c10::nullopt);
55
+
56
+ TORCH_API c10::optional<size_t> findInputWithName(
57
+ const std::string& name,
58
+ at::ArrayRef<NamedValue> kwargs,
59
+ bool is_aten = false);
60
+
61
+ // applies implicit conversion from value trying to turn it into type
62
+ // concrete_type it succeeds if the return_value->isSubtypeOf(concrete_type)
63
+ TORCH_API Value* tryConvertToType(
64
+ const SourceRange& loc,
65
+ Graph& graph,
66
+ const TypePtr& concrete_type,
67
+ Value* value,
68
+ bool allow_conversions);
69
+ } // namespace jit
70
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/frontend/resolver.h>
5
+ #include <torch/csrc/jit/frontend/tree_views.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
/**
 * class ScriptTypeParser
 *
 * Parses expressions in our typed AST format (TreeView) into types and
 * typenames.
 */
class TORCH_API ScriptTypeParser {
 public:
  explicit ScriptTypeParser() = default;
  // `resolver` (optional) is consulted for names not handled natively.
  explicit ScriptTypeParser(ResolverPtr resolver)
      : resolver_(std::move(resolver)) {}

  // Parse a type from an AST expression (e.g. an annotation).
  c10::TypePtr parseTypeFromExpr(const Expr& expr) const;

  // Parse a BroadcastingList-style annotation, returning the element type
  // and the list length; nullopt when `expr` is not such an annotation.
  c10::optional<std::pair<c10::TypePtr, int32_t>> parseBroadcastList(
      const Expr& expr) const;

  // Parse a type from its string spelling.
  c10::TypePtr parseType(const std::string& str);

  // Build a FunctionSchema from a `def` AST node; `skip_self` drops the
  // first (self) parameter.
  FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self);

  // Evaluate a class-level constant assignment to an IValue.
  c10::IValue parseClassConstant(const Assign& assign);

 private:
  c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const;

  c10::optional<std::string> parseBaseTypeName(const Expr& expr) const;
  at::TypePtr subscriptToType(
      const std::string& typeName,
      const Subscript& subscript) const;
  std::vector<IValue> evaluateDefaults(
      const SourceRange& r,
      const std::vector<Expr>& default_types,
      const std::vector<Expr>& default_exprs);
  std::vector<Argument> parseArgsFromDecl(const Decl& decl, bool skip_self);

  std::vector<Argument> parseReturnFromDecl(const Decl& decl);

  ResolverPtr resolver_ = nullptr;

  // Need to use `evaluateDefaults` in serialization
  friend struct ConstantTableValue;
  friend struct SourceImporterImpl;
};
54
+ } // namespace jit
55
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <c10/util/Optional.h>
4
+
5
+ #include <algorithm>
6
+ #include <iterator>
7
+ #include <memory>
8
+ #include <numeric>
9
+ #include <ostream>
10
+ #include <regex>
11
+ #include <sstream>
12
+ #include <unordered_map>
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+
17
+ class SourceRangeUnpickler;
18
+ struct SourceRange;
19
+
20
+ // A stringlike class backed by a vector of string_view
21
+ // the string represented are logically the concatenation of the string_views
22
+ // This has advantage of not needing continues memory.
23
+ struct TORCH_API StringCordView {
24
+ StringCordView();
25
+ StringCordView(const StringCordView&) = default;
26
+ StringCordView(StringCordView&&) noexcept = default;
27
+ StringCordView(
28
+ std::vector<c10::string_view> inputs,
29
+ std::vector<std::shared_ptr<std::string>> ownerships);
30
+
31
+ StringCordView& operator=(const StringCordView&) = default;
32
+ StringCordView& operator=(StringCordView&&) noexcept = default;
33
+
34
+ size_t size() const {
35
+ return accumulated_sizes_.back();
36
+ }
37
+
38
+ size_t find(const std::string& tok, size_t start) const;
39
+ size_t find_regex(const std::string& tok, size_t start) const;
40
+ StringCordView substr(size_t start, size_t size) const;
41
+
42
+ char at(size_t index) const {
43
+ return *iter_for_pos(index);
44
+ }
45
+ char operator[](size_t index) const {
46
+ return at(index);
47
+ }
48
+
49
+ std::string str() const {
50
+ std::stringstream ss;
51
+ for (auto s : pieces_) {
52
+ ss << std::string(s);
53
+ }
54
+ return ss.str();
55
+ }
56
+
57
+ bool operator==(const std::string& rhs) const;
58
+
59
+ bool operator==(const StringCordView& rhs) const;
60
+
61
+ c10::string_view piece(size_t index) const {
62
+ return pieces_[index];
63
+ }
64
+
65
+ struct Iterator {
66
+ Iterator(
67
+ const StringCordView* str,
68
+ size_t start_line,
69
+ size_t start_pos,
70
+ size_t size)
71
+ : line_(start_line), pos_(start_pos), str_(str), size_(size) {}
72
+ explicit Iterator(const StringCordView* str)
73
+ : Iterator(str, 0, 0, str->size()) {}
74
+
75
+ Iterator() : Iterator(nullptr, 0, 0, 0) {}
76
+
77
+ Iterator(const Iterator&) = default;
78
+ Iterator(Iterator&&) = default;
79
+ Iterator& operator=(const Iterator&) = default;
80
+ Iterator& operator=(Iterator&&) = default;
81
+
82
+ Iterator operator++() {
83
+ if (size_ == 0) {
84
+ return *this;
85
+ }
86
+ if ((pos_ + 1) < str_->pieces_[line_].size()) {
87
+ pos_++;
88
+ } else {
89
+ line_++;
90
+ pos_ = 0;
91
+ }
92
+ return *this;
93
+ }
94
+
95
+ Iterator operator++(int) {
96
+ Iterator prev(*this);
97
+ ++(*this);
98
+ return prev;
99
+ }
100
+
101
+ Iterator next_iter() const {
102
+ Iterator next(*this);
103
+ ++next;
104
+ return next;
105
+ }
106
+
107
+ Iterator& operator+=(size_t num) {
108
+ if (!has_next()) {
109
+ return *this;
110
+ }
111
+ size_t target_pos = pos_ + num;
112
+ if (target_pos >= str_->accumulated_sizes_[line_] &&
113
+ (line_ + 1) < str_->accumulated_sizes_.size() &&
114
+ target_pos < str_->accumulated_sizes_[line_ + 1]) {
115
+ pos_ = target_pos;
116
+ return *this;
117
+ }
118
+
119
+ size_t target_abs_pos = pos() + num;
120
+ *this = str_->iter_for_pos(target_abs_pos);
121
+ return *this;
122
+ }
123
+
124
+ bool operator==(const Iterator& rhs) const {
125
+ if (!has_next() && !rhs.has_next()) {
126
+ return true;
127
+ }
128
+ return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_);
129
+ }
130
+ bool operator!=(const Iterator& rhs) {
131
+ return !((*this) == rhs);
132
+ }
133
+ bool has_next() const {
134
+ return size_ > 0 && (line_ < str_->pieces_.size());
135
+ }
136
+
137
+ char operator*() const {
138
+ TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size());
139
+ TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size());
140
+ return str_->pieces_[line_].at(pos_);
141
+ }
142
+
143
+ // returns rest of the line of the current iterator
144
+ c10::string_view rest_line() const {
145
+ if (line_ >= str_->pieces_.size()) {
146
+ return "";
147
+ }
148
+
149
+ c10::string_view cur_line = str_->pieces_[line_];
150
+ return cur_line.substr(pos_, std::string::npos);
151
+ }
152
+
153
+ size_t pos() const {
154
+ if (size_ == 0) {
155
+ return 0;
156
+ }
157
+ return str_->accumulated_sizes_[line_] + pos_;
158
+ }
159
+
160
+ private:
161
+ size_t line_;
162
+ size_t pos_;
163
+ const StringCordView* str_;
164
+ size_t size_;
165
+ friend struct StringCordView;
166
+ };
167
+
168
  // Iterator positioned at absolute offset 0 of the cord.
  Iterator begin() const {
    return Iterator(this, 0, 0, size());
  }
  // Past-the-end iterator (its line index equals the number of pieces and
  // its size is 0, so has_next() is false).
  Iterator end() const {
    return Iterator(this, pieces_.size(), 0, 0);
  }
  // Build an iterator positioned at absolute character offset `pos`.
  Iterator iter_for_pos(size_t pos) const;

 private:
  // The pieces of the cord, in order. These are non-owning views; any piece
  // whose bytes this cord owns is kept alive via owned_strings_.
  std::vector<c10::string_view> pieces_;
  // accumulated_sizes_[i] is the absolute character offset at which piece i
  // begins (used by Iterator::pos() and operator+=).
  std::vector<size_t> accumulated_sizes_;
  // Keep-alive storage for owned pieces; pieces_ may also view memory owned
  // by the caller (see Source's DONT_COPY mode).
  std::vector<std::shared_ptr<std::string>> owned_strings_;
};
181
+
182
// Source represents a code segment. It keeps track of:
//  - text_view : the view into text of the code segment
//  - filename (optional) : if present, represents the name of the file from
//                          which the code segment originated.
//  - starting_line_no : represents the line in the original file where the
//                       code segment started.
struct TORCH_API Source {
  // Whether or not Source should copy the string passed in the constructor.
  enum CopiesString { COPIES_STRING, DONT_COPY };

  // Construct from a flat piece of text. With COPIES_STRING (the default)
  // the text is copied into a buffer owned by this Source, so the caller's
  // buffer may go away afterwards; with DONT_COPY the Source merely views
  // `text_view` and the caller must keep the underlying bytes alive for the
  // lifetime of this Source.
  explicit Source(
      c10::string_view text_view,
      c10::optional<std::string> filename = c10::nullopt,
      size_t starting_line_no = 0,
      std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr,
      CopiesString copies_str = COPIES_STRING)
      : filename_(std::move(filename)),
        starting_line_no_(starting_line_no),
        gen_ranges_(std::move(gen_ranges)) {
    if (copies_str == COPIES_STRING) {
      // The cord views the heap string and also holds a shared_ptr to it,
      // which is what keeps the copied bytes alive.
      std::shared_ptr<std::string> allocated_str =
          std::make_shared<std::string>(text_view.data(), text_view.size());
      text_view_ = StringCordView({*allocated_str}, {allocated_str});
    } else {
      text_view_ = StringCordView({text_view}, {});
    }

    calc_line_start_offsets();
  }

  // Construct from an already-built cord (no copy; ownership is whatever
  // the cord already carries).
  explicit Source(
      StringCordView str,
      c10::optional<std::string> filename = c10::nullopt,
      size_t starting_line_no = 0,
      std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
      : text_view_(std::move(str)),
        filename_(std::move(filename)),
        starting_line_no_(starting_line_no),
        gen_ranges_(std::move(gen_ranges)) {
    calc_line_start_offsets();
  }
  // Given a line number (within source_), return the byte offset of the
  // beginning of that line. Throws (via vector::at) if `line` is out of
  // range.
  size_t offset_for_line(size_t line) const {
    return line_starting_offsets_.at(line);
  }

  // Returns number of lines present.
  size_t num_lines() const {
    return line_starting_offsets_.size();
  }

  // Calculate the line (within the code segment) on which `offset` resides.
  // upper_bound finds the first line starting strictly after `offset`; the
  // line containing `offset` is the one before it.
  size_t lineno_for_offset(size_t offset) const {
    auto iter = std::upper_bound(
        line_starting_offsets_.begin(), line_starting_offsets_.end(), offset);
    return iter - line_starting_offsets_.begin() - 1;
  }

  // Calculate the line (within the original source file, if present) on which
  // `lineno` resides.
  size_t lineno_to_source_lineno(size_t lineno) const {
    if (filename_) {
      return lineno + starting_line_no_;
    } else {
      return lineno;
    }
  }

  // Text of line `lineno`; for every line but the last this includes the
  // trailing newline (the next line starts just past it).
  StringCordView get_line(size_t lineno) const {
    auto start = offset_for_line(lineno);
    auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start
                                           : text_view_.size() - start;
    return text_view_.substr(start, size);
  }

  const StringCordView& text_str() const {
    return text_view_;
  }

  char char_at(size_t index) const {
    return text_view_.at(index);
  }

  size_t size() const {
    return text_view_.size();
  }

  c10::optional<std::string>& filename() {
    return filename_;
  }

  size_t starting_line_no() const {
    return starting_line_no_;
  }

  // Map `range` back to the range that generated it, using gen_ranges_
  // metadata (see SourceRangeUnpickler); implementation lives elsewhere.
  c10::optional<SourceRange> findSourceRangeThatGenerated(
      const SourceRange& range);

  ~Source() = default;

 private:
  // (Re)build line_starting_offsets_ by scanning for '\n'. Line 0 always
  // starts at offset 0; every newline starts a new line right after it.
  void calc_line_start_offsets() {
    line_starting_offsets_.clear();
    line_starting_offsets_.push_back(0);
    size_t pos = 0;
    while ((pos = text_view_.find("\n", pos)) != std::string::npos) {
      line_starting_offsets_.push_back(++pos);
    }
  }

  StringCordView text_view_;

  c10::optional<std::string> filename_;
  // If filename_ is not present, starting_line_no_ is don't care
  size_t starting_line_no_;
  // Starting offsets for lines into the source. e.g. line 0 starts at
  // line_starting_offsets_[0], etc.
  std::vector<size_t> line_starting_offsets_;

  std::shared_ptr<SourceRangeUnpickler> gen_ranges_;
};
304
+
305
// A SourceRange is a reference to subset of a Source, specified by `start` and
// `end` byte offsets into the source text.
struct TORCH_API SourceRange {
  SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_)
      : source_view_(std::move(source_view)), start_(start_), end_(end_) {
    if (source_view_) {
      // Cache an iterator at `start_` so token_text() doesn't have to
      // re-locate the position every call.
      start_iter_ = source_view_->text_str().iter_for_pos(start_);
    }
  }

  // Empty range with no backing Source.
  SourceRange() : source_view_(nullptr), start_(0), end_(0) {}

  // Construct from a pre-positioned iterator; the start offset is derived
  // from the iterator's absolute position.
  SourceRange(
      std::shared_ptr<Source> source_view_,
      StringCordView::Iterator start_iter,
      size_t end_)
      : source_view_(std::move(source_view_)),
        start_(start_iter.pos()),
        end_(end_),
        start_iter_(start_iter) {}

  // Text of the range, as a view into the underlying storage.
  // NOTE(review): this slices start_iter_.rest_line(), which only spans the
  // current piece of the cord — a range that crosses a piece boundary would
  // be truncated here; text() handles the general case.
  const c10::string_view token_text() const {
    size_t size = end() - start();
    return start_iter_.rest_line().substr(0, size);
  }

  // Full text of the range (may span multiple cord pieces).
  const StringCordView text() const {
    return source_view_->text_str().substr(start(), end() - start());
  }
  size_t size() const {
    return end() - start();
  }
  // Default amount of surrounding context — presumably in lines — used by
  // highlight()/print_with_context (implementation not in this header).
  static const size_t CONTEXT = 3;
  void highlight(std::ostream& out) const;

  // Customizable version of 'highlight' method.
  void print_with_context(
      std::ostream& out,
      size_t context,
      bool highlight,
      const std::string& funcname) const;

  const std::shared_ptr<Source>& source() const {
    return source_view_;
  }
  size_t start() const {
    return start_;
  }
  size_t end() const {
    return end_;
  }
  // Highlighted rendering of the range as a string.
  std::string str() const {
    std::stringstream ss;
    highlight(ss);
    return ss.str();
  }

  // (filename, line, column) of the start of this range, if it originates
  // from a named file; nullopt otherwise.
  c10::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const {
    if (!source_view_ || !source()->filename()) {
      return c10::nullopt;
    }

    auto lineno = source_view_->lineno_for_offset(start_);
    auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno);
    // TODO: c10::optional<>::value returns an rvalue ref so can't use it here??
    return std::make_tuple<std::string, size_t, size_t>(
        source_view_->filename().value_or(""),
        source_view_->lineno_to_source_lineno(lineno),
        (size_t)col_offset);
  }

  // Ranges are equal when they cover the same offsets of the same Source
  // object (pointer identity, not textual equality).
  bool operator==(const SourceRange& rhs) const {
    return start() == rhs.start() && end() == rhs.end() &&
        source() == rhs.source();
  }

  bool operator!=(const SourceRange& rhs) const {
    return !(*this == rhs);
  }

  // If this range was generated from another range (e.g. through
  // serialization), recover the original; nullopt if unknown.
  c10::optional<SourceRange> findSourceRangeThatGenerated() const {
    if (!source_view_) {
      return c10::nullopt;
    }
    return source_view_->findSourceRangeThatGenerated(*this);
  }

 protected:
  std::shared_ptr<Source> source_view_;

 private:
  size_t start_;
  size_t end_;
  StringCordView::Iterator start_iter_;
};
400
+
401
+ // OwnedSourceRange is just like a SourceRange except that it owns a `Source`
402
+ // instead of `Source`. Thus OwnedSourceRange owns a copy of source text.
403
+ struct OwnedSourceRange : public SourceRange {
404
+ explicit OwnedSourceRange(const SourceRange& source_range)
405
+ : SourceRange(source_range) {
406
+ const auto& source = source_range.source();
407
+ if (source) {
408
+ source_view_ = std::make_shared<Source>(
409
+ source->text_str().str(),
410
+ source->filename(),
411
+ source->starting_line_no());
412
+ }
413
+ }
414
+ };
415
+
416
// Hash functor so SourceRange can key unordered containers (see
// SourceRangeTagMap below); implementation lives elsewhere.
struct TORCH_API SourceRangeHasher {
 public:
  size_t operator()(const torch::jit::SourceRange& key) const;
};

// One frame of a script stack trace: where (filename) and what (range).
struct StackEntry {
  std::string filename;
  SourceRange range;
};

// Pretty-print a stack trace to `out`, one entry per frame.
TORCH_API void format_stack_trace(
    std::ostream& out,
    const std::vector<StackEntry>& entries);

// Streaming a SourceRange prints its highlighted context.
inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
  range.highlight(out);
  return out;
}

// A pair of (byte offset, SourceRange) describing a specific segment
// of the output stream
struct TaggedRange {
  TaggedRange(size_t bytes, SourceRange range)
      : bytes(bytes), range(std::move(range)) {}
  size_t bytes;
  SourceRange range;
};
using SourceRangeRecords = std::vector<TaggedRange>;
using SourceRangeTagMap =
    std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;
446
+
447
+ } // namespace jit
448
+ } // namespace torch
449
+
450
+ namespace std {
451
+ template <>
452
+ struct iterator_traits<torch::jit::StringCordView::Iterator> {
453
+ using value_type = char;
454
+ using difference_type = ptrdiff_t;
455
+ using pointer = char*;
456
+ using reference = char&;
457
+ using iterator_category = std::forward_iterator_tag;
458
+ };
459
+ } // namespace std
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <c10/macros/Export.h>
8
+ #include <torch/csrc/jit/frontend/source_range.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * SourceRef does two things:
15
+ * 1. Owns a Source object.
16
+ * 2. Serves as lookup key to the owned Source in associative containers, for
17
+ * runtime data aggregation.
18
+ * We don't want to use std::shared_ptr<Source> directly because we want to
19
+ * support heteogeneous lookup, and also shared_ptr is an implementation detail
20
+ * which should be encapsulated.
21
+ */
22
class TORCH_API SourceRef : public CustomClassHolder {
 public:
  explicit SourceRef(std::shared_ptr<Source> source_view)
      : source_view_(std::move(source_view)) {}
  // Two SourceRefs are equal iff they share the same Source object
  // (pointer identity).
  bool operator==(const SourceRef& other) const {
    return source_view_ == other.source_view_;
  }
  // Heterogeneous ordering against a bare Source, by object address; this is
  // what lets a Source be used directly as a lookup key in ordered
  // containers of SourceRef (with a transparent comparator).
  bool operator<(const Source& other) const {
    return source_view_.get() < &other;
  }
  friend bool operator<(const Source& other, const SourceRef& self) {
    return &other < self.source_view_.get();
  }
  bool operator<(const SourceRef& other) const {
    return *this < *other.source_view_.get();
  }
  // Smart-pointer style access to the owned Source.
  const Source* operator->() const {
    return source_view_.get();
  }

 private:
  std::shared_ptr<Source> source_view_;
};
45
+
46
+ } // namespace jit
47
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
// Drop-in replacements for strtod/strtof — presumably locale-independent
// (always parsing with "C"-locale conventions, '.' as the decimal
// separator); the implementations are not in this header — TODO confirm.
TORCH_API double strtod_c(const char* nptr, char** endptr);
TORCH_API float strtof_c(const char* nptr, char** endptr);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h ADDED
@@ -0,0 +1,857 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <utility>
7
+
8
+ #include <ATen/core/symbol.h>
9
+ #include <caffe2/serialize/versions.h>
10
+ #include <torch/csrc/jit/api/module.h>
11
+ #include <torch/csrc/jit/frontend/error_report.h>
12
+ #include <torch/csrc/jit/frontend/schema_matching.h>
13
+ #include <torch/csrc/jit/frontend/versioned_symbols.h>
14
+ #include <torch/csrc/jit/ir/ir.h>
15
+
16
+ namespace torch {
17
+ namespace jit {
18
+
19
+ using SugaredValuePtr = std::shared_ptr<SugaredValue>;
20
+
21
+ // The AST can contain nodes like `self`, `self.b` or `python_fn` that
22
+ // are not first-class values in the graph representation, but instead
23
+ // will be desugared based on how they are used in the AST.
24
+
25
+ // SugaredValue is used to temporarily represent these values in a way
26
+ // that separates their behavior from the AST -> IR converter itself.
27
+ // This allows us to keep dependencies on python minimal.
28
+
29
// Base class for anything that can appear as an expression during
// compilation but is not necessarily a first-class IR Value. Every
// operation throws a user-facing ErrorReport by default; subclasses
// override the operations they actually support.
struct TORCH_API SugaredValue
    : public std::enable_shared_from_this<SugaredValue> {
  // what is this node? for error reporting (e.g. Module, python function)
  virtual std::string kind() const = 0;

  // what can we do with this thing?
  // use it as a value e.g.  `this + 4`
  virtual Value* asValue(const SourceRange& loc, GraphFunction& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as a value";
  }

  // select an attribute on it, e.g. `this.field`
  virtual std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) {
    throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
  }

  // does this value have the given attribute?
  virtual bool hasAttr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) {
    throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
  }

  // assign an attribute on it, e.g. `this.field = newValue`
  virtual void setAttr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field,
      Value* newValue) {
    throw ErrorReport(loc) << "attribute assignment is not defined on "
                           << kind();
  }

  // use it as a vector of values, e.g. a tuple of values as return value from
  // a method invocation
  virtual std::vector<std::shared_ptr<SugaredValue>> asTuple(
      const SourceRange& loc,
      GraphFunction& m,
      const c10::optional<size_t>& size_hint = {}) {
    throw ErrorReport(loc) << kind() << " cannot be used as a tuple";
  }

  // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API
  virtual SugaredValuePtr asTupleValue(
      const SourceRange& loc,
      GraphFunction& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as a tuplevalue";
  }

  // use it as a type, e.g. in an annotation position
  virtual std::vector<std::shared_ptr<SugaredValue>> asType(
      const SourceRange& loc,
      Method& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as a type";
  }

  // call it like a function, e.g. `outputs = this(inputs)`
  virtual std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      // note: names for args will be 'argument 0', 'argument 1', etc..
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) {
    // n_binders is always set to the number of variables an expression is
    // syntactically bound to:
    //     a = foo() # 1 binder (note in this case the single binder might be a
    //     tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0
    //     binders
    //
    // In subexpressions, like bar() in foo(bar()), n_binders is always set to
    // 1. n_binders is used as a hint to subexpressions to determine how many
    // values they should return when that number is ambiguous statically. In
    // particular it is currently used to decide how many tensors a call to a
    // python function will return. It is only a hint, functions do not have to
    // check that n_binders match the number of things they are returning, the
    // assignment logic will do that anyway.

    throw ErrorReport(loc) << "cannot call a " << kind();
  }

  // This function is called when to convert a SugaredValue to its iterator.
  // For example, when iterating through a Dict we iterate over its keys
  virtual std::shared_ptr<SugaredValue> iter(
      const SourceRange& loc,
      GraphFunction& m) {
    throw ErrorReport(loc) << kind() << " cannot be used as an iterable";
  }

  // If we are iterating over a Sugared Value and it returns a value from this
  // function, then we emit an unrolled loop over the variable. This allows us
  // to support containers of Heterogenous types, like Module Containers &
  // Tuples
  virtual c10::optional<int64_t> staticLen() {
    return c10::nullopt;
  }

  // When iterating over this SugaredValue, should we emit the for loop as an
  // unrolled loop.
  bool shouldEmitUnrolled() {
    return staticLen() != c10::nullopt;
  }

  // return length of this thing, if not then it can't be iterated.
  // If it does not have a statically-determinable length, then it cannot
  // be iterated over with a modulelist. If it does it must return a constant
  // Value *
  virtual Value* len(const SourceRange& loc, GraphFunction& m) {
    throw ErrorReport(loc) << "'" << kind() << "'"
                           << " object is not iterable";
  }

  // expression for ith elemement for iterable value
  virtual std::shared_ptr<SugaredValue> getitem(
      const SourceRange& loc,
      GraphFunction& m,
      Value* idx,
      TypePtr type_hint = nullptr) {
    throw ErrorReport(loc) << "'" << kind() << "'"
                           << " object is not subscriptable";
  }

  virtual ~SugaredValue() = default;
};
+ };
155
+
156
// most things in the environment are just simple value types
// and not special python syntax sugar types
struct TORCH_API SimpleValue : public SugaredValue {
  SimpleValue(Value* value) : value_(value) {}
  // Reported as "value of type '<T>'" so error messages name the IR type.
  std::string kind() const override {
    std::stringstream ss;
    // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
    ss << "value of type '" << value_->type()->annotation_str() << "'";
    return ss.str();
  }
  Value* asValue(const SourceRange& range, GraphFunction& m) override {
    return value_;
  }
  std::vector<std::shared_ptr<SugaredValue>> asTuple(
      const SourceRange& loc,
      GraphFunction& m,
      const c10::optional<size_t>& size_hint = {}) override;
  std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override;

  bool hasAttr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override;

  void setAttr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field,
      Value* newValue) override;

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      // note: names for args will be 'argument 0', 'argument 1', etc..
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

  std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
      override;

  // Direct access to the wrapped IR value (unlike asValue, needs no
  // location/function context).
  Value* getValue() const {
    return value_;
  }

  Value* len(const SourceRange& loc, GraphFunction& m) override;
  SugaredValuePtr getitem(
      const SourceRange& loc,
      GraphFunction& m,
      Value* idx,
      TypePtr type_hint = nullptr) override;

 private:
  Value* value_;
};
+ };
214
+
215
// A builtin operator (e.g. `aten::relu`), optionally bound to a `self`
// argument when it was looked up as a method.
struct TORCH_API BuiltinFunction : public SugaredValue {
  BuiltinFunction(Symbol symbol, c10::optional<NamedValue> self)
      : symbol(symbol), self(std::move(self)) {}

  // The symbol of the function (e.g. `aten::relu`).
  Symbol symbol;

  // if this is method, then this is the self argument.
  c10::optional<NamedValue> self;
  std::string kind() const override {
    return "builtin";
  }
  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

  // try to create this builtin but if it doesn't exist or the self argument
  // cannot possibly match, then return nullptr. Use in situations where it is
  // not clear if it is a valid builtin
  static std::shared_ptr<BuiltinFunction> tryCreate(
      Symbol symbol,
      c10::optional<NamedValue> self);
};
+ };
241
+
242
// A tuple of SugaredValues that may have heterogeneous element kinds, so it
// cannot necessarily be lowered to a single IR tuple until asValue is called.
struct TORCH_API SugaredTupleValue : public SugaredValue {
  explicit SugaredTupleValue(std::vector<std::shared_ptr<SugaredValue>> tup)
      : tup_(std::move(tup)){};

  std::vector<std::shared_ptr<SugaredValue>> asTuple(
      const SourceRange& loc,
      GraphFunction& m,
      const c10::optional<size_t>& size_hint = {}) override {
    return tup_;
  };

  // Lower every element to an IR Value and pack them into a prim tuple node.
  Value* asValue(const SourceRange& loc, GraphFunction& m) override {
    std::vector<Value*> vec;
    vec.reserve(tup_.size());
    for (const auto& sv : tup_) {
      vec.push_back(sv->asValue(loc, m));
    }
    Graph& g = *m.graph();
    return g.insertNode(g.createTuple(vec))->output();
  }

  std::string kind() const override {
    return "Tuple";
  }

  // Indexing requires a compile-time integer literal; negative indices are
  // interpreted Python-style (relative to the end).
  SugaredValuePtr getitem(
      const SourceRange& loc,
      GraphFunction& m,
      Value* idx,
      TypePtr type_hint = nullptr) override {
    if (!(idx->type()->cast<IntType>() && toIValue(idx))) {
      throw ErrorReport(loc)
          << "Expected integer literal for index but got a variable or non-integer. "
          << "ModuleList/Sequential indexing is only supported with integer literals. "
          << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. "
          << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'";
    }
    auto index = toIValue(idx)->toInt();
    int64_t adj_index =
        (index < 0) ? index + static_cast<int64_t>(tup_.size()) : index;
    if (!(adj_index >= 0 && adj_index < static_cast<int64_t>(tup_.size()))) {
      throw ErrorReport(loc)
          << "Index " << index << " out of range of length " << tup_.size();
    }
    return tup_.at(adj_index);
  }

  // This function is called when a SugaredValue is used to convert a
  // SugaredValue to its iterator. For example, when iterating through a Dict we
  // iterate over its keys
  std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
      override {
    return shared_from_this();
  };

  // Because this is used to contain SugaredValues of Heterogenous types,
  // we define staticLen() so that when this is iterated over it is emitted
  // as an unrolled loop.
  c10::optional<int64_t> staticLen() override {
    return static_cast<int64_t>(tup_.size());
  }

  std::vector<std::shared_ptr<SugaredValue>> tup_;
};
+ };
306
+
307
// A namespace of builtin operators (e.g. `torch`, `aten`): attribute lookup
// produces BuiltinFunctions by qualified symbol name.
struct TORCH_API BuiltinModule : public SugaredValue {
  BuiltinModule(std::string name, c10::optional<int64_t> version = at::nullopt)
      : name(std::move(name)), version(version) {}

  std::string kind() const override {
    return "builtin module";
  }
  std::shared_ptr<SugaredValue> attr(
      const SourceRange& loc,
      GraphFunction& m,
      const std::string& field) override {
    if (field == "autograd") {
      // When referring to torch.autograd, it is also considered to be a
      // BuiltinModule and we will dispatch to the aten operators for the
      // methods under its module.
      return std::make_shared<BuiltinModule>("aten", version);
    }

    auto sym = Symbol::fromQualString(name + "::" + field);
    return std::make_shared<BuiltinFunction>(sym, c10::nullopt);
  }

 private:
  std::string name;
  // when we add operator versioning, emit this op as it existed at 'version'
  // if not set, use the latest version
  c10::optional<int64_t> version;
};
+ };
335
+
336
+ // Represents a class, analagous to `int` or `dict`. Instances of classes,
337
+ // like `1` or `{"foo": 5}`, are represented as SimpleValues
338
+ struct TORCH_API ClassValue : public SugaredValue {
339
+ explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {}
340
+
341
+ // Call the type's constructor, as in:
342
+ // n = Foo(constructor_arg)
343
+ std::shared_ptr<SugaredValue> call(
344
+ const SourceRange& loc,
345
+ GraphFunction& m,
346
+ at::ArrayRef<NamedValue> args,
347
+ at::ArrayRef<NamedValue> kwargs,
348
+ size_t n_binders) override;
349
+
350
+ std::shared_ptr<SugaredValue> attr(
351
+ const SourceRange& loc,
352
+ GraphFunction& m,
353
+ const std::string& field) override;
354
+
355
+ std::string kind() const override {
356
+ return type_->str();
357
+ }
358
+
359
+ ClassTypePtr type_;
360
+ };
361
+
362
// The constructor of a NamedTuple type: calling it builds an instance of
// the named tuple.
struct TORCH_API NamedTupleConstructor : public SugaredValue {
  explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {}

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& m,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override;

  std::string kind() const override {
    return type_->str();
  }

  TupleTypePtr type_;
};
+ };
378
+
379
// A (possibly overloaded) script function. When called, the overloads'
// schemas are matched against the arguments and a function-call node for the
// winning overload is inserted.
struct FunctionValue : public SugaredValue {
  FunctionValue(Function* callee) : callees_({callee}) {}
  FunctionValue(const StrongFunctionPtr& p)
      : callees_({p.function_}), cu_(p.cu_) {}
  // All overloads must come from the same CompilationUnit.
  FunctionValue(const std::vector<StrongFunctionPtr>& callees) {
    for (const StrongFunctionPtr& callee : callees) {
      cu_ = cu_ ? cu_ : callee.cu_;
      TORCH_INTERNAL_ASSERT(callee.cu_ == cu_);
      callees_.push_back(callee.function_);
    }
  }

  std::string kind() const override {
    return "function";
  }

  std::shared_ptr<SugaredValue> call(
      const SourceRange& loc,
      GraphFunction& f,
      at::ArrayRef<NamedValue> args,
      at::ArrayRef<NamedValue> kwargs,
      size_t n_binders) override {
    std::vector<const FunctionSchema*> schemas;
    for (Function* callee : callees_) {
      try {
        // Force compilation of the callee before we can match its schema;
        // recursion is surfaced as a user error.
        callee->ensure_defined();
      } catch (const RecursiveMethodCallError&) {
        throw ErrorReport(loc)
            << " function '" << callee->name() << "' is called recursively. "
            << "Recursive calls are not supported";
      }
      schemas.push_back(&callee->getSchema());
    }
    auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs);
    Value* output =
        f.graph()->insertFunctionCall(callees_[match.first], match.second);
    output->node()->setSourceRange(loc);
    return std::make_shared<SimpleValue>(output);
  }

  const std::vector<Function*>& callees() {
    return callees_;
  }

 private:
  std::vector<Function*> callees_;
  // TODO holding this thing is creepy
  std::shared_ptr<CompilationUnit> cu_;
};
+ };
428
+
429
// Wraps the Value produced by a prim::Closure node; asValue simply unwraps
// it. The constructor asserts the node kind to catch misuse early.
struct TORCH_API ClosureValue : public SugaredValue {
  ClosureValue(Value* value) : value_(value) {
    TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure);
  }
  std::string kind() const override {
    return "closure";
  }
  Value* asValue(const SourceRange& range, GraphFunction& m) override {
    return value_;
  }
  Value* value_;
};
+ };
441
+
442
+ // defines how a method obtained from a module/class/interface behaves in script
443
+ struct MethodValue : public SugaredValue {
444
+ MethodValue(Value* self, std::vector<std::string> method_names)
445
+ : self_(self), method_names_(std::move(method_names)) {}
446
+ MethodValue(Value* self, std::string method_name)
447
+ : MethodValue(self, std::vector<std::string>({std::move(method_name)})) {}
448
+
449
+ std::string kind() const override {
450
+ return "method";
451
+ }
452
+
453
+ std::shared_ptr<SugaredValue> call(
454
+ const SourceRange& loc,
455
+ GraphFunction& f,
456
+ at::ArrayRef<NamedValue> args,
457
+ at::ArrayRef<NamedValue> kwargs,
458
+ size_t n_binders) override {
459
+ std::vector<NamedValue> argsWithSelf = {self_};
460
+ argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end());
461
+ std::vector<const FunctionSchema*> schemas;
462
+ for (const std::string& method_name : method_names_) {
463
+ if (auto class_type = self_->type()->cast<ClassType>()) {
464
+ Function& method = class_type->getMethod(method_name);
465
+ try {
466
+ method.ensure_defined();
467
+ } catch (const RecursiveMethodCallError&) {
468
+ throw ErrorReport(loc)
469
+ << " method '" << method.name() << "' is called recursively. "
470
+ << "Recursive calls are not supported";
471
+ }
472
+ schemas.push_back(&method.getSchema());
473
+ } else if (auto interface_type = self_->type()->cast<InterfaceType>()) {
474
+ schemas.push_back(interface_type->getMethod(method_name));
475
+ } else {
476
+ TORCH_INTERNAL_ASSERT(
477
+ false, "method constructed that is not a class or interface");
478
+ }
479
+ }
480
+ auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs);
481
+ Value* output =
482
+ f.graph()->insertMethodCall(method_names_[match.first], match.second);
483
+ output->node()->setSourceRange(loc);
484
+ return std::make_shared<SimpleValue>(output);
485
+ }
486
+
487
+ private:
488
+ Value* self_;
489
+ std::vector<std::string> method_names_;
490
+ };
491
+
492
+ struct TORCH_API PrintValue : public SugaredValue {
493
+ std::string kind() const override {
494
+ return "print";
495
+ }
496
+ std::shared_ptr<SugaredValue> call(
497
+ const SourceRange& loc,
498
+ GraphFunction& m,
499
+ at::ArrayRef<NamedValue> args,
500
+ at::ArrayRef<NamedValue> kwargs,
501
+ size_t n_binders) override;
502
+ };
503
+
504
+ // expressions like int(x)
505
+ // these are the same as call prim::Int or equivalent except it
506
+ // is a noop when the input is a subtype of 'type'
507
+ struct TORCH_API CastValue : public BuiltinFunction {
508
+ CastValue(TypePtr type, c10::Symbol method)
509
+ : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {}
510
+ std::shared_ptr<SugaredValue> call(
511
+ const SourceRange& loc,
512
+ GraphFunction& m,
513
+ at::ArrayRef<NamedValue> args,
514
+ at::ArrayRef<NamedValue> kwargs,
515
+ size_t n_binders) override {
516
+ if (args.size() == 1 && kwargs.empty()) {
517
+ auto len_op = std::make_shared<BuiltinFunction>(aten::len, at::nullopt);
518
+ auto gt_op = std::make_shared<BuiltinFunction>(aten::gt, at::nullopt);
519
+ auto zero = m.graph()->insertConstant(0);
520
+
521
+ auto v = args[0].value(*m.graph());
522
+ if (v->type()->isSubtypeOf(*type_)) {
523
+ return std::make_shared<SimpleValue>(v);
524
+ } else if (
525
+ *type_ == *BoolType::get() &&
526
+ (v->type()->isSubtypeOf(*AnyListType::get()) ||
527
+ v->type()->isSubtypeOf(*StringType::get()) ||
528
+ v->type()->cast<DictType>())) {
529
+ auto len = len_op->call(loc, m, {v}, {}, 1);
530
+ return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1);
531
+ }
532
+ }
533
+ return BuiltinFunction::call(loc, m, args, kwargs, n_binders);
534
+ }
535
+
536
+ private:
537
+ TypePtr type_;
538
+ };
539
+
540
+ struct TORCH_API TensorCastValue : public SugaredValue {
541
+ TensorCastValue(at::ScalarType type, NamedValue self)
542
+ : dtype_(type), self_(std::move(self)) {}
543
+
544
+ std::string kind() const override {
545
+ return "Cast";
546
+ }
547
+
548
+ std::shared_ptr<SugaredValue> call(
549
+ const SourceRange& loc,
550
+ GraphFunction& m,
551
+ at::ArrayRef<NamedValue> args,
552
+ at::ArrayRef<NamedValue> kwargs,
553
+ size_t n_binders) override {
554
+ TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty());
555
+ Value* dtype_const = m.graph()->insertConstant(dtype_, loc);
556
+ std::vector<NamedValue> kwargs_{
557
+ self_, NamedValue(loc, "dtype", dtype_const)};
558
+ Value* casted_val = m.graph()->insert(
559
+ /*opname=*/Symbol::fromQualString("aten::to"),
560
+ /*args=*/args,
561
+ /*kwargs=*/kwargs_,
562
+ /*range=*/loc);
563
+ return std::make_shared<SimpleValue>(casted_val);
564
+ }
565
+
566
+ at::ScalarType dtype_;
567
+ NamedValue self_;
568
+ };
569
+
570
+ // builtins operators and functions that call a method if it exists
571
+ // on a class type, like 'len(x)' and 'x + y'
572
+ struct TORCH_API MagicMethod : public SugaredValue {
573
+ MagicMethod(std::string desugared_name, SugaredValuePtr base)
574
+ : base_value_(std::move(base)),
575
+ desugared_name_(std::move(desugared_name)) {}
576
+
577
+ std::string kind() const override {
578
+ return desugared_name_;
579
+ }
580
+
581
+ std::shared_ptr<SugaredValue> call(
582
+ const SourceRange& loc,
583
+ GraphFunction& m,
584
+ at::ArrayRef<NamedValue> args,
585
+ at::ArrayRef<NamedValue> kwargs,
586
+ size_t n_binders) override;
587
+
588
+ private:
589
+ SugaredValuePtr base_value_;
590
+ std::string desugared_name_;
591
+ };
592
+
593
+ // things that look like function applications, but
594
+ // perform non-standard evaluation are represented
595
+ // with SpecialFormValues, e.g.
596
+ // isinstance(x, int)
597
+ // fork(fn)
598
+ // annotate(int, 3)
599
+ // The implementation of each value is handled by a case inside emitApplyExpr
600
+ struct TORCH_API SpecialFormValue : public SugaredValue {
601
+ SpecialFormValue(Symbol form) : form_(form) {}
602
+ std::string kind() const override {
603
+ return form_.toUnqualString();
604
+ }
605
+ Symbol form() const {
606
+ return form_;
607
+ }
608
+ static std::shared_ptr<SpecialFormValue> create(Symbol form) {
609
+ return std::make_shared<SpecialFormValue>(form);
610
+ }
611
+
612
+ private:
613
+ Symbol form_;
614
+ };
615
+
616
+ struct TORCH_API LegacyTensorConstructor : public SpecialFormValue {
617
+ LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device)
618
+ : SpecialFormValue(form), device_(device), dtype_(dtype) {}
619
+
620
+ static std::shared_ptr<LegacyTensorConstructor> create(
621
+ Symbol form,
622
+ at::ScalarType dtype,
623
+ at::Device device) {
624
+ return std::make_shared<LegacyTensorConstructor>(form, dtype, device);
625
+ }
626
+ at::ScalarType dtype() const {
627
+ return dtype_;
628
+ }
629
+
630
+ private:
631
+ at::Device device_;
632
+ at::ScalarType dtype_;
633
+ };
634
+
635
+ // matched against for special handling of range expressions
636
+ struct TORCH_API RangeValue : SugaredValue {
637
+ RangeValue(
638
+ const SourceRange& loc,
639
+ GraphFunction& m,
640
+ std::vector<Value*> input,
641
+ c10::optional<int64_t> static_len = c10::nullopt);
642
+
643
+ std::string kind() const override {
644
+ return "range";
645
+ }
646
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
647
+ SugaredValuePtr getitem(
648
+ const SourceRange& loc,
649
+ GraphFunction& m,
650
+ Value* idx,
651
+ TypePtr type_hint = nullptr) override;
652
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
653
+ override;
654
+
655
+ // When Range is instantiated via enumerate(iterable_with_static_len),
656
+ // then it takes the static length of the iterable
657
+ c10::optional<int64_t> staticLen() override {
658
+ return static_len_;
659
+ }
660
+
661
+ private:
662
+ Value* start_{};
663
+ Value* end_{};
664
+ Value* step_{};
665
+ // a flag to determine if it's a simple range() call with only end_ from
666
+ // arguments If true, we will not insert length calculation and index
667
+ // derivation nodes to simplify the graph and enable more possible
668
+ // optimizations
669
+ bool has_only_end_{};
670
+ c10::optional<int64_t> static_len_;
671
+ };
672
+
673
+ // Specialized Tree structure to matched against for special handling
674
+ // of builtin functions iterables expressions like zip(), enumerate(), etc.
675
+ // zip and enumerate can be modeled as a tree of SimpleValue/RangeValue:
676
+ // zip(x, y) -> (x, y) with tuple assignment to each loop target
677
+ // enumerate(x) -> (range(0, math.inf, 1), x)
678
+ // So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be:
679
+ // (a, (range(0, math.inf, 1), b), range(0, 100))
680
+ // We use those base iterables to fill in the loop information like
681
+ // max_trip_count and set the value table for loop targets
682
+ // Iterables can contain lists of SugaredValues like ModuleLists. If it
683
+ // does, then we emit it unrolled and require that all values it contains
684
+ // have a statically-determinable length.
685
+ struct TORCH_API IterableTree : SugaredValue {
686
+ IterableTree() = default;
687
+ IterableTree(
688
+ const SourceRange& range,
689
+ GraphFunction& m,
690
+ at::ArrayRef<SugaredValuePtr> children) {
691
+ for (const auto& child : children) {
692
+ addChild(range, m, child);
693
+ }
694
+ }
695
+ std::string kind() const override {
696
+ return "iterabletree";
697
+ }
698
+
699
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
700
+ override {
701
+ return shared_from_this();
702
+ }
703
+
704
+ void addChild(
705
+ const SourceRange& range,
706
+ GraphFunction& m,
707
+ const SugaredValuePtr& iter_value);
708
+
709
+ std::vector<SugaredValuePtr> get_children() {
710
+ return children_;
711
+ }
712
+
713
+ // If this iterable contains a ModuleList or Tuple, then it will have a
714
+ // static length, and we will emit it as an unrolled for loop.
715
+ c10::optional<int64_t> staticLen() override {
716
+ return unroll_length_;
717
+ }
718
+
719
+ // given a IterableTree node, get all the base iterables/leaves under the
720
+ // IterableTree node. This enables
721
+ // us to get all the basic SugaredValues that contains valid loop information
722
+ // with len() and getitem()
723
+ std::vector<SugaredValuePtr> get_base_iterables();
724
+
725
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
726
+ SugaredValuePtr getitem(
727
+ const SourceRange& loc,
728
+ GraphFunction& m,
729
+ Value* idx,
730
+ TypePtr type_hint = nullptr) override;
731
+
732
+ private:
733
+ c10::optional<int64_t> unroll_length_ = c10::nullopt;
734
+ std::vector<SugaredValuePtr> children_;
735
+ };
736
+
737
+ static inline std::vector<Value*> toValues(
738
+ Graph& g,
739
+ at::ArrayRef<NamedValue> nvs) {
740
+ return fmap(nvs, [&](const NamedValue& v) { return v.value(g); });
741
+ }
742
+
743
+ struct SimpleSelf : public Self {
744
+ explicit SimpleSelf(ClassTypePtr classType)
745
+ : Self(), classType_(std::move(classType)) {}
746
+ std::shared_ptr<SugaredValue> makeSugared(Value* v) const override {
747
+ v->setType(classType_);
748
+ return std::make_shared<SimpleValue>(v);
749
+ }
750
+ ClassTypePtr getClassType() const override {
751
+ return classType_;
752
+ }
753
+
754
+ private:
755
+ ClassTypePtr classType_;
756
+ };
757
+
758
+ // This is not a SimpleValue so it can not pass through the code paths that
759
+ // expect a SimpleValue as a sugared value.
760
+ struct TORCH_API ExceptionMessageValue : public SugaredValue {
761
+ explicit ExceptionMessageValue(
762
+ Value* value,
763
+ Value* qualified_class_name = nullptr)
764
+ : value_(value), qualified_class_name_(qualified_class_name) {}
765
+
766
+ std::string kind() const override {
767
+ return "exception message";
768
+ }
769
+
770
+ Value* getValue() {
771
+ return value_;
772
+ }
773
+
774
+ // qualified python class name
775
+ Value* getQualifiedClassName() {
776
+ return qualified_class_name_;
777
+ }
778
+
779
+ private:
780
+ Value* value_;
781
+ Value* qualified_class_name_;
782
+ };
783
+
784
+ struct TORCH_API ExceptionValue : public SugaredValue {
785
+ explicit ExceptionValue(std::string message) : message_(std::move(message)) {}
786
+
787
+ std::string kind() const override {
788
+ return "exception";
789
+ }
790
+
791
+ std::shared_ptr<SugaredValue> call(
792
+ const SourceRange& loc,
793
+ GraphFunction& m,
794
+ at::ArrayRef<NamedValue> args,
795
+ at::ArrayRef<NamedValue> /*attributes*/,
796
+ size_t /*n_binders*/) override {
797
+ auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc);
798
+ for (auto& input : args) {
799
+ auto input_str = input.value(*m.graph());
800
+ if (!input_str->type()->isSubtypeOf(*StringType::get())) {
801
+ input_str =
802
+ emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {});
803
+ }
804
+ exception_message = emitBuiltinCall(
805
+ loc, *m.graph(), aten::add, {exception_message, input_str}, {});
806
+ }
807
+ return std::make_shared<ExceptionMessageValue>(exception_message);
808
+ }
809
+
810
+ std::string message_;
811
+ };
812
+
813
+ struct TORCH_API SugaredEnumClass : public SugaredValue {
814
+ explicit SugaredEnumClass(EnumTypePtr enum_type)
815
+ : enum_type_(std::move(enum_type)) {}
816
+
817
+ std::string kind() const override {
818
+ return "EnumClass";
819
+ }
820
+
821
+ SugaredValuePtr attr(
822
+ const SourceRange& loc,
823
+ GraphFunction& m,
824
+ const std::string& field) override;
825
+
826
+ SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override;
827
+
828
+ private:
829
+ EnumTypePtr enum_type_;
830
+ };
831
+
832
+ struct TORCH_API SliceValue : public SugaredValue {
833
+ explicit SliceValue(Value* start, Value* stop, Value* step)
834
+ : start_(start), stop_(stop), step_(step) {}
835
+
836
+ std::string kind() const override {
837
+ return "Python slice value";
838
+ }
839
+
840
+ Value* start() {
841
+ return start_;
842
+ };
843
+ Value* stop() {
844
+ return stop_;
845
+ };
846
+ Value* step() {
847
+ return step_;
848
+ };
849
+
850
+ private:
851
+ Value* start_;
852
+ Value* stop_;
853
+ Value* step_;
854
+ };
855
+
856
+ } // namespace jit
857
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dimname.h>
4
+ #include <ATen/core/class_type.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <torch/csrc/Export.h>
10
+
11
+ #include <torch/csrc/jit/frontend/source_range.h>
12
+ #include <torch/csrc/utils/variadic.h>
13
+
14
+ #include <cstdint>
15
+ #include <memory>
16
+ #include <mutex>
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace torch {
21
+ namespace jit {
22
+ struct Node;
23
+ struct Value;
24
+ struct Graph;
25
+ struct Module;
26
+
27
+ namespace tracer {
28
+
29
+ using ::c10::ivalue::Shared;
30
+
31
+ using ::c10::IValue;
32
+ using ::c10::ivalue::Future;
33
+
34
+ using ::c10::ArrayRef;
35
+ using ::c10::TupleType;
36
+ using ::c10::TupleTypePtr;
37
+ using ::c10::ivalue::ConstantString;
38
+
39
+ using torch::autograd::Variable;
40
+ using variable_list = std::vector<Variable>;
41
+
42
+ TORCH_API std::atomic<bool>& getTracerStateWarnMode();
43
+
44
+ struct TORCH_API TracingState
45
+ : public std::enable_shared_from_this<TracingState> {
46
+ TracingState();
47
+ ~TracingState();
48
+
49
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
50
+ std::shared_ptr<Graph> graph;
51
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
52
+ bool warn = getTracerStateWarnMode();
53
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
54
+ bool strict = true;
55
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
56
+ bool force_outplace = false;
57
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
58
+ std::function<std::string(const Variable& var)> lookup_var_name_fn =
59
+ [](const Variable& var) { return ""; };
60
+
61
+ void enterFrame() {
62
+ env_stack.emplace_back();
63
+ }
64
+
65
+ void leaveFrame() {
66
+ env_stack.pop_back();
67
+ }
68
+
69
+ void setValue(const IValue& v, Value* value);
70
+ void delValue(const IValue& var);
71
+ Value* getValue(const IValue& var);
72
+ Value* getOutput(const IValue& var, size_t i);
73
+ bool hasValue(const IValue& var) const;
74
+
75
+ Node* createNode(c10::Symbol op_name, size_t num_outputs);
76
+ void insertNode(Node* node);
77
+
78
+ private:
79
+ using WeakIValue = at::WeakIValue;
80
+
81
+ struct WeakIValueHasher {
82
+ size_t operator()(const WeakIValue& t) const {
83
+ return t.hash();
84
+ }
85
+ };
86
+
87
+ struct WeakIValueEq {
88
+ bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
89
+ return t1.isSameIdentity(t2);
90
+ }
91
+ };
92
+
93
+ using Frame =
94
+ std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
95
+ std::vector<Frame> env_stack;
96
+ };
97
+
98
+ // This is meant to be used as a thread local place, where we can store extra
99
+ // info that gets lost when we call into ATen from Python bindings. One example
100
+ // for when this happens is when we get an IntArrayRef argument with e.g. sizes
101
+ // for view. When tracing, those might be tensors, which let us encode extra
102
+ // data dependencies, but once they get to the ATen call where we actually have
103
+ // the tracing logic, they get converted into a raw IntArrayRef, and we loose
104
+ // all information. To prevent this, we temporarily stash it in here.
105
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
106
+ struct ArgumentStash {
107
+ struct IntArrayRefTrace : std::vector<Value*> {
108
+ IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
109
+ };
110
+
111
+ static bool empty() {
112
+ return stash.intlists.empty();
113
+ }
114
+
115
+ TORCH_API static void stashIntArrayRefElem(
116
+ const std::string& arg_name,
117
+ size_t size,
118
+ size_t idx,
119
+ const Variable& var);
120
+
121
+ static bool hasIntArrayRef(const std::string& arg_name) {
122
+ return stash.intlists.count(arg_name) > 0;
123
+ }
124
+
125
+ static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
126
+ auto info = std::move(stash.intlists.at(arg_name));
127
+ stash.intlists.erase(arg_name);
128
+ return info;
129
+ }
130
+
131
+ // Value stashing: Use these methods to stash arguments which correspond
132
+ // to regular Value*'s in the graph. i.e. they don't require special
133
+ // handling like in the case of IntArrayRefs
134
+ TORCH_API static void stashValue(
135
+ const std::string& arg_name,
136
+ size_t idx,
137
+ const Variable& var,
138
+ const c10::TypePtr& type = nullptr);
139
+
140
+ static bool hasValue(const std::string& arg_name) {
141
+ return stash.values.count(arg_name) > 0;
142
+ }
143
+
144
+ static Value* popValue(const std::string& arg_name) {
145
+ auto info = stash.values.at(arg_name);
146
+ stash.values.erase(arg_name);
147
+ return info;
148
+ }
149
+
150
+ private:
151
+ static thread_local ArgumentStash stash;
152
+ std::unordered_map<std::string, IntArrayRefTrace> intlists;
153
+ std::unordered_map<std::string, Value*> values;
154
+ };
155
+
156
+ // Retrieve or set the current tracing state. Returns a nullptr if tracing is
157
+ // disabled.
158
+ TORCH_API const std::shared_ptr<TracingState>& getTracingState();
159
+ TORCH_API void setTracingState(std::shared_ptr<TracingState> state);
160
+
161
+ inline bool isTracing() {
162
+ return static_cast<bool>(getTracingState());
163
+ }
164
+
165
+ using warn_fn_type = void (*)(const std::string& msg);
166
+ TORCH_API extern const char* WARN_PYTHON_DATAFLOW;
167
+ TORCH_API extern const char* WARN_CONSTRUCTOR;
168
+ TORCH_API extern const char* WARN_RESIZE;
169
+ TORCH_API extern const char* STRICT_TRACER_MSG;
170
+ TORCH_API void _do_warn(const char* _reason, const char* _kind);
171
+ inline void warn(const char* _reason, const char* _kind = nullptr) {
172
+ if (const auto& state = getTracingState()) {
173
+ if (!state->warn)
174
+ return;
175
+ _do_warn(_reason, _kind);
176
+ }
177
+ }
178
+ TORCH_API void setWarn(warn_fn_type fn);
179
+
180
+ struct TORCH_API NoWarn {
181
+ NoWarn() : state(getTracingState()) {
182
+ if (state) {
183
+ prev = state->warn;
184
+ state->warn = false;
185
+ }
186
+ }
187
+ ~NoWarn() {
188
+ if (state) {
189
+ state->warn = prev;
190
+ }
191
+ }
192
+ std::shared_ptr<TracingState> state;
193
+ bool prev{false};
194
+ };
195
+
196
+ struct WithNestedTracingFrame {
197
+ WithNestedTracingFrame() {
198
+ getTracingState()->enterFrame();
199
+ }
200
+
201
+ ~WithNestedTracingFrame() {
202
+ getTracingState()->leaveFrame();
203
+ }
204
+ };
205
+ TORCH_API void recordSourceLocation(Node* n);
206
+ TORCH_API void setRecordSourceLocation(void (*v)(Node*));
207
+
208
+ TORCH_API std::vector<StackEntry> pythonCallstack();
209
+ TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)());
210
+
211
+ // Having finished adding a new 'node' to the graph IR 'setValueTrace'
212
+ // associates this node with an output variable, so that further operations
213
+ // involving this variable know which node in the IR to reference.
214
+ TORCH_API void setValueTrace(const IValue& v, Value* value);
215
+
216
+ TORCH_API void delValueTrace(const IValue& var);
217
+
218
+ TORCH_API std::function<void()> pauseTracing();
219
+
220
+ TORCH_API Value* getValueTrace(const IValue& var);
221
+
222
+ TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace(
223
+ Stack inputs,
224
+ const std::function<Stack(Stack)>& traced_fn,
225
+ std::function<std::string(const Variable&)> var_name_lookup_fn,
226
+ bool strict = true,
227
+ bool force_outplace = false,
228
+ Module* self = nullptr,
229
+ const std::vector<std::string>& argument_names = {});
230
+
231
+ TORCH_API void abandon();
232
+
233
+ // NB: those serve both as an intermediate steps in addInputs below,
234
+ // as well as the overloads that terminate template recursion
235
+ TORCH_API void addInputs(Node* n, const char* name, int64_t value);
236
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value);
237
+ TORCH_API void addInputs(
238
+ Node* n,
239
+ const char* name,
240
+ c10::optional<int64_t> value);
241
+ TORCH_API void addInputs(Node* n, const char* name, bool value);
242
+ TORCH_API void addInputs(
243
+ Node* n,
244
+ const char* name,
245
+ const c10::optional<bool>& value);
246
+ TORCH_API void addInputs(Node* n, const char* name, double value);
247
+ TORCH_API void addInputs(
248
+ Node* n,
249
+ const char* name,
250
+ const c10::optional<double>& value);
251
+ TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value);
252
+ TORCH_API void addInputs(
253
+ Node* n,
254
+ const char* name,
255
+ const c10::optional<at::Scalar>& value);
256
+ TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value);
257
+ TORCH_API void addInputs(
258
+ Node* n,
259
+ const char* name,
260
+ const c10::optional<at::Tensor>& value);
261
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value);
262
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value);
263
+ TORCH_API void addInputs(
264
+ Node* n,
265
+ const char* name,
266
+ c10::optional<c10::SymInt> value);
267
+ TORCH_API void addInputs(
268
+ Node* n,
269
+ const char* name,
270
+ const c10::optional<ArrayRef<int64_t>>& value);
271
+ TORCH_API void addInputs(
272
+ Node* n,
273
+ const char* name,
274
+ const at::OptionalIntArrayRef& opt_value);
275
+ TORCH_API void addInputs(
276
+ Node* n,
277
+ const char* name,
278
+ const at::OptionalSymIntArrayRef& opt_value);
279
+ TORCH_API void addInputs(
280
+ Node* n,
281
+ const char* name,
282
+ ArrayRef<at::Tensor> value,
283
+ bool allow_undefined = false);
284
+ TORCH_API void addInputs(
285
+ Node* n,
286
+ const char* name,
287
+ std::vector<at::Tensor> value,
288
+ bool allow_undefined = false);
289
+ TORCH_API void addInputs(
290
+ Node* n,
291
+ const char* name,
292
+ at::ITensorListRef value,
293
+ bool allow_undefined = false);
294
+ TORCH_API void addInputs(
295
+ Node* n,
296
+ const char* name,
297
+ const List<c10::optional<at::Tensor>>& value);
298
+ TORCH_API void addInputs(
299
+ Node* n,
300
+ const char* name,
301
+ ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value,
302
+ const c10::ClassTypePtr& class_type);
303
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value);
304
+ TORCH_API void addInputs(
305
+ Node* n,
306
+ const char* name,
307
+ const c10::optional<ArrayRef<double>>& value);
308
+ TORCH_API void addInputs(
309
+ Node* n,
310
+ const char* name,
311
+ const c10::string_view value);
312
+ TORCH_API void addInputs(
313
+ Node* n,
314
+ const char* name,
315
+ const c10::optional<c10::string_view>& value);
316
+ TORCH_API void addInputs(Node* n, const char* name, at::Device value);
317
+ TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream);
318
+ TORCH_API void addInputs(Node* n, const char* name, at::Layout value);
319
+ TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value);
320
+ TORCH_API void addInputs(
321
+ Node* n,
322
+ const char* name,
323
+ const c10::optional<at::ScalarType>& value);
324
+ TORCH_API void addInputs(
325
+ Node* n,
326
+ const char* name,
327
+ const c10::optional<at::Device>& value);
328
+ TORCH_API void addInputs(
329
+ Node* n,
330
+ const char* name,
331
+ const c10::optional<at::Layout>& value);
332
+ TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value);
333
+ TORCH_API void addInputs(
334
+ Node* n,
335
+ const char* name,
336
+ c10::optional<at::DimnameList> value);
337
+ TORCH_API void addInputs(
338
+ Node* n,
339
+ const char* name,
340
+ const c10::optional<at::MemoryFormat>& value);
341
+ TORCH_API void addInputs(
342
+ Node* n,
343
+ const char* name,
344
+ const c10::optional<at::Generator>& value);
345
+
346
+ inline void addInputs(
347
+ Node* n,
348
+ const char* name,
349
+ const std::vector<bool>& value) {
350
+ AT_ERROR("Tracing a list of bool type is currently not supported!");
351
+ }
352
+
353
+ template <typename T>
354
+ void addInputs(Node* n, const char* name, ArrayRef<T> value) {
355
+ AT_ERROR("Tracing a list of arbitrary type is currently not supported!");
356
+ }
357
+ template <typename K, typename V>
358
+ void addInputs(
359
+ Node* n,
360
+ const char* name,
361
+ const std::unordered_map<K, V>& value) {
362
+ AT_ERROR("Tracing a dict of arbitrary types is currently not supported!");
363
+ }
364
+
365
+ template <size_t N>
366
+ void addInputs(Node* n, const char* name, std::array<bool, N> value) {
367
+ throw std::runtime_error(
368
+ "Found an unsupported argument type in the JIT tracer. File a bug report.");
369
+ }
370
+
371
+ TORCH_API void addInputs(
372
+ Node* n,
373
+ const char* name,
374
+ const c10::intrusive_ptr<c10::ivalue::Object>& obj);
375
+
376
+ TORCH_API void ensureUniqueIfOutOfPlaced(
377
+ const char* name,
378
+ const at::Tensor& tensor);
379
+ TORCH_API void ensureUniqueIfOutOfPlaced(
380
+ const char* name,
381
+ const c10::optional<at::Tensor>& tensor);
382
+
383
+ template <
384
+ typename T,
385
+ typename = torch::enable_if_t<(
386
+ !std::is_convertible<torch::decay_t<T>, at::TensorList>::value &&
387
+ !std::is_convertible<torch::decay_t<T>, c10::List<at::Tensor>>::value &&
388
+ !std::is_convertible<torch::decay_t<T>, at::Tensor>::value &&
389
+ !std::is_convertible<
390
+ torch::decay_t<T>,
391
+ c10::intrusive_ptr<c10::ivalue::Object>>::value)>>
392
+ void addOutput(Node* node, T&&) {
393
+ AT_ERROR(
394
+ "Found an unsupported argument type ",
395
+ c10::demangle_type<T>(),
396
+ " in the JIT tracer. File a bug report.");
397
+ }
398
+ TORCH_API void addOutput(Node* node, const at::Tensor& tensor);
399
+ TORCH_API void setOutput(Value* value, const at::Tensor& output);
400
+ TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list);
401
+ TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list);
402
+ TORCH_API void addOutput(
403
+ Node* node,
404
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
405
+
406
+ TORCH_API autograd::Variable getSizeOf(
407
+ const autograd::Variable& var,
408
+ int64_t dim);
409
+
410
+ TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
411
+
412
+ } // namespace tracer
413
+ } // namespace jit
414
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ #include <c10/util/SmallVector.h>
9
+ #include <c10/util/intrusive_ptr.h>
10
+ #include <torch/csrc/jit/frontend/lexer.h>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ // Trees are used to represent all forms of TC IR, pre- and post-typechecking.
16
+ // Rather than have a full class hierarchy for all TC statements, trees are a
17
+ // slight variation of Lisp s-expressions. For instance, the expression a*b+1
18
+ // is represented as:
19
+ // (+ (* (ident a) (ident b)) (const 1))
20
+ // Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which
21
+ // define stringValue(). Everything else is a Compound object, which has a
22
+ // 'kind' that is a token from lexer.h's TokenKind enum. Single-character
23
+ // operators like '+' are represented using the character itself (so, add.kind()
24
+ // would be '+'). Each Compound object also contains a list of subtrees and is
25
+ // associated with a SourceRange for error reporting.
26
+ // Memory management of trees is done using intrusive_ptr.
27
+
28
+ struct Tree;
29
+ using TreeRef = c10::intrusive_ptr<Tree>;
30
+ using TreeList = at::SmallVector<TreeRef, 4>;
31
+
32
+ struct Tree : c10::intrusive_ptr_target {
33
+ Tree(int kind_) : kind_(kind_) {}
34
+ int kind() const {
35
+ return kind_;
36
+ }
37
+ virtual bool isAtom() const {
38
+ return true;
39
+ }
40
+ virtual const SourceRange& range() const {
41
+ throw std::runtime_error("is an Atom");
42
+ }
43
+ virtual const std::string& stringValue() const {
44
+ throw std::runtime_error("stringValue can only be called on TK_STRING");
45
+ }
46
+ virtual const TreeList& trees() const {
47
+ static const TreeList empty_trees = {};
48
+ return empty_trees;
49
+ }
50
+ const TreeRef& tree(size_t i) const {
51
+ return trees().at(i);
52
+ }
53
+ virtual TreeRef map(const std::function<TreeRef(TreeRef)>& fn) {
54
+ (void)fn;
55
+ c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
56
+ // from a raw `this` pointer
57
+ // so we need to bump the refcount
58
+ // to account for this ownership
59
+ return TreeRef::reclaim(this);
60
+ }
61
+ template <typename... Args>
62
+ void match(int k, Args&... args) const {
63
+ matchD(k, "unknown", 0, args...);
64
+ }
65
+ template <typename... Args>
66
+ void matchD(int k, const char* filename, int lineno, Args&... args) const {
67
+ std::initializer_list<TreeRef*> vars = {args...};
68
+ matchNumSubtreesD(k, filename, lineno, vars.size(), true);
69
+ size_t i = 0;
70
+ for (TreeRef* v : vars) {
71
+ *v = trees()[i++];
72
+ }
73
+ }
74
+ void matchNumSubtrees(int k, size_t expected_subtrees) {
75
+ return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false);
76
+ }
77
+ void matchNumSubtreesD(
78
+ int k,
79
+ const char* filename,
80
+ int lineno,
81
+ size_t expected_subtrees,
82
+ bool allow_more) const {
83
+ if (kind() != k) {
84
+ std::stringstream ss;
85
+ ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k)
86
+ << "' but found '" << kindToString(kind()) << "'\n";
87
+ range().highlight(ss);
88
+ throw std::runtime_error(ss.str());
89
+ }
90
+ if (trees().size() < expected_subtrees ||
91
+ (!allow_more && trees().size() != expected_subtrees)) {
92
+ std::stringstream ss;
93
+ ss << filename << ":" << lineno << ": expected at least "
94
+ << expected_subtrees << " subtrees, but found only " << trees().size()
95
+ << "\n";
96
+ range().highlight(ss);
97
+ throw std::runtime_error(ss.str());
98
+ }
99
+ }
100
+ ~Tree() override = default;
101
+
102
+ private:
103
+ int kind_;
104
+ };
105
+
106
+ struct String : public Tree {
107
+ String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {}
108
+ const std::string& stringValue() const override {
109
+ return value_;
110
+ }
111
+ template <typename... Args>
112
+ static TreeRef create(Args&&... args) {
113
+ return c10::make_intrusive<String>(std::forward<Args>(args)...);
114
+ }
115
+
116
+ private:
117
+ std::string value_;
118
+ };
119
+
120
+ static SourceRange mergeRanges(SourceRange c, const TreeList& others) {
121
+ for (const auto& t : others) {
122
+ if (t->isAtom())
123
+ continue;
124
+ size_t s = std::min(c.start(), t->range().start());
125
+ size_t e = std::max(c.end(), t->range().end());
126
+ c = SourceRange(c.source(), s, e);
127
+ }
128
+ return c;
129
+ }
130
+
131
+ struct Compound : public Tree {
132
+ Compound(int kind, SourceRange range)
133
+ : Tree(kind), range_(std::move(range)) {}
134
+ Compound(int kind, const SourceRange& range_, TreeList&& trees_)
135
+ : Tree(kind),
136
+ range_(mergeRanges(range_, trees_)),
137
+ trees_(std::move(trees_)) {}
138
+ const TreeList& trees() const override {
139
+ return trees_;
140
+ }
141
+ static TreeRef create(
142
+ int kind,
143
+ const SourceRange& range_,
144
+ TreeList&& trees_) {
145
+ return c10::make_intrusive<Compound>(kind, range_, std::move(trees_));
146
+ }
147
+ bool isAtom() const override {
148
+ return false;
149
+ }
150
+ TreeRef map(const std::function<TreeRef(TreeRef)>& fn) override {
151
+ TreeList ret;
152
+ for (auto& t : trees()) {
153
+ ret.push_back(fn(t));
154
+ }
155
+ return Compound::create(kind(), range(), std::move(ret));
156
+ }
157
+
158
+ const SourceRange& range() const override {
159
+ return range_;
160
+ }
161
+
162
+ private:
163
+ SourceRange range_;
164
+ TreeList trees_;
165
+ };
166
+
167
+ // tree pretty printer
168
+ struct pretty_tree {
169
+ pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {}
170
+ const TreeRef& tree;
171
+ size_t col;
172
+ std::unordered_map<TreeRef, std::string> flat_strings;
173
+ const std::string& get_flat(const TreeRef& t) {
174
+ auto it = flat_strings.find(t);
175
+ if (it != flat_strings.end())
176
+ return it->second;
177
+
178
+ std::stringstream out;
179
+ switch (t->kind()) {
180
+ case TK_STRING:
181
+ out << t->stringValue();
182
+ break;
183
+ default:
184
+ out << "(" << kindToString(t->kind());
185
+ for (const auto& e : t->trees()) {
186
+ out << " " << get_flat(e);
187
+ }
188
+ out << ")";
189
+ break;
190
+ }
191
+ auto it_ = flat_strings.emplace(t, out.str());
192
+ return it_.first->second;
193
+ }
194
+ void print(std::ostream& out, const TreeRef& t, int indent) {
195
+ const std::string& s = get_flat(t);
196
+ if (indent + s.size() < col || t->isAtom()) {
197
+ out << s;
198
+ return;
199
+ }
200
+ std::string k = kindToString(t->kind());
201
+ out << "(" << k;
202
+ for (const auto& e : t->trees()) {
203
+ out << "\n" << std::string(indent + 2, ' ');
204
+ print(out, e, indent + 2);
205
+ }
206
+ out << ")";
207
+ }
208
+ };
209
+
210
+ static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) {
211
+ t_.print(out, t_.tree, 0);
212
+ return out << std::endl;
213
+ }
214
+
215
+ static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) {
216
+ return out << pretty_tree(t);
217
+ }
218
+
219
+ } // namespace jit
220
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h ADDED
@@ -0,0 +1,1275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/string_utils.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/strtod.h>
5
+ #include <torch/csrc/jit/frontend/tree.h>
6
+
7
+ #include <c10/util/complex.h>
8
+ #include <functional>
9
+ #include <iostream>
10
+ #include <string>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // clang-format off
17
+ // TreeView provides a statically-typed way to traverse the tree, which should
18
+ // be formed according to the grammar below.
19
+ //
20
+ // A few notes on types and their aliases:
21
+ // - List<T> is really a Tree with kind TK_LIST and elements as subtrees
22
+ // - Maybe<T> is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T
23
+ // - Builtin types are: Ident (TK_IDENT), String (TK_STRING)
24
+ //
25
+ // Param = Param(Maybe<Expr> type, Ident name) TK_PARAM
26
+ //
27
+ // Decl = Decl(List<Param> params, Maybe<Expr> return_type) TK_DECL
28
+ // Def = Def(Ident name, Decl decl, List<Stmt> body) TK_DEF
29
+ // ClassDef = ClassDef(Ident name, TK_CLASS_DEF
30
+ // Maybe<Expr> superclass,
31
+ // List<Stmt> body)
32
+ //
33
+ // Stmt = If(Expr cond, List<Stmt> true_body, List<Stmt> false_body) TK_IF
34
+ // | For(List<Expr> targets, List<Expr> iters, List<Stmt> body) TK_FOR
35
+ // | While(Expr cond, List<Stmt> body) TK_WHILE
36
+ // | Global(List<Ident> idents) TK_GLOBAL
37
+ // -- NB: the only type of Expr's allowed on lhs are Var
38
+ // Or a tuple containing Var with an optional terminating Starred
39
+ // | Assign(Expr lhs, Maybe<Expr> rhs, Maybe<Expr> type) TK_ASSIGN
40
+ // | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN
41
+ // | Return(List<Expr> values) TK_RETURN
42
+ // | ExprStmt(List<Expr> expr) TK_EXPR_STMT
43
+ // | Raise(Expr expr) TK_RAISE
44
+ // | Def TK_DEF
45
+ // | With(List<WithItem> targets, List<Stmt> body) TK_WITH
46
+ //
47
+ // Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR
48
+ // | BinOp(Expr lhs, Expr rhs)
49
+ // | And TK_AND
50
+ // | Or TK_OR
51
+ // | Lt '<'
52
+ // | Gt '>'
53
+ // | Eq TK_EQ
54
+ // | Le TK_LE
55
+ // | Ge TK_GE
56
+ // | Ne TK_NE
57
+ // | Is TK_IS
58
+ // | IsNot TK_ISNOT
59
+ // | Add '+'
60
+ // | Sub '-'
61
+ // | Mul '*'
62
+ // | Div '/'
63
+ // | Mod '%'
64
+ // | MatMult '@'
65
+ // | Pow TK_POW
66
+ // | UnaryOp(Expr expr)
67
+ // | Not TK_NOT
68
+ // | USub '-'
69
+ // | Const(String value) TK_CONST
70
+ // -- NB: x.name(y) is desugared into name(x, y)
71
+ // | Apply(Ident name, List<Expr> args, List<Attribute> kwargs) TK_APPLY
72
+ // | Select(Expr value, Ident selector) '.'
73
+ // | Subscript(Expr value, List<Expr> subscript_exprs) TK_SUBSCRIPT
74
+ // | SliceExpr(Maybe<Expr> start, Maybe<Expr> end) TK_SLICE_EXPR
75
+ // | Var(Ident name) TK_VAR
76
+ // | ListLiteral(List<Expr> inputs) TK_LIST_LITERAL
77
+ // | TupleLiteral(List<Expr> inputs) TK_TUPLE_LITERAL
78
+ // | Starred(Expr expr) TK_STARRED
79
+ // | WithItem(Expr target, Maybe<Var> var) TK_WITH_ITEM
80
+ // -- NB: only allowed expressions are Const or List(Const)
81
+ // (List as a value, not type constructor)
82
+ // Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE
83
+ //
84
+ // AugAssignKind =
85
+ // | Add() TK_PLUS_EQ
86
+ // | Sub() TK_MINUS_EQ
87
+ // | Mul() TK_TIMES_EQ
88
+ // | Div() TK_DIV_EQ
89
+ // | Mod() TK_MOD_EQ
90
+ //
91
+
92
+ // Each subclass of TreeView should provide:
93
+ // 1. Constructor that takes a TreeRef, and checks that it's of the right type.
94
+ // 2. Accessors that get underlying information out of the object. If they
95
+ // return subtrees, they should wrap them in appropriate views too.
96
+ // 3. Static method 'create' that creates the underlying TreeRef object
97
+ // for every TreeRef kind that has a TreeView, the parser always uses
98
+ // (e.g.) Ident::create rather than Compound::Create, this means that
99
+ // changes to the structure of Ident are always made right here rather
100
+ // than both in the parser and in this code.
101
+ // XXX: these structs should have no fields to prevent slicing when passing by value
102
+ // clang-format on
103
+ struct TreeView {
104
+ explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {}
105
+ TreeRef tree() const {
106
+ return tree_;
107
+ }
108
+ const SourceRange& range() const {
109
+ return tree_->range();
110
+ }
111
+ operator TreeRef() const {
112
+ return tree_;
113
+ }
114
+ const TreeRef& get() const {
115
+ return tree_;
116
+ }
117
+ int kind() const {
118
+ return tree_->kind();
119
+ }
120
+ void dump() const {
121
+ std::cout << tree_;
122
+ }
123
+
124
+ protected:
125
+ const TreeRef& subtree(size_t i) const {
126
+ return tree_->trees().at(i);
127
+ }
128
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
129
+ TreeRef tree_;
130
+ };
131
+
132
+ template <typename T>
133
+ struct ListIterator {
134
+ ListIterator(TreeList::const_iterator it) : it(it) {}
135
+ bool operator!=(const ListIterator& rhs) const {
136
+ return it != rhs.it;
137
+ }
138
+ bool operator==(const ListIterator& rhs) const {
139
+ return it == rhs.it;
140
+ }
141
+ T operator*() const {
142
+ return T(*it);
143
+ }
144
+ ListIterator& operator+=(std::ptrdiff_t n) {
145
+ it += n;
146
+ return *this;
147
+ }
148
+ ListIterator& operator++() {
149
+ ++it;
150
+ return *this;
151
+ }
152
+ ListIterator& operator--() {
153
+ --it;
154
+ return *this;
155
+ }
156
+
157
+ private:
158
+ TreeList::const_iterator it;
159
+ };
160
+
161
+ template <typename T>
162
+ struct List : public TreeView {
163
+ using iterator = ListIterator<T>;
164
+ using const_iterator = ListIterator<T>;
165
+
166
+ List(const TreeRef& tree) : TreeView(tree) {
167
+ tree->match(TK_LIST);
168
+ // Iterate over list to temporarily instantiate Ts that will check the type
169
+ for (const T& elem : *this) {
170
+ (void)elem; // silence unused warning
171
+ }
172
+ }
173
+ iterator begin() const {
174
+ return iterator(tree_->trees().begin());
175
+ }
176
+ iterator end() const {
177
+ return iterator(tree_->trees().end());
178
+ }
179
+ bool empty() const {
180
+ return tree_->trees().begin() == tree_->trees().end();
181
+ }
182
+ T operator[](size_t i) const {
183
+ return T(subtree(i));
184
+ }
185
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
186
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
187
+ }
188
+ static List create(const SourceRange& range, const std::vector<T>& subtrees) {
189
+ TreeList type_erased_sub{subtrees.begin(), subtrees.end()};
190
+ return List(Compound::create(TK_LIST, range, std::move(type_erased_sub)));
191
+ }
192
+ static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) {
193
+ return List(Compound::create(TK_LIST, range, std::move(subtrees)));
194
+ }
195
+ size_t size() const {
196
+ return tree_->trees().size();
197
+ }
198
+ };
199
+
200
+ template <typename T>
201
+ struct Maybe : public TreeView {
202
+ explicit Maybe(const TreeRef& tree) : TreeView(tree) {
203
+ tree_->match(TK_OPTION);
204
+ if (tree_->trees().size() > 1)
205
+ throw ErrorReport(tree) << "Maybe trees can have at most one subtree";
206
+ }
207
+ /* implicit */ Maybe(const T& tree) : TreeView(tree) {}
208
+ bool present() const {
209
+ return tree_->trees().size() > 0;
210
+ }
211
+ T get() const {
212
+ return T(tree_->trees().at(0));
213
+ }
214
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
215
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
216
+ }
217
+ static Maybe<T> create(const SourceRange& range) {
218
+ return Maybe<T>(Compound::create(TK_OPTION, range, {}));
219
+ }
220
+ static Maybe<T> create(const SourceRange& range, const T& value) {
221
+ return Maybe<T>(Compound::create(TK_OPTION, range, {value}));
222
+ }
223
+ };
224
+
225
+ struct Ident : public TreeView {
226
+ explicit Ident(const TreeRef& tree) : TreeView(tree) {
227
+ tree_->match(TK_IDENT);
228
+ }
229
+ const std::string& name() const {
230
+ return subtree(0)->stringValue();
231
+ }
232
+ static Ident create(const SourceRange& range, std::string name) {
233
+ return Ident(
234
+ Compound::create(TK_IDENT, range, {String::create(std::move(name))}));
235
+ }
236
+ };
237
+
238
+ ////////////////////////////////////////////////////////////////////////////////
239
+ // Base types (production LHS)
240
+ ////////////////////////////////////////////////////////////////////////////////
241
+
242
+ struct Stmt : public TreeView {
243
+ explicit Stmt(const TreeRef& tree) : TreeView(tree) {
244
+ switch (tree->kind()) {
245
+ case TK_IF:
246
+ case TK_FOR:
247
+ case TK_WHILE:
248
+ case TK_GLOBAL:
249
+ case TK_ASSIGN:
250
+ case TK_AUG_ASSIGN:
251
+ case TK_RETURN:
252
+ case TK_EXPR_STMT:
253
+ case TK_RAISE:
254
+ case TK_ASSERT:
255
+ case TK_PASS:
256
+ case TK_BREAK:
257
+ case TK_DELETE:
258
+ case TK_CONTINUE:
259
+ case TK_DEF:
260
+ case TK_WITH:
261
+ return;
262
+ default:
263
+ throw ErrorReport(tree)
264
+ << kindToString(tree->kind()) << " is not a valid Stmt";
265
+ }
266
+ }
267
+ };
268
+
269
+ struct Expr : public TreeView {
270
+ explicit Expr(const TreeRef& tree) : TreeView(tree) {
271
+ switch (tree->kind()) {
272
+ case TK_IF_EXPR:
273
+ case TK_AND:
274
+ case TK_OR:
275
+ case '<':
276
+ case '>':
277
+ case TK_IS:
278
+ case TK_ISNOT:
279
+ case TK_EQ:
280
+ case TK_LE:
281
+ case TK_GE:
282
+ case TK_NE:
283
+ case '+':
284
+ case '-':
285
+ case TK_UNARY_MINUS:
286
+ case '~':
287
+ case '*':
288
+ case TK_STARRED:
289
+ case '/':
290
+ case '%':
291
+ case TK_NOT:
292
+ case TK_CONST:
293
+ case TK_STRINGLITERAL:
294
+ case TK_TRUE:
295
+ case TK_FALSE:
296
+ case TK_NONE:
297
+ case TK_NONE_TYPE:
298
+ case TK_CAST:
299
+ case TK_APPLY:
300
+ case '.':
301
+ case TK_SUBSCRIPT:
302
+ case TK_SLICE_EXPR:
303
+ case TK_VAR:
304
+ case TK_LIST_LITERAL:
305
+ case TK_TUPLE_LITERAL:
306
+ case TK_DICT_LITERAL:
307
+ case '@':
308
+ case TK_POW:
309
+ case TK_LSHIFT:
310
+ case TK_RSHIFT:
311
+ case TK_FLOOR_DIV:
312
+ case '&':
313
+ case '^':
314
+ case '|':
315
+ case TK_LIST_COMP:
316
+ case TK_DICT_COMP:
317
+ case TK_DOTS:
318
+ case TK_IN:
319
+ case TK_WITH_ITEM:
320
+ return;
321
+ default:
322
+ throw ErrorReport(tree)
323
+ << kindToString(tree->kind()) << " is not a valid Expr";
324
+ }
325
+ }
326
+ };
327
+
328
+ ////////////////////////////////////////////////////////////////////////////////
329
+ // Helper nodes (mostly for function arguments)
330
+ ////////////////////////////////////////////////////////////////////////////////
331
+
332
+ struct Attribute : public TreeView {
333
+ explicit Attribute(const TreeRef& tree) : TreeView(tree) {
334
+ tree_->match(TK_ATTRIBUTE);
335
+ }
336
+ Ident name() const {
337
+ return Ident(subtree(0));
338
+ }
339
+ Expr value() const {
340
+ return Expr(subtree(1));
341
+ }
342
+ static Attribute create(
343
+ const SourceRange& range,
344
+ const Ident& name,
345
+ const TreeRef& value) {
346
+ return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value}));
347
+ }
348
+ };
349
+
350
+ struct Param : public TreeView {
351
+ explicit Param(const TreeRef& tree) : TreeView(tree) {
352
+ tree_->match(TK_PARAM);
353
+ }
354
+ static Param create(
355
+ const SourceRange& range,
356
+ const Ident& ident,
357
+ const Maybe<Expr>& type,
358
+ const Maybe<Expr>& def,
359
+ bool kwarg_only) {
360
+ TreeRef kwarg_only_tree =
361
+ Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {});
362
+ return Param(Compound::create(
363
+ TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)}));
364
+ }
365
+ Ident ident() const {
366
+ return Ident(subtree(0));
367
+ }
368
+ Maybe<Expr> type() const {
369
+ return Maybe<Expr>(subtree(1));
370
+ }
371
+ Maybe<Expr> defaultValue() const {
372
+ return Maybe<Expr>(subtree(2));
373
+ }
374
+ bool kwarg_only() const {
375
+ return TK_TRUE == subtree(3)->kind();
376
+ }
377
+ Param withType(const Maybe<Expr>& typ) const {
378
+ return Param::create(range(), ident(), typ, defaultValue(), kwarg_only());
379
+ }
380
+ };
381
+
382
+ ////////////////////////////////////////////////////////////////////////////////
383
+ // Top level definitions
384
+ ////////////////////////////////////////////////////////////////////////////////
385
+
386
+ struct Decl : public TreeView {
387
+ explicit Decl(const TreeRef& tree) : TreeView(tree) {
388
+ tree->match(TK_DECL);
389
+ }
390
+ List<Param> params() const {
391
+ return List<Param>(subtree(0));
392
+ }
393
+ Maybe<Expr> return_type() const {
394
+ return Maybe<Expr>(subtree(1));
395
+ }
396
+ static Decl create(
397
+ const SourceRange& range,
398
+ const List<Param>& params,
399
+ const Maybe<Expr>& return_type) {
400
+ return Decl(Compound::create(TK_DECL, range, {params, return_type}));
401
+ }
402
+ };
403
+
404
+ struct Def : public TreeView {
405
+ explicit Def(const TreeRef& tree) : TreeView(tree) {
406
+ tree->match(TK_DEF);
407
+ }
408
+ Def withName(std::string new_name) const {
409
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
410
+ return create(range(), new_ident, decl(), statements());
411
+ }
412
+ Def withDecl(const Decl& decl) const {
413
+ return create(range(), name(), decl, statements());
414
+ }
415
+ Ident name() const {
416
+ return Ident(subtree(0));
417
+ }
418
+ Decl decl() const {
419
+ return Decl(subtree(1));
420
+ }
421
+ List<Stmt> statements() const {
422
+ return List<Stmt>(subtree(2));
423
+ }
424
+ static Def create(
425
+ const SourceRange& range,
426
+ const Ident& name,
427
+ const Decl& decl,
428
+ const List<Stmt>& stmts) {
429
+ return Def(Compound::create(TK_DEF, range, {name, decl, stmts}));
430
+ }
431
+ };
432
+
433
+ // Property represents a named attribute combined with a getter and setter
434
+ // method to access and mutate that attribute.
435
+ struct Property : public TreeView {
436
+ explicit Property(const TreeRef& tree) : TreeView(tree) {
437
+ tree->match(TK_PROP);
438
+ }
439
+ Ident name() const {
440
+ return Ident(subtree(0));
441
+ }
442
+ Def getter() const {
443
+ return Def(subtree(1));
444
+ }
445
+ Maybe<Def> setter() const {
446
+ return Maybe<Def>(subtree(2));
447
+ }
448
+ static Property create(
449
+ const SourceRange& range,
450
+ const Ident& name,
451
+ const Def& getter,
452
+ const Maybe<Def>& setter) {
453
+ return Property(Compound::create(TK_PROP, range, {name, getter, setter}));
454
+ }
455
+ };
456
+
457
+ struct Assign;
458
+
459
+ struct ClassDef : public TreeView {
460
+ explicit ClassDef(const TreeRef& tree) : TreeView(tree) {
461
+ tree->match(TK_CLASS_DEF);
462
+ }
463
+ explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) {
464
+ tree_->match(TK_CLASS_DEF);
465
+ }
466
+ ClassDef withName(std::string new_name) const {
467
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
468
+ return create(range(), new_ident, superclass(), body());
469
+ }
470
+ Ident name() const {
471
+ return Ident(subtree(0));
472
+ }
473
+ Maybe<Expr> superclass() const {
474
+ return Maybe<Expr>(subtree(1));
475
+ }
476
+ List<Stmt> body() const {
477
+ return List<Stmt>(subtree(2));
478
+ }
479
+ Maybe<List<Property>> properties() const {
480
+ return Maybe<List<Property>>(subtree(3));
481
+ }
482
+ Maybe<List<Assign>> assigns() const {
483
+ return Maybe<List<Assign>>(subtree(4));
484
+ }
485
+ static ClassDef create(
486
+ const SourceRange& range,
487
+ const Ident& name,
488
+ const Maybe<Expr>& superclass,
489
+ const List<Stmt>& body) {
490
+ return ClassDef(Compound::create(
491
+ TK_CLASS_DEF,
492
+ range,
493
+ {name,
494
+ superclass,
495
+ body,
496
+ Maybe<List<Property>>::create(range),
497
+ Maybe<List<Assign>>::create(range)}));
498
+ }
499
+ static ClassDef create(
500
+ const SourceRange& range,
501
+ const Ident& name,
502
+ const Maybe<Expr>& superclass,
503
+ const List<Stmt>& body,
504
+ const List<Property>& properties,
505
+ const List<Assign>& assigns);
506
+ };
507
+
508
+ TORCH_API std::vector<std::string> getUnresolvedClassAttributes(
509
+ const ClassDef& def);
510
+
511
+ ////////////////////////////////////////////////////////////////////////////////
512
+ // Statements
513
+ ////////////////////////////////////////////////////////////////////////////////
514
+
515
+ struct If : public Stmt {
516
+ explicit If(const TreeRef& tree) : Stmt(tree) {
517
+ tree_->match(TK_IF);
518
+ }
519
+ Expr cond() const {
520
+ return Expr(subtree(0));
521
+ }
522
+ List<Stmt> trueBranch() const {
523
+ return List<Stmt>(subtree(1));
524
+ }
525
+ List<Stmt> falseBranch() const {
526
+ return List<Stmt>(subtree(2));
527
+ }
528
+ If withNewBranches(
529
+ const List<Stmt>& true_branch,
530
+ const List<Stmt>& false_branch) const {
531
+ return create(range(), cond(), true_branch, false_branch);
532
+ }
533
+ static If create(
534
+ const SourceRange& range,
535
+ const Expr& cond,
536
+ const List<Stmt>& true_branch,
537
+ const List<Stmt>& false_branch) {
538
+ return If(
539
+ Compound::create(TK_IF, range, {cond, true_branch, false_branch}));
540
+ }
541
+ };
542
+
543
+ struct While : public Stmt {
544
+ explicit While(const TreeRef& tree) : Stmt(tree) {
545
+ tree_->match(TK_WHILE);
546
+ }
547
+ Expr cond() const {
548
+ return Expr(subtree(0));
549
+ }
550
+ List<Stmt> body() const {
551
+ return List<Stmt>(subtree(1));
552
+ }
553
+ static While create(
554
+ const SourceRange& range,
555
+ const Expr& cond,
556
+ const List<Stmt>& body) {
557
+ return While(Compound::create(TK_WHILE, range, {cond, body}));
558
+ }
559
+ };
560
+
561
+ struct For : public Stmt {
562
+ explicit For(const TreeRef& tree) : Stmt(tree) {
563
+ tree->match(TK_FOR);
564
+ }
565
+ List<Expr> targets() const {
566
+ return List<Expr>(subtree(0));
567
+ }
568
+ List<Expr> itrs() const {
569
+ return List<Expr>(subtree(1));
570
+ }
571
+ List<Stmt> body() const {
572
+ return List<Stmt>(subtree(2));
573
+ }
574
+ static For create(
575
+ const SourceRange& range,
576
+ const List<Expr>& targets,
577
+ const List<Expr>& itrs,
578
+ const List<Stmt>& body) {
579
+ return For(Compound::create(TK_FOR, range, {targets, itrs, body}));
580
+ }
581
+ };
582
+
583
+ // TODO: supports only single comprehension for now
584
+ struct ListComp : public Expr {
585
+ explicit ListComp(const TreeRef& tree) : Expr(tree) {
586
+ tree->match(TK_LIST_COMP);
587
+ }
588
+ Expr elt() const {
589
+ return Expr(subtree(0));
590
+ }
591
+ Expr target() const {
592
+ return Expr(subtree(1));
593
+ }
594
+ Expr iter() const {
595
+ return Expr(subtree(2));
596
+ }
597
+ // TODO: no ifs for now
598
+ static ListComp create(
599
+ const SourceRange& range,
600
+ const Expr& elt,
601
+ const Expr& target,
602
+ const Expr& iter) {
603
+ return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter}));
604
+ }
605
+ };
606
+
607
+ // TODO: supports only single comprehension for now
608
+ struct DictComp : public Expr {
609
+ explicit DictComp(const TreeRef& tree) : Expr(tree) {
610
+ tree->match(TK_DICT_COMP);
611
+ }
612
+ Expr key() const {
613
+ return Expr(subtree(0));
614
+ }
615
+ Expr value() const {
616
+ return Expr(subtree(1));
617
+ }
618
+ Expr target() const {
619
+ return Expr(subtree(2));
620
+ }
621
+ Expr iter() const {
622
+ return Expr(subtree(3));
623
+ }
624
+ // TODO: no ifs for now
625
+ static DictComp create(
626
+ const SourceRange& range,
627
+ const Expr& key,
628
+ const Expr& value,
629
+ const Expr& target,
630
+ const Expr& iter) {
631
+ return DictComp(
632
+ Compound::create(TK_DICT_COMP, range, {key, value, target, iter}));
633
+ }
634
+ };
635
+
636
+ struct Global : public Stmt {
637
+ explicit Global(const TreeRef& tree) : Stmt(tree) {
638
+ tree_->match(TK_GLOBAL);
639
+ }
640
+ List<Ident> names() {
641
+ return List<Ident>(subtree(0));
642
+ }
643
+ static Global create(const SourceRange& range, const List<Ident>& names) {
644
+ return Global(Compound::create(TK_GLOBAL, range, {names}));
645
+ }
646
+ };
647
+
648
+ struct AugAssignKind : public TreeView {
649
+ explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) {
650
+ switch (tree->kind()) {
651
+ case '+':
652
+ case '-':
653
+ case '*':
654
+ case '/':
655
+ case '%':
656
+ case '|':
657
+ case '&':
658
+ case '^':
659
+ case TK_POW:
660
+ case TK_LSHIFT:
661
+ case TK_RSHIFT:
662
+ return;
663
+ default:
664
+ throw ErrorReport(tree) << "is not a valid AugAssignKind";
665
+ }
666
+ }
667
+ };
668
+
669
+ // Augmented assignment, like "foo += bar"
670
+ struct AugAssign : public Stmt {
671
+ explicit AugAssign(const TreeRef& tree) : Stmt(tree) {
672
+ tree_->match(TK_AUG_ASSIGN);
673
+ }
674
+ static AugAssign create(
675
+ const SourceRange& range,
676
+ const Expr& lhs,
677
+ const AugAssignKind& aug_op,
678
+ const Expr& rhs) {
679
+ return AugAssign(
680
+ Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs}));
681
+ }
682
+ Expr lhs() const {
683
+ return Expr(subtree(0));
684
+ }
685
+ int aug_op() const {
686
+ return subtree(1)->kind();
687
+ }
688
+ Expr rhs() const {
689
+ return Expr(subtree(2));
690
+ }
691
+ };
692
+
693
+ struct Assign : public Stmt {
694
+ explicit Assign(const TreeRef& tree) : Stmt(tree) {
695
+ tree_->match(TK_ASSIGN);
696
+ }
697
+ static Assign create(
698
+ const SourceRange& range,
699
+ const List<Expr>& lhs,
700
+ const Maybe<Expr>& rhs,
701
+ const Maybe<Expr>& type) {
702
+ return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type}));
703
+ }
704
+
705
+ List<Expr> lhs_list() const {
706
+ return List<Expr>(subtree(0));
707
+ }
708
+
709
+ Expr lhs() const {
710
+ const auto& li = lhs_list();
711
+ TORCH_INTERNAL_ASSERT(li.size() == 1);
712
+ return *li.begin();
713
+ }
714
+
715
+ Maybe<Expr> rhs() const {
716
+ return Maybe<Expr>(subtree(1));
717
+ }
718
+
719
+ Maybe<Expr> type() const {
720
+ return Maybe<Expr>(subtree(2));
721
+ }
722
+ };
723
+
724
+ struct Return : public Stmt {
725
+ explicit Return(const TreeRef& tree) : Stmt(tree) {
726
+ tree_->match(TK_RETURN);
727
+ }
728
+ Expr expr() const {
729
+ return Expr(subtree(0));
730
+ }
731
+ static Return create(const SourceRange& range, const Expr& value) {
732
+ return Return(Compound::create(TK_RETURN, range, {value}));
733
+ }
734
+ };
735
+
736
+ struct Raise : public Stmt {
737
+ explicit Raise(const TreeRef& tree) : Stmt(tree) {
738
+ tree_->match(TK_RAISE);
739
+ }
740
+ Expr expr() const {
741
+ return Expr(subtree(0));
742
+ }
743
+ static Raise create(const SourceRange& range, const Expr& expr) {
744
+ return Raise(Compound::create(TK_RAISE, range, {expr}));
745
+ }
746
+ };
747
+
748
+ struct Assert : public Stmt {
749
+ explicit Assert(const TreeRef& tree) : Stmt(tree) {
750
+ tree_->match(TK_ASSERT);
751
+ }
752
+ Expr test() const {
753
+ return Expr(subtree(0));
754
+ }
755
+ Maybe<Expr> msg() const {
756
+ return Maybe<Expr>(subtree(1));
757
+ }
758
+ static Assert create(
759
+ const SourceRange& range,
760
+ const Expr& test,
761
+ const Maybe<Expr>& msg) {
762
+ return Assert(Compound::create(TK_ASSERT, range, {test, msg}));
763
+ }
764
+ };
765
+
766
+ struct Pass : public Stmt {
767
+ explicit Pass(const TreeRef& tree) : Stmt(tree) {
768
+ tree_->match(TK_PASS);
769
+ }
770
+ static Pass create(const SourceRange& range) {
771
+ return Pass(Compound::create(TK_PASS, range, {}));
772
+ }
773
+ };
774
+
775
+ struct Dots : public Expr {
776
+ explicit Dots(const TreeRef& tree) : Expr(tree) {
777
+ tree_->match(TK_DOTS);
778
+ }
779
+ static Dots create(const SourceRange& range) {
780
+ return Dots(Compound::create(TK_DOTS, range, {}));
781
+ }
782
+ };
783
+
784
+ struct Break : public Stmt {
785
+ explicit Break(const TreeRef& tree) : Stmt(tree) {
786
+ tree_->match(TK_BREAK);
787
+ }
788
+ static Break create(const SourceRange& range) {
789
+ return Break(Compound::create(TK_BREAK, range, {}));
790
+ }
791
+ };
792
+
793
+ struct Continue : public Stmt {
794
+ explicit Continue(const TreeRef& tree) : Stmt(tree) {
795
+ tree_->match(TK_CONTINUE);
796
+ }
797
+ static Continue create(const SourceRange& range) {
798
+ return Continue(Compound::create(TK_CONTINUE, range, {}));
799
+ }
800
+ };
801
+
802
+ struct ExprStmt : public Stmt {
803
+ explicit ExprStmt(const TreeRef& tree) : Stmt(tree) {
804
+ tree_->match(TK_EXPR_STMT);
805
+ }
806
+ Expr expr() {
807
+ return Expr(subtree(0));
808
+ }
809
+ static ExprStmt create(const SourceRange& range, const Expr& list) {
810
+ return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list}));
811
+ }
812
+ };
813
+
814
+ ////////////////////////////////////////////////////////////////////////////////
815
+ // Expressions
816
+ ////////////////////////////////////////////////////////////////////////////////
817
+
818
+ struct BinOp : public Expr {
819
+ explicit BinOp(const TreeRef& tree) : Expr(tree) {
820
+ switch (tree->kind()) {
821
+ case TK_AND:
822
+ case TK_OR:
823
+ case '<':
824
+ case '>':
825
+ case TK_IS:
826
+ case TK_ISNOT:
827
+ case TK_EQ:
828
+ case TK_LE:
829
+ case TK_GE:
830
+ case TK_NE:
831
+ case '+':
832
+ case '*':
833
+ case '/':
834
+ case '-':
835
+ case '@':
836
+ case TK_POW:
837
+ case TK_LSHIFT:
838
+ case TK_RSHIFT:
839
+ case '%':
840
+ case '&':
841
+ case '^':
842
+ case '|':
843
+ case TK_FLOOR_DIV:
844
+ case TK_IN:
845
+ if (tree->trees().size() != 2)
846
+ throw ErrorReport(tree)
847
+ << "BinOp expected 2 subtrees, found " << tree->trees().size();
848
+ return;
849
+ default:
850
+ throw ErrorReport(tree)
851
+ << kindToString(tree->kind()) << " is not a valid BinOp";
852
+ }
853
+ }
854
+ Expr lhs() const {
855
+ return Expr(subtree(0));
856
+ }
857
+ Expr rhs() const {
858
+ return Expr(subtree(1));
859
+ }
860
+ static BinOp create(
861
+ const SourceRange& range,
862
+ int kind,
863
+ const Expr& lhs,
864
+ const Expr& rhs) {
865
+ return BinOp(Compound::create(kind, range, {lhs, rhs}));
866
+ }
867
+ };
868
+
869
+ struct UnaryOp : public Expr {
870
+ explicit UnaryOp(const TreeRef& tree) : Expr(tree) {
871
+ switch (tree->kind()) {
872
+ case TK_UNARY_MINUS:
873
+ case '~':
874
+ case TK_NOT:
875
+ if (tree->trees().size() != 1)
876
+ throw ErrorReport(tree)
877
+ << "UnaryOp expected 1 subtree, found " << tree->trees().size();
878
+ return;
879
+ default:
880
+ throw ErrorReport(tree)
881
+ << kindToString(tree->kind()) << " is not a valid UnaryOp";
882
+ }
883
+ }
884
+ static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) {
885
+ return UnaryOp(Compound::create(kind, range, {expr}));
886
+ }
887
+ };
888
+
889
+ struct Const : public Expr {
890
+ explicit Const(const TreeRef& tree) : Expr(tree) {
891
+ tree_->matchNumSubtrees(TK_CONST, 1);
892
+ }
893
+ bool isFloatingPoint() const {
894
+ if (isComplex())
895
+ return false;
896
+
897
+ bool is_inf = subtree(0)->stringValue() == "inf";
898
+ return is_inf ||
899
+ subtree(0)->stringValue().find_first_of(".eE") != std::string::npos;
900
+ }
901
+ bool isIntegral() const {
902
+ return !isFloatingPoint() && !isComplex();
903
+ }
904
+ bool isComplex() const {
905
+ return subtree(0)->stringValue().find_first_of('j') != std::string::npos;
906
+ }
907
+ int64_t asIntegral() const {
908
+ try {
909
+ // NOLINTNEXTLINE(modernize-use-nullptr)
910
+ return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0);
911
+ } catch (const std::out_of_range&) {
912
+ throw ErrorReport(range()) << "Integral constant out of range "
913
+ "(must fit in a signed 64 bit integer)";
914
+ }
915
+ }
916
+ double asFloatingPoint() const {
917
+ // We can't pass in nullptr as the dummy pointer gets dereferenced for
918
+ // Android version of strtod_c().
919
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
920
+ char* dummy;
921
+ return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy);
922
+ }
923
+ c10::complex<double> asComplex() const {
924
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
925
+ char* dummy;
926
+ auto str = subtree(0)->stringValue();
927
+ // Complex numbers (a+bj, where a is non-zero) are parsed as an addition
928
+ // between float/int a and a complex number "bj". When a is 0, a complex
929
+ // number bj is created as above. So, while parsing the string, we don't
930
+ // have to worry about the real component of the complex number.
931
+ auto imag =
932
+ torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy);
933
+ return c10::complex<double>(0, imag);
934
+ }
935
+ const std::string& text() const {
936
+ return subtree(0)->stringValue();
937
+ }
938
+ static Const create(const SourceRange& range, const std::string& value) {
939
+ return Const(Compound::create(TK_CONST, range, {String::create(value)}));
940
+ }
941
+ };
942
+
943
+ struct StringLiteral : public Expr {
944
+ explicit StringLiteral(const TreeRef& tree) : Expr(tree) {
945
+ tree_->matchNumSubtrees(TK_STRINGLITERAL, 1);
946
+ }
947
+ const std::string& text() const {
948
+ return subtree(0)->stringValue();
949
+ }
950
+ static StringLiteral create(
951
+ const SourceRange& range,
952
+ const std::string& value) {
953
+ return StringLiteral(
954
+ Compound::create(TK_STRINGLITERAL, range, {String::create(value)}));
955
+ }
956
+ };
957
+
958
// Tree view over a TK_APPLY node: a call expression.
// Subtrees: 0 = callee, 1 = positional arguments, 2 = named arguments
// (Attribute nodes).
struct Apply : public Expr {
  explicit Apply(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_APPLY);
  }
  // The expression being called.
  Expr callee() const {
    return Expr(subtree(0));
  }
  // Positional arguments.
  List<Expr> inputs() const {
    return List<Expr>(subtree(1));
  }
  // Named arguments (Attribute nodes).
  List<Attribute> attributes() const {
    return List<Attribute>(subtree(2));
  }
  // Builds a TK_APPLY node from its three components.
  static Apply create(
      const SourceRange& range,
      const Expr& callee,
      const List<Expr>& inputs,
      const List<Attribute>& attributes) {
    return Apply(
        Compound::create(TK_APPLY, range, {callee, inputs, attributes}));
  }
};
980
+
981
// Tree view over a '.' node: attribute access `value.selector`.
struct Select : public Expr {
  explicit Select(const TreeRef& tree) : Expr(tree) {
    tree_->match('.');
  }
  // The object whose attribute is accessed (subtree 0).
  Expr value() const {
    return Expr(subtree(0));
  }
  // The attribute name (subtree 1).
  Ident selector() const {
    return Ident(subtree(1));
  }
  static Select create(
      const SourceRange& range,
      const Expr& value,
      const Ident& selector) {
    return Select(Compound::create('.', range, {value, selector}));
  }
};
998
+
999
// Tree view over a TK_SLICE_EXPR node: one `start:end:step` slice component.
// Each of start/end/step may be absent (wrapped in Maybe); the *Or helpers
// substitute an integer constant when the component is missing.
struct SliceExpr : public Expr {
  explicit SliceExpr(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_SLICE_EXPR);
  }
  Maybe<Expr> start() const {
    return Maybe<Expr>(subtree(0));
  }
  Maybe<Expr> end() const {
    return Maybe<Expr>(subtree(1));
  }
  Maybe<Expr> step() const {
    return Maybe<Expr>(subtree(2));
  }
  // start(), or a Const holding `alternative` when absent.
  Expr startOr(int64_t alternative) const {
    const auto startOption = start();
    return startOption.present() ? startOption.get() : createInt(alternative);
  }
  // end(), or a Const holding `alternative` when absent.
  Expr endOr(int64_t alternative) const {
    const auto endOption = end();
    return endOption.present() ? endOption.get() : createInt(alternative);
  }
  // step(), or a Const holding `alternative` when absent.
  Expr stepOr(int64_t alternative) const {
    const auto stepOption = step();
    return stepOption.present() ? stepOption.get() : createInt(alternative);
  }
  static SliceExpr create(
      const SourceRange& range,
      const Maybe<Expr>& start,
      const Maybe<Expr>& end,
      const Maybe<Expr>& step) {
    return SliceExpr(
        Compound::create(TK_SLICE_EXPR, range, {start, end, step}));
  }

 private:
  // Wraps `value` in a numeric Const positioned at this slice's source range.
  Expr createInt(int64_t value) const {
    return Expr(Const::create(range(), c10::to_string(value)));
  }
};
1038
+
1039
// Tree view over a TK_SUBSCRIPT node: an indexing expression `value[exprs]`.
struct Subscript : public Expr {
  explicit Subscript(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_SUBSCRIPT);
  }
  // The expression being indexed (subtree 0).
  Expr value() const {
    return Expr(subtree(0));
  }
  // The index expressions inside the brackets (subtree 1).
  List<Expr> subscript_exprs() const {
    return List<Expr>(subtree(1));
  }
  static Subscript create(
      const SourceRange& range,
      const Expr& value,
      const List<Expr>& subscript_exprs) {
    // Extends the range one past the subscript list — presumably to include
    // the closing ']'; TODO confirm against the lexer's range conventions.
    auto whole_range = SourceRange(
        range.source(), range.start(), subscript_exprs.range().end() + 1);
    return Subscript(
        Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs}));
  }
};
1059
+
1060
+ struct Var : public Expr {
1061
+ explicit Var(const TreeRef& tree) : Expr(tree) {
1062
+ tree_->match(TK_VAR);
1063
+ };
1064
+ Ident name() const {
1065
+ return Ident(subtree(0));
1066
+ }
1067
+ static Var create(const SourceRange& range, const Ident& name) {
1068
+ return Var(Compound::create(TK_VAR, range, {name}));
1069
+ }
1070
+ };
1071
+
1072
+ // WithItem represents an item using with a WithStmt.
1073
// Tree view over a TK_WITH_ITEM node: one `target [as var]` entry of a
// with statement.
struct WithItem : public Expr {
  explicit WithItem(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_WITH_ITEM);
  }

  // The expression entered by the with statement (subtree 0).
  Expr target() const {
    return Expr(subtree(0));
  }

  // The optional `as` variable (subtree 1).
  Maybe<Var> var() const {
    return Maybe<Var>(subtree(1));
  }

  static WithItem create(
      const SourceRange& range,
      const Expr& target,
      const Maybe<Var>& var) {
    return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var}));
  }
};
1093
+
1094
+ // With represents a with statement consisting of a list of with items and a
1095
+ // body of statements.
1096
// Tree view over a TK_WITH node: a with statement with a list of with items
// (subtree 0) and a body of statements (subtree 1).
struct With : public Stmt {
  explicit With(const TreeRef& tree) : Stmt(tree) {
    tree_->match(TK_WITH);
  }

  // The with items, one per `target [as var]` entry.
  List<WithItem> targets() const {
    return List<WithItem>(subtree(0));
  }

  // The statements inside the with block.
  List<Stmt> body() const {
    return List<Stmt>(subtree(1));
  }

  static With create(
      const SourceRange& range,
      const List<WithItem>& targets,
      const List<Stmt>& body) {
    return With(Compound::create(TK_WITH, range, {targets, body}));
  }
};
1116
+
1117
+ struct TernaryIf : public Expr {
1118
+ explicit TernaryIf(const TreeRef& tree) : Expr(tree) {
1119
+ tree_->matchNumSubtrees(TK_IF_EXPR, 3);
1120
+ };
1121
+ Expr cond() const {
1122
+ return Expr(subtree(0));
1123
+ }
1124
+ Expr true_expr() const {
1125
+ return Expr(subtree(1));
1126
+ }
1127
+ Expr false_expr() const {
1128
+ return Expr(subtree(2));
1129
+ }
1130
+ static TernaryIf create(
1131
+ const SourceRange& range,
1132
+ const Expr& cond,
1133
+ const Expr& true_expr,
1134
+ const Expr& false_expr) {
1135
+ return TernaryIf(
1136
+ Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr}));
1137
+ };
1138
+ };
1139
+
1140
// Tree view over a TK_LIST_LITERAL node: a list literal expression.
struct ListLiteral : public Expr {
  explicit ListLiteral(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_LIST_LITERAL);
  }
  // The element expressions (subtree 0).
  List<Expr> inputs() const {
    return subtree(0);
  }
  static ListLiteral create(
      const SourceRange& range,
      const List<Expr>& inputs) {
    return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs}));
  }
};
1153
+
1154
// Tree view over a TK_TUPLE_LITERAL node: a tuple literal expression.
struct TupleLiteral : public Expr {
  explicit TupleLiteral(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_TUPLE_LITERAL);
  }
  // The element expressions (subtree 0).
  List<Expr> inputs() const {
    return subtree(0);
  }
  static TupleLiteral create(
      const SourceRange& range,
      const List<Expr>& inputs) {
    return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs}));
  }
};
1167
+
1168
// Tree view over a TK_DICT_LITERAL node: a dict literal expression. Keys and
// values are stored as two parallel lists (subtrees 0 and 1).
struct DictLiteral : public Expr {
  explicit DictLiteral(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_DICT_LITERAL);
  }
  List<Expr> key_inputs() const {
    return subtree(0);
  }
  List<Expr> value_inputs() const {
    return subtree(1);
  }
  // Note: `keys` and `values` are expected to have equal length; this is not
  // checked here.
  static DictLiteral create(
      const SourceRange& range,
      const List<Expr>& keys,
      const List<Expr>& values) {
    return DictLiteral(
        Compound::create(TK_DICT_LITERAL, range, {keys, values}));
  }
};
1186
+
1187
// Tree view over a TK_STARRED node: a `*expr` (starred/unpacking) expression.
struct Starred : public Expr {
  explicit Starred(const TreeRef& tree) : Expr(tree) {
    tree_->match(TK_STARRED);
  }
  // The expression under the star (subtree 0).
  Expr expr() const {
    return Expr(subtree(0));
  }
  static Starred create(const SourceRange& range, const Expr& expr) {
    return Starred(Compound::create(TK_STARRED, range, {expr}));
  }
};
1198
+
1199
// Tree view over a TK_DELETE node: a delete statement.
struct Delete : public Stmt {
  explicit Delete(const TreeRef& tree) : Stmt(tree) {
    tree_->match(TK_DELETE);
  }
  // The expressions being deleted (subtree 0).
  List<Expr> targets() const {
    return subtree(0);
  }
  static Delete create(const SourceRange& range, const List<Expr>& targets) {
    return Delete(Compound::create(TK_DELETE, range, {targets}));
  }
};
1210
+
1211
+ /*
1212
+ * NOTE: transforming PEP 604 union into equivalent union type
1213
+ *
1214
+ * NOTE: Union[int, float] parses into:
1215
+ * <EXPR> expr:(subscript
1216
+ * (variable (ident Union))
1217
+ * (list
1218
+ * (variable (ident int))
1219
+ * (variable (ident float))))
1220
+ * <KIND> subscript
1221
+ *
1222
+ * NOTE: (int | float) parses into:
1223
+ * <EXPR> expr:(|
1224
+ * (variable (ident int))
1225
+ * (variable (ident float)))
1226
+ * <KIND> |
1227
+ */
1228
+
1229
+ inline void _flatten_pep604_union(
1230
+ const torch::jit::Expr& node,
1231
+ std::vector<torch::jit::Expr>* result) {
1232
+ // flatten possibly nested union expressions like (int | (float | str))
1233
+ // into a flat list of expressions like [int, float, str]
1234
+ if (node.kind() == '|') {
1235
+ auto as_binop = torch::jit::BinOp(node);
1236
+ _flatten_pep604_union(as_binop.lhs(), result);
1237
+ _flatten_pep604_union(as_binop.rhs(), result);
1238
+ } else {
1239
+ result->push_back(node);
1240
+ }
1241
+ }
1242
+
1243
// Returns the flat, left-to-right list of operand expressions of a possibly
// nested PEP 604 `|` union expression (a non-`|` node yields a single-element
// list).
inline std::vector<Expr> get_pep604_union_members(const Expr& node) {
  std::vector<Expr> result;
  _flatten_pep604_union(node, &result);
  return result;
}
1248
+
1249
// Flattens a PEP 604 union into a classical union.
// For example, ((x | y) | z) is transformed into Union[x, y, z].
inline Expr pep604union_to_union(const Expr& expr) {
  // noop if not a pep604 union
  if (expr.kind() != '|')
    return expr;

  // In order to support unions with more than 2 operands ((x|y)|z), we need to
  // recursively flatten the tree of | expressions.
  auto members = get_pep604_union_members(expr);
  auto synthesised_union = Subscript::create(
      expr.range(),
      Var::create(expr.range(), Ident::create(expr.range(), "Union")),
      List<Expr>::create(expr.range(), members));
  // NOTE: the explicit std::move is load-bearing before C++20 — implicit move
  // on return does not apply to the Subscript -> Expr (derived-to-base)
  // conversion, so removing it would force a copy.
  return std::move(synthesised_union);
}
1265
+
1266
+ } // namespace jit
1267
+ } // namespace torch
1268
+
1269
namespace std {

// Make torch::jit::ListIterator usable with standard algorithms by reusing
// the iterator traits of the underlying TreeList const_iterator.
template <typename T>
struct iterator_traits<torch::jit::ListIterator<T>>
    : std::iterator_traits<torch::jit::TreeList::const_iterator> {};

} // namespace std
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <caffe2/serialize/versions.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/api/module.h>
6
+
7
+ #include <cstdint>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ // Maps the given symbol into an implementation of its behavior at the
12
+ // given version.
13
+ // See note [Versioned Symbols]
14
+ TORCH_API Symbol
15
+ get_symbol_for_version(const Symbol name, const uint64_t version);
16
+
17
+ // Maps the given kind to the minimum version that supports it.
18
+ // See note [Dynamic Versions and torch.jit.save vs. torch.save]
19
+ TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind);
20
+ } // namespace jit
21
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/operator_name.h>
7
+ #include <torch/csrc/jit/runtime/instruction.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ namespace mobile {
12
+
13
+ using Stack = std::vector<c10::IValue>;
14
+ using DebugHandle = int64_t;
15
+
16
+ class Function;
17
+
18
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
19
+ struct Code {
20
+ std::vector<Instruction> instructions_;
21
+ std::vector<DebugHandle> debug_handles_;
22
+ std::vector<c10::OperatorName> op_names_;
23
+ std::vector<int> operator_input_sizes_;
24
+ std::vector<std::function<void(Stack&)>> operators_;
25
+ std::vector<c10::IValue> constants_;
26
+ std::vector<c10::TypePtr> types_;
27
+ // TODO After we actually export CALL instructions we can remove this.
28
+ // We may need a two-stage importing scheme, where we firstly construct all
29
+ // function objects, and then append referenced function pointers. This could
30
+ // be done in parseMethods().
31
+ std::vector<mobile::Function*> functions_;
32
+ size_t register_size_ = 0; // Aggregated output size.
33
+ // initialized means operators_ array is filled with operators
34
+ bool initialized = false;
35
+ };
36
+
37
+ } // namespace mobile
38
+ } // namespace jit
39
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/flat_hash_map.h>
3
+ #include <caffe2/serialize/inline_container.h>
4
+ #include <torch/csrc/jit/api/compilation_unit.h>
5
+ #include <torch/csrc/jit/ir/scope.h>
6
+ #include <torch/csrc/jit/serialization/source_range_serialization.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ /*
11
+ * MobileDebugTable:
12
+ * Deserializes debug_pkl and callstack_map records from PT model's zip archive
13
+ * and stores them in a map of debug handles to DebugInfoPair. Debug handles are
14
+ * unique per model and runtime, be in lite interpreter or delegate, an
15
+ * exception of BackendRuntimeException should raised using debug handles.
16
+ * getSourceDebugString method is responsible for translating debug
17
+ * handles to correspond debug information.
18
+ * This debug informatin includes stack trace of model level source code and
19
+ * module hierarchy where the exception occurred.
20
+ */
21
+ class MobileDebugTable {
22
+ public:
23
+ MobileDebugTable() = default;
24
+ MobileDebugTable(
25
+ std::unique_ptr<caffe2::serialize::PyTorchStreamReader>& reader,
26
+ const std::shared_ptr<CompilationUnit>& cu);
27
+
28
+ template <typename It>
29
+ MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {}
30
+
31
+ std::string getSourceDebugString(
32
+ const int64_t debug_handle,
33
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
34
+ std::string getSourceDebugString(
35
+ const std::vector<int64_t>& debug_handles,
36
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
37
+ std::string getModuleHierarchyInfo(
38
+ const int64_t debug_handle,
39
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
40
+ std::string getModuleHierarchyInfo(
41
+ const std::vector<int64_t>& debug_handles,
42
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
43
+
44
+ const ska::flat_hash_map<int64_t, DebugInfoTuple>& getCallStackPtrMap()
45
+ const {
46
+ return callstack_ptr_map_;
47
+ }
48
+
49
+ private:
50
+ std::pair<std::string, std::string> getSourceDebugModuleHierarchyInfo(
51
+ const std::vector<int64_t>& debug_handles,
52
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
53
+ ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
54
+ };
55
+
56
+ } // namespace jit
57
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cerrno>
5
+ #include <cstddef>
6
+ #include <cstring>
7
+ #include <fstream>
8
+ #include <istream>
9
+ #include <memory>
10
+
11
+ #include <c10/core/CPUAllocator.h>
12
+ #include <c10/core/impl/alloc_cpu.h>
13
+ #include <caffe2/serialize/read_adapter_interface.h>
14
+
15
+ #if defined(HAVE_MMAP)
16
+ #include <fcntl.h>
17
+ #include <sys/mman.h>
18
+ #include <sys/stat.h>
19
+ #include <sys/types.h>
20
+ #include <unistd.h>
21
+ #endif
22
+
23
+ /**
24
+ * @file
25
+ *
26
+ * Helpers for identifying file formats when reading serialized data.
27
+ *
28
+ * Note that these functions are declared inline because they will typically
29
+ * only be called from one or two locations per binary.
30
+ */
31
+
32
+ namespace torch {
33
+ namespace jit {
34
+
35
+ /**
36
+ * The format of a file or data stream.
37
+ */
38
/**
 * The format of a file or data stream.
 */
enum class FileFormat {
  UnknownFileFormat = 0,
  FlatbufferFileFormat,
  ZipFileFormat,
};

/// The size of the buffer to pass to #getFileFormat(), in bytes.
constexpr size_t kFileFormatHeaderSize = 8;
constexpr size_t kMaxAlignment = 16;

/**
 * Returns the likely file format based on the magic header bytes in @p header,
 * which should contain the first bytes of a file or data stream.
 */
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline FileFormat getFileFormat(const char* data) {
  // The size of magic strings to look for in the buffer.
  static constexpr size_t kMagicSize = 4;

  // Bytes 4..7 of a Flatbuffer-encoded file produced by
  // `flatbuffer_serializer.h` (the first four bytes contain an offset to the
  // actual Flatbuffer data).
  static constexpr std::array<char, kMagicSize> kFlatbufferMagic = {
      'P', 'T', 'M', 'F'};
  static constexpr size_t kFlatbufferMagicOffset = 4;

  // The first four bytes of a ZIP file.
  static constexpr std::array<char, kMagicSize> kZipMagic = {
      'P', 'K', '\x03', '\x04'};

  // True when the magic string appears at the given byte offset in `data`.
  const auto matches_at = [data](const char* magic, size_t offset) {
    return std::memcmp(data + offset, magic, kMagicSize) == 0;
  };

  // Flatbuffer magic is checked first: the first four bytes of flatbuffer
  // data are an offset to the root struct, so it is theoretically possible to
  // construct a file whose offset looks like the ZIP magic. Bytes 4..7 of a
  // ZIP file, on the other hand, are constrained to a small set of values
  // that should never look like the printable Flatbuffer magic.
  if (matches_at(kFlatbufferMagic.data(), kFlatbufferMagicOffset)) {
    // A binary file containing a Flatbuffer-serialized mobile Module.
    return FileFormat::FlatbufferFileFormat;
  }
  if (matches_at(kZipMagic.data(), /*offset=*/0)) {
    // A zip file, which we use to store pickled sub-files.
    return FileFormat::ZipFileFormat;
  }
  return FileFormat::UnknownFileFormat;
}
87
+
88
+ /**
89
+ * Returns the likely file format based on the magic header bytes of @p data.
90
+ * If the stream position changes while inspecting the data, this function will
91
+ * restore the stream position to its original offset before returning.
92
+ */
93
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
94
+ static inline FileFormat getFileFormat(std::istream& data) {
95
+ FileFormat format = FileFormat::UnknownFileFormat;
96
+ std::streampos orig_pos = data.tellg();
97
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
98
+ std::array<char, kFileFormatHeaderSize> header;
99
+ data.read(header.data(), header.size());
100
+ if (data.good()) {
101
+ format = getFileFormat(header.data());
102
+ }
103
+ data.seekg(orig_pos, data.beg);
104
+ return format;
105
+ }
106
+
107
+ /**
108
+ * Returns the likely file format based on the magic header bytes of the file
109
+ * named @p filename.
110
+ */
111
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
112
+ static inline FileFormat getFileFormat(const std::string& filename) {
113
+ std::ifstream data(filename, std::ifstream::binary);
114
+ return getFileFormat(data);
115
+ }
116
+
117
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
118
+ static void file_not_found_error() {
119
+ std::stringstream message;
120
+ message << "Error while opening file: ";
121
+ if (errno == ENOENT) {
122
+ message << "no such file or directory" << std::endl;
123
+ } else {
124
+ message << "error no is: " << errno << std::endl;
125
+ }
126
+ TORCH_CHECK(false, message.str());
127
+ }
128
+
129
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
130
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_file_content(
131
+ const char* filename) {
132
+ #if defined(HAVE_MMAP)
133
+ int fd = open(filename, O_RDONLY);
134
+ if (fd < 0) {
135
+ // failed to open file, chances are it's no such file or directory.
136
+ file_not_found_error();
137
+ }
138
+ struct stat statbuf {};
139
+ fstat(fd, &statbuf);
140
+ size_t size = statbuf.st_size;
141
+ void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
142
+ close(fd);
143
+ auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); };
144
+ std::shared_ptr<char> data(reinterpret_cast<char*>(ptr), deleter);
145
+ #else
146
+ FILE* f = fopen(filename, "rb");
147
+ if (f == nullptr) {
148
+ file_not_found_error();
149
+ }
150
+ fseek(f, 0, SEEK_END);
151
+ size_t size = ftell(f);
152
+ fseek(f, 0, SEEK_SET);
153
+ // make sure buffer size is multiple of alignment
154
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
155
+ std::shared_ptr<char> data(
156
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
157
+ fread(data.get(), size, 1, f);
158
+ fclose(f);
159
+ #endif
160
+ return std::make_tuple(data, size);
161
+ }
162
+
163
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
164
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_stream_content(
165
+ std::istream& in) {
166
+ // get size of the stream and reset to orig
167
+ std::streampos orig_pos = in.tellg();
168
+ in.seekg(orig_pos, std::ios::end);
169
+ const long size = in.tellg();
170
+ in.seekg(orig_pos, in.beg);
171
+
172
+ // read stream
173
+ // NOLINT make sure buffer size is multiple of alignment
174
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
175
+ std::shared_ptr<char> data(
176
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
177
+ in.read(data.get(), size);
178
+
179
+ // reset stream to original position
180
+ in.seekg(orig_pos, in.beg);
181
+ return std::make_tuple(data, size);
182
+ }
183
+
184
// Reads the entire content of `rai` into a kMaxAlignment-padded buffer.
// NOTE(review): unlike get_file_content()/get_stream_content(), the second
// tuple element here is the padded buffer_size, not rai->size() — confirm
// callers expect the padded value.
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
    caffe2::serialize::ReadAdapterInterface* rai) {
  // Round the allocation up to a multiple of kMaxAlignment.
  size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment;
  std::shared_ptr<char> data(
      static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
  rai->read(
      0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes");
  return std::make_tuple(data, buffer_size);
}
194
+
195
+ } // namespace jit
196
+ } // namespace torch