ZTWHHH commited on
Commit
3ac981a
·
verified ·
1 Parent(s): 34844f6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h +351 -0
  2. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h +181 -0
  3. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h +84 -0
  4. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h +685 -0
  5. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h +200 -0
  6. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h +140 -0
  7. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h +41 -0
  8. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h +54 -0
  9. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h +11 -0
  10. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h +34 -0
  11. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h +18 -0
  12. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h +10 -0
  13. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h +16 -0
  14. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h +54 -0
  15. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h +16 -0
  16. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h +21 -0
  17. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h +68 -0
  18. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h +457 -0
  19. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h +47 -0
  20. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h +412 -0
  21. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h +1275 -0
  22. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h +61 -0
  23. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h +25 -0
  24. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h +1841 -0
  25. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/node_hashing.h +17 -0
  26. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h +74 -0
  27. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h +20 -0
  28. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h +39 -0
  29. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h +57 -0
  30. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h +196 -0
  31. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h +136 -0
  32. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h +86 -0
  33. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h +112 -0
  34. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h +38 -0
  35. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h +45 -0
  36. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h +197 -0
  37. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h +110 -0
  38. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h +32 -0
  39. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h +119 -0
  40. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h +63 -0
  41. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h +38 -0
  42. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h +55 -0
  43. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h +54 -0
  44. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h +43 -0
  45. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h +9 -0
  46. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h +35 -0
  47. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h +213 -0
  48. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h +119 -0
  49. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h +20 -0
  50. falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h +97 -0
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/function.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <torch/csrc/jit/api/function_impl.h>
5
+ #include <torch/csrc/jit/frontend/name_mangler.h>
6
+ #include <torch/csrc/jit/frontend/source_range.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+ #include <torch/csrc/jit/runtime/graph_executor.h>
9
+
10
+ #include <torch/csrc/Export.h>
11
+
12
+ #include <ATen/core/function_schema.h>
13
+ #include <ATen/core/qualified_name.h>
14
+ #include <c10/util/ArrayRef.h>
15
+ #include <c10/util/Optional.h>
16
+
17
+ #include <functional>
18
+ #include <memory>
19
+ #include <mutex>
20
+ #include <ostream>
21
+ #include <string>
22
+ #include <unordered_map>
23
+ #include <vector>
24
+
25
+ namespace torch::jit {
26
+
27
+ struct Def;
28
+ struct Property;
29
+ struct ClassDef;
30
+ struct SugaredValue;
31
+ struct Resolver;
32
+
33
+ using ResolverPtr = std::shared_ptr<Resolver>;
34
+ struct Self {
35
+ virtual ~Self() = default;
36
+ virtual std::shared_ptr<SugaredValue> makeSugared(Value* v) const = 0;
37
+ virtual ClassTypePtr getClassType() const = 0;
38
+ };
39
+
40
+ // A CompilationUnit is a list of named Functions
41
+ // with helper methods to iterate the list or invoke the function.
42
+ // Classes have a CompilationUnit holding the class methods,
43
+ // and Modules have a CompilationUnit holding the Functions that
44
+ // are used to implement their Methods
45
+
46
+ struct TORCH_API CompilationUnit {
47
+ enum class FunctionType { Method, Hook, PreHook };
48
+ // constructor that takes a set of functions to compile using the native
49
+ // resolver
50
+ explicit CompilationUnit(const std::string& source);
51
+ CompilationUnit() = default;
52
+
53
+ CompilationUnit& operator=(CompilationUnit&&) = default;
54
+ CompilationUnit(CompilationUnit&&) = default;
55
+ CompilationUnit& operator=(const CompilationUnit&) = delete;
56
+ CompilationUnit(const CompilationUnit&) = delete;
57
+
58
+ Function* find_function(const c10::QualifiedName& name) const {
59
+ auto it = dict_.find(name);
60
+ if (it == dict_.end()) {
61
+ return nullptr;
62
+ }
63
+ return functions_[it->second].get();
64
+ }
65
+
66
+ Function& get_function(const c10::QualifiedName& name) const {
67
+ if (auto r = find_function(name)) {
68
+ return *r;
69
+ }
70
+ TORCH_CHECK(false, "attempted to get undefined function ", name.name());
71
+ }
72
+
73
+ void set_optimized(bool o) {
74
+ TORCH_WARN(
75
+ "CompilationUnit::set_optimized() is deprecated and has no effect. "
76
+ "Please use setGraphExecutorOptimize()");
77
+ }
78
+
79
+ bool is_optimized() const {
80
+ TORCH_WARN(
81
+ "CompilationUnit::is_optimized() is deprecated and always returns true. "
82
+ "Please use getGraphExecutorOptimize()");
83
+ return true;
84
+ }
85
+
86
+ // for historic reasons, these are defined in ir_emitter.cpp
87
+ // Returns the list of Functions just defined.
88
+ std::vector<Function*> define(
89
+ const c10::optional<c10::QualifiedName>& prefix,
90
+ const std::vector<Property>& properties,
91
+ const std::vector<ResolverPtr>& propResolvers,
92
+ const std::vector<Def>& definitions,
93
+ const std::vector<ResolverPtr>&
94
+ defResolvers, /* determines how we handle free
95
+ variables in each definition*/
96
+ // if non-null, the first argument to each def, is bound to this value
97
+ const Self* self,
98
+ // see [name mangling]
99
+ bool shouldMangle = false,
100
+ c10::optional<size_t> operator_set_version = c10::nullopt);
101
+
102
+ void define_hooks(
103
+ const c10::optional<c10::QualifiedName>& prefix,
104
+ const std::vector<Def>& hookDefs,
105
+ const std::vector<ResolverPtr>& hookResolvers,
106
+ const std::vector<Def>& preHookDefs,
107
+ const std::vector<ResolverPtr>& preHookResolvers,
108
+ const Self* self,
109
+ bool shouldMangle = false);
110
+
111
+ // same as above but parse the definitions from source
112
+ // Returns the list of Functions just defined.
113
+ std::vector<Function*> define(
114
+ // prefix namespace to put all the defined functions into
115
+ const c10::optional<c10::QualifiedName>& prefix,
116
+ const std::string& source,
117
+ const ResolverPtr& resolver,
118
+ const Self* self);
119
+
120
+ void define_interface(
121
+ const c10::QualifiedName& qualifiedName,
122
+ const ClassDef& classDef,
123
+ ResolverPtr rcb,
124
+ bool is_module = false);
125
+
126
+ Function* create_function(
127
+ c10::QualifiedName name,
128
+ std::shared_ptr<Graph> graph,
129
+ bool shouldMangle = false) {
130
+ if (shouldMangle) {
131
+ name = mangle(name);
132
+ }
133
+ auto fn = std::make_unique<GraphFunction>(
134
+ std::move(name), std::move(graph), nullptr);
135
+ auto ret = fn.get();
136
+ register_function(std::move(fn));
137
+ return ret;
138
+ }
139
+
140
+ std::vector<Function*> get_functions() const {
141
+ return fmap(functions_, [](const std::unique_ptr<Function>& fn) {
142
+ return fn.get();
143
+ });
144
+ }
145
+
146
+ /// Run a method from this compilation.
147
+ ///
148
+ /// For example:
149
+ /// @code
150
+ /// IValue output = module->run("relu_script", a, b);
151
+ /// @endcode
152
+ ///
153
+ /// To get a compile a module from a source string, see torch::jit::compile
154
+ ///
155
+ /// @param method_name The name of the method to run
156
+ /// @param args Arguments to be passed to the method
157
+ /// @return An IValue containing the return value (or values if it is a tuple)
158
+ /// from the method
159
+ template <typename... Types>
160
+ IValue run_method(const c10::QualifiedName& method_name, Types&&... args) {
161
+ return get_function(method_name)({IValue(std::forward<Types>(args))...});
162
+ }
163
+
164
+ void drop_all_functions() {
165
+ dict_.clear();
166
+ functions_.clear();
167
+ }
168
+
169
+ /**
170
+ * Register a class as being owned by this compilation unit.
171
+ */
172
+ void register_type(c10::NamedTypePtr namedType) {
173
+ // TODO: class types cannot be redefined because we have no way right now
174
+ // of invalidating their methods. NamedTuples are fine though, since they
175
+ // don't have methods.
176
+ TORCH_CHECK(
177
+ 0 == classDict_.count(*namedType->name()),
178
+ "class '",
179
+ namedType->name()->qualifiedName(),
180
+ "' already defined.");
181
+ classes_.push_back(std::move(namedType));
182
+ classDict_[*classes_.back()->name()] = classes_.size() - 1;
183
+ };
184
+
185
+ c10::ClassTypePtr get_class(const c10::QualifiedName& name) const {
186
+ auto type = get_type(name);
187
+ if (!type) {
188
+ return nullptr;
189
+ }
190
+ return type->cast<c10::ClassType>();
191
+ }
192
+
193
+ c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const {
194
+ auto type = get_type(name);
195
+ if (!type) {
196
+ return nullptr;
197
+ }
198
+ return type->cast<c10::InterfaceType>();
199
+ }
200
+
201
+ c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const {
202
+ for (const auto& cls : classes_) {
203
+ if (cls->name()->qualifiedName() == name.qualifiedName()) {
204
+ return cls->expect<TupleType>();
205
+ }
206
+ }
207
+ return nullptr;
208
+ }
209
+
210
+ c10::NamedTypePtr get_type(const c10::QualifiedName& name) const {
211
+ auto it = classDict_.find(name);
212
+ if (it == classDict_.end()) {
213
+ return nullptr;
214
+ }
215
+ return classes_[it->second];
216
+ }
217
+
218
+ // For testing: clear all Python-defined classes to ensure that unit tests
219
+ // have isolation.
220
+ void _clear_python_cu() {
221
+ // Delete all the associated class methods
222
+ for (const auto& type : classes_) {
223
+ if (auto cls = type->cast<ClassType>()) {
224
+ for (auto method : cls->methods()) {
225
+ // Tombstone the method in the compilation unit.
226
+ // Don't erase because the dict_
227
+ auto it = dict_.find(method->qualname());
228
+ if (it != dict_.end()) {
229
+ functions_[it->second] = nullptr;
230
+ // Erase in our big lookup table
231
+ dict_.erase(it);
232
+ }
233
+ }
234
+ // Classes can have multiple pointers to the same hook,
235
+ // need to make sure to not delete it twice
236
+ std::unordered_set<Function*> hooks_to_delete;
237
+ for (const auto& hook : cls->getForwardHooks()) {
238
+ hooks_to_delete.insert(hook);
239
+ }
240
+ for (const auto& pre_hook : cls->getForwardPreHooks()) {
241
+ hooks_to_delete.insert(pre_hook);
242
+ }
243
+ for (const auto& hook : hooks_to_delete) {
244
+ // Tombstone the hook in the compilation unit.
245
+ auto it = dict_.find(hook->qualname());
246
+ if (it != dict_.end()) {
247
+ functions_[it->second] = nullptr;
248
+ // Erase in our big lookup table
249
+ dict_.erase(it);
250
+ }
251
+ }
252
+ }
253
+ }
254
+ classes_.clear();
255
+ classDict_.clear();
256
+ }
257
+
258
+ // [Internal Only] Remove method.
259
+ // Note Used for freezing.
260
+ void unsafeRemoveMethod(const c10::QualifiedName& method_name) {
261
+ auto it = dict_.find(method_name);
262
+ TORCH_CHECK(
263
+ it != dict_.end(),
264
+ "method '",
265
+ method_name.qualifiedName(),
266
+ "' does not exist.");
267
+ functions_[it->second] = nullptr;
268
+ dict_.erase(it);
269
+ }
270
+
271
+ // [name mangling] All code objects must have a unique qualified name in a
272
+ // CompilationUnit. In Python, sometimes functions won't have unique qualified
273
+ // name (for example, nested functions). So we mangle Python functions to
274
+ // ensure that they are uniquely named.
275
+ //
276
+ // We also use mangling to distinguish different Module instances. Since each
277
+ // Module is a singleton class instance, different instances of the same
278
+ // Python Module will have different types but the same qualified name.
279
+ c10::QualifiedName mangle(const c10::QualifiedName& name) const {
280
+ auto mangled = name;
281
+ while (get_type(mangled) || find_function(mangled)) {
282
+ mangled = mangler_.mangle(mangled);
283
+ }
284
+ return mangled;
285
+ }
286
+
287
+ private:
288
+ std::unique_ptr<Function> define(
289
+ const c10::optional<c10::QualifiedName>& prefix,
290
+ const Def& def,
291
+ const ResolverPtr& resolver,
292
+ const Self* self,
293
+ const std::unordered_map<std::string, Function*>& function_table,
294
+ bool shouldMangle = false,
295
+ FunctionType type = FunctionType::Method,
296
+ c10::optional<size_t> version = c10::nullopt) const;
297
+
298
+ // Define a property on \p self.
299
+ struct PropertyPair;
300
+ PropertyPair define_property(
301
+ const c10::optional<c10::QualifiedName>& prefix,
302
+ const Property& prop,
303
+ const ResolverPtr& resolver,
304
+ const Self* self,
305
+ const std::unordered_map<std::string, Function*>& function_table,
306
+ bool shouldMangle = false) const;
307
+
308
+ Function& register_function(std::unique_ptr<Function> fn) {
309
+ TORCH_CHECK(
310
+ 0 == dict_.count(fn->qualname().qualifiedName()),
311
+ "method '",
312
+ fn->qualname().qualifiedName(),
313
+ "' already defined.");
314
+ functions_.emplace_back(std::move(fn));
315
+ dict_[functions_.back()->qualname()] = functions_.size() - 1;
316
+ return *functions_.back();
317
+ }
318
+ std::vector<std::unique_ptr<Function>> functions_;
319
+ // for fast lookup
320
+ std::unordered_map<c10::QualifiedName, size_t> dict_;
321
+ std::unordered_map<c10::QualifiedName, size_t> classDict_;
322
+
323
+ // [class ownership] Right now there are two relationships between classes
324
+ // and compilation units:
325
+ // 1. Classes have compilation units internally that hold their methods.
326
+ // 2. On load, the TypePtrs of any imported classes are owned by the main
327
+ // module's compilation unit.
328
+ std::vector<c10::NamedTypePtr> classes_;
329
+
330
+ mutable NameMangler mangler_;
331
+ };
332
+
333
+ // An owning pointer to a Function. Just a pair of a raw Function ptr and it's
334
+ // owning CU. We need this because pybind requires a ref-counted way to refer to
335
+ // Functions.
336
+ struct StrongFunctionPtr {
337
+ StrongFunctionPtr(std::shared_ptr<CompilationUnit> cu, Function* function)
338
+ : cu_(std::move(cu)), function_(function) {
339
+ TORCH_INTERNAL_ASSERT(cu_);
340
+ TORCH_INTERNAL_ASSERT(function_);
341
+ }
342
+ std::shared_ptr<CompilationUnit> cu_;
343
+ Function* function_;
344
+ };
345
+
346
+ namespace script {
347
+ // We once had a `script::` namespace that was deleted. This is for backcompat
348
+ // of the public API; new code should not use this type alias.
349
+ using CompilationUnit = ::torch::jit::CompilationUnit;
350
+ } // namespace script
351
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/runtime/graph_executor.h>
6
+
7
+ namespace torch::jit {
8
+
9
+ struct TORCH_API GraphFunction : public Function {
10
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
11
+ GraphFunction(
12
+ c10::QualifiedName name,
13
+ std::shared_ptr<Graph> graph,
14
+ std::function<void(GraphFunction&)> function_creator,
15
+ c10::optional<ExecutorExecutionMode> executor_execution_mode =
16
+ c10::nullopt)
17
+ : name_(std::move(name)),
18
+ graph_(std::move(graph)),
19
+ executor_execution_mode_(executor_execution_mode),
20
+ function_creator_(std::move(function_creator)) {}
21
+
22
+ bool isGraphFunction() const override {
23
+ return true;
24
+ }
25
+
26
+ void run(Stack& stack) override;
27
+
28
+ std::function<void(GraphFunction&)> function_creator() const {
29
+ return function_creator_;
30
+ }
31
+
32
+ c10::intrusive_ptr<c10::ivalue::Future> runAsync(
33
+ Stack& stack,
34
+ TaskLauncher taskLauncher = at::launch) override;
35
+
36
+ std::shared_ptr<Graph> graph() const {
37
+ return graph_;
38
+ }
39
+
40
+ std::shared_ptr<Graph> optimized_graph() const;
41
+
42
+ const c10::QualifiedName& qualname() const override {
43
+ return name_;
44
+ }
45
+
46
+ // private/unstable api. sets the initial execution mode
47
+ // will not affect executor if there is an existing executor
48
+ // created for this function
49
+ void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) {
50
+ executor_execution_mode_ = mode;
51
+ }
52
+ // private/unstable api. sets flag of whether or not to ignore amp.
53
+ // will not affect executor if there is an existing executor
54
+ // created for this function
55
+ void _set_ignore_amp(bool ignore_amp) {
56
+ force_no_amp_ = ignore_amp;
57
+ }
58
+
59
+ // if this isn't yet defined, run its method_creator function
60
+ void ensure_defined() override;
61
+
62
+ size_t num_inputs() const override {
63
+ return graph()->inputs().size();
64
+ }
65
+
66
+ Function& setSchema(FunctionSchema schema) override {
67
+ schema_ = std::make_unique<FunctionSchema>(std::move(schema));
68
+ return *this;
69
+ }
70
+
71
+ const FunctionSchema& getSchema() const override;
72
+
73
+ GraphExecutorState getDebugState() {
74
+ return get_executor().getDebugState();
75
+ }
76
+
77
+ bool is_optimized() const {
78
+ TORCH_WARN(
79
+ "GraphFunction::is_optimized() is deprecated and always returns true. "
80
+ "Please use getGraphExecutorOptimize()");
81
+ return true;
82
+ }
83
+
84
+ void check_single_output() {
85
+ TORCH_CHECK(
86
+ graph()->outputs().size() == 1,
87
+ "Method (but not graphs in general) require a single output. Use None/Tuple for 0 or 2+ outputs");
88
+ }
89
+
90
+ GraphExecutor& get_executor() {
91
+ ensure_defined();
92
+ std::lock_guard<std::recursive_mutex> lock(compile_mutex);
93
+ auto& executor = executors_[currentSpecialization()];
94
+ if (executor) {
95
+ return *executor;
96
+ }
97
+ check_single_output();
98
+ const std::string& name = name_.name();
99
+ std::shared_ptr<Graph> opt_graph = optimized_graph();
100
+ if (!executor_execution_mode_) {
101
+ executor = GraphExecutor(opt_graph, name);
102
+ } else {
103
+ executor = GraphExecutor(opt_graph, name, *executor_execution_mode_);
104
+ }
105
+ return *executor;
106
+ }
107
+
108
+ using Function::call;
109
+ bool call(
110
+ Stack& stack,
111
+ c10::optional<size_t> bailOut,
112
+ c10::function_ref<void(const Code&)> f) override {
113
+ f(get_executor().getPlanFor(stack, bailOut).code);
114
+ return true;
115
+ }
116
+
117
+ void clear_optimized_graphs() {
118
+ optimized_graphs_.fill(nullptr);
119
+ }
120
+
121
+ private:
122
+ enum SpecializationKey {
123
+ AutocastOff,
124
+ CpuAutocastOn,
125
+ GpuAutocastOn,
126
+ CpuGpuAutocastOn,
127
+
128
+ // This provides the number of specializations
129
+ // (Must be last entry)
130
+ TotalCount
131
+ };
132
+
133
+ SpecializationKey currentSpecialization() const;
134
+
135
+ private:
136
+ c10::QualifiedName name_;
137
+ // The original, non-optimized graph
138
+ std::shared_ptr<Graph> graph_; // for debugging and for inlining
139
+
140
+ // allows users to specify Simple/Profiling Executor for function
141
+ // TODO: add more executors
142
+ mutable c10::optional<ExecutorExecutionMode> executor_execution_mode_;
143
+
144
+ // if invoked on a graph that has already traced through amp
145
+ // don't invoke amp pass
146
+ mutable bool force_no_amp_ = false;
147
+ // Optimized graph, computed lazily. Used for inlining.
148
+ mutable std::array<std::shared_ptr<Graph>, SpecializationKey::TotalCount>
149
+ optimized_graphs_;
150
+
151
+ // GraphFunctions are invokable from multiple threads, so this lock needs to
152
+ // be held when we're initializing graph executor for the first time or
153
+ // computing the optimized graph. We're using reentrant mutex so that we don't
154
+ // need to worry about causing a deadlock by calling one method from another
155
+ // (e.g. optimized_graph() from get_executor()).
156
+ mutable std::recursive_mutex compile_mutex;
157
+
158
+ // executor_[0] - autocast off
159
+ // executor_[1] - autocast cpu on
160
+ // executor_[2] - autocast gpu on
161
+ // executor_[3] - autocast cpu & gpu on
162
+ std::array<c10::optional<GraphExecutor>, SpecializationKey::TotalCount>
163
+ executors_;
164
+
165
+ // an optional function that actually creates the method when
166
+ // ensure_defined() is called. This is used by the compiler so
167
+ // that it can construct methods out of order
168
+ std::function<void(GraphFunction&)> function_creator_;
169
+
170
+ // if absent, then we generate a default schema based on the graph
171
+ // mutable because getSchema caches the default schema if one is requested
172
+ // before a call to setSchema
173
+ mutable std::unique_ptr<FunctionSchema> schema_;
174
+ };
175
+
176
+ // Short hands for dynamic_cast<GraphFunction*>.
177
+ TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
178
+ TORCH_API GraphFunction& toGraphFunction(Function&);
179
+ TORCH_API const GraphFunction& toGraphFunction(const Function&);
180
+
181
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <torch/csrc/api/include/torch/imethod.h>
7
+ #include <torch/csrc/jit/api/function_impl.h>
8
+
9
+ namespace torch::jit {
10
+
11
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
12
+
13
+ // A method in a module, e.g. f in:
14
+ //
15
+ // class M(ScriptModule):
16
+ // @script_method
17
+ // def f(self, x):
18
+ // ...
19
+ // Note: because Method/Module are exposed to python these
20
+ // classes use python method naming conventions
21
+ struct TORCH_API Method : public torch::IMethod {
22
+ Method(ObjectPtr owner, Function* function);
23
+
24
+ // the module that contains this method.
25
+ Module owner() const;
26
+ // the raw objectptr that owns this method, for when the method is owned by a
27
+ // torchbind object.
28
+ ObjectPtr raw_owner() const;
29
+ void run(Stack& stack);
30
+ void run(Stack&& stack) {
31
+ run(stack);
32
+ }
33
+
34
+ c10::IValue operator()(
35
+ std::vector<c10::IValue> stack,
36
+ const Kwargs& kwargs = Kwargs()) const override;
37
+
38
+ // Run method async. Invocation on this function would invokes a JIT
39
+ // interpreter that executes ops inline, one by one, on caller's thread. A
40
+ // model can utilize async op, i.e. `fork`, to launch an asynchronous task
41
+ // which will be launched on provided `taskLauncher`.
42
+ c10::intrusive_ptr<c10::ivalue::Future> run_async(
43
+ std::vector<c10::IValue> stack,
44
+ const Kwargs& kwargs = Kwargs(),
45
+ TaskLauncher taskLauncher = at::launch);
46
+
47
+ std::shared_ptr<Graph> graph() const {
48
+ return toGraphFunction(*function_).graph();
49
+ }
50
+
51
+ const std::string& name() const override {
52
+ return function_->name();
53
+ }
54
+
55
+ size_t num_inputs() const {
56
+ return function_->num_inputs();
57
+ }
58
+
59
+ GraphExecutor& get_executor() {
60
+ return toGraphFunction(*function_).get_executor();
61
+ }
62
+
63
+ Function& function() const {
64
+ return *function_;
65
+ }
66
+
67
+ private:
68
+ void setArgumentNames(std::vector<std::string>&) const override;
69
+
70
+ // Methods are uniqued onwed by a single module. This raw pointer allows
71
+ // looking up the module.
72
+ ObjectPtr owner_;
73
+
74
+ // Underlying unbound function
75
+ Function* function_;
76
+ };
77
+
78
+ namespace script {
79
+ // We once had a `script::` namespace that was deleted. This is for backcompat
80
+ // of the public API; new code should not use this type alias.
81
+ using Method = ::torch::jit::Method;
82
+ } // namespace script
83
+
84
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h ADDED
@@ -0,0 +1,685 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <torch/csrc/autograd/variable.h>
4
+ #include <torch/csrc/jit/api/object.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+ #include <torch/csrc/jit/ir/named_value.h>
8
+ #include <torch/csrc/jit/runtime/argument_spec.h>
9
+ #include <torch/csrc/jit/runtime/graph_executor.h>
10
+
11
+ #include <torch/csrc/Export.h>
12
+ #include <torch/csrc/api/include/torch/ordered_dict.h>
13
+ #include <torch/csrc/jit/api/compilation_unit.h>
14
+
15
+ #include <ATen/core/function_schema.h>
16
+ #include <ATen/core/qualified_name.h>
17
+ #include <c10/util/ArrayRef.h>
18
+ #include <c10/util/Optional.h>
19
+ #include <c10/util/irange.h>
20
+
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <mutex>
24
+ #include <ostream>
25
+ #include <string>
26
+ #include <unordered_map>
27
+ #include <unordered_set>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ // This file contains classes which assist in desugaring Python style
32
+ // modules and their methods into flattened graphs which don't have any
33
+ // function calls.
34
+
35
+ namespace torch::jit {
36
+
37
+ using ::c10::Argument;
38
+ using ::c10::FunctionSchema;
39
+ using ::c10::QualifiedName;
40
+ // Map which stores filename to content.
41
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
42
+
43
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
44
+
45
+ struct Module;
46
+
47
+ template <typename T>
48
+ struct slot_list_impl;
49
+
50
+ template <typename T>
51
+ struct Named {
52
+ std::string name;
53
+ T value;
54
+ };
55
+
56
+ using NameModule = Named<Module>;
57
+ using NameValue = Named<IValue>;
58
+ using NameTensor = Named<at::Tensor>;
59
+
60
+ namespace detail {
61
+ struct TORCH_API ModulePolicy;
62
+ struct TORCH_API ParameterPolicy;
63
+ struct TORCH_API AttributePolicy;
64
+ struct TORCH_API BufferPolicy;
65
+ template <typename P>
66
+ struct NamedPolicy;
67
+ } // namespace detail
68
+
69
+ using module_list = slot_list_impl<detail::ModulePolicy>;
70
+ using named_module_list =
71
+ slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>;
72
+
73
+ using parameter_list = slot_list_impl<detail::ParameterPolicy>;
74
+ using named_parameter_list =
75
+ slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>;
76
+
77
+ using attribute_list = slot_list_impl<detail::AttributePolicy>;
78
+ using named_attribute_list =
79
+ slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>;
80
+
81
+ using buffer_list = slot_list_impl<detail::BufferPolicy>;
82
+ using named_buffer_list =
83
+ slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>;
84
+
85
+ using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;
86
+
87
+ struct TORCH_API Module : public Object {
88
+ explicit Module(c10::QualifiedName class_name);
89
+ Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
90
+ Module() = default;
91
+ Module(const Module&) = default;
92
+ Module& operator=(const Module&) = default;
93
+ Module(Module&&) noexcept = default;
94
+ Module& operator=(Module&&) noexcept = default;
95
+ Module(
96
+ c10::QualifiedName,
97
+ std::shared_ptr<CompilationUnit> cu,
98
+ bool shouldMangle = false);
99
+ Module(ModulePtr module_value) : Object(std::move(module_value)) {}
100
+ ~Module() = default;
101
+
102
+ void set_optimized(bool o) {
103
+ TORCH_WARN(
104
+ "Module::set_optimized() is deprecated and has no effect. "
105
+ "Please use setGraphExecutorOptimize()");
106
+ }
107
+
108
+ bool is_optimized() const {
109
+ TORCH_WARN(
110
+ "Module::is_optimized() is deprecated and always returns true. "
111
+ "Please use getGraphExecutorOptimize()");
112
+ return true;
113
+ }
114
+
115
+ IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) {
116
+ return get_method("forward")(std::move(inputs), kwargs);
117
+ }
118
+
119
+ // In script modules, buffers are Tensors attribute that are _not_ registered
120
+ // as parameters. This is different than in nn.Module where there is a special
121
+ // register_buffer method. With this simplification, we only need to track
122
+ // whether a slot is a parameter to be able to classify it.
123
+ void register_buffer(const std::string& name, at::Tensor v) {
124
+ bool is_param = false;
125
+ bool is_buffer = true;
126
+ std::lock_guard<std::mutex> lock(*register_mutex_);
127
+ type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer);
128
+ _ivalue()->setAttr(name, std::move(v));
129
+ }
130
+
131
+ void register_parameter(
132
+ const std::string& name,
133
+ at::Tensor v,
134
+ bool is_buffer) {
135
+ std::lock_guard<std::mutex> lock(*register_mutex_);
136
+ type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer);
137
+ _ivalue()->setAttr(name, std::move(v));
138
+ }
139
+
140
+ void register_attribute(
141
+ const std::string& name,
142
+ const TypePtr& t,
143
+ IValue v,
144
+ bool is_param = false,
145
+ bool is_buffer = false) {
146
+ type()->addOrCheckAttribute(name, t, is_param, is_buffer);
147
+ _ivalue()->setAttr(name, std::move(v));
148
+ }
149
+
150
+ void register_module(const std::string& name, const Module& module) {
151
+ type()->addOrCheckAttribute(name, module.type());
152
+ _ivalue()->setAttr(name, module._ivalue());
153
+ }
154
+
155
+ void apply(const std::function<void(Module&)>& fn);
156
+
157
+ buffer_list buffers(bool recurse = true) const;
158
+ named_buffer_list named_buffers(bool recurse = true) const;
159
+
160
+ module_list children() const; // direct modules
161
+ named_module_list named_children() const;
162
+ module_list modules() const; // all modules, including this one, recursively
163
+ named_module_list named_modules() const;
164
+
165
+ // all tensors involved in gradient optimization
166
+ parameter_list parameters(bool recurse = true) const;
167
+ named_parameter_list named_parameters(bool recurse = true) const;
168
+
169
+ // all members of the object, similar to iterating over dir(obj) in python
170
+ attribute_list attributes(bool recurse = true) const;
171
+ named_attribute_list named_attributes(bool recurse = true) const;
172
+
173
+ void dump(
174
+ bool print_method_bodies,
175
+ bool print_attr_values,
176
+ bool print_param_values) const;
177
+
178
+ std::string dump_to_str(
179
+ bool print_method_bodies,
180
+ bool print_attr_values,
181
+ bool print_param_values) const;
182
+
183
+ /// Enables "training" mode.
184
+ void train(bool on = true);
185
+ /// Calls train(false) to enable "eval" mode.
186
+ /// Do not override this method, override `train()` instead.
187
+ void eval() {
188
+ train(/*on=*/false);
189
+ }
190
+ /// True if the module is in training mode.
191
+ bool is_training() const {
192
+ return attr("training", true).toBool();
193
+ }
194
+
195
+ /// Recursively casts all parameters to the given `dtype` and `device`.
196
+ ///
197
+ /// If `non_blocking` is true and the source is in pinned memory and
198
+ /// destination is on the GPU or vice versa, the copy is performed
199
+ /// asynchronously with respect to the host. Otherwise, the argument has no
200
+ /// effect.
201
+ void to(at::Device device, at::ScalarType dtype, bool non_blocking = false);
202
+
203
+ /// Recursively casts all parameters to the given dtype.
204
+ ///
205
+ /// If `non_blocking` is true and the source is in pinned memory and
206
+ /// destination is on the GPU or vice versa, the copy is performed
207
+ /// asynchronously with respect to the host. Otherwise, the argument has no
208
+ /// effect.
209
+ void to(at::ScalarType dtype, bool non_blocking = false);
210
+
211
+ /// Recursively moves all parameters to the given device.
212
+ ///
213
+ /// If `non_blocking` is true and the source is in pinned memory and
214
+ /// destination is on the GPU or vice versa, the copy is performed
215
+ /// asynchronously with respect to the host. Otherwise, the argument has no
216
+ /// effect.
217
+ void to(at::Device device, bool non_blocking = false);
218
+
219
+ void save(
220
+ std::ostream& out,
221
+ const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
222
+
223
+ void save(
224
+ const std::string& filename,
225
+ const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
226
+
227
+ void _save_for_mobile(
228
+ std::ostream& out,
229
+ const ExtraFilesMap& extra_files = ExtraFilesMap(),
230
+ bool save_mobile_debug_info = false,
231
+ bool use_flatbuffer = false) const;
232
+
233
+ void _save_for_mobile(
234
+ const std::string& filename,
235
+ const ExtraFilesMap& extra_files = ExtraFilesMap(),
236
+ bool save_mobile_debug_info = false,
237
+ bool use_flatbuffer = false) const;
238
+
239
+ Module copy() const;
240
+
241
+ Module deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
242
+
243
+ // Clones both the underlying `ClassType` and the module instance(data), this
244
+ // function creates a new `ClassType` and returns a new instance that has the
245
+ // same data as the current instance but with the new type, shared ClassType
246
+ // will be preserved as well
247
+ Module clone(bool inplace = false) const;
248
+
249
+ // Clones both the underlying `ClassType` and the module instance(data), this
250
+ // function creates a new `ClassType` and returns a new instance that has the
251
+ // same data as the current instance but with the new type, shared ClassType
252
+ // will be preserved as well. Also allows the caller to specify a set of
253
+ // method and attribute names to not clone.
254
+ Module clone(
255
+ bool inplace,
256
+ const std::unordered_set<std::string>& ignored_method,
257
+ const std::unordered_set<std::string>& ignored_attributes) const;
258
+
259
+ void clone_method(const Module& orig, const std::string& name);
260
+
261
+ IValue operator()(std::vector<IValue> inputs);
262
+
263
+ template <typename... Types>
264
+ IValue create_class(const c10::QualifiedName& name, Types&&... args) const {
265
+ return create_class(name, {IValue(std::forward<Types>(args))...});
266
+ }
267
+
268
+ IValue create_class(const c10::QualifiedName& name, Stack stack) const;
269
+
270
+ inline bool operator==(const Module& y) const noexcept {
271
+ return _ivalue() == y._ivalue();
272
+ }
273
+
274
+ void set_delete_memory(std::shared_ptr<char> delete_mem) {
275
+ mem_to_delete_ = std::move(delete_mem);
276
+ }
277
+
278
+ // A set of functions to maintain input shapes through torch.jit.save and
279
+ // torch.jit.load. It only works on tensors and lists/dicts of tensors
280
+ // because tracing is only supported by these types.
281
+ void store_traced_inputs(std::string func_name, std::vector<IValue> inputs) {
282
+ if (inputs.size() == 0) {
283
+ return;
284
+ }
285
+ auto c10_inputs = c10::impl::GenericList(AnyType::get());
286
+ for (IValue& value : inputs) {
287
+ // Not checking whether this is traceable type as that is already checked
288
+ // higher up in the stack and changing that would require a larger
289
+ // restructuring.
290
+ c10_inputs.emplace_back(std::move(value));
291
+ }
292
+ traced_inputs_.insert_or_assign(func_name, c10_inputs);
293
+ }
294
+
295
+ c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs()
296
+ const {
297
+ return traced_inputs_;
298
+ }
299
+
300
+ private:
301
+ Module clone_impl(
302
+ std::unordered_map<TypePtr, TypePtr>& type_remap,
303
+ bool inplace,
304
+ IValue::HashAliasedIValueMap memo,
305
+ const std::unordered_set<std::string>& ignored_methods,
306
+ const std::unordered_set<std::string>& ignored_attributes) const;
307
+
308
+ void clone_method(
309
+ const Module& orig,
310
+ const Function& method,
311
+ const std::unordered_map<TypePtr, TypePtr>& type_remap);
312
+
313
+ c10::QualifiedName getNameForMethod(std::string basename) const {
314
+ return QualifiedName(*type()->name(), std::move(basename));
315
+ }
316
+
317
+ void to_impl(
318
+ const c10::optional<at::Device>& device,
319
+ const c10::optional<at::ScalarType>& dtype,
320
+ bool non_blocking);
321
+
322
+ // Extra handle for the module to delete when itself is deleted
323
+ std::shared_ptr<char> mem_to_delete_;
324
+
325
+ // Map of function names to the traced inputs that they have been traced with
326
+ c10::Dict<std::string, c10::impl::GenericList> traced_inputs_;
327
+
328
+ // Mutex to keep registring buffer or parameter thread safe.
329
+ std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>();
330
+ };
331
+
332
+ // C++ equivalent api of `torch.jit.freeze`. See documentation there for
333
+ // details.
334
+ TORCH_API Module freeze(
335
+ const Module& module,
336
+ const c10::optional<std::vector<std::string>>& preserved_attrs =
337
+ c10::nullopt,
338
+ bool optimize_numerics = true);
339
+
340
+ // C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation
341
+ // there for details.
342
+ TORCH_API Module optimize_for_inference(
343
+ Module& module,
344
+ const std::vector<std::string>& other_methods = {});
345
+
346
+ enum class FusionBehavior { STATIC, DYNAMIC };
347
+
348
+ using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>;
349
+ // clang-format off
350
+ /*
351
+ Sets the type and number of specializations that can occur during fusion.
352
+
353
+ Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC
354
+ and depth is an integer.
355
+
356
+ Behavior - static vs dynamic:
357
+ In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
358
+ based on some initial profiling runs.
359
+ In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
360
+ shapes are possible.
361
+
362
+ In both cases, we also recompile on new striding behavior, device, or dtype.
363
+
364
+ Behavior - fallback functions & depth:
365
+ When an input doesn't match the format required by the specialized compiled op, it will run
366
+ a fallback function. Fallback functions are recursively be compiled and specialized based
367
+ on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
368
+ limit the number of specializations that can be compiled, before giving up on recompiling and
369
+ falling back to a completely un-fused, un-specialized implementation.
370
+
371
+ The list of (type, depth) pairs controls the type of specializations and the number of
372
+ specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first
373
+ two specializations will use static fusions, the following two specializations will use
374
+ dynamic fusion, and any inputs that satisfy none of the 4 options will run an
375
+ unfused implementation.
376
+
377
+ NB: in the future, if more as more fusion backends are added there may be more granular
378
+ apis for specific fusers.
379
+ */
380
+ // clang-format on
381
+ TORCH_API FusionStrategy getFusionStrategy();
382
+ // returns previous strategy
383
+ TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
384
+
385
+ namespace detail {
386
+
387
+ struct TORCH_API SlotCursor {
388
+ Module module_;
389
+ int64_t i_; // slot offset, -1 indicates the module itself
390
+ };
391
+
392
+ } // namespace detail
393
+
394
+ // This iterator allows the (optionally recursive) enumeration of
395
+ // the members of a Module. It performs a depth-first pre-order
396
+ // traversal of the module. The Policy template parameter determines
397
+ // which slots of the object should be included. For instance,
398
+ // when iterating parameters, we return the parameter tensors,
399
+ // but skip modules, buffers, and other attributes.
400
+ // See ModulePolicy for comments about Policy object's API.
401
+ template <typename Policy>
402
+ struct slot_iterator_impl {
403
+ using SlotCursor = detail::SlotCursor;
404
+ using value_type = typename Policy::value_type;
405
+ slot_iterator_impl(
406
+ Module root,
407
+ bool recurse, // if true, do a depth-first search, otherwise, just look at
408
+ // slots of root
409
+ bool return_module) // if true include root itself as the first thing
410
+ // visited (used in modules())
411
+ : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
412
+ recurse_(recurse) {
413
+ // advance iterator to first valid element (or the end, if empty)
414
+ while_not_valid_next();
415
+ }
416
+ // empty cursors_, represents end of iteration
417
+ slot_iterator_impl() : recurse_(false) {}
418
+ value_type operator*() const {
419
+ return Policy::create(cursors_, cur());
420
+ }
421
+ value_type operator->() const {
422
+ return **this;
423
+ }
424
+ slot_iterator_impl& operator++() {
425
+ next_valid();
426
+ return *this;
427
+ }
428
+ slot_iterator_impl operator++(int) {
429
+ // this is really expensive, should we delete it so people don't use it
430
+ // instead of prefix?
431
+ slot_iterator_impl old = *this;
432
+ ++(*this);
433
+ return old;
434
+ }
435
+
436
+ private:
437
+ // return_module() is a corner case where instead of returning a submodule
438
+ // of root, we are returning root itself, because we are iterating modules(),
439
+ // which contains the root module itself.
440
+ // It is represented with a single SlotCursor whose index is -1.
441
+ bool return_module() const {
442
+ return top().i_ == -1;
443
+ }
444
+ const SlotCursor& top() const {
445
+ return cursors_.back();
446
+ }
447
+ SlotCursor& top() {
448
+ return cursors_.back();
449
+ }
450
+ IValue cur() const {
451
+ return return_module() ? top().module_._ivalue()
452
+ : top().module_._ivalue()->getSlot(top().i_);
453
+ }
454
+
455
+ // advance to the next slot in a depth first pre-order traversal of the
456
+ // modules slots. This function does not guarantee the next slot is a
457
+ // valid element of the iteration. That is done by valid().
458
+ // invariant: !cursors_.empty()
459
+ void next() {
460
+ // we just returned the module itself, advance i_ to 0 so we are now
461
+ // at the first slot of the module.
462
+ if (return_module()) {
463
+ ++top().i_;
464
+ return;
465
+ }
466
+ // the last traversal action advanced beyond the number of slots in the
467
+ // module so continue the iteration in the parent.
468
+ if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) {
469
+ cursors_.pop_back();
470
+ if (!cursors_.empty()) {
471
+ ++top().i_;
472
+ }
473
+ return;
474
+ }
475
+ // if the current thing is a module, we have to scan it for recursive
476
+ // traversals. We do this by adding a new SlotCursor to track the traversal.
477
+ if (recurse_ &&
478
+ top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) {
479
+ cursors_.emplace_back(SlotCursor{cur().toModule(), 0});
480
+ return;
481
+ }
482
+ // common case: advance to the next slot.
483
+ ++top().i_;
484
+ }
485
+ // is the current position of the iterator a valid one?
486
+ // otherwise, we have to continue advancing.
487
+ bool valid() const {
488
+ return top().i_ <
489
+ int64_t(top().module_._ivalue()->type()->numAttributes()) &&
490
+ Policy::valid(
491
+ top().module_._ivalue()->type(),
492
+ top().i_,
493
+ top().module_._ivalue()->getSlot(top().i_));
494
+ }
495
+ void while_not_valid_next() {
496
+ // advance iteration until we are either at the end (cursors_.empty())
497
+ // or in a valid state. return_module() is a special case,
498
+ // and is always considered valid, regardless of Policy, because it is
499
+ // it is only true when we are iterating modules.
500
+ while (!cursors_.empty() && !return_module() && !valid()) {
501
+ next();
502
+ }
503
+ }
504
+ void next_valid() {
505
+ // avoid crashing if this is empty
506
+ if (cursors_.empty()) {
507
+ return;
508
+ }
509
+ // advance to next element, which is maybe not valid
510
+ next();
511
+ while_not_valid_next();
512
+ }
513
+
514
+ std::vector<SlotCursor> cursors_;
515
+ bool recurse_;
516
+
517
+ friend inline bool operator!=(
518
+ const slot_iterator_impl<Policy>& a,
519
+ const slot_iterator_impl<Policy>& b) {
520
+ // we are finished iteration when we have no more iteration SlotCursors.
521
+ // end is always an empty iterator with no cursors.
522
+ return (a.cursors_.empty() != b.cursors_.empty());
523
+ }
524
+ };
525
+
526
+ // This type represents lists of parameters, attributes, and
527
+ // submodules contained in the module. It is abstract because
528
+ // they are not stored directly in std::vectors but inside the
529
+ // module's IValue object itself.
530
+ template <typename Policy>
531
+ struct slot_list_impl {
532
+ using iterator = slot_iterator_impl<Policy>;
533
+ using const_iterator = slot_iterator_impl<Policy>;
534
+ using value_type = typename iterator::value_type;
535
+ slot_iterator_impl<Policy> begin() const {
536
+ return slot_iterator_impl<Policy>(module_, recurse_, return_module_);
537
+ }
538
+ slot_iterator_impl<Policy> end() const {
539
+ return slot_iterator_impl<Policy>();
540
+ }
541
+ size_t size() const {
542
+ if (!size_) {
543
+ size_ = size_t(0);
544
+ // NOLINTNEXTLINE(clang-diagnostic-unused-variable)
545
+ for (const value_type& s : *(this)) {
546
+ (void)s; // Suppress unused variable warning
547
+ ++*size_;
548
+ }
549
+ }
550
+ return *size_;
551
+ }
552
+
553
+ slot_list_impl(Module module, bool recurse, bool return_module)
554
+ : module_(std::move(module)),
555
+ recurse_(recurse),
556
+ return_module_(return_module),
557
+ size_(c10::nullopt) {
558
+ if (!recurse && !return_module && Policy::all_slots) {
559
+ size_ = module_.num_slots();
560
+ }
561
+ }
562
+
563
+ private:
564
+ Module module_;
565
+ bool recurse_;
566
+ bool return_module_;
567
+ // size of this list, cached on first request
568
+ // when we need to filter the slot list
569
+ mutable c10::optional<size_t> size_;
570
+ friend struct Module;
571
+ };
572
+
573
+ namespace detail {
574
+
575
+ // slot_iterator_impl always iterate over all the slots in a module,
576
+ // the Policy template argument determines slots should be returned and their
577
+ // types
578
+ struct TORCH_API ModulePolicy {
579
+ // the type of the value being returned
580
+ using value_type = Module;
581
+
582
+ // the logic for creating the type being returned, given the raw IValue
583
+ // of that object.
584
+ static value_type create(
585
+ const std::vector<detail::SlotCursor>& cursors,
586
+ IValue v) {
587
+ return Module(std::move(v).toObject());
588
+ }
589
+ // is slot i in typ something that this iterator should return, otherwise,
590
+ // we skip it.
591
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
592
+ return typ->getAttribute(i)->is_module();
593
+ }
594
+ // are we going to return everything? If so, we can optimize the calculate
595
+ // of the size of the list.
596
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
597
+ };
598
+
599
+ struct TORCH_API ParameterPolicy {
600
+ using value_type = at::Tensor;
601
+ static value_type create(
602
+ const std::vector<detail::SlotCursor>& cursors,
603
+ IValue v) {
604
+ return std::move(v).toTensor();
605
+ }
606
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
607
+ return typ->is_parameter(i) && v.isTensor();
608
+ }
609
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
610
+ };
611
+
612
+ struct TORCH_API BufferPolicy {
613
+ using value_type = at::Tensor;
614
+ static value_type create(
615
+ const std::vector<detail::SlotCursor>& cursors,
616
+ IValue v) {
617
+ return std::move(v).toTensor();
618
+ }
619
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
620
+ return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
621
+ typ->is_buffer(i);
622
+ }
623
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
624
+ };
625
+
626
+ struct TORCH_API AttributePolicy {
627
+ using value_type = IValue;
628
+ static value_type create(
629
+ const std::vector<detail::SlotCursor>& cursors,
630
+ IValue v) {
631
+ return v;
632
+ }
633
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
634
+ return true;
635
+ }
636
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
637
+ };
638
+
639
+ // take a Policy object, and make a version of it that returns the slot.
640
+ // along with the fully qualified name of that slot. This is used for the named_
641
+ // variants like named_parameters().
642
+ template <typename Policy>
643
+ struct NamedPolicy {
644
+ using value_type = Named<typename Policy::value_type>;
645
+ static value_type create(
646
+ const std::vector<detail::SlotCursor>& cursors,
647
+ IValue v) {
648
+ std::string name;
649
+ if (cursors.size() == 1) {
650
+ name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back());
651
+ } else {
652
+ std::ostringstream ss;
653
+ for (const auto i : c10::irange(cursors.size())) {
654
+ if (i > 0) {
655
+ ss << ".";
656
+ }
657
+ ss << nameFragment(cursors[i]);
658
+ }
659
+ name = ss.str();
660
+ }
661
+ return value_type{std::move(name), Policy::create(cursors, std::move(v))};
662
+ }
663
+ static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) {
664
+ return Policy::valid(t, i, v);
665
+ }
666
+ static constexpr bool all_slots = Policy::all_slots;
667
+
668
+ private:
669
+ static std::string nameFragment(const detail::SlotCursor& f) {
670
+ return f.module_.type()->getAttributeName(f.i_);
671
+ }
672
+ };
673
+
674
+ } // namespace detail
675
+
676
+ TORCH_API bool& getInlineEverythingMode();
677
+
678
+ namespace script {
679
+ // We once had a `script::` namespace that was deleted. This is for backcompat
680
+ // of the public API; new code should not use this type alias.
681
+ using Module = ::torch::jit::Module;
682
+ using ExtraFilesMap = ::torch::jit::ExtraFilesMap;
683
+ } // namespace script
684
+
685
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/functional.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/api/method.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch::jit {
11
+
12
+ struct Resolver;
13
+ using ResolverPtr = std::shared_ptr<Resolver>;
14
+
15
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
16
+
17
+ // Throw this in C++ land if `attr` fails. This will be converted to a Python
18
+ // AttributeError by the Python binding code
19
+ class ObjectAttributeError : public std::runtime_error {
20
+ public:
21
+ ObjectAttributeError(const std::string& what) : std::runtime_error(what) {}
22
+ };
23
+
24
+ struct TORCH_API Object {
25
+ Object() = default;
26
+ Object(const Object&) = default;
27
+ Object& operator=(const Object&) = default;
28
+ Object(Object&&) noexcept = default;
29
+ Object& operator=(Object&&) noexcept = default;
30
+ Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
31
+ Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
32
+ Object(
33
+ c10::QualifiedName,
34
+ std::shared_ptr<CompilationUnit> cu,
35
+ bool shouldMangle = false);
36
+
37
+ ObjectPtr _ivalue() const {
38
+ TORCH_INTERNAL_ASSERT(_ivalue_);
39
+ return _ivalue_;
40
+ }
41
+
42
+ c10::ClassTypePtr type() const {
43
+ return _ivalue()->type();
44
+ }
45
+
46
+ struct Property {
47
+ std::string name;
48
+ Method getter_func;
49
+ c10::optional<Method> setter_func;
50
+ };
51
+
52
+ void setattr(const std::string& name, c10::IValue v) {
53
+ if (_ivalue()->type()->hasConstant(name)) {
54
+ TORCH_CHECK(
55
+ false,
56
+ "Can't set constant '",
57
+ name,
58
+ "' which has value:",
59
+ _ivalue()->type()->getConstant(name));
60
+ } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) {
61
+ const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot);
62
+ TORCH_CHECK(
63
+ v.type()->isSubtypeOf(*expected),
64
+ "Expected a value of type '",
65
+ expected->repr_str(),
66
+ "' for field '",
67
+ name,
68
+ "', but found '",
69
+ v.type()->repr_str(),
70
+ "'");
71
+ _ivalue()->setSlot(*slot, std::move(v));
72
+ } else {
73
+ TORCH_CHECK(false, "Module has no attribute '", name, "'");
74
+ }
75
+ }
76
+
77
+ c10::IValue attr(const std::string& name) const {
78
+ if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
79
+ return _ivalue()->getSlot(*r);
80
+ }
81
+ if (auto r = _ivalue()->type()->findConstantSlot(name)) {
82
+ return _ivalue()->type()->getConstant(*r);
83
+ }
84
+ std::stringstream err;
85
+ err << _ivalue()->type()->repr_str() << " does not have a field with name '"
86
+ << name.c_str() << "'";
87
+ throw ObjectAttributeError(err.str());
88
+ }
89
+
90
+ c10::IValue attr(const std::string& name, c10::IValue or_else) const {
91
+ if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
92
+ return _ivalue()->getSlot(*r);
93
+ }
94
+ if (auto r = _ivalue()->type()->findConstantSlot(name)) {
95
+ return _ivalue()->type()->getConstant(*r);
96
+ }
97
+ return or_else;
98
+ }
99
+
100
+ bool hasattr(const std::string& name) const {
101
+ return _ivalue()->type()->hasAttribute(name) ||
102
+ _ivalue()->type()->hasConstant(name);
103
+ }
104
+
105
+ // each object owns its methods. The reference returned here
106
+ // is guaranteed to stay valid until this module has been destroyed
107
+ Method get_method(const std::string& name) const {
108
+ if (auto method = find_method(name)) {
109
+ return *method;
110
+ }
111
+ AT_ERROR("Method '", name, "' is not defined.");
112
+ }
113
+
114
+ const std::vector<Method> get_methods() const {
115
+ return c10::fmap(type()->methods(), [&](Function* func) {
116
+ return Method(_ivalue(), func);
117
+ });
118
+ }
119
+
120
+ bool has_property(const std::string& name) const {
121
+ for (const auto& prop : type()->properties()) {
122
+ if (prop.name == name) {
123
+ return true;
124
+ }
125
+ }
126
+ return false;
127
+ }
128
+
129
+ const Property get_property(const std::string& name) const {
130
+ for (const auto& prop : type()->properties()) {
131
+ if (prop.name == name) {
132
+ c10::optional<Method> setter = c10::nullopt;
133
+ if (prop.setter) {
134
+ setter = Method(_ivalue(), prop.setter);
135
+ }
136
+ return Property{
137
+ prop.name, Method(_ivalue(), prop.getter), std::move(setter)};
138
+ }
139
+ }
140
+ AT_ERROR("Property '", name, "' is not defined.");
141
+ }
142
+
143
+ const std::vector<Property> get_properties() const {
144
+ return c10::fmap(type()->properties(), [&](ClassType::Property prop) {
145
+ c10::optional<Method> setter = c10::nullopt;
146
+ if (prop.setter) {
147
+ setter = Method(_ivalue(), prop.setter);
148
+ }
149
+ return Property{
150
+ std::move(prop.name),
151
+ Method(_ivalue(), prop.getter),
152
+ std::move(setter)};
153
+ });
154
+ }
155
+
156
+ c10::optional<Method> find_method(const std::string& basename) const;
157
+
158
+ /// Run a method from this module.
159
+ ///
160
+ /// For example:
161
+ /// @code
162
+ /// IValue output = module->run("relu_script", a, b);
163
+ /// @endcode
164
+ ///
165
+ /// To get a compile a module from a source string, see torch::jit::compile
166
+ ///
167
+ /// @param method_name The name of the method to run
168
+ /// @param args Arguments to be passed to the method
169
+ /// @return An IValue containing the return value (or values if it is a tuple)
170
+ /// from the method
171
+ template <typename... Types>
172
+ IValue run_method(const std::string& method_name, Types&&... args) {
173
+ return get_method(method_name)({IValue(std::forward<Types>(args))...});
174
+ }
175
+
176
+ // so that C++ users can easily add methods
177
+ void define(const std::string& src, const ResolverPtr& resolver = nullptr);
178
+
179
+ size_t num_slots() const {
180
+ return _ivalue()->slots().size();
181
+ }
182
+
183
+ // shallow copy the object
184
+ Object copy() const;
185
+
186
+ // Copies all the attributes of the object recursively without creating new
187
+ // `ClassType`, including deepcopy of Tensors
188
+ Object deepcopy() const;
189
+
190
+ private:
191
+ // mutable be we lazily initialize in module_object.
192
+ mutable ObjectPtr _ivalue_;
193
+ };
194
+
195
+ namespace script {
196
+ // We once had a `script::` namespace that was deleted. This is for backcompat
197
+ // of the public API; new code should not use this type alias.
198
+ using Object = ::torch::jit::Object;
199
+ } // namespace script
200
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_handler.h ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <ATen/core/ivalue.h>

#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/scope.h>

#include <atomic>

namespace torch {
namespace jit {

/*
 * BackendDebugHandleManager is responsible for issuing debug handles to
 * backends. Debug handles are associated with nodes of a graph.
 * BackendDebugHandleManager also maintains a map
 * [debug-handle, DebugInfoTuple = {source range, inlined callstack ptr}] that
 * will help generate a callstack for exception raised using debug handles.
 * Effectively debug handles are something that is given to backend and later
 * when an exception occurs in the backend, backend can tell, using debug
 * handle, that an exception occurred here. Then the runtime can generate
 * callstack corresponding to the exception.
 * There are two parts to BackendDebugHandleManager:
 * 1. static std::atomic debug_handle
 * 2. Map of [debug-handle, DebugInfoTuple]
 *
 * About 1:
 * Why do they have to be unique. The reason is that by ensuring
 * uniqueness of debug handles, we remove the burden of another layer of
 * mapping where we need to say this set of debug handles were generated for
 * this lowered module or this bytecode function. This simplifies the API for
 * serialization since debug handles can uniquely identify DebugInfoTuple.
 * Thus simplifies the runtime API for throwing exception. Exception throwing
 * only needs to know debug_handle and not which module or method threw it.
 * There are 2 issues to keep in mind, though, for static std::atomic
 * debug_handle: A. Performance implications of using atomic variable. However
 * this is only used for compilation so we assume to absorb some of that
 * penalty. Plus if there is no contention then we should have less to worry
 * about. B. If repeated compilation is part of a long running process then we
 * may overflow int64_t. We may detect and fail on this. For now this is not
 * done.
 *
 * Now about 2:
 * There are two use cases for [debug-handle, DebugInfoTuple]
 * A. During bytecode generation the DebugInfoTuple corresponding to the nodes
 * of the inlined graph being serialized, are stored in this object and a
 * unique debug handle is returned. This unique debug handle is stored in
 * mobile_debug info for pytorch lite models. It will be used for raising
 * exceptions as well as profiling. B. During backend lowering, each backend's
 * preprocess/compile method can compile method's graph and serialize those
 * methods. Once the method is lowered to backend, graph is essentially lost.
 * Without access to graph it is hard to generate model level debug info. Thus
 * the debug handles provide a way to map nodes of the graph to the model level
 * debug info.
 *
 * During byte-code model serialization, [debug-handle, DebugInfoTuple] is
 * serialized. Now we know a. debug handles and b. how to map debug handles to
 * model source code. Thus we can either do eager symbolication by converting
 * debug handles to corresponding source code at runtime, or do lazy
 * symbolication offline.
 *
 * Note that it is not necessary to serialize [debug-handle, DebugInfoTuple]
 * corresponding to lowered backend if the lowering process, that is
 * preprocess/compile, and execution happens in the same session, then eager
 * symbolication can be employed.
 *
 * Now how does BackendDebugHandleManager capture all of the above?
 * By providing two API.
 * 1. getNextDebugHandle which given a Node* returns a unique debug handle,
 * that will uniquely identify DebugInfoTuple.
 * and
 * 2. getCallStackPtrMap which returns the map
 * [debug-handle, DebugInfoTuple]
 *
 * 1 provides debug handles to backends and 2 provides runtime a way to map
 * debug handles to source level debug info.
 *
 * So why does debug handle map to DebugInfoTuple = {source range and inlined
 * cs}? {debug_handle, source_range_tag, serialized_callstack} Take this
 * example: class L(nn.Module): def __init__(self):
 *     ...
 *   def forward(self, x):
 *     return x * 5
 * class M(nn.Module):
 *   def __init__(self):
 *     ...
 *   def forward(self, x):
 *     return x - 2
 * class N(nn.Module):
 *   def __init__(self):
 *     self.m = M()
 *   def forward(self, x):
 *     return self.m(x) + 3
 * m = torch.jit.script(N())
 * Once you inline m's forward method, m.forward.graph will look something
 * like this
 * graph(%self...):
 *  %x = aten::mul(..)
 *  %x = aten::sub(x, ..)
 *  %y = aten::add(x, ..)
 *  ..
 * Inlined callstack ptr for these two nodes will look like:
 * aten::mul's inlined CS (callstack): [N.forward, source range] -> [M.forward,
 * source range] aten::sub's inlined CS (callstack): [N.forward, source range]
 * aten::add's inlined CS: null
 * mul node's inlined CS contains only information about the callsites' source
 * range The information about mul node's source range ('return x * 5') is not
 * available in its inlined CS. It is rather part of node's source range
 * instead of inlined CS. Thus to get full stack: [N.forward, source range] ->
 * [M.forward, source range] -> [aten::mul's source range] We need to track
 * mul's source range and inlined CS both.
 */

// Map from a unique debug handle to the debug info (source range + inlined
// callstack) recorded for the node that received that handle.
using BackendDebugInfoMapType =
    std::unordered_map<torch::jit::DebugHandleType, DebugInfoTuple>;

/*
 * This class is used to generate debug info map.
 * backend's preprocess will call generate_debug_handles (see
 * backend_detail.cpp), which uses debug_handle_manager to generate debug
 * handles. When lowering process finishes, calling stopRecording will
 * return debug info map from debug_handle_manager
 */
class TORCH_API BackendDebugInfoRecorder {
 public:
  BackendDebugInfoRecorder() = default;
  // Issues a new unique debug handle and records `node`'s debug info
  // under it (see class comment above for the overall scheme).
  int64_t getNextDebugHandle(const Node* node);
  // Returns the accumulated [debug-handle, DebugInfoTuple] map.
  // Reason this is not done as RAII is that work done in stopRecording
  // can throw, and throwing with dtor will call terminate and thus voids any
  // exception catching at a higher level.
  BackendDebugInfoMapType stopRecording();
  // Assigns debug handles for the nodes of `graph` and returns the
  // node -> handle mapping (used by backend preprocess; see
  // backend_detail.cpp).
  NodeToDebugHandle generate_debug_handles(const std::shared_ptr<Graph>& graph);

 private:
  // Process-wide counter; atomic so concurrent compilations still issue
  // unique handles (see "About 1" in the comment above).
  static std::atomic<DebugHandleType> unique_debug_handle_;
  // Accumulated handle -> debug-info map, returned by stopRecording().
  BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_;
};

} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/api/module.h>

#include <ATen/core/jit_type.h>

#include <functional>

namespace torch {
namespace jit {

// Type of the debug handles handed to backends (see
// backend_debug_handler.h for how handles are issued and used).
using DebugHandleType = int64_t;

// Mapping from each graph node to its assigned debug handle.
using NodeToDebugHandle = std::unordered_map<Node*, DebugHandleType>;

// Callback given to a backend's preprocess step so it can obtain debug
// handles for the nodes of the graph it is lowering.
using BackendDebugHandleGenerator =
    std::function<NodeToDebugHandle(const std::shared_ptr<Graph>&)>;

namespace detail {

// Signature of a backend-specific preprocess function: takes the module
// being lowered and the per-method compile spec, and returns the
// "processed" payload (an IValue) for the backend.
using BackendPreprocessFunction = std::function<c10::IValue(
    const Module&,
    const c10::Dict<IValue, IValue>&,
    const BackendDebugHandleGenerator& generate_debug_handles)>;

// Registers `preprocess` under the backend named `name`.
TORCH_API void registerBackendPreprocessFunction(
    const std::string& name,
    const BackendPreprocessFunction& preprocess);

// True if a preprocess function has been registered for `name`.
bool hasBackendPreprocessFunction(const std::string& name);

// Looks up the preprocess function registered for `name`.
BackendPreprocessFunction getBackendPreprocessFunction(const std::string& name);

// Generates the lowered wrapper module for `orig_module` targeting the
// backend named `backend_name`.
TORCH_API Module codegen_backend_module(
    const std::string& backend_name,
    const Module& orig_module,
    const c10::Dict<IValue, IValue>& method_compile_spec,
    const c10::DictTypePtr& any_dict_ty);
} // namespace detail
} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+
4
+ namespace c10 {
5
+ class TORCH_API BackendRuntimeException : public c10::Error {
6
+ public:
7
+ // Use debug_handle to throw exception
8
+ BackendRuntimeException(
9
+ SourceLocation loc,
10
+ std::string msg,
11
+ int64_t debug_handle)
12
+ : c10::Error(loc, msg) {
13
+ debug_handles.push_back(debug_handle);
14
+ }
15
+ // If rethrowing, can push another debug_handle
16
+ // This is useful in couple of scenarios.
17
+ // 1. A submodule is lowered and lite interperter has CallMethod
18
+ // to lowered module's method. In this case lowered module will throw with
19
+ // a handle, plus there will be another debug handle corresponding
20
+ // to the CallMethod node in lite interpreter. Both together give complete
21
+ // trace. This function allows lite interpreter to rethrow with debug
22
+ // handle it has for CallMethod.
23
+ // 2. Another scenarios is when lite interperter can make function calls or
24
+ // the lowered backend also has function call ability. Thus we have
25
+ // multiple function frames. Now we need a stack of handles to symbolicate
26
+ // entire stack trace.
27
+ void pushDebugHandle(int64_t debug_handle) {
28
+ debug_handles.push_back(debug_handle);
29
+ }
30
+ const std::vector<int64_t>& getDebugHandles() {
31
+ return debug_handles;
32
+ }
33
+
34
+ private:
35
+ // Stores stack of debug handles.
36
+ std::vector<int64_t> debug_handles;
37
+ };
38
+
39
+ } // namespace c10
40
+ #define TORCH_DELEGATED_BACKEND_THROW(cond, msg, debug_handle) \
41
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
42
+ throw ::c10::BackendRuntimeException( \
43
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
44
+ msg, \
45
+ debug_handle); \
46
+ }
47
+
48
+ #define TORCH_DELEGATED_BACKEND_RETHROW(e, debug_handle) \
49
+ do { \
50
+ e.pushDebugHandle(debug_handle); \
51
+ throw; \
52
+ } while (false)
53
+
54
+ #define DEBUG_HANDLE_UNKNOWN -1
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/python/pybind.h>
#include <torch/csrc/utils/pybind.h>

namespace torch {
namespace jit {
// Initialize Python bindings for JIT to_<backend> functions.
// `module` is the Python extension module the bindings are added to
// (presumably torch._C — confirm at the call site).
void initJitBackendBindings(PyObject* module);
} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/custom_class.h>

namespace torch {
namespace jit {

// Interface for a JIT backend. Concrete backends implement these pure
// virtual methods; the class derives from CustomClassHolder so instances
// can be held as TorchScript custom-class objects.
class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
 public:
  PyTorchBackendInterface() noexcept;
  ~PyTorchBackendInterface() override;

  // Returns true if the backend is available to process delegation calls.
  virtual bool is_available() = 0;

  // Compile the module contained in \p processed using the details provided in
  // \p method_compile_spec for each module method that should be compiled for
  // the backend. \p method_compile_spec should be of type Dict<string, Any>.
  // \returns a dictionary of type Dict<string, Any> that contains a backend
  // handle for each method that can run on the backend (i.e. each key in \p
  // method_compile_spec).
  virtual c10::impl::GenericDict compile(
      c10::IValue processed,
      c10::impl::GenericDict method_compile_spec) = 0;

  // Execute the method specified by \p handle using \p inputs. \returns the
  // outputs as a tuple.
  virtual c10::impl::GenericList execute(
      c10::IValue handle,
      c10::impl::GenericList inputs) = 0;
};
} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/backends/backend_detail.h>
namespace torch {
namespace jit {
// Registration helper: constructing an instance (typically a file-scope
// static) registers `preprocess` for the backend named `name`.
class backend_preprocess_register {
  // Name under which the preprocess function was registered.
  std::string backend_name_;

 public:
  backend_preprocess_register(
      const std::string& name,
      const detail::BackendPreprocessFunction& preprocess)
      : backend_name_(name) {
    // Registration happens as a constructor side effect.
    detail::registerBackendPreprocessFunction(name, preprocess);
  }
};
} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/frontend/resolver.h>

namespace torch {
namespace jit {
// Create a Resolver for use in generating LoweredModules for specific
// backends (resolves the names referenced by the generated lowering code).
TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <functional>
#include <memory>
#include <string>

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch {
namespace jit {

// Convert a graph with Loads & Stores into SSA form.
// Takes the graph by shared_ptr reference; presumably mutates it in place —
// confirm against the implementation.
TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);

} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/util/Optional.h>
#include <torch/csrc/jit/frontend/tree.h>

namespace torch {
namespace jit {

// One frame of the frontend compilation call stack: the name of the user
// function being compiled and the source location of its call site.
struct Call {
  std::string fn_name;
  SourceRange caller_range;
};

// Exception used to report TorchScript frontend errors together with the
// source range that produced them. The message is built incrementally via
// the stream operator below, e.g. `throw ErrorReport(range) << "bad type";`.
struct TORCH_API ErrorReport : public std::exception {
  ErrorReport(const ErrorReport& e);

  explicit ErrorReport(SourceRange r);
  explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
  explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}

  const char* what() const noexcept override;

  struct TORCH_API CallStack {
    // These functions are used to report why a function was being compiled
    // (i.e. what was the call stack of user functions at compilation time that
    // led to this error)
    CallStack(const std::string& name, const SourceRange& range);
    ~CallStack();

    // Change the range that is relevant for the current function (i.e. after
    // each successful expression compilation, change it to the next expression)
    static void update_pending_range(const SourceRange& range);
  };

  // Renders the current compilation call stack as a string.
  static std::string current_call_stack();

 private:
  template <typename T>
  friend const ErrorReport& operator<<(const ErrorReport& e, const T& t);

  // mutable: operator<< appends through a const reference, and what() is
  // const but needs to materialize the message.
  mutable std::stringstream ss;
  // Owns a copy of the source text so the report outlives the original
  // Source (see OwnedSourceRange in source_range.h).
  OwnedSourceRange context;
  mutable std::string the_message;
  std::vector<Call> error_stack;
};

// Appends `t` to the report's message stream and returns the same report so
// calls can be chained before throwing.
template <typename T>
const ErrorReport& operator<<(const ErrorReport& e, const T& t) {
  e.ss << t;
  return e;
}

} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <functional>
#include <memory>
#include <string>

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch {
namespace jit {

// Inlines the condition block of loops in `graph` (declaration only;
// see the implementation for the exact transformation).
TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
// Inlines the contents of `block` immediately before `before_node`.
TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);

} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <functional>
#include <memory>
#include <string>

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/frontend/error_report.h>
#include <torch/csrc/jit/frontend/resolver.h>
#include <torch/csrc/jit/frontend/sugared_value.h>
#include <torch/csrc/jit/frontend/tree_views.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch {
namespace jit {

// Runs the standard post-emission cleanup passes over `to_clean`
// (declaration only; see ir_emitter.cpp for the pass list).
TORCH_API void runCleanupPasses(std::shared_ptr<Graph>& to_clean);

// Whether `name` is considered meaningful (as opposed to a generated
// placeholder) — see the implementation for the exact criteria.
TORCH_API bool meaningfulName(const std::string& name);

} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <ATen/core/jit_type.h>
#include <ATen/core/qualified_name.h>
#include <torch/csrc/jit/frontend/sugared_value.h>

namespace torch {
namespace jit {

struct Resolver;
using ResolverPtr = std::shared_ptr<Resolver>;

/**
 * class Resolver
 *
 * Represents an "outer environment" in which we can look up names and return
 * a corresponding SugaredValue. This is used during compilation to resolve
 * references to names which are not defined internal to the graph.
 *
 * Example: PythonResolver looks at the enclosing Python scope for `name`.
 *
 * NOTE: When adding methods, keep this an abstract class (i.e. all new methods
 * should be purely virtual). Resist the urge to provide a default
 * implementation; you should explicitly think about how each resolver would
 * handle the method.
 */
struct Resolver {
  virtual ~Resolver() = default;

  // Resolve a given name to a SugaredValue. This takes the method `m` that the
  // caller is currently constructing, since we may need to insert nodes into
  // the graph to create a value.
  // Returns nullptr when the name is not found.
  virtual std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) {
    return nullptr;
  }

  // Resolve `name` to a TypePtr. Returns nullptr when the name does not
  // denote a known type.
  virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
    return nullptr;
  }
};

// A resolver that only understands "torch.foo()" lookups.
struct NativeResolver : public Resolver {
  std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) override {
    // Map the name `torch` to the builtin `aten` operator namespace.
    if (name == "torch") {
      return std::make_shared<BuiltinModule>("aten");
    }
    return nullptr;
  }

  // NativeResolver knows no types.
  TypePtr resolveType(const std::string& name, const SourceRange& loc)
      override {
    return nullptr;
  }
};

// Convenience factory for the default NativeResolver.
inline std::shared_ptr<NativeResolver> nativeResolver() {
  return std::make_shared<NativeResolver>();
}
} // namespace jit
} // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>

#include <algorithm>
#include <iterator>
#include <memory>
#include <numeric>
#include <ostream>
#include <regex>
#include <sstream>
#include <unordered_map>

namespace torch::jit {

class SourceRangeUnpickler;
struct SourceRange;

// A string-like class backed by a vector of string_view.
// The string represented is logically the concatenation of the string_views.
// This has the advantage of not needing contiguous memory.
struct TORCH_API StringCordView {
  StringCordView();
  StringCordView(const StringCordView&) = default;
  StringCordView(StringCordView&&) noexcept = default;
  StringCordView(
      std::vector<c10::string_view> inputs,
      std::vector<std::shared_ptr<std::string>> ownerships);

  StringCordView& operator=(const StringCordView&) = default;
  StringCordView& operator=(StringCordView&&) noexcept = default;

  // Total number of characters across all pieces.
  size_t size() const {
    return accumulated_sizes_.back();
  }

  size_t find(const std::string& tok, size_t start) const;
  size_t find_regex(const std::string& tok, size_t start) const;
  StringCordView substr(size_t start, size_t size) const;

  // Character at absolute position `index` (walks to the owning piece).
  char at(size_t index) const {
    return *iter_for_pos(index);
  }
  char operator[](size_t index) const {
    return at(index);
  }

  // Materializes the whole cord into a single contiguous std::string.
  std::string str() const {
    std::stringstream ss;
    for (auto s : pieces_) {
      ss << std::string(s);
    }
    return ss.str();
  }

  bool operator==(const std::string& rhs) const;

  bool operator==(const StringCordView& rhs) const;

  c10::string_view piece(size_t index) const {
    return pieces_[index];
  }

  // Forward character iterator over the cord. `line_` indexes the current
  // piece and `pos_` is the offset within that piece.
  struct Iterator {
    Iterator(
        const StringCordView* str,
        size_t start_line,
        size_t start_pos,
        size_t size)
        : line_(start_line), pos_(start_pos), str_(str), size_(size) {}
    explicit Iterator(const StringCordView* str)
        : Iterator(str, 0, 0, str->size()) {}

    Iterator() : Iterator(nullptr, 0, 0, 0) {}

    Iterator(const Iterator&) = default;
    Iterator(Iterator&&) = default;
    Iterator& operator=(const Iterator&) = default;
    Iterator& operator=(Iterator&&) = default;

    Iterator operator++() {
      if (size_ == 0) {
        return *this;
      }
      if ((pos_ + 1) < str_->pieces_[line_].size()) {
        pos_++;
      } else {
        // End of the current piece: advance to the start of the next one.
        line_++;
        pos_ = 0;
      }
      return *this;
    }

    Iterator operator++(int) {
      Iterator prev(*this);
      ++(*this);
      return prev;
    }

    // Returns a copy advanced by one position, leaving *this unchanged.
    Iterator next_iter() const {
      Iterator next(*this);
      ++next;
      return next;
    }

    Iterator& operator+=(size_t num) {
      if (!has_next()) {
        return *this;
      }
      size_t target_pos = pos_ + num;
      // Fast path when the target stays inside the current piece.
      // NOTE(review): this compares the piece-relative `target_pos` against
      // the absolute accumulated_sizes_ entries — confirm this is intended.
      if (target_pos >= str_->accumulated_sizes_[line_] &&
          (line_ + 1) < str_->accumulated_sizes_.size() &&
          target_pos < str_->accumulated_sizes_[line_ + 1]) {
        pos_ = target_pos;
        return *this;
      }

      // Slow path: recompute the iterator from the absolute position.
      size_t target_abs_pos = pos() + num;
      *this = str_->iter_for_pos(target_abs_pos);
      return *this;
    }

    bool operator==(const Iterator& rhs) const {
      // All exhausted/end iterators compare equal regardless of position.
      if (!has_next() && !rhs.has_next()) {
        return true;
      }
      return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_);
    }
    bool operator!=(const Iterator& rhs) {
      return !((*this) == rhs);
    }
    bool has_next() const {
      return size_ > 0 && (line_ < str_->pieces_.size());
    }

    char operator*() const {
      TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size());
      TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size());
      return str_->pieces_[line_].at(pos_);
    }

    // returns rest of the line of the current iterator
    c10::string_view rest_line() const {
      if (line_ >= str_->pieces_.size()) {
        return "";
      }

      c10::string_view cur_line = str_->pieces_[line_];
      return cur_line.substr(pos_, std::string::npos);
    }

    // Absolute character position within the whole cord.
    size_t pos() const {
      if (size_ == 0) {
        return 0;
      }
      return str_->accumulated_sizes_[line_] + pos_;
    }

   private:
    size_t line_;
    size_t pos_;
    const StringCordView* str_;
    size_t size_;
    friend struct StringCordView;
  };

  Iterator begin() const {
    return Iterator(this, 0, 0, size());
  }
  Iterator end() const {
    return Iterator(this, pieces_.size(), 0, 0);
  }
  Iterator iter_for_pos(size_t pos) const;

 private:
  // The pieces whose concatenation forms the logical string.
  std::vector<c10::string_view> pieces_;
  // Prefix sums of piece sizes: accumulated_sizes_[i] is the absolute start
  // offset of pieces_[i], with a final entry holding the total size
  // (used by size() and Iterator::pos()).
  std::vector<size_t> accumulated_sizes_;
  // Keeps alive any std::strings that pieces_ views into.
  std::vector<std::shared_ptr<std::string>> owned_strings_;
};
180
+
181
// Source represents a code segment. It keeps track of:
//  - text_view : the view into text of the code segment
//  - filename (optional) : if present, represents the name of the file from
//  which the code segment originated.
//  - starting_line_no : represents the line in the original file where the
//  code segment started.
struct TORCH_API Source {
  // Whether or not Source should copy the string passed in the constructor.
  enum CopiesString { COPIES_STRING, DONT_COPY };

  explicit Source(
      c10::string_view text_view,
      c10::optional<std::string> filename = c10::nullopt,
      size_t starting_line_no = 0,
      std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr,
      CopiesString copies_str = COPIES_STRING)
      : filename_(std::move(filename)),
        starting_line_no_(starting_line_no),
        gen_ranges_(std::move(gen_ranges)) {
    if (copies_str == COPIES_STRING) {
      // Copy the text and let the cord own the copy.
      std::shared_ptr<std::string> allocated_str =
          std::make_shared<std::string>(text_view.data(), text_view.size());
      text_view_ = StringCordView({*allocated_str}, {allocated_str});
    } else {
      // No ownership taken: the caller must keep the text alive for the
      // lifetime of this Source.
      text_view_ = StringCordView({text_view}, {});
    }

    calc_line_start_offsets();
  }

  explicit Source(
      StringCordView str,
      c10::optional<std::string> filename = c10::nullopt,
      size_t starting_line_no = 0,
      std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
      : text_view_(std::move(str)),
        filename_(std::move(filename)),
        starting_line_no_(starting_line_no),
        gen_ranges_(std::move(gen_ranges)) {
    calc_line_start_offsets();
  }
  // Given a line number (within source_), return the byte offset of the
  // beginning of that line.
  size_t offset_for_line(size_t line) const {
    return line_starting_offsets_.at(line);
  }

  // Returns number of lines present.
  size_t num_lines() const {
    return line_starting_offsets_.size();
  }

  // Calculate the line (within the code segment) on which `offset` resides.
  size_t lineno_for_offset(size_t offset) const {
    // upper_bound finds the first line starting AFTER `offset`; the line
    // containing `offset` is the one before it.
    auto iter = std::upper_bound(
        line_starting_offsets_.begin(), line_starting_offsets_.end(), offset);
    return iter - line_starting_offsets_.begin() - 1;
  }

  // Calculate the line (within the original source file, if present) on which
  // `lineno` resides.
  size_t lineno_to_source_lineno(size_t lineno) const {
    if (filename_) {
      return lineno + starting_line_no_;
    } else {
      return lineno;
    }
  }

  // Returns the text of line `lineno` (including up to the next line start).
  StringCordView get_line(size_t lineno) const {
    auto start = offset_for_line(lineno);
    auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start
                                           : text_view_.size() - start;
    return text_view_.substr(start, size);
  }

  const StringCordView& text_str() const {
    return text_view_;
  }

  char char_at(size_t index) const {
    return text_view_.at(index);
  }

  size_t size() const {
    return text_view_.size();
  }

  c10::optional<std::string>& filename() {
    return filename_;
  }

  size_t starting_line_no() const {
    return starting_line_no_;
  }

  c10::optional<SourceRange> findSourceRangeThatGenerated(
      const SourceRange& range);

  ~Source() = default;

 private:
  // Builds line_starting_offsets_ from the text: entry i is the offset of
  // the first character of line i.
  void calc_line_start_offsets() {
    line_starting_offsets_.clear();
    line_starting_offsets_.push_back(0);
    size_t pos = 0;
    // ++pos both skips past the found '\n' and records the offset of the
    // character right after it (the start of the next line).
    while ((pos = text_view_.find("\n", pos)) != std::string::npos) {
      line_starting_offsets_.push_back(++pos);
    }
  }

  StringCordView text_view_;

  c10::optional<std::string> filename_;
  // If filename_ is not present, starting_line_no_ is don't care
  size_t starting_line_no_;
  // Starting offsets for lines into the source. e.g. line 0 starts at
  // line_starting_offsets_[0], etc.
  std::vector<size_t> line_starting_offsets_;

  // Used by findSourceRangeThatGenerated to map back to original ranges.
  std::shared_ptr<SourceRangeUnpickler> gen_ranges_;
};
303
+
304
// A SourceRange is a reference to subset of a Source, specified by `start` and
// `end` byte offsets into the source text.
struct TORCH_API SourceRange {
  SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_)
      : source_view_(std::move(source_view)), start_(start_), end_(end_) {
    if (source_view_) {
      // Cache an iterator at `start_` so token_text() is cheap.
      start_iter_ = source_view_->text_str().iter_for_pos(start_);
    }
  }

  // Empty range with no backing source.
  SourceRange() : source_view_(nullptr), start_(0), end_(0) {}

  SourceRange(
      std::shared_ptr<Source> source_view_,
      StringCordView::Iterator start_iter,
      size_t end_)
      : source_view_(std::move(source_view_)),
        start_(start_iter.pos()),
        end_(end_),
        start_iter_(start_iter) {}

  // Text of the range taken from the start iterator's piece.
  // NOTE(review): rest_line() only covers the current piece, so this assumes
  // the token does not span pieces — confirm for multi-piece sources.
  const c10::string_view token_text() const {
    size_t size = end() - start();
    return start_iter_.rest_line().substr(0, size);
  }

  // Full text of the range (handles ranges spanning multiple pieces).
  const StringCordView text() const {
    return source_view_->text_str().substr(start(), end() - start());
  }
  size_t size() const {
    return end() - start();
  }
  // Number of context lines shown around a highlighted range.
  static const size_t CONTEXT = 3;
  void highlight(std::ostream& out) const;

  // Customizable version of 'highlight' method.
  void print_with_context(
      std::ostream& out,
      size_t context,
      bool highlight,
      const std::string& funcname) const;

  const std::shared_ptr<Source>& source() const {
    return source_view_;
  }
  size_t start() const {
    return start_;
  }
  size_t end() const {
    return end_;
  }
  // Renders the highlighted range into a string.
  std::string str() const {
    std::stringstream ss;
    highlight(ss);
    return ss.str();
  }

  // (filename, source line, column) of the range start, or nullopt when
  // there is no backing source or the source has no filename.
  c10::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const {
    if (!source_view_ || !source()->filename()) {
      return c10::nullopt;
    }

    auto lineno = source_view_->lineno_for_offset(start_);
    auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno);
    // TODO: c10::optional<>::value returns an rvalue ref so can't use it here??
    return std::make_tuple<std::string, size_t, size_t>(
        source_view_->filename().value_or(""),
        source_view_->lineno_to_source_lineno(lineno),
        (size_t)col_offset);
  }

  bool operator==(const SourceRange& rhs) const {
    return start() == rhs.start() && end() == rhs.end() &&
        source() == rhs.source();
  }

  bool operator!=(const SourceRange& rhs) const {
    return !(*this == rhs);
  }

  // If this range was produced by a transformation, returns the range in the
  // original source that generated it (requires a backing source).
  c10::optional<SourceRange> findSourceRangeThatGenerated() const {
    if (!source_view_) {
      return c10::nullopt;
    }
    return source_view_->findSourceRangeThatGenerated(*this);
  }

 protected:
  std::shared_ptr<Source> source_view_;

 private:
  size_t start_;
  size_t end_;
  // Cached iterator positioned at start_ (see constructors).
  StringCordView::Iterator start_iter_;
};
399
+
400
// OwnedSourceRange is just like a SourceRange except that it owns a freshly
// copied `Source` instead of sharing the original one. Thus OwnedSourceRange
// owns a copy of the source text.
struct OwnedSourceRange : public SourceRange {
  explicit OwnedSourceRange(const SourceRange& source_range)
      : SourceRange(source_range) {
    const auto& source = source_range.source();
    if (source) {
      // Re-materialize the text into a new Source that owns its copy.
      source_view_ = std::make_shared<Source>(
          source->text_str().str(),
          source->filename(),
          source->starting_line_no());
    }
  }
};

// Hash functor so SourceRange can be used as an unordered_map key
// (see SourceRangeTagMap below).
struct TORCH_API SourceRangeHasher {
 public:
  size_t operator()(const torch::jit::SourceRange& key) const;
};

// One frame of a stack trace: a filename plus the range within it.
struct StackEntry {
  std::string filename;
  SourceRange range;
};

TORCH_API void format_stack_trace(
    std::ostream& out,
    const std::vector<StackEntry>& entries);

// Streaming a range prints its highlighted form.
inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
  range.highlight(out);
  return out;
}

// A pair of (byte offset, SourceRange) describing a specific segment
// of the output stream
struct TaggedRange {
  TaggedRange(size_t bytes, SourceRange range)
      : bytes(bytes), range(std::move(range)) {}
  size_t bytes;
  SourceRange range;
};
using SourceRangeRecords = std::vector<TaggedRange>;
using SourceRangeTagMap =
    std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;

} // namespace torch::jit

namespace std {
// iterator_traits specialization so StringCordView::Iterator works with
// standard algorithms (the iterator itself defines no member typedefs).
template <>
struct iterator_traits<torch::jit::StringCordView::Iterator> {
  using value_type = char;
  using difference_type = ptrdiff_t;
  using pointer = char*;
  using reference = char&;
  using iterator_category = std::forward_iterator_tag;
};
} // namespace std
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <c10/macros/Export.h>
8
+ #include <torch/csrc/jit/frontend/source_range.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
/**
 * SourceRef does two things:
 * 1. Owns a Source object.
 * 2. Serves as lookup key to the owned Source in associative containers, for
 *    runtime data aggregation.
 * We don't want to use std::shared_ptr<Source> directly because we want to
 * support heterogeneous lookup, and also shared_ptr is an implementation
 * detail which should be encapsulated.
 */
class TORCH_API SourceRef : public CustomClassHolder {
 public:
  explicit SourceRef(std::shared_ptr<Source> source_view)
      : source_view_(std::move(source_view)) {}
  // Two SourceRefs are equal iff they refer to the same Source object.
  bool operator==(const SourceRef& other) const {
    return source_view_ == other.source_view_;
  }
  // Heterogeneous ordering against a raw Source: compares object addresses.
  bool operator<(const Source& other) const {
    return source_view_.get() < &other;
  }
  friend bool operator<(const Source& other, const SourceRef& self) {
    return &other < self.source_view_.get();
  }
  bool operator<(const SourceRef& other) const {
    return *this < *other.source_view_.get();
  }
  const Source* operator->() const {
    return source_view_.get();
  }

 private:
  std::shared_ptr<Source> source_view_;
};
45
+
46
+ } // namespace jit
47
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dimname.h>
4
+ #include <ATen/core/class_type.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <torch/csrc/Export.h>
10
+
11
+ #include <torch/csrc/jit/frontend/source_range.h>
12
+ #include <torch/csrc/utils/variadic.h>
13
+
14
+ #include <cstdint>
15
+ #include <memory>
16
+ #include <mutex>
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace torch::jit {
21
+ struct Node;
22
+ struct Value;
23
+ struct Graph;
24
+ struct Module;
25
+
26
+ namespace tracer {
27
+
28
+ using ::c10::ivalue::Shared;
29
+
30
+ using ::c10::IValue;
31
+ using ::c10::ivalue::Future;
32
+
33
+ using ::c10::ArrayRef;
34
+ using ::c10::TupleType;
35
+ using ::c10::TupleTypePtr;
36
+ using ::c10::ivalue::ConstantString;
37
+
38
+ using torch::autograd::Variable;
39
+ using variable_list = std::vector<Variable>;
40
+
41
+ TORCH_API std::atomic<bool>& getTracerStateWarnMode();
42
+
43
// State of a trace in progress: the Graph being built plus a stack of
// environment frames mapping live IValues (by identity) to the graph Values
// that carry them.
struct TORCH_API TracingState
    : public std::enable_shared_from_this<TracingState> {
  TracingState();
  ~TracingState();

  // Graph under construction for this trace.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::shared_ptr<Graph> graph;
  // Whether tracer warnings are emitted (see warn() below).
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool warn = getTracerStateWarnMode();
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool strict = true;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool force_outplace = false;
  // Callback used to name variables in diagnostics; default returns "".
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::function<std::string(const Variable& var)> lookup_var_name_fn =
      [](const Variable& var) { return ""; };

  // Push a new (empty) value environment frame.
  void enterFrame() {
    env_stack.emplace_back();
  }

  // Pop the innermost value environment frame.
  void leaveFrame() {
    env_stack.pop_back();
  }

  void setValue(const IValue& v, Value* value);
  void delValue(const IValue& var);
  Value* getValue(const IValue& var);
  Value* getOutput(const IValue& var, size_t i);
  bool hasValue(const IValue& var) const;

  Node* createNode(c10::Symbol op_name, size_t num_outputs);
  void insertNode(Node* node);

 private:
  using WeakIValue = at::WeakIValue;

  // Hash by IValue identity hash.
  struct WeakIValueHasher {
    size_t operator()(const WeakIValue& t) const {
      return t.hash();
    }
  };

  // Equality by IValue identity, not by value.
  struct WeakIValueEq {
    bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
      return t1.isSameIdentity(t2);
    }
  };

  using Frame =
      std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
  std::vector<Frame> env_stack;
};
96
+
97
// This is meant to be used as a thread local place, where we can store extra
// info that gets lost when we call into ATen from Python bindings. One example
// for when this happens is when we get an IntArrayRef argument with e.g. sizes
// for view. When tracing, those might be tensors, which let us encode extra
// data dependencies, but once they get to the ATen call where we actually have
// the tracing logic, they get converted into a raw IntArrayRef, and we lose
// all information. To prevent this, we temporarily stash it in here.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct ArgumentStash {
  // Per-element trace of an IntArrayRef argument; nullptr slots are elements
  // with no traced Value.
  struct IntArrayRefTrace : std::vector<Value*> {
    IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
  };

  // NOTE(review): only inspects intlists, not values — confirm intended.
  static bool empty() {
    return stash.intlists.empty();
  }

  TORCH_API static void stashIntArrayRefElem(
      const std::string& arg_name,
      size_t size,
      size_t idx,
      const Variable& var);

  static bool hasIntArrayRef(const std::string& arg_name) {
    return stash.intlists.count(arg_name) > 0;
  }

  // Removes and returns the stashed trace for `arg_name` (must exist).
  static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
    auto info = std::move(stash.intlists.at(arg_name));
    stash.intlists.erase(arg_name);
    return info;
  }

  // Value stashing: Use these methods to stash arguments which correspond
  // to regular Value*'s in the graph. i.e. they don't require special
  // handling like in the case of IntArrayRefs
  TORCH_API static void stashValue(
      const std::string& arg_name,
      size_t idx,
      const Variable& var,
      const c10::TypePtr& type = nullptr);

  static bool hasValue(const std::string& arg_name) {
    return stash.values.count(arg_name) > 0;
  }

  // Removes and returns the stashed Value for `arg_name` (must exist).
  static Value* popValue(const std::string& arg_name) {
    auto info = stash.values.at(arg_name);
    stash.values.erase(arg_name);
    return info;
  }

 private:
  // One stash per thread, shared by the static accessors above.
  static thread_local ArgumentStash stash;
  std::unordered_map<std::string, IntArrayRefTrace> intlists;
  std::unordered_map<std::string, Value*> values;
};
154
+
155
// Retrieve or set the current tracing state. Returns a nullptr if tracing is
// disabled.
TORCH_API const std::shared_ptr<TracingState>& getTracingState();
TORCH_API void setTracingState(std::shared_ptr<TracingState> state);

// True iff a tracing state is currently set.
inline bool isTracing() {
  return static_cast<bool>(getTracingState());
}
163
+
164
// Signature of the callback that receives tracer warning messages.
using warn_fn_type = void (*)(const std::string& msg);
// Canned reason strings passed to warn() for common hazardous situations.
TORCH_API extern const char* WARN_PYTHON_DATAFLOW;
TORCH_API extern const char* WARN_CONSTRUCTOR;
TORCH_API extern const char* WARN_RESIZE;
TORCH_API extern const char* STRICT_TRACER_MSG;
TORCH_API void _do_warn(const char* _reason, const char* _kind);
// Emits a tracer warning, unless tracing is disabled or the active state has
// warnings suppressed (see NoWarn).
inline void warn(const char* _reason, const char* _kind = nullptr) {
  if (const auto& state = getTracingState()) {
    if (!state->warn)
      return;
    _do_warn(_reason, _kind);
  }
}
// Replaces the callback used to deliver tracer warnings.
TORCH_API void setWarn(warn_fn_type fn);
178
+
179
// RAII guard that silences tracer warnings for its lifetime and restores the
// previous setting on destruction. No-op when tracing is disabled.
struct TORCH_API NoWarn {
  NoWarn() : state(getTracingState()) {
    if (state) {
      prev = state->warn;
      state->warn = false;
    }
  }
  ~NoWarn() {
    if (state) {
      state->warn = prev;
    }
  }
  // Holds the state alive while guarded (null if tracing was off).
  std::shared_ptr<TracingState> state;
  // Warn flag to restore on destruction.
  bool prev{false};
};
194
+
195
// RAII guard that pushes a fresh tracing environment frame on construction
// and pops it on destruction.
// NOTE(review): assumes a tracing state is active — getTracingState()
// returning null here would dereference nullptr; confirm callers check
// isTracing() first.
struct WithNestedTracingFrame {
  WithNestedTracingFrame() {
    getTracingState()->enterFrame();
  }

  ~WithNestedTracingFrame() {
    getTracingState()->leaveFrame();
  }
};
204
+ TORCH_API void recordSourceLocation(Node* n);
205
+ TORCH_API void setRecordSourceLocation(void (*v)(Node*));
206
+
207
+ TORCH_API std::vector<StackEntry> pythonCallstack();
208
+ TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)());
209
+
210
+ // Having finished adding a new 'node' to the graph IR 'setValueTrace'
211
+ // associates this node with an output variable, so that further operations
212
+ // involving this variable know which node in the IR to reference.
213
+ TORCH_API void setValueTrace(const IValue& v, Value* value);
214
+
215
+ TORCH_API void delValueTrace(const IValue& var);
216
+
217
+ TORCH_API std::function<void()> pauseTracing();
218
+
219
+ TORCH_API Value* getValueTrace(const IValue& var);
220
+
221
+ TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace(
222
+ Stack inputs,
223
+ const std::function<Stack(Stack)>& traced_fn,
224
+ std::function<std::string(const Variable&)> var_name_lookup_fn,
225
+ bool strict = true,
226
+ bool force_outplace = false,
227
+ Module* self = nullptr,
228
+ const std::vector<std::string>& argument_names = {});
229
+
230
+ TORCH_API void abandon();
231
+
232
+ // NB: those serve both as an intermediate steps in addInputs below,
233
+ // as well as the overloads that terminate template recursion
234
+ TORCH_API void addInputs(Node* n, const char* name, int64_t value);
235
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value);
236
+ TORCH_API void addInputs(
237
+ Node* n,
238
+ const char* name,
239
+ c10::optional<int64_t> value);
240
+ TORCH_API void addInputs(Node* n, const char* name, bool value);
241
+ TORCH_API void addInputs(
242
+ Node* n,
243
+ const char* name,
244
+ const c10::optional<bool>& value);
245
+ TORCH_API void addInputs(Node* n, const char* name, double value);
246
+ TORCH_API void addInputs(
247
+ Node* n,
248
+ const char* name,
249
+ const c10::optional<double>& value);
250
+ TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value);
251
+ TORCH_API void addInputs(
252
+ Node* n,
253
+ const char* name,
254
+ const c10::optional<at::Scalar>& value);
255
+ TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value);
256
+ TORCH_API void addInputs(
257
+ Node* n,
258
+ const char* name,
259
+ const c10::optional<at::Tensor>& value);
260
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value);
261
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value);
262
+ TORCH_API void addInputs(
263
+ Node* n,
264
+ const char* name,
265
+ c10::optional<c10::SymInt> value);
266
+ TORCH_API void addInputs(
267
+ Node* n,
268
+ const char* name,
269
+ const c10::optional<ArrayRef<int64_t>>& value);
270
+ TORCH_API void addInputs(
271
+ Node* n,
272
+ const char* name,
273
+ const at::OptionalIntArrayRef& opt_value);
274
+ TORCH_API void addInputs(
275
+ Node* n,
276
+ const char* name,
277
+ const at::OptionalSymIntArrayRef& opt_value);
278
+ TORCH_API void addInputs(
279
+ Node* n,
280
+ const char* name,
281
+ ArrayRef<at::Tensor> value,
282
+ bool allow_undefined = false);
283
+ TORCH_API void addInputs(
284
+ Node* n,
285
+ const char* name,
286
+ std::vector<at::Tensor> value,
287
+ bool allow_undefined = false);
288
+ TORCH_API void addInputs(
289
+ Node* n,
290
+ const char* name,
291
+ at::ITensorListRef value,
292
+ bool allow_undefined = false);
293
+ TORCH_API void addInputs(
294
+ Node* n,
295
+ const char* name,
296
+ const List<c10::optional<at::Tensor>>& value);
297
+ TORCH_API void addInputs(
298
+ Node* n,
299
+ const char* name,
300
+ ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value,
301
+ const c10::ClassTypePtr& class_type);
302
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value);
303
+ TORCH_API void addInputs(
304
+ Node* n,
305
+ const char* name,
306
+ const c10::optional<ArrayRef<double>>& value);
307
+ TORCH_API void addInputs(
308
+ Node* n,
309
+ const char* name,
310
+ const c10::string_view value);
311
+ TORCH_API void addInputs(
312
+ Node* n,
313
+ const char* name,
314
+ const c10::optional<c10::string_view>& value);
315
+ TORCH_API void addInputs(Node* n, const char* name, at::Device value);
316
+ TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream);
317
+ TORCH_API void addInputs(Node* n, const char* name, at::Layout value);
318
+ TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value);
319
+ TORCH_API void addInputs(
320
+ Node* n,
321
+ const char* name,
322
+ const c10::optional<at::ScalarType>& value);
323
+ TORCH_API void addInputs(
324
+ Node* n,
325
+ const char* name,
326
+ const c10::optional<at::Device>& value);
327
+ TORCH_API void addInputs(
328
+ Node* n,
329
+ const char* name,
330
+ const c10::optional<at::Layout>& value);
331
+ TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value);
332
+ TORCH_API void addInputs(
333
+ Node* n,
334
+ const char* name,
335
+ c10::optional<at::DimnameList> value);
336
+ TORCH_API void addInputs(
337
+ Node* n,
338
+ const char* name,
339
+ const c10::optional<at::MemoryFormat>& value);
340
+ TORCH_API void addInputs(
341
+ Node* n,
342
+ const char* name,
343
+ const c10::optional<at::Generator>& value);
344
+
345
// Lists of bool cannot be traced as inputs; fail loudly rather than record a
// bogus trace.
inline void addInputs(
    Node* n,
    const char* name,
    const std::vector<bool>& value) {
  AT_ERROR("Tracing a list of bool type is currently not supported!");
}
351
+
352
// Catch-all for ArrayRef element types without a dedicated overload above:
// always an error.
template <typename T>
void addInputs(Node* n, const char* name, ArrayRef<T> value) {
  AT_ERROR("Tracing a list of arbitrary type is currently not supported!");
}
// Catch-all for dict arguments: always an error.
template <typename K, typename V>
void addInputs(
    Node* n,
    const char* name,
    const std::unordered_map<K, V>& value) {
  AT_ERROR("Tracing a dict of arbitrary types is currently not supported!");
}

// Fixed-size bool arrays are likewise unsupported.
template <size_t N>
void addInputs(Node* n, const char* name, std::array<bool, N> value) {
  throw std::runtime_error(
      "Found an unsupported argument type in the JIT tracer. File a bug report.");
}
369
+
370
+ TORCH_API void addInputs(
371
+ Node* n,
372
+ const char* name,
373
+ const c10::intrusive_ptr<c10::ivalue::Object>& obj);
374
+
375
+ TORCH_API void ensureUniqueIfOutOfPlaced(
376
+ const char* name,
377
+ const at::Tensor& tensor);
378
+ TORCH_API void ensureUniqueIfOutOfPlaced(
379
+ const char* name,
380
+ const c10::optional<at::Tensor>& tensor);
381
+
382
// Fallback addOutput: enabled only for types with no supported overload below
// (i.e. not Tensor, tensor lists, or ivalue::Object) — always an error, with
// the offending type name in the message.
template <
    typename T,
    typename = torch::enable_if_t<
        (!std::is_convertible_v<torch::decay_t<T>, at::TensorList> &&
         !std::is_convertible_v<torch::decay_t<T>, c10::List<at::Tensor>> &&
         !std::is_convertible_v<torch::decay_t<T>, at::Tensor> &&
         !std::is_convertible_v<
             torch::decay_t<T>,
             c10::intrusive_ptr<c10::ivalue::Object>>)>>
void addOutput(Node* node, T&&) {
  AT_ERROR(
      "Found an unsupported argument type ",
      c10::demangle_type<T>(),
      " in the JIT tracer. File a bug report.");
}
397
+ TORCH_API void addOutput(Node* node, const at::Tensor& tensor);
398
+ TORCH_API void setOutput(Value* value, const at::Tensor& output);
399
+ TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list);
400
+ TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list);
401
+ TORCH_API void addOutput(
402
+ Node* node,
403
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
404
+
405
+ TORCH_API autograd::Variable getSizeOf(
406
+ const autograd::Variable& var,
407
+ int64_t dim);
408
+
409
+ TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
410
+
411
+ } // namespace tracer
412
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h ADDED
@@ -0,0 +1,1275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/string_utils.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/strtod.h>
5
+ #include <torch/csrc/jit/frontend/tree.h>
6
+
7
+ #include <c10/util/complex.h>
8
+ #include <functional>
9
+ #include <iostream>
10
+ #include <string>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // clang-format off
17
+ // TreeView provides a statically-typed way to traverse the tree, which should
18
+ // be formed according to the grammar below.
19
+ //
20
+ // A few notes on types and their aliases:
21
+ // - List<T> is really a Tree with kind TK_LIST and elements as subtrees
22
+ // - Maybe<T> is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T
23
+ // - Builtin types are: Ident (TK_IDENT), String (TK_STRING)
24
+ //
25
+ // Param = Param(Maybe<Expr> type, Ident name) TK_PARAM
26
+ //
27
+ // Decl = Decl(List<Param> params, Maybe<Expr> return_type) TK_DECL
28
+ // Def = Def(Ident name, Decl decl, List<Stmt> body) TK_DEF
29
+ // ClassDef = ClassDef(Ident name, TK_CLASS_DEF
30
+ // Maybe<Expr> superclass,
31
+ // List<Stmt> body)
32
+ //
33
+ // Stmt = If(Expr cond, List<Stmt> true_body, List<Stmt> false_body) TK_IF
34
+ // | For(List<Expr> targets, List<Expr> iters, List<Stmt> body) TK_FOR
35
+ // | While(Expr cond, List<Stmt> body) TK_WHILE
36
+ // | Global(List<Ident> idents) TK_GLOBAL
37
+ // -- NB: the only type of Expr's allowed on lhs are Var
38
+ // Or a tuple containing Var with an optional terminating Starred
39
+ // | Assign(Expr lhs, Maybe<Expr> rhs, Maybe<Expr> type) TK_ASSIGN
40
+ // | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN
41
+ // | Return(List<Expr> values) TK_RETURN
42
+ // | ExprStmt(List<Expr> expr) TK_EXPR_STMT
43
+ // | Raise(Expr expr) TK_RAISE
44
+ // | Def TK_DEF
45
+ // | With(List<WithItem> targets, List<Stmt> body) TK_WITH
46
+ //
47
+ // Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR
48
+ // | BinOp(Expr lhs, Expr rhs)
49
+ // | And TK_AND
50
+ // | Or TK_OR
51
+ // | Lt '<'
52
+ // | Gt '>'
53
+ // | Eq TK_EQ
54
+ // | Le TK_LE
55
+ // | Ge TK_GE
56
+ // | Ne TK_NE
57
+ // | Is TK_IS
58
+ // | IsNot TK_ISNOT
59
+ // | Add '+'
60
+ // | Sub '-'
61
+ // | Mul '*'
62
+ // | Div '/'
63
+ // | Mod '%'
64
+ // | MatMult '@'
65
+ // | Pow TK_POW
66
+ // | UnaryOp(Expr expr)
67
+ // | Not TK_NOT
68
+ // | USub '-'
69
+ // | Const(String value) TK_CONST
70
+ // -- NB: x.name(y) is desugared into name(x, y)
71
+ // | Apply(Ident name, List<Expr> args, List<Attribute> kwargs) TK_APPLY
72
+ // | Select(Expr value, Ident selector) '.'
73
+ // | Subscript(Expr value, List<Expr> subscript_exprs) TK_SUBSCRIPT
74
+ // | SliceExpr(Maybe<Expr> start, Maybe<Expr> end) TK_SLICE_EXPR
75
+ // | Var(Ident name) TK_VAR
76
+ // | ListLiteral(List<Expr> inputs) TK_LIST_LITERAL
77
+ // | TupleLiteral(List<Expr> inputs) TK_TUPLE_LITERAL
78
+ // | Starred(Expr expr) TK_STARRED
79
+ // | WithItem(Expr target, Maybe<Var> var) TK_WITH_ITEM
80
+ // -- NB: only allowed expressions are Const or List(Const)
81
+ // (List as a value, not type constructor)
82
+ // Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE
83
+ //
84
+ // AugAssignKind =
85
+ // | Add() TK_PLUS_EQ
86
+ // | Sub() TK_MINUS_EQ
87
+ // | Mul() TK_TIMES_EQ
88
+ // | Div() TK_DIV_EQ
89
+ // | Mod() TK_MOD_EQ
90
+ //
91
+
92
+ // Each subclass of TreeView should provide:
93
+ // 1. Constructor that takes a TreeRef, and checks that it's of the right type.
94
+ // 2. Accessors that get underlying information out of the object. If they
95
+ // return subtrees, they should wrap them in appropriate views too.
96
+ // 3. Static method 'create' that creates the underlying TreeRef object
97
+ // for every TreeRef kind that has a TreeView, the parser always uses
98
+ // (e.g.) Ident::create rather than Compound::Create, this means that
99
+ // changes to the structure of Ident are always made right here rather
100
+ // than both in the parser and in this code.
101
+ // XXX: these structs should have no fields to prevent slicing when passing by value
102
+ // clang-format on
103
// Thin typed wrapper around a TreeRef; base class for all AST views below.
struct TreeView {
  explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {}
  TreeRef tree() const {
    return tree_;
  }
  const SourceRange& range() const {
    return tree_->range();
  }
  // Implicit conversion back to the untyped tree.
  operator TreeRef() const {
    return tree_;
  }
  const TreeRef& get() const {
    return tree_;
  }
  int kind() const {
    return tree_->kind();
  }
  // Debugging helper: prints the tree to stdout.
  void dump() const {
    std::cout << tree_;
  }

 protected:
  // i-th child subtree (bounds-checked via .at()).
  const TreeRef& subtree(size_t i) const {
    return tree_->trees().at(i);
  }
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  TreeRef tree_;
};
131
+
132
+ template <typename T>
133
+ struct ListIterator {
134
+ ListIterator(TreeList::const_iterator it) : it(it) {}
135
+ bool operator!=(const ListIterator& rhs) const {
136
+ return it != rhs.it;
137
+ }
138
+ bool operator==(const ListIterator& rhs) const {
139
+ return it == rhs.it;
140
+ }
141
+ T operator*() const {
142
+ return T(*it);
143
+ }
144
+ ListIterator& operator+=(std::ptrdiff_t n) {
145
+ it += n;
146
+ return *this;
147
+ }
148
+ ListIterator& operator++() {
149
+ ++it;
150
+ return *this;
151
+ }
152
+ ListIterator& operator--() {
153
+ --it;
154
+ return *this;
155
+ }
156
+
157
+ private:
158
+ TreeList::const_iterator it;
159
+ };
160
+
161
// View over a TK_LIST tree whose children are all viewable as T.
template <typename T>
struct List : public TreeView {
  using iterator = ListIterator<T>;
  using const_iterator = ListIterator<T>;

  List(const TreeRef& tree) : TreeView(tree) {
    tree->match(TK_LIST);
    // Iterate over list to temporarily instantiate Ts that will check the type
    for (const T& elem : *this) {
      (void)elem; // silence unused warning
    }
  }
  iterator begin() const {
    return iterator(tree_->trees().begin());
  }
  iterator end() const {
    return iterator(tree_->trees().end());
  }
  bool empty() const {
    return tree_->trees().begin() == tree_->trees().end();
  }
  // i-th element as a T view (bounds-checked).
  T operator[](size_t i) const {
    return T(subtree(i));
  }
  // Applies `fn` to each element, producing a new tree.
  TreeRef map(const std::function<TreeRef(const T&)>& fn) {
    return tree_->map([&](TreeRef v) { return fn(T(v)); });
  }
  static List create(const SourceRange& range, const std::vector<T>& subtrees) {
    TreeList type_erased_sub{subtrees.begin(), subtrees.end()};
    return List(Compound::create(TK_LIST, range, std::move(type_erased_sub)));
  }
  // Like create() but takes pre-built subtrees without per-element checking.
  static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) {
    return List(Compound::create(TK_LIST, range, std::move(subtrees)));
  }
  size_t size() const {
    return tree_->trees().size();
  }
};
199
+
200
// View over a TK_OPTION tree holding zero or one subtree of type T.
template <typename T>
struct Maybe : public TreeView {
  explicit Maybe(const TreeRef& tree) : TreeView(tree) {
    tree_->match(TK_OPTION);
    if (tree_->trees().size() > 1)
      throw ErrorReport(tree) << "Maybe trees can have at most one subtree";
  }
  // Implicit wrap of a present value.
  /* implicit */ Maybe(const T& tree) : TreeView(tree) {}
  bool present() const {
    return tree_->trees().size() > 0;
  }
  // Present value; .at(0) throws if absent.
  T get() const {
    return T(tree_->trees().at(0));
  }
  TreeRef map(const std::function<TreeRef(const T&)>& fn) {
    return tree_->map([&](TreeRef v) { return fn(T(v)); });
  }
  // Empty option.
  static Maybe<T> create(const SourceRange& range) {
    return Maybe<T>(Compound::create(TK_OPTION, range, {}));
  }
  // Option holding `value`.
  static Maybe<T> create(const SourceRange& range, const T& value) {
    return Maybe<T>(Compound::create(TK_OPTION, range, {value}));
  }
};
224
+
225
// View over a TK_IDENT tree: a single identifier.
struct Ident : public TreeView {
  explicit Ident(const TreeRef& tree) : TreeView(tree) {
    tree_->match(TK_IDENT);
  }
  const std::string& name() const {
    return subtree(0)->stringValue();
  }
  static Ident create(const SourceRange& range, std::string name) {
    return Ident(
        Compound::create(TK_IDENT, range, {String::create(std::move(name))}));
  }
};
237
+
238
+ ////////////////////////////////////////////////////////////////////////////////
239
+ // Base types (production LHS)
240
+ ////////////////////////////////////////////////////////////////////////////////
241
+
242
// View over any statement node; the constructor validates that the tree kind
// is one of the Stmt productions in the grammar above, else throws.
struct Stmt : public TreeView {
  explicit Stmt(const TreeRef& tree) : TreeView(tree) {
    switch (tree->kind()) {
      case TK_IF:
      case TK_FOR:
      case TK_WHILE:
      case TK_GLOBAL:
      case TK_ASSIGN:
      case TK_AUG_ASSIGN:
      case TK_RETURN:
      case TK_EXPR_STMT:
      case TK_RAISE:
      case TK_ASSERT:
      case TK_PASS:
      case TK_BREAK:
      case TK_DELETE:
      case TK_CONTINUE:
      case TK_DEF:
      case TK_WITH:
        return;
      default:
        throw ErrorReport(tree)
            << kindToString(tree->kind()) << " is not a valid Stmt";
    }
  }
};
268
+
269
// View over any expression node; the constructor validates that the tree kind
// is one of the Expr productions in the grammar above, else throws.
struct Expr : public TreeView {
  explicit Expr(const TreeRef& tree) : TreeView(tree) {
    switch (tree->kind()) {
      case TK_IF_EXPR:
      case TK_AND:
      case TK_OR:
      case '<':
      case '>':
      case TK_IS:
      case TK_ISNOT:
      case TK_EQ:
      case TK_LE:
      case TK_GE:
      case TK_NE:
      case '+':
      case '-':
      case TK_UNARY_MINUS:
      case '~':
      case '*':
      case TK_STARRED:
      case '/':
      case '%':
      case TK_NOT:
      case TK_CONST:
      case TK_STRINGLITERAL:
      case TK_TRUE:
      case TK_FALSE:
      case TK_NONE:
      case TK_NONE_TYPE:
      case TK_CAST:
      case TK_APPLY:
      case '.':
      case TK_SUBSCRIPT:
      case TK_SLICE_EXPR:
      case TK_VAR:
      case TK_LIST_LITERAL:
      case TK_TUPLE_LITERAL:
      case TK_DICT_LITERAL:
      case '@':
      case TK_POW:
      case TK_LSHIFT:
      case TK_RSHIFT:
      case TK_FLOOR_DIV:
      case '&':
      case '^':
      case '|':
      case TK_LIST_COMP:
      case TK_DICT_COMP:
      case TK_DOTS:
      case TK_IN:
      case TK_WITH_ITEM:
        return;
      default:
        throw ErrorReport(tree)
            << kindToString(tree->kind()) << " is not a valid Expr";
    }
  }
};
327
+
328
+ ////////////////////////////////////////////////////////////////////////////////
329
+ // Helper nodes (mostly for function arguments)
330
+ ////////////////////////////////////////////////////////////////////////////////
331
+
332
// View over a TK_ATTRIBUTE tree: a (name, value) pair, used as a keyword
// argument in the grammar above.
struct Attribute : public TreeView {
  explicit Attribute(const TreeRef& tree) : TreeView(tree) {
    tree_->match(TK_ATTRIBUTE);
  }
  Ident name() const {
    return Ident(subtree(0));
  }
  Expr value() const {
    return Expr(subtree(1));
  }
  static Attribute create(
      const SourceRange& range,
      const Ident& name,
      const TreeRef& value) {
    return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value}));
  }
};
349
+
350
// View over a TK_PARAM tree: a function parameter with optional type
// annotation, optional default value, and a keyword-only flag.
struct Param : public TreeView {
  explicit Param(const TreeRef& tree) : TreeView(tree) {
    tree_->match(TK_PARAM);
  }
  static Param create(
      const SourceRange& range,
      const Ident& ident,
      const Maybe<Expr>& type,
      const Maybe<Expr>& def,
      bool kwarg_only) {
    // The flag is stored as a TK_TRUE/TK_FALSE subtree.
    TreeRef kwarg_only_tree =
        Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {});
    return Param(Compound::create(
        TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)}));
  }
  Ident ident() const {
    return Ident(subtree(0));
  }
  Maybe<Expr> type() const {
    return Maybe<Expr>(subtree(1));
  }
  Maybe<Expr> defaultValue() const {
    return Maybe<Expr>(subtree(2));
  }
  bool kwarg_only() const {
    return TK_TRUE == subtree(3)->kind();
  }
  // Copy of this param with its type annotation replaced.
  Param withType(const Maybe<Expr>& typ) const {
    return Param::create(range(), ident(), typ, defaultValue(), kwarg_only());
  }
};
381
+
382
+ ////////////////////////////////////////////////////////////////////////////////
383
+ // Top level definitions
384
+ ////////////////////////////////////////////////////////////////////////////////
385
+
386
// View over a TK_DECL tree: a function signature (parameter list plus
// optional return type annotation).
struct Decl : public TreeView {
  explicit Decl(const TreeRef& tree) : TreeView(tree) {
    tree->match(TK_DECL);
  }
  List<Param> params() const {
    return List<Param>(subtree(0));
  }
  Maybe<Expr> return_type() const {
    return Maybe<Expr>(subtree(1));
  }
  static Decl create(
      const SourceRange& range,
      const List<Param>& params,
      const Maybe<Expr>& return_type) {
    return Decl(Compound::create(TK_DECL, range, {params, return_type}));
  }
};
403
+
404
+ struct Def : public TreeView {
405
+ explicit Def(const TreeRef& tree) : TreeView(tree) {
406
+ tree->match(TK_DEF);
407
+ }
408
+ Def withName(std::string new_name) const {
409
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
410
+ return create(range(), new_ident, decl(), statements());
411
+ }
412
+ Def withDecl(const Decl& decl) const {
413
+ return create(range(), name(), decl, statements());
414
+ }
415
+ Ident name() const {
416
+ return Ident(subtree(0));
417
+ }
418
+ Decl decl() const {
419
+ return Decl(subtree(1));
420
+ }
421
+ List<Stmt> statements() const {
422
+ return List<Stmt>(subtree(2));
423
+ }
424
+ static Def create(
425
+ const SourceRange& range,
426
+ const Ident& name,
427
+ const Decl& decl,
428
+ const List<Stmt>& stmts) {
429
+ return Def(Compound::create(TK_DEF, range, {name, decl, stmts}));
430
+ }
431
+ };
432
+
433
+ // Property represents a named attribute combined with a getter and setter
434
+ // method to access and mutate that attribute.
435
+ struct Property : public TreeView {
436
+ explicit Property(const TreeRef& tree) : TreeView(tree) {
437
+ tree->match(TK_PROP);
438
+ }
439
+ Ident name() const {
440
+ return Ident(subtree(0));
441
+ }
442
+ Def getter() const {
443
+ return Def(subtree(1));
444
+ }
445
+ Maybe<Def> setter() const {
446
+ return Maybe<Def>(subtree(2));
447
+ }
448
+ static Property create(
449
+ const SourceRange& range,
450
+ const Ident& name,
451
+ const Def& getter,
452
+ const Maybe<Def>& setter) {
453
+ return Property(Compound::create(TK_PROP, range, {name, getter, setter}));
454
+ }
455
+ };
456
+
457
+ struct Assign;
458
+
459
+ struct ClassDef : public TreeView {
460
+ explicit ClassDef(const TreeRef& tree) : TreeView(tree) {
461
+ tree->match(TK_CLASS_DEF);
462
+ }
463
+ explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) {
464
+ tree_->match(TK_CLASS_DEF);
465
+ }
466
+ ClassDef withName(std::string new_name) const {
467
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
468
+ return create(range(), new_ident, superclass(), body());
469
+ }
470
+ Ident name() const {
471
+ return Ident(subtree(0));
472
+ }
473
+ Maybe<Expr> superclass() const {
474
+ return Maybe<Expr>(subtree(1));
475
+ }
476
+ List<Stmt> body() const {
477
+ return List<Stmt>(subtree(2));
478
+ }
479
+ Maybe<List<Property>> properties() const {
480
+ return Maybe<List<Property>>(subtree(3));
481
+ }
482
+ Maybe<List<Assign>> assigns() const {
483
+ return Maybe<List<Assign>>(subtree(4));
484
+ }
485
+ static ClassDef create(
486
+ const SourceRange& range,
487
+ const Ident& name,
488
+ const Maybe<Expr>& superclass,
489
+ const List<Stmt>& body) {
490
+ return ClassDef(Compound::create(
491
+ TK_CLASS_DEF,
492
+ range,
493
+ {name,
494
+ superclass,
495
+ body,
496
+ Maybe<List<Property>>::create(range),
497
+ Maybe<List<Assign>>::create(range)}));
498
+ }
499
+ static ClassDef create(
500
+ const SourceRange& range,
501
+ const Ident& name,
502
+ const Maybe<Expr>& superclass,
503
+ const List<Stmt>& body,
504
+ const List<Property>& properties,
505
+ const List<Assign>& assigns);
506
+ };
507
+
508
+ TORCH_API std::vector<std::string> getUnresolvedClassAttributes(
509
+ const ClassDef& def);
510
+
511
+ ////////////////////////////////////////////////////////////////////////////////
512
+ // Statements
513
+ ////////////////////////////////////////////////////////////////////////////////
514
+
515
+ struct If : public Stmt {
516
+ explicit If(const TreeRef& tree) : Stmt(tree) {
517
+ tree_->match(TK_IF);
518
+ }
519
+ Expr cond() const {
520
+ return Expr(subtree(0));
521
+ }
522
+ List<Stmt> trueBranch() const {
523
+ return List<Stmt>(subtree(1));
524
+ }
525
+ List<Stmt> falseBranch() const {
526
+ return List<Stmt>(subtree(2));
527
+ }
528
+ If withNewBranches(
529
+ const List<Stmt>& true_branch,
530
+ const List<Stmt>& false_branch) const {
531
+ return create(range(), cond(), true_branch, false_branch);
532
+ }
533
+ static If create(
534
+ const SourceRange& range,
535
+ const Expr& cond,
536
+ const List<Stmt>& true_branch,
537
+ const List<Stmt>& false_branch) {
538
+ return If(
539
+ Compound::create(TK_IF, range, {cond, true_branch, false_branch}));
540
+ }
541
+ };
542
+
543
+ struct While : public Stmt {
544
+ explicit While(const TreeRef& tree) : Stmt(tree) {
545
+ tree_->match(TK_WHILE);
546
+ }
547
+ Expr cond() const {
548
+ return Expr(subtree(0));
549
+ }
550
+ List<Stmt> body() const {
551
+ return List<Stmt>(subtree(1));
552
+ }
553
+ static While create(
554
+ const SourceRange& range,
555
+ const Expr& cond,
556
+ const List<Stmt>& body) {
557
+ return While(Compound::create(TK_WHILE, range, {cond, body}));
558
+ }
559
+ };
560
+
561
+ struct For : public Stmt {
562
+ explicit For(const TreeRef& tree) : Stmt(tree) {
563
+ tree->match(TK_FOR);
564
+ }
565
+ List<Expr> targets() const {
566
+ return List<Expr>(subtree(0));
567
+ }
568
+ List<Expr> itrs() const {
569
+ return List<Expr>(subtree(1));
570
+ }
571
+ List<Stmt> body() const {
572
+ return List<Stmt>(subtree(2));
573
+ }
574
+ static For create(
575
+ const SourceRange& range,
576
+ const List<Expr>& targets,
577
+ const List<Expr>& itrs,
578
+ const List<Stmt>& body) {
579
+ return For(Compound::create(TK_FOR, range, {targets, itrs, body}));
580
+ }
581
+ };
582
+
583
+ // TODO: supports only single comprehension for now
584
+ struct ListComp : public Expr {
585
+ explicit ListComp(const TreeRef& tree) : Expr(tree) {
586
+ tree->match(TK_LIST_COMP);
587
+ }
588
+ Expr elt() const {
589
+ return Expr(subtree(0));
590
+ }
591
+ Expr target() const {
592
+ return Expr(subtree(1));
593
+ }
594
+ Expr iter() const {
595
+ return Expr(subtree(2));
596
+ }
597
+ // TODO: no ifs for now
598
+ static ListComp create(
599
+ const SourceRange& range,
600
+ const Expr& elt,
601
+ const Expr& target,
602
+ const Expr& iter) {
603
+ return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter}));
604
+ }
605
+ };
606
+
607
+ // TODO: supports only single comprehension for now
608
+ struct DictComp : public Expr {
609
+ explicit DictComp(const TreeRef& tree) : Expr(tree) {
610
+ tree->match(TK_DICT_COMP);
611
+ }
612
+ Expr key() const {
613
+ return Expr(subtree(0));
614
+ }
615
+ Expr value() const {
616
+ return Expr(subtree(1));
617
+ }
618
+ Expr target() const {
619
+ return Expr(subtree(2));
620
+ }
621
+ Expr iter() const {
622
+ return Expr(subtree(3));
623
+ }
624
+ // TODO: no ifs for now
625
+ static DictComp create(
626
+ const SourceRange& range,
627
+ const Expr& key,
628
+ const Expr& value,
629
+ const Expr& target,
630
+ const Expr& iter) {
631
+ return DictComp(
632
+ Compound::create(TK_DICT_COMP, range, {key, value, target, iter}));
633
+ }
634
+ };
635
+
636
+ struct Global : public Stmt {
637
+ explicit Global(const TreeRef& tree) : Stmt(tree) {
638
+ tree_->match(TK_GLOBAL);
639
+ }
640
+ List<Ident> names() {
641
+ return List<Ident>(subtree(0));
642
+ }
643
+ static Global create(const SourceRange& range, const List<Ident>& names) {
644
+ return Global(Compound::create(TK_GLOBAL, range, {names}));
645
+ }
646
+ };
647
+
648
+ struct AugAssignKind : public TreeView {
649
+ explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) {
650
+ switch (tree->kind()) {
651
+ case '+':
652
+ case '-':
653
+ case '*':
654
+ case '/':
655
+ case '%':
656
+ case '|':
657
+ case '&':
658
+ case '^':
659
+ case TK_POW:
660
+ case TK_LSHIFT:
661
+ case TK_RSHIFT:
662
+ return;
663
+ default:
664
+ throw ErrorReport(tree) << "is not a valid AugAssignKind";
665
+ }
666
+ }
667
+ };
668
+
669
+ // Augmented assignment, like "foo += bar"
670
+ struct AugAssign : public Stmt {
671
+ explicit AugAssign(const TreeRef& tree) : Stmt(tree) {
672
+ tree_->match(TK_AUG_ASSIGN);
673
+ }
674
+ static AugAssign create(
675
+ const SourceRange& range,
676
+ const Expr& lhs,
677
+ const AugAssignKind& aug_op,
678
+ const Expr& rhs) {
679
+ return AugAssign(
680
+ Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs}));
681
+ }
682
+ Expr lhs() const {
683
+ return Expr(subtree(0));
684
+ }
685
+ int aug_op() const {
686
+ return subtree(1)->kind();
687
+ }
688
+ Expr rhs() const {
689
+ return Expr(subtree(2));
690
+ }
691
+ };
692
+
693
+ struct Assign : public Stmt {
694
+ explicit Assign(const TreeRef& tree) : Stmt(tree) {
695
+ tree_->match(TK_ASSIGN);
696
+ }
697
+ static Assign create(
698
+ const SourceRange& range,
699
+ const List<Expr>& lhs,
700
+ const Maybe<Expr>& rhs,
701
+ const Maybe<Expr>& type) {
702
+ return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type}));
703
+ }
704
+
705
+ List<Expr> lhs_list() const {
706
+ return List<Expr>(subtree(0));
707
+ }
708
+
709
+ Expr lhs() const {
710
+ const auto& li = lhs_list();
711
+ TORCH_INTERNAL_ASSERT(li.size() == 1);
712
+ return *li.begin();
713
+ }
714
+
715
+ Maybe<Expr> rhs() const {
716
+ return Maybe<Expr>(subtree(1));
717
+ }
718
+
719
+ Maybe<Expr> type() const {
720
+ return Maybe<Expr>(subtree(2));
721
+ }
722
+ };
723
+
724
+ struct Return : public Stmt {
725
+ explicit Return(const TreeRef& tree) : Stmt(tree) {
726
+ tree_->match(TK_RETURN);
727
+ }
728
+ Expr expr() const {
729
+ return Expr(subtree(0));
730
+ }
731
+ static Return create(const SourceRange& range, const Expr& value) {
732
+ return Return(Compound::create(TK_RETURN, range, {value}));
733
+ }
734
+ };
735
+
736
+ struct Raise : public Stmt {
737
+ explicit Raise(const TreeRef& tree) : Stmt(tree) {
738
+ tree_->match(TK_RAISE);
739
+ }
740
+ Expr expr() const {
741
+ return Expr(subtree(0));
742
+ }
743
+ static Raise create(const SourceRange& range, const Expr& expr) {
744
+ return Raise(Compound::create(TK_RAISE, range, {expr}));
745
+ }
746
+ };
747
+
748
+ struct Assert : public Stmt {
749
+ explicit Assert(const TreeRef& tree) : Stmt(tree) {
750
+ tree_->match(TK_ASSERT);
751
+ }
752
+ Expr test() const {
753
+ return Expr(subtree(0));
754
+ }
755
+ Maybe<Expr> msg() const {
756
+ return Maybe<Expr>(subtree(1));
757
+ }
758
+ static Assert create(
759
+ const SourceRange& range,
760
+ const Expr& test,
761
+ const Maybe<Expr>& msg) {
762
+ return Assert(Compound::create(TK_ASSERT, range, {test, msg}));
763
+ }
764
+ };
765
+
766
+ struct Pass : public Stmt {
767
+ explicit Pass(const TreeRef& tree) : Stmt(tree) {
768
+ tree_->match(TK_PASS);
769
+ }
770
+ static Pass create(const SourceRange& range) {
771
+ return Pass(Compound::create(TK_PASS, range, {}));
772
+ }
773
+ };
774
+
775
+ struct Dots : public Expr {
776
+ explicit Dots(const TreeRef& tree) : Expr(tree) {
777
+ tree_->match(TK_DOTS);
778
+ }
779
+ static Dots create(const SourceRange& range) {
780
+ return Dots(Compound::create(TK_DOTS, range, {}));
781
+ }
782
+ };
783
+
784
+ struct Break : public Stmt {
785
+ explicit Break(const TreeRef& tree) : Stmt(tree) {
786
+ tree_->match(TK_BREAK);
787
+ }
788
+ static Break create(const SourceRange& range) {
789
+ return Break(Compound::create(TK_BREAK, range, {}));
790
+ }
791
+ };
792
+
793
+ struct Continue : public Stmt {
794
+ explicit Continue(const TreeRef& tree) : Stmt(tree) {
795
+ tree_->match(TK_CONTINUE);
796
+ }
797
+ static Continue create(const SourceRange& range) {
798
+ return Continue(Compound::create(TK_CONTINUE, range, {}));
799
+ }
800
+ };
801
+
802
+ struct ExprStmt : public Stmt {
803
+ explicit ExprStmt(const TreeRef& tree) : Stmt(tree) {
804
+ tree_->match(TK_EXPR_STMT);
805
+ }
806
+ Expr expr() {
807
+ return Expr(subtree(0));
808
+ }
809
+ static ExprStmt create(const SourceRange& range, const Expr& list) {
810
+ return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list}));
811
+ }
812
+ };
813
+
814
+ ////////////////////////////////////////////////////////////////////////////////
815
+ // Expressions
816
+ ////////////////////////////////////////////////////////////////////////////////
817
+
818
+ struct BinOp : public Expr {
819
+ explicit BinOp(const TreeRef& tree) : Expr(tree) {
820
+ switch (tree->kind()) {
821
+ case TK_AND:
822
+ case TK_OR:
823
+ case '<':
824
+ case '>':
825
+ case TK_IS:
826
+ case TK_ISNOT:
827
+ case TK_EQ:
828
+ case TK_LE:
829
+ case TK_GE:
830
+ case TK_NE:
831
+ case '+':
832
+ case '*':
833
+ case '/':
834
+ case '-':
835
+ case '@':
836
+ case TK_POW:
837
+ case TK_LSHIFT:
838
+ case TK_RSHIFT:
839
+ case '%':
840
+ case '&':
841
+ case '^':
842
+ case '|':
843
+ case TK_FLOOR_DIV:
844
+ case TK_IN:
845
+ if (tree->trees().size() != 2)
846
+ throw ErrorReport(tree)
847
+ << "BinOp expected 2 subtrees, found " << tree->trees().size();
848
+ return;
849
+ default:
850
+ throw ErrorReport(tree)
851
+ << kindToString(tree->kind()) << " is not a valid BinOp";
852
+ }
853
+ }
854
+ Expr lhs() const {
855
+ return Expr(subtree(0));
856
+ }
857
+ Expr rhs() const {
858
+ return Expr(subtree(1));
859
+ }
860
+ static BinOp create(
861
+ const SourceRange& range,
862
+ int kind,
863
+ const Expr& lhs,
864
+ const Expr& rhs) {
865
+ return BinOp(Compound::create(kind, range, {lhs, rhs}));
866
+ }
867
+ };
868
+
869
+ struct UnaryOp : public Expr {
870
+ explicit UnaryOp(const TreeRef& tree) : Expr(tree) {
871
+ switch (tree->kind()) {
872
+ case TK_UNARY_MINUS:
873
+ case '~':
874
+ case TK_NOT:
875
+ if (tree->trees().size() != 1)
876
+ throw ErrorReport(tree)
877
+ << "UnaryOp expected 1 subtree, found " << tree->trees().size();
878
+ return;
879
+ default:
880
+ throw ErrorReport(tree)
881
+ << kindToString(tree->kind()) << " is not a valid UnaryOp";
882
+ }
883
+ }
884
+ static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) {
885
+ return UnaryOp(Compound::create(kind, range, {expr}));
886
+ }
887
+ };
888
+
889
+ struct Const : public Expr {
890
+ explicit Const(const TreeRef& tree) : Expr(tree) {
891
+ tree_->matchNumSubtrees(TK_CONST, 1);
892
+ }
893
+ bool isFloatingPoint() const {
894
+ if (isComplex())
895
+ return false;
896
+
897
+ bool is_inf = subtree(0)->stringValue() == "inf";
898
+ return is_inf ||
899
+ subtree(0)->stringValue().find_first_of(".eE") != std::string::npos;
900
+ }
901
+ bool isIntegral() const {
902
+ return !isFloatingPoint() && !isComplex();
903
+ }
904
+ bool isComplex() const {
905
+ return subtree(0)->stringValue().find_first_of('j') != std::string::npos;
906
+ }
907
+ int64_t asIntegral() const {
908
+ try {
909
+ // NOLINTNEXTLINE(modernize-use-nullptr)
910
+ return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0);
911
+ } catch (const std::out_of_range&) {
912
+ throw ErrorReport(range()) << "Integral constant out of range "
913
+ "(must fit in a signed 64 bit integer)";
914
+ }
915
+ }
916
+ double asFloatingPoint() const {
917
+ // We can't pass in nullptr as the dummy pointer gets dereferenced for
918
+ // Android version of strtod_c().
919
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
920
+ char* dummy;
921
+ return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy);
922
+ }
923
+ c10::complex<double> asComplex() const {
924
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
925
+ char* dummy;
926
+ auto str = subtree(0)->stringValue();
927
+ // Complex numbers (a+bj, where a is non-zero) are parsed as an addition
928
+ // between float/int a and a complex number "bj". When a is 0, a complex
929
+ // number bj is created as above. So, while parsing the string, we don't
930
+ // have to worry about the real component of the complex number.
931
+ auto imag =
932
+ torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy);
933
+ return c10::complex<double>(0, imag);
934
+ }
935
+ const std::string& text() const {
936
+ return subtree(0)->stringValue();
937
+ }
938
+ static Const create(const SourceRange& range, const std::string& value) {
939
+ return Const(Compound::create(TK_CONST, range, {String::create(value)}));
940
+ }
941
+ };
942
+
943
+ struct StringLiteral : public Expr {
944
+ explicit StringLiteral(const TreeRef& tree) : Expr(tree) {
945
+ tree_->matchNumSubtrees(TK_STRINGLITERAL, 1);
946
+ }
947
+ const std::string& text() const {
948
+ return subtree(0)->stringValue();
949
+ }
950
+ static StringLiteral create(
951
+ const SourceRange& range,
952
+ const std::string& value) {
953
+ return StringLiteral(
954
+ Compound::create(TK_STRINGLITERAL, range, {String::create(value)}));
955
+ }
956
+ };
957
+
958
+ struct Apply : public Expr {
959
+ explicit Apply(const TreeRef& tree) : Expr(tree) {
960
+ tree_->match(TK_APPLY);
961
+ }
962
+ Expr callee() const {
963
+ return Expr(subtree(0));
964
+ }
965
+ List<Expr> inputs() const {
966
+ return List<Expr>(subtree(1));
967
+ }
968
+ List<Attribute> attributes() const {
969
+ return List<Attribute>(subtree(2));
970
+ }
971
+ static Apply create(
972
+ const SourceRange& range,
973
+ const Expr& callee,
974
+ const List<Expr>& inputs,
975
+ const List<Attribute>& attributes) {
976
+ return Apply(
977
+ Compound::create(TK_APPLY, range, {callee, inputs, attributes}));
978
+ }
979
+ };
980
+
981
+ struct Select : public Expr {
982
+ explicit Select(const TreeRef& tree) : Expr(tree) {
983
+ tree_->match('.');
984
+ }
985
+ Expr value() const {
986
+ return Expr(subtree(0));
987
+ }
988
+ Ident selector() const {
989
+ return Ident(subtree(1));
990
+ }
991
+ static Select create(
992
+ const SourceRange& range,
993
+ const Expr& value,
994
+ const Ident& selector) {
995
+ return Select(Compound::create('.', range, {value, selector}));
996
+ }
997
+ };
998
+
999
+ struct SliceExpr : public Expr {
1000
+ explicit SliceExpr(const TreeRef& tree) : Expr(tree) {
1001
+ tree_->match(TK_SLICE_EXPR);
1002
+ }
1003
+ Maybe<Expr> start() const {
1004
+ return Maybe<Expr>(subtree(0));
1005
+ }
1006
+ Maybe<Expr> end() const {
1007
+ return Maybe<Expr>(subtree(1));
1008
+ }
1009
+ Maybe<Expr> step() const {
1010
+ return Maybe<Expr>(subtree(2));
1011
+ }
1012
+ Expr startOr(int64_t alternative) const {
1013
+ const auto startOption = start();
1014
+ return startOption.present() ? startOption.get() : createInt(alternative);
1015
+ }
1016
+ Expr endOr(int64_t alternative) const {
1017
+ const auto endOption = end();
1018
+ return endOption.present() ? endOption.get() : createInt(alternative);
1019
+ }
1020
+ Expr stepOr(int64_t alternative) const {
1021
+ const auto stepOption = step();
1022
+ return stepOption.present() ? stepOption.get() : createInt(alternative);
1023
+ }
1024
+ static SliceExpr create(
1025
+ const SourceRange& range,
1026
+ const Maybe<Expr>& start,
1027
+ const Maybe<Expr>& end,
1028
+ const Maybe<Expr>& step) {
1029
+ return SliceExpr(
1030
+ Compound::create(TK_SLICE_EXPR, range, {start, end, step}));
1031
+ }
1032
+
1033
+ private:
1034
+ Expr createInt(int64_t value) const {
1035
+ return Expr(Const::create(range(), c10::to_string(value)));
1036
+ }
1037
+ };
1038
+
1039
+ struct Subscript : public Expr {
1040
+ explicit Subscript(const TreeRef& tree) : Expr(tree) {
1041
+ tree_->match(TK_SUBSCRIPT);
1042
+ }
1043
+ Expr value() const {
1044
+ return Expr(subtree(0));
1045
+ }
1046
+ List<Expr> subscript_exprs() const {
1047
+ return List<Expr>(subtree(1));
1048
+ }
1049
+ static Subscript create(
1050
+ const SourceRange& range,
1051
+ const Expr& value,
1052
+ const List<Expr>& subscript_exprs) {
1053
+ auto whole_range = SourceRange(
1054
+ range.source(), range.start(), subscript_exprs.range().end() + 1);
1055
+ return Subscript(
1056
+ Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs}));
1057
+ }
1058
+ };
1059
+
1060
+ struct Var : public Expr {
1061
+ explicit Var(const TreeRef& tree) : Expr(tree) {
1062
+ tree_->match(TK_VAR);
1063
+ };
1064
+ Ident name() const {
1065
+ return Ident(subtree(0));
1066
+ }
1067
+ static Var create(const SourceRange& range, const Ident& name) {
1068
+ return Var(Compound::create(TK_VAR, range, {name}));
1069
+ }
1070
+ };
1071
+
1072
+ // WithItem represents an item using with a WithStmt.
1073
+ struct WithItem : public Expr {
1074
+ explicit WithItem(const TreeRef& tree) : Expr(tree) {
1075
+ tree_->match(TK_WITH_ITEM);
1076
+ }
1077
+
1078
+ Expr target() const {
1079
+ return Expr(subtree(0));
1080
+ }
1081
+
1082
+ Maybe<Var> var() const {
1083
+ return Maybe<Var>(subtree(1));
1084
+ }
1085
+
1086
+ static WithItem create(
1087
+ const SourceRange& range,
1088
+ const Expr& target,
1089
+ const Maybe<Var>& var) {
1090
+ return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var}));
1091
+ }
1092
+ };
1093
+
1094
+ // With represents a with statement consisting of a list of with items and a
1095
+ // body of statements.
1096
+ struct With : public Stmt {
1097
+ explicit With(const TreeRef& tree) : Stmt(tree) {
1098
+ tree_->match(TK_WITH);
1099
+ }
1100
+
1101
+ List<WithItem> targets() const {
1102
+ return List<WithItem>(subtree(0));
1103
+ }
1104
+
1105
+ List<Stmt> body() const {
1106
+ return List<Stmt>(subtree(1));
1107
+ }
1108
+
1109
+ static With create(
1110
+ const SourceRange& range,
1111
+ const List<WithItem>& targets,
1112
+ const List<Stmt>& body) {
1113
+ return With(Compound::create(TK_WITH, range, {targets, body}));
1114
+ }
1115
+ };
1116
+
1117
+ struct TernaryIf : public Expr {
1118
+ explicit TernaryIf(const TreeRef& tree) : Expr(tree) {
1119
+ tree_->matchNumSubtrees(TK_IF_EXPR, 3);
1120
+ };
1121
+ Expr cond() const {
1122
+ return Expr(subtree(0));
1123
+ }
1124
+ Expr true_expr() const {
1125
+ return Expr(subtree(1));
1126
+ }
1127
+ Expr false_expr() const {
1128
+ return Expr(subtree(2));
1129
+ }
1130
+ static TernaryIf create(
1131
+ const SourceRange& range,
1132
+ const Expr& cond,
1133
+ const Expr& true_expr,
1134
+ const Expr& false_expr) {
1135
+ return TernaryIf(
1136
+ Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr}));
1137
+ };
1138
+ };
1139
+
1140
+ struct ListLiteral : public Expr {
1141
+ explicit ListLiteral(const TreeRef& tree) : Expr(tree) {
1142
+ tree_->match(TK_LIST_LITERAL);
1143
+ }
1144
+ List<Expr> inputs() const {
1145
+ return subtree(0);
1146
+ }
1147
+ static ListLiteral create(
1148
+ const SourceRange& range,
1149
+ const List<Expr>& inputs) {
1150
+ return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs}));
1151
+ }
1152
+ };
1153
+
1154
+ struct TupleLiteral : public Expr {
1155
+ explicit TupleLiteral(const TreeRef& tree) : Expr(tree) {
1156
+ tree_->match(TK_TUPLE_LITERAL);
1157
+ }
1158
+ List<Expr> inputs() const {
1159
+ return subtree(0);
1160
+ }
1161
+ static TupleLiteral create(
1162
+ const SourceRange& range,
1163
+ const List<Expr>& inputs) {
1164
+ return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs}));
1165
+ }
1166
+ };
1167
+
1168
+ struct DictLiteral : public Expr {
1169
+ explicit DictLiteral(const TreeRef& tree) : Expr(tree) {
1170
+ tree_->match(TK_DICT_LITERAL);
1171
+ }
1172
+ List<Expr> key_inputs() const {
1173
+ return subtree(0);
1174
+ }
1175
+ List<Expr> value_inputs() const {
1176
+ return subtree(1);
1177
+ }
1178
+ static DictLiteral create(
1179
+ const SourceRange& range,
1180
+ const List<Expr>& keys,
1181
+ const List<Expr>& values) {
1182
+ return DictLiteral(
1183
+ Compound::create(TK_DICT_LITERAL, range, {keys, values}));
1184
+ }
1185
+ };
1186
+
1187
+ struct Starred : public Expr {
1188
+ explicit Starred(const TreeRef& tree) : Expr(tree) {
1189
+ tree_->match(TK_STARRED);
1190
+ }
1191
+ Expr expr() const {
1192
+ return Expr(subtree(0));
1193
+ }
1194
+ static Starred create(const SourceRange& range, const Expr& expr) {
1195
+ return Starred(Compound::create(TK_STARRED, range, {expr}));
1196
+ }
1197
+ };
1198
+
1199
+ struct Delete : public Stmt {
1200
+ explicit Delete(const TreeRef& tree) : Stmt(tree) {
1201
+ tree_->match(TK_DELETE);
1202
+ }
1203
+ List<Expr> targets() const {
1204
+ return subtree(0);
1205
+ }
1206
+ static Delete create(const SourceRange& range, const List<Expr>& targets) {
1207
+ return Delete(Compound::create(TK_DELETE, range, {targets}));
1208
+ }
1209
+ };
1210
+
1211
+ /*
1212
+ * NOTE: transforming PEP 604 union into equivalent union type
1213
+ *
1214
+ * NOTE: Union[int, float] parses into:
1215
+ * <EXPR> expr:(subscript
1216
+ * (variable (ident Union))
1217
+ * (list
1218
+ * (variable (ident int))
1219
+ * (variable (ident float))))
1220
+ * <KIND> subscript
1221
+ *
1222
+ * NOTE: (int | float) parses into:
1223
+ * <EXPR> expr:(|
1224
+ * (variable (ident int))
1225
+ * (variable (ident float)))
1226
+ * <KIND> |
1227
+ */
1228
+
1229
+ inline void _flatten_pep604_union(
1230
+ const torch::jit::Expr& node,
1231
+ std::vector<torch::jit::Expr>* result) {
1232
+ // flatten possibly nested union expressions like (int | (float | str))
1233
+ // into a flat list of expressions like [int, float, str]
1234
+ if (node.kind() == '|') {
1235
+ auto as_binop = torch::jit::BinOp(node);
1236
+ _flatten_pep604_union(as_binop.lhs(), result);
1237
+ _flatten_pep604_union(as_binop.rhs(), result);
1238
+ } else {
1239
+ result->push_back(node);
1240
+ }
1241
+ }
1242
+
1243
+ inline std::vector<Expr> get_pep604_union_members(const Expr& node) {
1244
+ std::vector<Expr> result;
1245
+ _flatten_pep604_union(node, &result);
1246
+ return result;
1247
+ }
1248
+
1249
+ // Flattens a PEP 604 union into a classical union.
1250
+ // For example, ((x | y) | z) is transformed into Union[x, y, z].
1251
+ inline Expr pep604union_to_union(const Expr& expr) {
1252
+ // noop if not a pep604 union
1253
+ if (expr.kind() != '|')
1254
+ return expr;
1255
+
1256
+ // In order to support unions with more than 2 operands ((x|y)|z), we need to
1257
+ // recursively flatten the tree of | expressions.
1258
+ auto members = get_pep604_union_members(expr);
1259
+ auto synthesised_union = Subscript::create(
1260
+ expr.range(),
1261
+ Var::create(expr.range(), Ident::create(expr.range(), "Union")),
1262
+ List<Expr>::create(expr.range(), members));
1263
+ return std::move(synthesised_union);
1264
+ }
1265
+
1266
+ } // namespace jit
1267
+ } // namespace torch
1268
+
1269
+ namespace std {
1270
+
1271
+ template <typename T>
1272
+ struct iterator_traits<torch::jit::ListIterator<T>>
1273
+ : std::iterator_traits<torch::jit::TreeList::const_iterator> {};
1274
+
1275
+ } // namespace std
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+
8
+ // helpers for handling constants in the IR
9
+ // - create constant nodes from ints, floats, complex, intlist, Tensors, and
10
+ // other types
11
+ // - implement primitive constant ops.
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ using ::c10::IValue;
16
+
17
+ struct Graph;
18
+ struct Value;
19
+
20
+ // thrown when insertConstant cannot encode the IValue into a graph
21
+ struct TORCH_API constant_not_supported_error : public std::runtime_error {
22
+ using runtime_error::runtime_error;
23
+ };
24
+
25
+ TORCH_API Value* insertConstant(
26
+ Graph& g,
27
+ const IValue& val,
28
+ c10::optional<SourceRange> loc = c10::nullopt,
29
+ c10::optional<ScopePtr> scope = c10::nullopt);
30
+
31
+ // note: prefer g.insertConsant(val, loc) which does exactly the same thing
32
+ // this function is only declared/defined here because its implementation is
33
+ // closely related to the implementation of prim::Constant that is also in
34
+ // constants.cpp.
35
+ //
36
+ // returns a c10::nullopt if the IValue kind cannot be inserted as a constant
37
+ TORCH_API c10::optional<Value*> tryInsertConstant(
38
+ Graph& g,
39
+ const IValue& val,
40
+ c10::optional<SourceRange> loc = c10::nullopt,
41
+ c10::optional<ScopePtr> scope = c10::nullopt);
42
+
43
+ ////////////////////////////////////////////////////////////////////////////////
44
+ // Helper for retrieving constants
45
+ ////////////////////////////////////////////////////////////////////////////////
46
+
47
+ // attempt to convert a (possibly constant) Value* into an interpreter value
48
+ // (IValue). returns c10::nullopt if the Value* was not constant
49
+ TORCH_API c10::optional<IValue> toIValue(const Value* v);
50
+
51
+ // if a value is a constant then try to turn into type T using the
52
+ // same rules as the interpreter
53
+ template <typename T>
54
+ c10::optional<T> constant_as(const Value* v) {
55
+ if (auto ivalue = toIValue(v)) {
56
+ return ivalue->to<T>();
57
+ }
58
+ return c10::nullopt;
59
+ }
60
+ } // namespace jit
61
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ TORCH_API TypePtr getTensorType(const at::Tensor& t, bool complete);
11
+
12
+ TORCH_API TypePtr inferShapeAndTypeForInput(
13
+ TypePtr input_type,
14
+ Stack::const_iterator& s_iter,
15
+ const Stack::const_iterator& s_iter_end,
16
+ bool complete);
17
+
18
+ TORCH_API void setInputTensorTypes(
19
+ Graph& g,
20
+ const Stack& stack,
21
+ bool complete,
22
+ const std::vector<int>& param_count_list = {});
23
+
24
+ } // namespace jit
25
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h ADDED
@@ -0,0 +1,1841 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/attributes.h>
4
+ #include <torch/csrc/jit/ir/graph_node_list.h>
5
+ #include <torch/csrc/jit/ir/named_value.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+ #include <torch/csrc/jit/runtime/operator.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+ #include <torch/csrc/utils/python_stub.h>
11
+ #include <torch/csrc/utils/schema_info.h>
12
+
13
+ #include <ATen/Utils.h>
14
+ #include <ATen/core/Tensor.h>
15
+ #include <ATen/core/dynamic_type.h>
16
+ #include <ATen/core/enum_type.h>
17
+ #include <ATen/core/functional.h>
18
+ #include <ATen/core/interned_strings.h>
19
+ #include <ATen/core/ivalue.h>
20
+ #include <ATen/core/jit_type.h>
21
+ #include <c10/util/ArrayRef.h>
22
+ #include <c10/util/Exception.h>
23
+ #include <c10/util/Optional.h>
24
+
25
+ #include <functional>
26
+ #include <iosfwd>
27
+ #include <unordered_set>
28
+ #include <vector>
29
+
30
+ // Forward declare, the real meat is in python_ir.cpp
31
+ template <class T>
32
+ class THPPointer;
33
+ using THPObjectPtr = THPPointer<PyObject>;
34
+ using pyobj_list = std::vector<THPObjectPtr>;
35
+
36
+ namespace torch {
37
+ namespace jit {
38
+ namespace utils {
39
+ TORCH_API std::string getNodesModuleHierarchy(const Node& n);
40
+ } // namespace utils
41
+ class AliasDb;
42
+
43
+ using ::c10::Argument;
44
+ using ::c10::FunctionSchema;
45
+ using ::c10::Symbol;
46
+
47
+ using ::c10::ivalue::Shared;
48
+
49
+ using ::c10::IValue;
50
+ using ::c10::ivalue::Future;
51
+
52
+ using ::c10::ivalue::ConstantString;
53
+
54
+ #define C10_USING(T) using ::c10::T;
55
+ C10_FORALL_TYPES(C10_USING)
56
+ #undef C10_USING
57
+
58
+ #define C10_USING(T) using ::c10::T##Ptr;
59
+ C10_FORALL_TYPES(C10_USING)
60
+ #undef C10_USING
61
+
62
+ using ::c10::Type;
63
+ using ::c10::TypeEnv;
64
+ using ::c10::TypePtr;
65
+
66
+ using ::c10::getTypePtr;
67
+ using ::c10::MatchTypeReturn;
68
+ using ::c10::TypeKind;
69
+
70
+ using ::c10::fmap;
71
+
72
+ namespace prim {
73
+ using namespace ::c10::prim;
74
+ }
75
+ namespace attr {
76
+ using namespace ::c10::attr;
77
+ }
78
+ namespace aten {
79
+ using namespace ::c10::aten;
80
+ }
81
+ namespace cuda {
82
+ #if !defined(USE_ROCM)
83
+ using namespace ::c10::cuda;
84
+ #endif
85
+ } // namespace cuda
86
+
87
+ struct Function;
88
+ struct GraphFunction;
89
+ struct MatchedSchema;
90
+
91
+ // A Graph represents one "function" of computation.
92
+ // It uses a simple ownership model where the graph owns all the nodes inside
93
+ // it. All references inside the graph are raw pointers. Destroying the Graph
94
+ // will invalidate any pointers to nodes in the graph.
95
+ struct Graph;
96
+
97
+ // Node is the base class of the IR graph. It represents one computation
98
+ // and dependencies on a list of Values. The "prim-ops", so to speak.
99
+ struct Node;
100
+
101
+ // A Value represents an input or output to node that is either a
102
+ // Tensor or an opaque Handle object, as determined by type().
103
+ struct Value;
104
+
105
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
106
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n);
107
+
108
+ // A list of nodes, with inputs and outputs
109
+ struct Block;
110
+
111
+ // Each use is represented by this type, see 'Node::uses()'
112
+ // 'user' is the consumer of the value, 'offset' is the index into
113
+ // 'user's input this where the producers will be found.
114
+ struct Use {
115
+ Use(Node* user, size_t offset) : user(user), offset(offset) {}
116
+ Node* user;
117
+ size_t offset;
118
+
119
+ bool operator==(const Use& b) {
120
+ return user == b.user && offset == b.offset;
121
+ }
122
+ };
123
+
124
+ // Note [User node does not uniquely identify use]
125
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
126
+ // A while back, we wrote some code manipulating uses that looked like this:
127
+ //
128
+ // for (auto& use : used_val->uses_) {
129
+ // if (use.user == this_node) {
130
+ // use.offset += 1;
131
+ // break;
132
+ // }
133
+ // }
134
+ //
135
+ // This code is trying to find a particular use (our node's use) to update it.
136
+ // However, it's wrong: there may be *multiple* uses of a value %x in a node,
137
+ // as might be the case in this IR:
138
+ //
139
+ // %y = Add %x %x
140
+ //
141
+ // In this case, there are two uses of %x whose user is the node 'Add %x %x'.
142
+ // So, "use induced by this node" is not a well-formed concept.
143
+ //
144
+ // If you are looking for "use induced by an input", it's best to use
145
+ // findUseForInput() to get it.
146
+
147
+ // the list types are intentionally simple, but we type-def
148
+ // them here so if we need to change them, refactoring will be easier
149
+ using node_list = std::vector<Node*>;
150
+ using value_list = std::vector<Value*>;
151
+ using use_list = std::vector<Use>;
152
+ template <typename T>
153
+ using ArrayRef = at::ArrayRef<T>;
154
+ using NodeKind = Symbol;
155
+ using topo_position_t = int64_t;
156
+ using ValueSet = std::unordered_set<const Value*>;
157
+
158
+ struct OperatorSet;
159
+ template <typename T>
160
+ struct OperatorMap;
161
+
162
+ // This is a wrapper to allow invalidating the Python object
163
+ // safely when the C++ object for a Node/Value/Block is deleted
164
+ // like much of graph, it isn't safe for different threads to
165
+ // access the same graph
166
+ template <typename T>
167
+ struct Wrap {
168
+ explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {}
169
+ void clear() {
170
+ if (clear_cb) {
171
+ clear_cb(elem);
172
+ }
173
+ elem = nullptr;
174
+ }
175
+ T* elem;
176
+ void (*clear_cb)(void*);
177
+ };
178
+
179
+ struct Value {
180
+ AT_DISALLOW_COPY_AND_ASSIGN(Value);
181
+ Value(Node* node_, size_t offset_);
182
+
183
+ private:
184
+ friend struct Node;
185
+ friend struct Graph;
186
+ Node* node_;
187
+ size_t offset_;
188
+ size_t unique_ = 0; // unique id
189
+ use_list uses_;
190
+ std::string unique_name_;
191
+ TypePtr type_;
192
+ // a managing wrapper for Python to allow invalidation
193
+ std::shared_ptr<Wrap<Value>> wrap_;
194
+
195
+ public:
196
+ Value* setType(TypePtr type);
197
+ TORCH_API void inferTypeFrom(const at::Tensor& output);
198
+ TORCH_API void inferTypeFrom(
199
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
200
+ const TypePtr& type() const {
201
+ AT_ASSERT(type_ != nullptr);
202
+ return type_;
203
+ }
204
+ bool requires_grad() const {
205
+ return type()->requires_grad();
206
+ }
207
+ bool isCompleteTensor() const {
208
+ if (auto pt = type()->cast<TensorType>()) {
209
+ return pt->isComplete();
210
+ }
211
+ return false;
212
+ }
213
+ TORCH_API bool mustBeNone() const;
214
+ TORCH_API bool mustNotBeNone() const;
215
+ size_t unique() const {
216
+ return unique_;
217
+ }
218
+ bool hasDebugName() const {
219
+ return !unique_name_.empty();
220
+ }
221
+ static bool isValidName(const std::string& name);
222
+ TORCH_API Value* setDebugName(const std::string& name);
223
+ std::string debugName() const {
224
+ if (hasDebugName()) {
225
+ return unique_name_;
226
+ }
227
+ return c10::to_string(unique());
228
+ }
229
+ TORCH_API std::string debugNameBase() const;
230
+ Node* node() {
231
+ return node_;
232
+ }
233
+ size_t offset() const {
234
+ return offset_;
235
+ }
236
+ void setOffset(size_t offset) {
237
+ offset_ = offset;
238
+ }
239
+ const Node* node() const {
240
+ return node_;
241
+ }
242
+
243
+ /**
244
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
245
+ * Check #87343 for details.
246
+ */
247
+ Graph* owningGraph();
248
+ const Graph* owningGraph() const;
249
+ // TODO: make this more const correct
250
+ const use_list& uses() const {
251
+ return uses_;
252
+ }
253
+
254
+ bool hasUses() const {
255
+ return !uses().empty();
256
+ }
257
+
258
+ TORCH_API void replaceFirstUseWith(Value* newValue);
259
+
260
+ // Replaces all uses of this value with 'newValue'.
261
+ //
262
+ // Given: %3 = f(%1, %2)
263
+ // %4 = g(%3)
264
+ // %5 = h(%3, %3)
265
+ // Execute: %3.replaceAllUsesWith(%6)
266
+ // Result: %3 = f(%1, %2)
267
+ // %4 = g(%6)
268
+ // %5 = h(%6, %6)
269
+ TORCH_API void replaceAllUsesWith(Value* newValue);
270
+
271
+ // Replaces all uses of this value with 'newValue' after 'node'.
272
+ // Given: %3 = f(%1, %2)
273
+ // %4 = g(%3)
274
+ // %5 = inplace_(%3)
275
+ // %6 = h(%3, %3)
276
+ // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5)
277
+ // Result: %3 = f(%1, %2)
278
+ // %4 = g(%3)
279
+ // %5 = inplace_(%3)
280
+ // %6 = h(%5, %5)
281
+ // XXX: does not check scoping legality, consider using
282
+ // replaceAllUsesDominatedByNodeWith
283
+ TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue);
284
+
285
+ // Replaces all uses of this value with 'newValue' that are dominated by
286
+ // 'node'. Given:
287
+ // x = op(...).
288
+ // if cond:
289
+ // z = foo(..)
290
+ // bar(x)
291
+ // else:
292
+ // print(x)
293
+ // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x)
294
+ // but not print(x) because print is not dominated by foo.
295
+ // replaceAllUsesAfterNode does not check domination, so in this example
296
+ // it would produce invalid IR.
297
+ TORCH_API void replaceAllUsesDominatedByNodeWith(
298
+ const Node* node,
299
+ Value* newValue);
300
+
301
+ TORCH_API Value* copyMetadata(Value* from);
302
+
303
+ TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
304
+ if (!wrap_) {
305
+ wrap_ = std::make_shared<Wrap<Value>>(this);
306
+ }
307
+ return wrap_;
308
+ }
309
+
310
+ virtual ~Value() {
311
+ if (wrap_) {
312
+ wrap_->clear();
313
+ }
314
+ }
315
+ };
316
+
317
+ struct TORCH_API Node {
318
+ AT_DISALLOW_COPY_AND_ASSIGN(Node);
319
+ friend struct Graph;
320
+ friend struct Block;
321
+ friend struct Value;
322
+ friend graph_node_list;
323
+ friend const_graph_node_list;
324
+ friend graph_node_list_iterator;
325
+ friend const_graph_node_list_iterator;
326
+
327
+ private:
328
+ const NodeKind kind_;
329
+ std::vector<Value*> inputs_;
330
+ std::vector<Value*> outputs_;
331
+ // subblocks
332
+ std::vector<Block*> blocks_;
333
+ Graph* graph_;
334
+ Block* owning_block_;
335
+ c10::optional<SourceRange> source_range_;
336
+ ScopePtr scope_;
337
+ c10::optional<InlinedCallStackPtr> callstack_;
338
+ // Assumes FunctionSchemas are persistent, so we don't manage their lifetime.
339
+ // This field is effective a cache that's populated on attribute lookups and
340
+ // invalidated every time we perform an operation that could potentially
341
+ // change the schema. note: mutable because schema_ is effectively a cache
342
+ mutable const Operator* op_;
343
+ topo_position_t topo_position_ = 0;
344
+ // a managing wrapper for Python to allow invalidation
345
+ std::shared_ptr<Wrap<Node>> wrap_;
346
+ // Stores the full schema name, if the operator is historic
347
+ // When the operator is deprecated or the name of the operator
348
+ // is changed, we need to rely on this name
349
+ // to retrieve old schemas to successfully apply upgraders
350
+ // for this operator.
351
+ c10::optional<std::string> historic_schema_name_ = c10::nullopt;
352
+
353
+ protected:
354
+ Node(Graph* graph_, NodeKind kind_); // defined after graph
355
+ public:
356
+ // Each Node but Return/Param Nodes are associated with exactly one
357
+ // place in the Node list of the Graph. The Graph itself is a circular
358
+ // doubly-linked list. The Return Node is used as the sentinel for the
359
+ // "beginning"/"end" of the list. This means that you can tell when
360
+ // you've traversed the entire list without worrying about null
361
+ // pointers. `next_in_graph[0]` is the pointer to the next Node, while
362
+ // `next_in_graph[1]` is the pointer to the previous Node. The
363
+ // linked list is implemented as an array to allow the same iterator
364
+ // class for forward and reversed Node lists. Taken together, this
365
+ // list also represents a topological sort of the Nodes in the Graph.
366
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays)
367
+ Node* next_in_graph[2] = {nullptr, nullptr};
368
+
369
+ std::shared_ptr<Wrap<Node>> wrap() {
370
+ if (!wrap_) {
371
+ wrap_ = std::make_shared<Wrap<Node>>(this);
372
+ }
373
+ return wrap_;
374
+ }
375
+
376
+ const c10::optional<std::string> getHistoricSchemaName() {
377
+ return historic_schema_name_;
378
+ }
379
+
380
+ void setHistoricSchemaName(const std::string& name) {
381
+ historic_schema_name_ = name;
382
+ }
383
+
384
+ Node*& next() {
385
+ return next_in_graph[kNextDirection];
386
+ }
387
+ Node*& prev() {
388
+ return next_in_graph[kPrevDirection];
389
+ }
390
+ Node* const& next() const {
391
+ return next_in_graph[kNextDirection];
392
+ }
393
+ Node* const& prev() const {
394
+ return next_in_graph[kPrevDirection];
395
+ }
396
+
397
+ NodeKind kind() const {
398
+ return kind_;
399
+ }
400
+ Node* setSourceRange(SourceRange r) {
401
+ source_range_ = std::move(r);
402
+ return this;
403
+ }
404
+ SourceRange sourceRange() const;
405
+
406
+ /**
407
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
408
+ * Check #87343 for details.
409
+ */
410
+ Graph* owningGraph() {
411
+ return graph_;
412
+ }
413
+ const Graph* owningGraph() const {
414
+ return graph_;
415
+ }
416
+ Block* owningBlock() {
417
+ return owning_block_;
418
+ }
419
+ const Block* owningBlock() const {
420
+ return owning_block_;
421
+ }
422
+ ScopePtr scope() {
423
+ return scope_;
424
+ }
425
+ void setScope(ScopePtr scope) {
426
+ scope_ = std::move(scope);
427
+ }
428
+ std::string scopeName() const {
429
+ if (!scope_) {
430
+ return "";
431
+ }
432
+ return scope_->namesFromRoot();
433
+ }
434
+
435
+ // Copies the source range, scope and callstack from another node.
436
+ Node* copyMetadata(Node* from) {
437
+ this->setSourceRange(from->sourceRange());
438
+ this->setScope(from->scope());
439
+ if (auto cs = from->callstack()) {
440
+ this->setCallStack(*cs);
441
+ }
442
+ return this;
443
+ }
444
+
445
+ c10::optional<InlinedCallStackPtr> callstack() const {
446
+ return callstack_;
447
+ }
448
+ void setCallStack(InlinedCallStackPtr cs) {
449
+ callstack_ = std::move(cs);
450
+ }
451
+
452
+ // NB: This returns an ArrayRef; that means that it will
453
+ // get invalidated if you resize inputs (e.g., using addInput)
454
+ // We can't return a std::vector<Node*>& because there's no
455
+ // way to soundly cast to std::vector<const Node*> (an insane
456
+ // implementation of std::vector could make this representationally
457
+ // different.)
458
+ at::ArrayRef<Value*> inputs() {
459
+ return inputs_;
460
+ }
461
+ at::ArrayRef<const Value*> inputs() const {
462
+ // Vectors are not convertible in const-ness of elements, but
463
+ // raw pointers are.
464
+ return {inputs_.data(), inputs_.size()};
465
+ }
466
+ // NB: This returns an ArrayRef; that means that it will
467
+ // get invalidated if you resize inputs (e.g., using addInput)
468
+ // We can't return a std::vector<Node*>& because there's no
469
+ // way to soundly cast to std::vector<const Node*> (an insane
470
+ // implementation of std::vector could make this representationally
471
+ // different.)
472
+ at::ArrayRef<Value*> outputs() {
473
+ return outputs_;
474
+ }
475
+ at::ArrayRef<const Value*> outputs() const {
476
+ // Vectors are not convertible in const-ness of elements, but
477
+ // raw pointers are.
478
+ return {outputs_.data(), outputs_.size()};
479
+ }
480
+ Value* output(size_t i) const {
481
+ return outputs_.at(i);
482
+ }
483
+ bool hasUses() const {
484
+ for (auto o : outputs()) {
485
+ if (!o->uses().empty()) {
486
+ return true;
487
+ }
488
+ }
489
+ return false;
490
+ }
491
+
492
+ void replaceAllUsesWith(Node* n);
493
+
494
+ // replaces `this` with a new node with the same inputs and outputs
495
+ // but a new node symbol. does not destroy `this`
496
+ Node* replaceWithNewSymbol(Symbol new_symbol);
497
+
498
+ // Checks if this node is dominated by `dominator` which means that
499
+ // `dominator` will always be executed before `this` and `dominator`
500
+ // is in scope of `this`.
501
+ bool isDominatedBy(const Node* dominator) const;
502
+
503
+ // lots of things like chunk have a single input or single output, so we have
504
+ // a helper to make accessing it easier
505
+ Value* input() {
506
+ AT_ASSERT(inputs_.size() == 1);
507
+ return inputs_.at(0);
508
+ }
509
+ Value* output() {
510
+ AT_ASSERT(outputs_.size() == 1);
511
+ return outputs_.at(0);
512
+ }
513
+ const Value* output() const {
514
+ AT_ASSERT(outputs_.size() == 1);
515
+ return outputs_.at(0);
516
+ }
517
+ const Value* input() const {
518
+ AT_ASSERT(inputs_.size() == 1);
519
+ return inputs_.at(0);
520
+ }
521
+ // Access a particular input. This is a checked index.
522
+ Value* input(size_t i) const {
523
+ return inputs_.at(i);
524
+ }
525
+
526
+ bool hasNamedInput(const std::string& unqualName) const;
527
+ Value* namedInput(const std::string& unqualName) const;
528
+ Value* namedInput(Symbol name) const;
529
+
530
+ c10::optional<IValue> get(Symbol name) const;
531
+
532
+ template <typename T>
533
+ c10::optional<T> get(Symbol name) const {
534
+ if (auto v = get(name)) {
535
+ return v->template to<T>();
536
+ }
537
+ return c10::nullopt;
538
+ }
539
+
540
+ // Returns true if the value of input name is statically known
541
+ bool is_constant(Symbol name) const {
542
+ return static_cast<bool>(get(name));
543
+ }
544
+ bool mustBeNone() const;
545
+
546
+ bool isNondeterministic() const;
547
+ bool hasSideEffects() const;
548
+
549
+ // instructions lowered by the interpreter and not run in the optimized graph
550
+ bool notExecutedOp() const {
551
+ return kind_ == prim::Constant || kind_ == prim::profile ||
552
+ kind_ == prim::profile_ivalue;
553
+ }
554
+
555
+ // Graphs
556
+
557
+ // Note [Topological invariant]
558
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
559
+ // We always maintain an up-to-date topological ordering of all nodes via
560
+ // the next()/prev() links. All transformations to graphs must preserve
561
+ // this topological ordering: for example, it is only valid to 'addInput'
562
+ // with an input which is topologically before the current node.
563
+ //
564
+ // Usually, it is obvious whether or not topological order is maintained;
565
+ // for example, if you are adding nodes to the end of the topsort, it's
566
+ // impossible for them to refer to inputs that are not in the topsort.
567
+ // If it is not obvious, please comment accordingly.
568
+
569
+ // Add 'node' as an input to 'this' at the end of existing
570
+ // arguments. Returns the added node for ease of chaining.
571
+ //
572
+ // Given: %3 = f(%1, %2)
573
+ // Execute: %3.addInput(%4)
574
+ // Result: %3 = f(%1, %2, %4)
575
+ Value* addInput(Value* value);
576
+
577
+ // Add 'value' as an input to 'this' at the specified position in the
578
+ // arguments. Returns the added value for ease of chaining.
579
+ Value* insertInput(size_t i, Value* value);
580
+
581
+ // Replace the input of 'this' at position 'i' with
582
+ // 'newValue', returning the old node.
583
+ //
584
+ // Given: %3 = f(%1, %2)
585
+ // Execute: %3.replaceInput(1, %4)
586
+ // Result: %3 = f(%1, %4)
587
+ Value* replaceInput(size_t i, Value* newValue);
588
+
589
+ // Replace all occurrences of 'from' in the inputs of this
590
+ // node with 'to'. Corresponds to llvm's replaceUsesOfWith.
591
+ //
592
+ // Given: %3 = f(%1, %2, %1)
593
+ // Execute: %3.replaceInputWith(%1, %4)
594
+ // Result: %3 = f(%4, %2, %4)
595
+ void replaceInputWith(Value* from, Value* to);
596
+
597
+ Value* addOutput();
598
+
599
+ Value* insertOutput(size_t i);
600
+
601
+ void eraseOutput(size_t i);
602
+
603
+ Block* addBlock();
604
+ void eraseBlock(size_t i);
605
+
606
+ // Each Node can have a list of subblocks. These are used to define structured
607
+ // nested control flow operators such as If and Loop.
608
+ // The meaning of a block is specific to the kind of node it is in, but
609
+ // all blocks share these semantics:
610
+ // * Nested lexical scoping: If a node 'Parent' has a subblock which contains
611
+ // a node 'Child', Child can use any value that was in scope for the Parent
612
+ // node in addition to any values defined before 'Child' in the subblock.
613
+ // * The list of inputs to the block are in scope for the duration of the
614
+ // block
615
+ // * the outputs of the Parent node are not in scope for the subblocks
616
+ // Typically the inputs to a block that represents control flow act as
617
+ // as the equivalents phi-nodes in standard SSA form,
618
+ // defining a new Value to represent any term that has multiple
619
+ // definitions depending on how control flowed. Outputs of the node containing
620
+ // control flow serve a similar purpose, defining new values for variables
621
+ // that would have different definitions depending on which way control
622
+ // flowed.
623
+
624
+ at::ArrayRef<Block*> blocks() {
625
+ return blocks_;
626
+ }
627
+ at::ArrayRef<const Block*> blocks() const {
628
+ // Vectors are not convertible in const-ness of elements, but
629
+ // raw pointers are.
630
+ return {blocks_.data(), blocks_.size()};
631
+ }
632
+
633
+ // Is 'this' before 'n' in the topological order?
634
+ bool isBefore(const Node* n) const;
635
+
636
+ // Is 'this' after 'n' in the topological order?
637
+ bool isAfter(const Node* n) const;
638
+
639
+ // Insert unattached 'this' node before 'n' in the topological order.
640
+ // Returns this (for chaining).
641
+ //
642
+ // Given: %3 = f(%1, %2)
643
+ // %4 = g(%3)
644
+ // and unattached: %5 = h(%1)
645
+ // Execute: %5.insertBefore(%4)
646
+ // Result: %3 = f(%1, %2)
647
+ // %5 = h(%1)
648
+ // %4 = g(%3)
649
+ Node* insertBefore(Node* n);
650
+
651
+ // Insert unattached 'this' node after 'n' in the topological order.
652
+ // Returns this (for chaining).
653
+ //
654
+ // Given: %3 = f(%1, %2)
655
+ // %4 = g(%3)
656
+ // and unattached: %5 = h(%1)
657
+ // Execute: %5.insertAfter(%4)
658
+ // Result: %3 = f(%1, %2)
659
+ // %4 = g(%3)
660
+ // %5 = h(%1)
661
+ Node* insertAfter(Node* n);
662
+
663
+ // Move 'this' (already in the graph) after 'n' in the topological order.
664
+ //
665
+ // NOTE: Does not check that value dependencies are preserved, see
666
+ // AliasDb::moveAfterTopologicallyValid
667
+ //
668
+ // Given: %2 = f(%1)
669
+ // %3 = g(%1)
670
+ // Execute: %2.moveAfter(%3)
671
+ // Result: %3 = g(%1)
672
+ // %2 = f(%1)
673
+ //
674
+ void moveAfter(Node* n);
675
+
676
+ // Move a node 'n' (already in the graph) before 'this' in the topological
677
+ // order.
678
+ //
679
+ // NOTE: Does not check that value dependencies are preserved, see
680
+ // AliasDb::moveBeforeTopologicallyValid
681
+ //
682
+ // Given: %2 = f(%1)
683
+ // %3 = g(%1)
684
+ // Execute: %3.moveBefore(%2)
685
+ // Result: %3 = g(%1)
686
+ // %2 = f(%1)
687
+ void moveBefore(Node* n);
688
+
689
+ // Remove the input at 'i' from this node.
690
+ //
691
+ // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling
692
+ // removeInput.
693
+ //
694
+ // Given: %3 = f(%1, %2)
695
+ // Execute: %3.removeInput(1)
696
+ // Result: %3 = f(%1)
697
+ void removeInput(size_t i);
698
+
699
+ // Remove all inputs from a node.
700
+ //
701
+ // Given: %3 = f(%1, %2)
702
+ // Execute: %3.removeAllInputs()
703
+ // Result: %3 = f()
704
+ void removeAllInputs();
705
+
706
+ // Remove all outputs from a node.
707
+ //
708
+ // Given: %1, %2 = f()
709
+ // Execute: removeAllOutputs()
710
+ // Result: = f()
711
+ void removeAllOutputs();
712
+
713
+ // Rearrange the ordering of inputs or outputs of a node
714
+ // Given: %3 = f(%1, %2)
715
+ // Execute: %3.permuteInputs({1, 0})
716
+ // Result: %3 = f(%2, %1)
717
+ // Each index must appear exactly once
718
+ void permuteInputs(const std::vector<size_t>& new_inputs);
719
+ void permuteOutputs(const std::vector<size_t>& new_inputs);
720
+
721
+ // iterators of the node list starting at this node
722
+ // useful for resuming a search starting at this node
723
+ inline graph_node_list_iterator iterator() {
724
+ return {this, 0};
725
+ }
726
+ inline graph_node_list_iterator reverseIterator() {
727
+ return iterator().reverse();
728
+ }
729
+ inline const_graph_node_list_iterator iterator() const {
730
+ return {this, 0};
731
+ }
732
+ inline const_graph_node_list_iterator reverseIterator() const {
733
+ return iterator().reverse();
734
+ }
735
+
736
+ // Remove 'this' from the instruction list and deallocate it.
737
+ //
738
+ // Invariant: no outputs of 'this' may have any uses.
739
+ //
740
+ // Given: %2 = f(%1)
741
+ // %3 = g(%1)
742
+ // Execute: %2.destroy()
743
+ // Result: %3 = g(%1)
744
+ void destroy();
745
+
746
+ // Dynamically cast this node to the subclass indicated by the
747
+ // template variable, returning nullptr if the cast is invalid..
748
+ //
749
+ // Example usage: if(auto s = n.cast<Select>()) { ... }
750
+ template <typename T>
751
+ T* cast() {
752
+ if (T::Kind == kind()) {
753
+ return static_cast<T*>(this);
754
+ }
755
+ return nullptr;
756
+ }
757
+ template <typename T>
758
+ const T* cast() const {
759
+ if (T::Kind == kind()) {
760
+ return static_cast<const T*>(this);
761
+ }
762
+ return nullptr;
763
+ }
764
+
765
+ template <typename T>
766
+ T* expect() {
767
+ TORCH_CHECK(
768
+ T::Kind == kind(),
769
+ "expected a ",
770
+ T::Kind.toDisplayString(),
771
+ " but found a ",
772
+ kind().toDisplayString());
773
+ return static_cast<T*>(this);
774
+ }
775
+
776
+ bool matches(const FunctionSchema& schema) const;
777
+
778
+ // XXX: this function is meant to be used with string literals only!
779
+ bool matches(
780
+ const char* signature_literal,
781
+ at::ArrayRef<Symbol> const_inputs = {}) const;
782
+
783
+ bool isMemberOf(const OperatorSet& os) const;
784
+ template <typename T>
785
+ bool isMemberOf(const OperatorMap<T>& om) const {
786
+ auto it = om.map.find(kind());
787
+ if (it == om.map.end()) {
788
+ return false;
789
+ }
790
+ for (auto& op : it->second) {
791
+ if (matches(op.first->schema())) {
792
+ return true;
793
+ }
794
+ }
795
+ return false;
796
+ }
797
+
798
+ const FunctionSchema& schema() const;
799
+ const FunctionSchema* maybeSchema() const;
800
+ const Operator& getOperator() const;
801
+ Operation getOperation() const;
802
+
803
+ const Operator* maybeOperator() const;
804
+
805
+ void dump() const;
806
+
807
+ std::ostream& print(
808
+ std::ostream& out,
809
+ size_t level,
810
+ std::vector<const Node*>* groups,
811
+ bool print_source_locations = true,
812
+ bool print_attributes = true,
813
+ bool print_scopes = true,
814
+ bool print_body = true) const;
815
+
816
+ virtual ~Node() {
817
+ if (wrap_) {
818
+ wrap_->clear();
819
+ }
820
+ }
821
+
822
+ // Methods for accessing attributes
823
+ Node* copyAttributes(const Node& rhs) {
824
+ values_.clear();
825
+ for (const AVPtr& i : rhs.values_) {
826
+ values_.push_back(i->clone());
827
+ }
828
+ return this;
829
+ }
830
+ bool hasAttribute(Symbol name) const {
831
+ AT_ASSERT(name.is_attr());
832
+ return findAttr(name, false) != values_.end();
833
+ }
834
+ bool hasAttributeS(const std::string& name) const {
835
+ return hasAttribute(Symbol::attr(name));
836
+ }
837
+ AttributeKind kindOf(Symbol name) const {
838
+ AT_ASSERT(name.is_attr());
839
+ return (*findAttr(name, true))->kind();
840
+ }
841
+ AttributeKind kindOfS(const std::string& name) const {
842
+ return kindOf(Symbol::attr(name));
843
+ }
844
+ Node* removeAttribute(Symbol name) {
845
+ AT_ASSERT(name.is_attr());
846
+ values_.erase(findAttr(name, true));
847
+ return this;
848
+ }
849
+ Node* removeAttributeS(const std::string& name) {
850
+ return removeAttribute(Symbol::attr(name));
851
+ }
852
+ bool hasAttributes() const {
853
+ return !values_.empty();
854
+ }
855
+ size_t numAttributes() const {
856
+ return values_.size();
857
+ }
858
+ // The names are returned in order, since name actually is the index.
859
+ std::vector<Symbol> attributeNames() const {
860
+ std::vector<Symbol> names;
861
+ names.reserve(values_.size());
862
+ for (const AVPtr& a : values_) {
863
+ names.push_back(a->name);
864
+ }
865
+ return names;
866
+ }
867
+ std::vector<const char*> attributeNamesS() const {
868
+ std::vector<const char*> names;
869
+ names.reserve(values_.size());
870
+ for (const AVPtr& a : values_) {
871
+ names.push_back(a->name.toUnqualString());
872
+ }
873
+ return names;
874
+ }
875
+
876
+ #define CREATE_ACCESSOR(Kind, method) \
877
+ Node* method##_(Symbol name, Kind##Attr::ConstructorType v) { \
878
+ return setAttr<Kind##Attr>( \
879
+ name, std::forward<Kind##Attr::ConstructorType>(v)); \
880
+ } \
881
+ const Kind##Attr::ValueType& method(Symbol name) const { \
882
+ return getAttr<Kind##Attr>(name); \
883
+ }
884
+
885
+ CREATE_ACCESSOR(Float, f)
886
+ CREATE_ACCESSOR(Complex, c)
887
+ CREATE_ACCESSOR(Floats, fs)
888
+ CREATE_ACCESSOR(ComplexVals, cs)
889
+ CREATE_ACCESSOR(String, s)
890
+ CREATE_ACCESSOR(Strings, ss)
891
+ CREATE_ACCESSOR(Int, i)
892
+ CREATE_ACCESSOR(Ints, is)
893
+ CREATE_ACCESSOR(Graph, g)
894
+ CREATE_ACCESSOR(Graphs, gs)
895
+ CREATE_ACCESSOR(Type, ty)
896
+ CREATE_ACCESSOR(Types, tys)
897
+ CREATE_ACCESSOR(IValue, ival)
898
+
899
+ #undef CREATE_ACCESSOR
900
+
901
+ // Our Graphs are not very const-correct, so we need to allow returning
902
+ // non-const references too
903
+ GraphAttr::ValueType& g(Symbol name) {
904
+ return getAttr<GraphAttr>(name);
905
+ }
906
+
907
+ // does not use CREATE_ACCESSOR because we need additional asserts
908
+ Node* t_(Symbol name, TensorAttr::ConstructorType v) {
909
+ return setAttr<TensorAttr>(
910
+ name, std::forward<TensorAttr::ConstructorType>(v));
911
+ }
912
+ const TensorAttr::ValueType& t(Symbol name) const {
913
+ return getAttr<TensorAttr>(name);
914
+ }
915
+
916
+ Node* ts_(Symbol name, TensorsAttr::ConstructorType v) {
917
+ return setAttr<TensorsAttr>(
918
+ name, std::forward<TensorsAttr::ConstructorType>(v));
919
+ }
920
+ const TensorsAttr::ValueType& ts(Symbol name) const {
921
+ return getAttr<TensorsAttr>(name);
922
+ }
923
+
924
+ Block* findCommonAncestorBlockWith(Node* n);
925
+
926
+ size_t blocksFromGraphBlock();
927
+
928
+ private:
929
+ void printAttrValue(std::ostream& out, const Symbol& name) const;
930
+ void printAttributes(std::ostream& out, bool ignore_subgraph) const;
931
+
932
+ template <typename T>
933
+ Node* setAttr(Symbol name, typename T::ConstructorType v) {
934
+ AT_ASSERT(name.is_attr());
935
+ auto it = findAttr(name, false);
936
+ auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v)));
937
+ // NOLINTNEXTLINE(bugprone-branch-clone)
938
+ if (it == values_.end()) {
939
+ values_.push_back(std::move(nv));
940
+ } else {
941
+ *it = std::move(nv);
942
+ }
943
+ return this;
944
+ }
945
+ template <typename T>
946
+ typename T::ValueType& getAttr(Symbol name) const {
947
+ AT_ASSERT(name.is_attr());
948
+ auto it = findAttr(name, true);
949
+ auto* child = dynamic_cast<T*>(it->get());
950
+ if (child == nullptr) {
951
+ throw IRAttributeError(name, true);
952
+ }
953
+ return child->value();
954
+ }
955
+ using AVPtr = AttributeValue::Ptr;
956
+ // NB: For determinism, we use a vector rather than a hash map. This does
957
+ // mean that lookups are O(n), so you shouldn't use Attributes to store
958
+ // a big pile of messages.
959
+ std::vector<AVPtr> values_;
960
+ std::vector<AVPtr>::iterator findAttr(Symbol name, bool required) {
961
+ AT_ASSERT(name.is_attr());
962
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
963
+ return v->name == name;
964
+ });
965
+ if (required && it == values_.end()) {
966
+ throw IRAttributeError(name, false);
967
+ }
968
+ AT_ASSERT(!required || it != values_.end());
969
+ return it;
970
+ }
971
+ std::vector<AVPtr>::const_iterator findAttr(Symbol name, bool required)
972
+ const {
973
+ AT_ASSERT(name.is_attr());
974
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
975
+ return v->name == name;
976
+ });
977
+ if (required && it == values_.end()) {
978
+ throw IRAttributeError(name, false);
979
+ }
980
+ AT_ASSERT(!required || it != values_.end());
981
+ return it;
982
+ }
983
+
984
+ enum class MoveSide { BEFORE, AFTER };
985
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
986
+
987
+ std::pair<Value*, const Argument&> findInput(Symbol name);
988
+ // Lookup iterator in use list of _input i_ that corresponds to its use of
989
+ // _this_
990
+ use_list::iterator findUseForInput(size_t i);
991
+
992
+ // remove the use of input i, this sets input i to nullptr, but
993
+ // is only used internally to Node before setting it to a new value
994
+ // or erasing the entry from the list.
995
+ Value* dropInput(size_t i);
996
+
997
+ bool inBlockList() const {
998
+ if (next() == nullptr) {
999
+ AT_ASSERT(prev() == nullptr);
1000
+ }
1001
+ return next() != nullptr;
1002
+ }
1003
+
1004
+ void removeFromList();
1005
+ void lint() const;
1006
+
1007
+ void assignTopoPosition();
1008
+
1009
+ protected:
1010
+ // subclasses must override
1011
+ // this function is used by createClone to initialize a new version
1012
+ // of a node in another graph. It should allocate a new instance of the same
1013
+ // concrete type as 'this', but in graph 'g' which might be different
1014
+ // than graph_
1015
+ virtual Node* allocNewInstance(Graph* g) {
1016
+ return new Node(g, kind());
1017
+ }
1018
+ // create a copy of all properties of Node s into this.
1019
+ // subclasses should extend if they have additional information to copy.
1020
+ // 'this' will be allocated with s->allocNewInstance(g) so it should have
1021
+ // the same concrete type as 's'
1022
+ virtual void cloneFrom(Node* s);
1023
+ };
1024
+
1025
+ struct Block {
1026
+ friend struct Node;
1027
+ friend struct Graph;
1028
+
1029
+ AT_DISALLOW_COPY_AND_ASSIGN(Block);
1030
+ TORCH_API Block(Graph* graph_, Node* node_);
1031
+
1032
+ at::ArrayRef<Value*> inputs() {
1033
+ return input_->outputs();
1034
+ }
1035
+ at::ArrayRef<const Value*> inputs() const {
1036
+ const auto& inputs = input_->outputs();
1037
+ return {inputs.data(), inputs.size()};
1038
+ }
1039
+ at::ArrayRef<Value*> outputs() {
1040
+ return output_->inputs();
1041
+ }
1042
+ at::ArrayRef<const Value*> outputs() const {
1043
+ return static_cast<const Node*>(output_)->inputs();
1044
+ }
1045
+ graph_node_list nodes() {
1046
+ return {input_, kNextDirection};
1047
+ }
1048
+ const_graph_node_list nodes() const {
1049
+ return {input_, kNextDirection};
1050
+ }
1051
+ Node* return_node() {
1052
+ return output_;
1053
+ }
1054
+ const Node* return_node() const {
1055
+ return output_;
1056
+ }
1057
+ Node* param_node() {
1058
+ return input_;
1059
+ }
1060
+ const Node* param_node() const {
1061
+ return input_;
1062
+ }
1063
+ /**
1064
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
1065
+ * Check #87343 for details.
1066
+ */
1067
+ Graph* owningGraph() {
1068
+ return graph_;
1069
+ }
1070
+ const Graph* owningGraph() const {
1071
+ return graph_;
1072
+ }
1073
+ Node* owningNode() {
1074
+ return owning_node_;
1075
+ }
1076
+ const Node* owningNode() const {
1077
+ return owning_node_;
1078
+ }
1079
+
1080
+ Value* addInput(const std::string& name = "") {
1081
+ Value* v = input_->addOutput();
1082
+ v->setDebugName(name);
1083
+ return v;
1084
+ }
1085
+ Value* insertInput(size_t i, const std::string& name = "") {
1086
+ Value* v = input_->insertOutput(i);
1087
+ v->setDebugName(name);
1088
+ return v;
1089
+ }
1090
+ void eraseInput(size_t i) {
1091
+ input_->eraseOutput(i);
1092
+ }
1093
+ void removeAllInputs() {
1094
+ input_->removeAllOutputs();
1095
+ }
1096
+ size_t registerOutput(Value* v) {
1097
+ output_->addInput(v);
1098
+ return outputs().size() - 1;
1099
+ }
1100
+ size_t insertOutput(size_t i, Value* n) {
1101
+ output_->insertInput(i, n);
1102
+ return i;
1103
+ }
1104
+ void eraseOutput(size_t i) {
1105
+ output_->removeInput(i);
1106
+ }
1107
+ void removeAllOutputs() {
1108
+ output_->removeAllInputs();
1109
+ }
1110
+
1111
+ void replaceOutput(size_t i, Value* n) {
1112
+ output_->replaceInput(i, n);
1113
+ }
1114
+ void permuteOutputs(const std::vector<size_t>& new_inputs) {
1115
+ output_->permuteInputs(new_inputs);
1116
+ }
1117
+ void permuteInputs(const std::vector<size_t>& new_inputs) {
1118
+ input_->permuteOutputs(new_inputs);
1119
+ }
1120
+
1121
+ Node* appendNode(Node* n) {
1122
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1123
+ n->insertBefore(output_);
1124
+ return n;
1125
+ }
1126
+ Node* prependNode(Node* n) {
1127
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1128
+ n->insertAfter(input_);
1129
+ return n;
1130
+ }
1131
+
1132
+ // clone all inputs, nodes, and outputs from src and append them
1133
+ // to the inputs, nodes, and outputs of this block
1134
+ // value_map is used whenever a node in src references a free variable
1135
+ // in src to look up its corresponding value
1136
+ TORCH_API void cloneFrom(Block* src, std::function<Value*(Value*)> value_map);
1137
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1138
+
1139
+ TORCH_API std::shared_ptr<Wrap<Block>> wrap() {
1140
+ if (!wrap_) {
1141
+ wrap_ = std::make_shared<Wrap<Block>>(this);
1142
+ }
1143
+ return wrap_;
1144
+ }
1145
+
1146
+ virtual ~Block() {
1147
+ if (wrap_) {
1148
+ wrap_->clear();
1149
+ }
1150
+ }
1151
+
1152
+ void clear() {
1153
+ removeAllOutputs();
1154
+ for (auto it = nodes().rbegin(); it != nodes().rend(); it++) {
1155
+ it.destroyCurrent();
1156
+ }
1157
+ removeAllInputs();
1158
+ }
1159
+
1160
+ private:
1161
+ void reIndexTopology();
1162
+
1163
+ // get rid of all nodes
1164
+ // destroys in reverse order so that uses internal to this block
1165
+ // do not have to be removed before you can destroy the block
1166
+ void destroy();
1167
+
1168
+ Graph* const graph_;
1169
+ // holds outputs in a way that can be reflected
1170
+ // as a Use object
1171
+ // also used as the beginning/end of the circular node list to avoid
1172
+ // having corner cases where the list is empty.
1173
+ Node* const output_;
1174
+ Node* const input_;
1175
+ Node* const
1176
+ owning_node_; // either the node that has this block or nullptr for root
1177
+ // a managing wrapper for Python to allow invalidation
1178
+ std::shared_ptr<Wrap<Block>> wrap_;
1179
+ };
1180
+
1181
+ struct Graph : std::enable_shared_from_this<Graph> {
1182
+ AT_DISALLOW_COPY_AND_ASSIGN(Graph);
1183
+ friend struct Node;
1184
+ friend struct Value;
1185
+ friend struct Block;
1186
+
1187
+ private:
1188
+ // only used to keep track of allocated nodes
1189
+ // actual representation of Graph is done with
1190
+ // inputs, outputs, nodes
1191
+
1192
+ std::unordered_set<const Node*> all_nodes;
1193
+ std::unordered_set<const Value*> all_values;
1194
+ std::unordered_set<const Block*> all_blocks;
1195
+ size_t next_unique_;
1196
+
1197
+ std::unordered_map<std::string, Value*> unique_names_;
1198
+ // name_base_suffix tracks largest suffix currently used by all names sharing
1199
+ // same name_base. Key of this map is name_base, value is largest suffix
1200
+ // numeric value.
1201
+ std::unordered_map<std::string, size_t> name_base_suffix_;
1202
+
1203
+ ScopePtr current_scope_;
1204
+
1205
+ Block* const block_;
1206
+ // when insertNode() is called, the node is inserted before this node
1207
+ // by default this is set to append to the top level block
1208
+ Node* insert_before_;
1209
+ int64_t predicted_insert_count_ = 0;
1210
+
1211
+ c10::optional<size_t> op_version_;
1212
+
1213
+ public:
1214
+ Graph(ScopePtr scope_root = c10::make_intrusive<Scope>())
1215
+ : next_unique_(0),
1216
+ current_scope_(std::move(scope_root)),
1217
+ block_(new Block(this, nullptr)),
1218
+ insert_before_(return_node()) {}
1219
+
1220
+ at::ArrayRef<Value*> inputs() {
1221
+ return block_->inputs();
1222
+ }
1223
+ at::ArrayRef<const Value*> inputs() const {
1224
+ const Block& block = *block_;
1225
+ return block.inputs();
1226
+ }
1227
+ at::ArrayRef<Value*> outputs() {
1228
+ return block_->outputs();
1229
+ }
1230
+ at::ArrayRef<const Value*> outputs() const {
1231
+ const Block& block = *block_;
1232
+ return block.outputs();
1233
+ }
1234
+ graph_node_list nodes() {
1235
+ return block_->nodes();
1236
+ }
1237
+ const_graph_node_list nodes() const {
1238
+ const Block& block = *block_;
1239
+ return block.nodes();
1240
+ }
1241
+ Node* param_node() {
1242
+ return block_->param_node();
1243
+ }
1244
+ const Node* param_node() const {
1245
+ return block_->param_node();
1246
+ }
1247
+ Node* return_node() {
1248
+ return block_->return_node();
1249
+ }
1250
+ const Node* return_node() const {
1251
+ return block_->return_node();
1252
+ }
1253
+ const std::unordered_map<std::string, Value*>& debugNames() const {
1254
+ return unique_names_;
1255
+ }
1256
+
1257
+ TORCH_API void push_scope(const std::string& scope_name);
1258
+ TORCH_API void pop_scope();
1259
+
1260
+ ScopePtr current_scope() {
1261
+ return current_scope_;
1262
+ }
1263
+
1264
+ void set_op_version(c10::optional<size_t> version) {
1265
+ op_version_ = version;
1266
+ }
1267
+
1268
+ c10::optional<size_t> get_op_version() {
1269
+ return op_version_;
1270
+ }
1271
+
1272
+ void set_current_scope(ScopePtr scope) {
1273
+ current_scope_ = std::move(scope);
1274
+ }
1275
+
1276
+ Value* addInput(const std::string& name = "") {
1277
+ return block_->addInput(name);
1278
+ }
1279
+ Value* insertInput(size_t i, const std::string& name = "") {
1280
+ return block_->insertInput(i, name);
1281
+ }
1282
+ void eraseInput(size_t i) {
1283
+ block_->eraseInput(i);
1284
+ }
1285
+ size_t registerOutput(Value* n) {
1286
+ return block_->registerOutput(n);
1287
+ }
1288
+ void eraseOutput(size_t i) {
1289
+ block_->eraseOutput(i);
1290
+ }
1291
+
1292
+ TORCH_API Node* create(NodeKind kind, size_t num_outputs = 1);
1293
+ TORCH_API Node* create(
1294
+ NodeKind kind,
1295
+ ArrayRef<Value*> inputs,
1296
+ size_t num_outputs = 1);
1297
+
1298
+ TORCH_API Node* createNone();
1299
+ TORCH_API Node* createAutogradZero();
1300
+ TORCH_API Node* createUninitialized(TypePtr typ);
1301
+ TORCH_API Node* createWithSubgraph(Symbol kind);
1302
+ TORCH_API Node* createDifferentiableSubgraph();
1303
+ TORCH_API Node* createTuple(
1304
+ at::ArrayRef<Value*> values,
1305
+ TupleTypePtr optional_named_tuple = nullptr);
1306
+ TORCH_API Node* createTupleUnpack(Value* v);
1307
+ TORCH_API Node* createTupleIndex(
1308
+ Value* tup,
1309
+ Value* idx,
1310
+ const TypePtr& output_type);
1311
+ TORCH_API Node* createTupleSlice(
1312
+ Value* tup,
1313
+ int64_t beg,
1314
+ int64_t step_size,
1315
+ int64_t num_values);
1316
+ TORCH_API Node* createEnumName(Value* e);
1317
+ TORCH_API Node* createEnumValue(Value* e);
1318
+ TORCH_API Node* createList(
1319
+ const TypePtr& contained_type,
1320
+ at::ArrayRef<Value*> values);
1321
+ TORCH_API Node* createListUnpack(Value* v, size_t size);
1322
+ TORCH_API Node* createDict(
1323
+ const TypePtr& key_type,
1324
+ const TypePtr& value_type,
1325
+ at::ArrayRef<Value*> keys,
1326
+ at::ArrayRef<Value*> values);
1327
+ TORCH_API Node* createNumToTensor(Value* value);
1328
+ TORCH_API Node* createObject(const ClassTypePtr& type);
1329
+ TORCH_API Node* createSetAttr(
1330
+ Value* obj,
1331
+ const std::string& field,
1332
+ Value* newValue);
1333
+ TORCH_API Node* createGetAttr(Value* obj, const std::string& field);
1334
+ Value* insertGetAttr(Value* obj, const std::string& field) {
1335
+ return insertNode(createGetAttr(obj, field))->output();
1336
+ }
1337
+ TORCH_API Node* createStore(const std::string& name, Value* v);
1338
+ TORCH_API Node* createLoad(const std::string& name, const TypePtr& type);
1339
+ TORCH_API Node* createIsInstance(Value* v, at::ArrayRef<TypePtr> types);
1340
+
1341
+ TORCH_API Value* insertUncheckedCast(Value* v, TypePtr type);
1342
+
1343
+ // Insert a ToList operator with argument \p v and output type \p type.
1344
+ // \returns the output of the operation.
1345
+ TORCH_API Value* insertToList(Value* v, TypePtr type);
1346
+
1347
+ TORCH_API Value* insertFunctionCall(
1348
+ Function* callee,
1349
+ const MatchedSchema& matched);
1350
+ TORCH_API Value* insertMethodCall(
1351
+ std::string method_name,
1352
+ const MatchedSchema& matched);
1353
+
1354
+ // Note: defined in python_ir.cpp and can be used only in python extension
1355
+ Node* createPythonOp(
1356
+ THPObjectPtr&& pyobj,
1357
+ const std::string& cconv,
1358
+ pyobj_list&& scalar_args);
1359
+ // clone n, making a new node in _this_ graph.
1360
+ // use value_map to translate inputs of n to inputs of the cloned node
1361
+ // if copy_blocks is false, it will not recursively clone the nested blocks
1362
+ // this node contains.
1363
+ TORCH_API Node* createClone(
1364
+ Node* n,
1365
+ const std::function<Value*(Value*)>& value_map,
1366
+ bool copy_blocks = true);
1367
+
1368
+ // Insert constant IValue into the graph.
1369
+ TORCH_API Value* insertConstant(
1370
+ const IValue& val,
1371
+ c10::optional<SourceRange> loc = c10::nullopt,
1372
+ c10::optional<ScopePtr> scope = c10::nullopt);
1373
+
1374
+ // Schema-driven insert:
1375
+ // This inserts a node into the graph with inputs determined from args and
1376
+ // kwargs using Python argument matching rules, and checks that the op matches
1377
+ // a known schema.
1378
+ //
1379
+ // If this node successfully completes, it guarentees the node
1380
+ // is a correctly-formed invocation of opname
1381
+ TORCH_API Value* insert(
1382
+ Symbol opname,
1383
+ at::ArrayRef<NamedValue> args,
1384
+ at::ArrayRef<NamedValue> kwargs = {},
1385
+ const c10::optional<SourceRange>& range = {});
1386
+
1387
+ Node* appendNode(Node* n) {
1388
+ return block_->appendNode(n);
1389
+ }
1390
+
1391
+ Node* prependNode(Node* n) {
1392
+ return block_->prependNode(n);
1393
+ }
1394
+
1395
+ // insert before insert_before_ node
1396
+ // initialized to insert at the end of the top level block
1397
+ // can be changed with setInsertPoint()
1398
+ Node* insertNode(Node* n) {
1399
+ AT_ASSERT(
1400
+ insert_before_->inBlockList() &&
1401
+ "insert point node is no longer in a block list");
1402
+ return n->insertBefore(insert_before_);
1403
+ }
1404
+ // set where nodes are inserted to append to the end of this block
1405
+ void setInsertPoint(Block* b) {
1406
+ AT_ASSERT(b->owningGraph() == this);
1407
+ setInsertPoint(b->return_node());
1408
+ }
1409
+ // set where nodes are inserted to insert _before_ this node
1410
+ // for implementation simplicity we only support inserting before a node for
1411
+ // now
1412
+ void setInsertPoint(Node* n) {
1413
+ AT_ASSERT(n->owningGraph() == this && n->inBlockList());
1414
+ insert_before_ = n;
1415
+ predicted_insert_count_ = 0;
1416
+ }
1417
+ Node* insertPoint() {
1418
+ return insert_before_;
1419
+ }
1420
+
1421
+ // the top level block
1422
+ Block* block() {
1423
+ return block_;
1424
+ }
1425
+ const Block* block() const {
1426
+ return block_;
1427
+ }
1428
+
1429
+ // Checks well-formedness and invariants of graph
1430
+ TORCH_API void lint() const;
1431
+ // for use in debugger
1432
+ TORCH_API void dump() const;
1433
+
1434
+ TORCH_API ~Graph();
1435
+
1436
+ TORCH_API std::string toString(bool print_source_locations = true) const;
1437
+
1438
+ TORCH_API std::ostream& print(
1439
+ std::ostream& out,
1440
+ bool print_source_locations = true) const;
1441
+
1442
+ friend TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
1443
+
1444
+ TORCH_API std::shared_ptr<Graph> copy();
1445
+ TORCH_API std::unique_ptr<Graph> copyUnique();
1446
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1447
+
1448
+ private:
1449
+ friend TORCH_API void Lint(const AliasDb* db);
1450
+ TORCH_API void freeNode(Node* n);
1451
+ TORCH_API void freeValue(Value* v);
1452
+ TORCH_API void freeBlock(Block* b);
1453
+ void cloneFrom(Graph& src);
1454
+ };
1455
+
1456
+ /** \brief An utility class for setting temporary insertion points.
1457
+ *
1458
+ * When an object of this class is created, it stores the current insertion
1459
+ * point, sets the new one, and restores the original insertion point when the
1460
+ * object is destroyed.
1461
+ */
1462
+ struct WithInsertPoint {
1463
+ WithInsertPoint(Node* n) : prev_(n->owningGraph()->insertPoint()) {
1464
+ n->owningGraph()->setInsertPoint(n);
1465
+ }
1466
+ WithInsertPoint(Block* b) : WithInsertPoint(b->return_node()) {}
1467
+
1468
+ ~WithInsertPoint() {
1469
+ prev_->owningGraph()->setInsertPoint(prev_);
1470
+ }
1471
+
1472
+ private:
1473
+ Node* prev_;
1474
+ };
1475
+
1476
+ /** \brief An utility class for setting temporary scopes.
1477
+ *
1478
+ * When an object of this class is created, it stores the current scope, sets
1479
+ * the new one, and restores the original scope when the object is destroyed.
1480
+ */
1481
+ struct WithCurrentScope {
1482
+ WithCurrentScope(Graph& g, ScopePtr scope)
1483
+ : graph_(&g), prev_scope_(g.current_scope()) {
1484
+ g.set_current_scope(std::move(scope));
1485
+ }
1486
+ ~WithCurrentScope() {
1487
+ graph_->set_current_scope(prev_scope_);
1488
+ }
1489
+
1490
+ private:
1491
+ Graph* graph_;
1492
+ ScopePtr prev_scope_;
1493
+ };
1494
+
1495
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1496
+ inline Value::Value(Node* node_, size_t offset_)
1497
+ : node_(node_),
1498
+ offset_(offset_),
1499
+ unique_(node_->graph_->next_unique_++),
1500
+ type_(TensorType::get()) {
1501
+ node_->graph_->all_values.emplace(this);
1502
+ }
1503
+
1504
+ inline Value* Value::setType(TypePtr type) {
1505
+ AT_ASSERT(type);
1506
+ if (auto dyn = type->castRaw<c10::DynamicType>()) {
1507
+ type = dyn->fallback();
1508
+ }
1509
+ type_ = std::move(type);
1510
+ for (Use& use : uses_) {
1511
+ use.user->op_ = nullptr;
1512
+ }
1513
+ return this;
1514
+ }
1515
+
1516
+ inline Graph* Value::owningGraph() {
1517
+ return node()->owningGraph();
1518
+ }
1519
+
1520
+ inline const Graph* Value::owningGraph() const {
1521
+ return node()->owningGraph();
1522
+ }
1523
+
1524
+ /************* All nodes not required to be defined before Graph **************/
1525
+ struct ProfileOp : public Node {
1526
+ static const Symbol Kind;
1527
+ ProfileOp(Graph* graph, std::function<void(std::vector<IValue>&)> callback)
1528
+ : Node(graph, ::c10::prim::profile), callback_(std::move(callback)) {}
1529
+
1530
+ void cloneFrom(Node* other_) override;
1531
+ Node* allocNewInstance(Graph* g) override;
1532
+
1533
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1534
+ return callback_;
1535
+ }
1536
+
1537
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1538
+ callback_ = std::move(callback);
1539
+ }
1540
+
1541
+ bool hasSeenTensor() const {
1542
+ return has_seen_tensor_;
1543
+ }
1544
+
1545
+ void setHasSeenTensor(bool has_seen_tensor) {
1546
+ has_seen_tensor_ = has_seen_tensor;
1547
+ }
1548
+
1549
+ private:
1550
+ std::function<void(std::vector<IValue>&)> callback_;
1551
+ bool has_seen_tensor_ = false;
1552
+ };
1553
+
1554
+ struct TORCH_API ProfileIValueOp : public Node {
1555
+ static const Symbol Kind;
1556
+ ProfileIValueOp(
1557
+ Graph* graph,
1558
+ std::function<void(std::vector<IValue>&)> callback)
1559
+ : Node(graph, ::c10::prim::profile_ivalue),
1560
+ callback_(std::move(callback)) {}
1561
+
1562
+ void cloneFrom(Node* other_) override;
1563
+ Node* allocNewInstance(Graph* g) override;
1564
+
1565
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1566
+ return callback_;
1567
+ }
1568
+
1569
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1570
+ callback_ = std::move(callback);
1571
+ }
1572
+
1573
+ private:
1574
+ std::function<void(std::vector<IValue>&)> callback_;
1575
+ };
1576
+
1577
+ // execute a Python function, used for Ops we can't optimize but that we want to
1578
+ // optimize around
1579
+ //
1580
+ // Note: actual implementation (ConcretePythonOp) is defined in python_ir.cpp
1581
+ // which is not included in libtorch.so. We still include some bits and pieces
1582
+ // of PythonOp here to enable writing simple passes generically. In general,
1583
+ // python-aware bits need to be moved to the descendant classes.
1584
+ struct TORCH_API PythonOp : public Node {
1585
+ using Node::Node;
1586
+
1587
+ virtual std::string name() const = 0;
1588
+ virtual void writeScalars(std::ostream& out) const = 0;
1589
+ void cloneFrom(Node* other_) override = 0;
1590
+ Node* allocNewInstance(Graph* g) override = 0;
1591
+ // recover the autograd.Function instance, if this PythonOp's function
1592
+ // was originally SomeFunction.apply
1593
+ // used in ONNX for discovering symbolics
1594
+ virtual c10::optional<THPObjectPtr> autogradFunction() const = 0;
1595
+
1596
+ virtual void lint_python() const = 0;
1597
+ };
1598
+
1599
+ TORCH_API void LintGraph(const std::shared_ptr<Graph>& graph);
1600
+
1601
+ TORCH_API at::ArrayRef<Value*> createTupleUnpack(Value* v);
1602
+
1603
+ /** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values.
1604
+ * The insertion happens at the current insertion point.
1605
+ * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE
1606
+ * values and their cloned copies in \p G.
1607
+ */
1608
+ TORCH_API std::vector<Value*> insertGraph(
1609
+ Graph& g,
1610
+ Graph& callee,
1611
+ ArrayRef<Value*> inputs);
1612
+ TORCH_API std::vector<Value*> insertGraph(
1613
+ Graph& g,
1614
+ Graph& callee,
1615
+ ArrayRef<Value*> inputs,
1616
+ std::unordered_map<Value*, Value*>& value_map);
1617
+
1618
+ /** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and
1619
+ * replace all its uses with corresponding outputs of the inserted function.
1620
+ * This asserts that the number of outputs of the original node and the
1621
+ * graph are the same.
1622
+ */
1623
+ TORCH_API std::vector<Value*> inlineCallTo(
1624
+ Node* to_replace,
1625
+ GraphFunction* callee,
1626
+ bool use_graph = true);
1627
+
1628
+ TORCH_API std::vector<Value*> inlineCallTo(
1629
+ Node* to_replace,
1630
+ GraphFunction* callee,
1631
+ Graph* callee_graph);
1632
+
1633
+ /** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a
1634
+ * tuple unpack node and return the resulting values.
1635
+ */
1636
+ TORCH_API std::vector<Value*> unpackOutputs(const std::vector<Value*>& outputs);
1637
+
1638
+ TORCH_API std::vector<Node*> findAllNodes(Graph& g, Symbol kind, bool recurse);
1639
+ TORCH_API std::vector<Node*> findAllNodes(Block& b, Symbol kind, bool recurse);
1640
+ TORCH_API std::vector<Node*> findAllNodes(
1641
+ at::ArrayRef<Block*> a,
1642
+ Symbol kind,
1643
+ bool recurse);
1644
+
1645
+ struct TORCH_API OperatorSet {
1646
+ OperatorSet(std::initializer_list<const char*> sig_literals);
1647
+ std::vector<std::shared_ptr<Operator>> getOps() const;
1648
+ void insert(std::initializer_list<const char*> sig_literals);
1649
+
1650
+ private:
1651
+ friend struct Node;
1652
+ std::unordered_map<Symbol, std::vector<std::shared_ptr<Operator>>> ops;
1653
+ };
1654
+
1655
+ template <typename T>
1656
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1657
+ struct OperatorMap {
1658
+ // Type aliasing
1659
+ using OpMapType = typename std::pair<std::shared_ptr<Operator>, T>;
1660
+ using ValueType = std::vector<OpMapType>;
1661
+ using MapType = std::unordered_map<Symbol, ValueType>;
1662
+
1663
+ OperatorMap() = default;
1664
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1665
+ explicit OperatorMap(
1666
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> init) {
1667
+ insert(init);
1668
+ }
1669
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1670
+ explicit OperatorMap(std::initializer_list<std::pair<const char*, T>> init) {
1671
+ insert(init);
1672
+ }
1673
+
1674
+ void insert(const std::shared_ptr<Operator>& op, T val) {
1675
+ // Remove if exists before insert
1676
+ erase(op);
1677
+ map[Symbol::fromQualString(op->schema().name())].emplace_back(
1678
+ std::make_pair(op, val));
1679
+ }
1680
+
1681
+ void insert(const OperatorSet& op_set, T val) {
1682
+ for (auto& op : op_set.getOps()) {
1683
+ insert(op, val);
1684
+ }
1685
+ }
1686
+
1687
+ void insert(
1688
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> v) {
1689
+ for (auto& el : v) {
1690
+ insert(el.first, el.second);
1691
+ }
1692
+ }
1693
+
1694
+ void insert(std::initializer_list<std::pair<const char*, T>> v) {
1695
+ for (auto& el : v) {
1696
+ insert(getOperatorForLiteral(el.first), el.second);
1697
+ }
1698
+ }
1699
+
1700
+ void erase(const std::shared_ptr<Operator>& op) {
1701
+ auto it = map.find(Symbol::fromQualString(op->schema().name()));
1702
+ if (it == map.end()) {
1703
+ return;
1704
+ }
1705
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1706
+ if (vit->first->schema() == op->schema()) {
1707
+ it->second.erase(vit);
1708
+ break;
1709
+ }
1710
+ }
1711
+ if (it->second.size() == 0) {
1712
+ map.erase(Symbol::fromQualString(op->schema().name()));
1713
+ }
1714
+ }
1715
+
1716
+ bool contains(const Operator& op) const {
1717
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1718
+ if (it == map.end()) {
1719
+ return false;
1720
+ }
1721
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1722
+ if (vit->first->schema() == op.schema()) {
1723
+ return true;
1724
+ }
1725
+ }
1726
+ return false;
1727
+ }
1728
+
1729
+ bool contains(const Node* n) const {
1730
+ return n->maybeOperator() && contains(n->getOperator());
1731
+ }
1732
+
1733
+ c10::optional<T> find(const Operator& op) {
1734
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1735
+ if (it == map.end()) {
1736
+ return c10::nullopt;
1737
+ }
1738
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1739
+ if (vit->first->schema() == op.schema()) {
1740
+ return vit->second;
1741
+ }
1742
+ }
1743
+ return c10::nullopt;
1744
+ }
1745
+
1746
+ // TODO: return iterator
1747
+ std::vector<OpMapType> getAllKeysAndValues() const {
1748
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1749
+ std::vector<OpMapType> keys_values;
1750
+ for (auto& symbol_mapping : map) {
1751
+ auto& vec = symbol_mapping.second;
1752
+ for (auto& pair : vec) {
1753
+ keys_values.push_back(pair);
1754
+ }
1755
+ }
1756
+ return keys_values;
1757
+ }
1758
+
1759
+ private:
1760
+ friend struct Node;
1761
+ MapType map;
1762
+ };
1763
+
1764
+ template <typename T>
1765
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1766
+ struct FunctionSchemaMap {
1767
+ // Type aliasing
1768
+ using FuncSchemaMapType = typename std::pair<FunctionSchema, T>;
1769
+ using ValueType = std::vector<FuncSchemaMapType>;
1770
+ using MapType = std::unordered_map<Symbol, ValueType>;
1771
+
1772
+ FunctionSchemaMap() = default;
1773
+ void insert(const FunctionSchema& schema, T val) {
1774
+ // Remove if exists before insert
1775
+ erase(schema);
1776
+ map[Symbol::fromQualString(schema.name())].emplace_back(
1777
+ std::make_pair(schema, val));
1778
+ }
1779
+
1780
+ void erase(const FunctionSchema& schema) {
1781
+ auto it = map.find(Symbol::fromQualString(schema.name()));
1782
+ if (it == map.end()) {
1783
+ return;
1784
+ }
1785
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1786
+ if (vit->first == schema) {
1787
+ it->second.erase(vit);
1788
+ break;
1789
+ }
1790
+ }
1791
+ if (it->second.size() == 0) {
1792
+ map.erase(Symbol::fromQualString(schema.name()));
1793
+ }
1794
+ }
1795
+
1796
+ bool contains(const FunctionSchema& schema) const {
1797
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1798
+ if (it == map.end()) {
1799
+ return false;
1800
+ }
1801
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1802
+ if (vit->first->schema() == schema) {
1803
+ return true;
1804
+ }
1805
+ }
1806
+ return false;
1807
+ }
1808
+
1809
+ c10::optional<T> find(const FunctionSchema& schema) const {
1810
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1811
+ if (it == map.end()) {
1812
+ return c10::nullopt;
1813
+ }
1814
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1815
+ if (vit->first == schema) {
1816
+ return vit->second;
1817
+ }
1818
+ }
1819
+ return c10::nullopt;
1820
+ }
1821
+
1822
+ // TODO: return iterator
1823
+ std::vector<FuncSchemaMapType> getAllKeysAndValues() const {
1824
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1825
+ std::vector<FuncSchemaMapType> keys_values;
1826
+ for (auto& symbol_mapping : map) {
1827
+ auto& vec = symbol_mapping.second;
1828
+ for (auto& pair : vec) {
1829
+ keys_values.push_back(pair);
1830
+ }
1831
+ }
1832
+ return keys_values;
1833
+ }
1834
+
1835
+ private:
1836
+ friend struct Node;
1837
+ MapType map;
1838
+ };
1839
+
1840
+ } // namespace jit
1841
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/node_hashing.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ struct TORCH_API HashNode {
9
+ size_t operator()(const Node* k) const;
10
+ };
11
+
12
+ struct TORCH_API EqualNode {
13
+ bool operator()(const Node* lhs, const Node* rhs) const;
14
+ };
15
+
16
+ } // namespace jit
17
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ /**
12
+ * \brief A structure describing a match of a pattern in a graph.
13
+ *
14
+ * The structure contains an anchor node, from which the match was found, and
15
+ * match-maps for nodes and values. A match-map specifies the correspondence
16
+ * between nodes in the pattern graph (match-map keys) with nodes in the actual
17
+ * graph (match-map values). We keep such maps for both nodes and values.
18
+ */
19
struct Match {
  // Node in the actual graph from which this match was found.
  Node* anchor;
  // Pattern-graph node (key) -> corresponding actual-graph node (value).
  std::unordered_map<const Node*, Node*> nodes_map;
  // Pattern-graph value (key) -> corresponding actual-graph value (value).
  std::unordered_map<const Value*, Value*> values_map;
};
24
+
25
+ /**
26
+ * \brief Find all matches of a \p PATTERN in a \p GRAPH.
27
+ *
28
+ * The function returns a vector of match-descriptors (see description of
29
+ * `struct Match`).
30
+ *
31
+ * Matching rules:
32
+ * - Pattern graph must contain a single block.
33
+ * - Matched subgraphs do not span across different blocks.
34
+ * - No uses outside the match are allowed, except for Param and Return nodes.
35
+ * Basically, we're matching hammocks, not arbitrary subgraphs.
36
+ * - The pattern graph must return only one value (i.e. it must have a single
37
+ * node leading to return).
38
+ * - Nodes that are not used in computation of the return value in the pattern
39
+ * graph are ignored during matching (IOW, we're essentially performing DCE on
40
+ * the pattern).
41
+ * - Pattern graph nodes cannot alias. TODO: the check not implemented yet.
42
+ * - Aliasing nodes in the graph cannot constitute a match (i.e. through all
43
+ * found matches, no nodes in the subgraph alias with each other). TODO: check
44
+ * not implemented yet.
45
+ * - The matcher will not mutate either the pattern graph or the matched graph.
46
+ * The matched graph is taken as non-const so that Match may contain non-const
47
+ * pointers. This enables clients of this API to use Match to drive mutations.
48
+ *
49
+ * Note [Multi-output Patterns]
50
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
51
+ * Subgraph matcher provides limited support for multi-output patterns. With a
52
+ * single output pattern, a single scan through the graph is sufficient to
53
+ * find all the matches: given a starting node (an "anchor"), we can
54
+ * deterministically check whether a pattern matches a subgraph corresponding to
55
+ * this anchor node. For a general case of multi-output patterns, we would have
56
+ * N anchors, which would result in M^N comparisons (M is the size of the
57
+ * graph). Clearly this is computationally prohibitive.
58
+ *
59
+ * To overcome this, we impose some constraints on the multi-output patterns
60
+ * that we accept. We require that checking whether the pattern matches a
61
+ * subgraph would still be fully determined by a single node in the graph. To
62
+ * achieve this, we designate the first output in the pattern as the "main"
63
+ * output and assume that we can traverse up from this node to match the
64
+ * entire pattern.
65
+ *
66
+ * Corollary 1: the order of outputs in the pattern matters!
67
+ * Corollary 2: patterns cannot contain any nodes not participating in the main
68
+ * output computation.
69
+ */
70
+ std::vector<Match> TORCH_API
71
+ findPatternMatches(const Graph& pattern, Graph& graph);
72
+
73
+ } // namespace jit
74
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
// Hash functor over JIT type pointers, with overloads for both mutable and
// const TypePtrs. Defined out-of-line; pair with EqualType when keying
// unordered containers on types.
struct HashType {
  size_t operator()(const TypePtr& type) const;
  size_t operator()(const c10::ConstTypePtr& type) const;
};
13
+
14
// Equality functor over JIT type pointers, the companion of HashType. Defined
// out-of-line.
struct EqualType {
  bool operator()(const TypePtr& a, const TypePtr& b) const;
  bool operator()(const c10::ConstTypePtr& a, const c10::ConstTypePtr& b) const;
};
18
+
19
+ } // namespace jit
20
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/operator_name.h>
7
+ #include <torch/csrc/jit/runtime/instruction.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ namespace mobile {
12
+
13
+ using Stack = std::vector<c10::IValue>;
14
+ using DebugHandle = int64_t;
15
+
16
+ class Function;
17
+
18
// Bytecode of a single mobile function: the instruction list plus the
// operator/constant/type tables that the instructions index into.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct Code {
  std::vector<Instruction> instructions_;
  // Debug handles for the instructions; presumably kept 1:1 with
  // instructions_ (see Function::append_instruction) — confirm before relying
  // on alignment.
  std::vector<DebugHandle> debug_handles_;
  std::vector<c10::OperatorName> op_names_;
  // Per-operator input counts; presumably mirrors append_operator's
  // num_specified_args — TODO confirm against the importer.
  std::vector<int> operator_input_sizes_;
  // Resolved, callable operator stubs; filled in when `initialized` is true.
  std::vector<std::function<void(Stack&)>> operators_;
  std::vector<c10::IValue> constants_;
  std::vector<c10::TypePtr> types_;
  // TODO After we actually export CALL instructions we can remove this.
  // We may need a two-stage importing scheme, where we firstly construct all
  // function objects, and then append referenced function pointers. This could
  // be done in parseMethods().
  std::vector<mobile::Function*> functions_;
  size_t register_size_ = 0; // Aggregated output size.
  // initialized means operators_ array is filled with operators
  bool initialized = false;
};
36
+
37
+ } // namespace mobile
38
+ } // namespace jit
39
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/flat_hash_map.h>
3
+ #include <caffe2/serialize/inline_container.h>
4
+ #include <torch/csrc/jit/api/compilation_unit.h>
5
+ #include <torch/csrc/jit/ir/scope.h>
6
+ #include <torch/csrc/jit/serialization/source_range_serialization.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ /*
11
+ * MobileDebugTable:
12
+ * Deserializes debug_pkl and callstack_map records from PT model's zip archive
13
+ * and stores them in a map of debug handles to DebugInfoPair. Debug handles are
14
+ * unique per model and runtime; whether in the lite interpreter or a
15
+ * delegate, a BackendRuntimeException should be raised using debug handles.
16
+ * getSourceDebugString method is responsible for translating debug
17
+ * handles to the corresponding debug information.
18
+ * This debug information includes the stack trace of model-level source code and
19
+ * module hierarchy where the exception occurred.
20
+ */
21
class MobileDebugTable {
 public:
  MobileDebugTable() = default;
  // Deserializes the debug records from the model archive held by `reader`;
  // `cu` supplies the compilation unit used during deserialization.
  MobileDebugTable(
      std::unique_ptr<caffe2::serialize::PyTorchStreamReader>& reader,
      const std::shared_ptr<CompilationUnit>& cu);

  // Builds the table directly from a range of
  // (debug handle, DebugInfoTuple) pairs.
  template <typename It>
  MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {}

  // Translate debug handle(s) into a model-level source stack trace string.
  // `top_module_type_name` is used as the root module name when the real type
  // name is unknown.
  std::string getSourceDebugString(
      const int64_t debug_handle,
      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
  std::string getSourceDebugString(
      const std::vector<int64_t>& debug_handles,
      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
  // Translate debug handle(s) into a module-hierarchy description string.
  std::string getModuleHierarchyInfo(
      const int64_t debug_handle,
      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
  std::string getModuleHierarchyInfo(
      const std::vector<int64_t>& debug_handles,
      const std::string& top_module_type_name = "ModuleTypeUnknown") const;

  // Read-only access to the underlying handle -> DebugInfoTuple map.
  const ska::flat_hash_map<int64_t, DebugInfoTuple>& getCallStackPtrMap()
      const {
    return callstack_ptr_map_;
  }

 private:
  // Shared implementation returning
  // {source debug string, module hierarchy string}.
  std::pair<std::string, std::string> getSourceDebugModuleHierarchyInfo(
      const std::vector<int64_t>& debug_handles,
      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
  // Debug handle -> debug info mapping for this model.
  ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
};
55
+
56
+ } // namespace jit
57
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cerrno>
5
+ #include <cstddef>
6
+ #include <cstring>
7
+ #include <fstream>
8
+ #include <istream>
9
+ #include <memory>
10
+
11
+ #include <c10/core/CPUAllocator.h>
12
+ #include <c10/core/impl/alloc_cpu.h>
13
+ #include <caffe2/serialize/read_adapter_interface.h>
14
+
15
+ #if defined(HAVE_MMAP)
16
+ #include <fcntl.h>
17
+ #include <sys/mman.h>
18
+ #include <sys/stat.h>
19
+ #include <sys/types.h>
20
+ #include <unistd.h>
21
+ #endif
22
+
23
+ /**
24
+ * @file
25
+ *
26
+ * Helpers for identifying file formats when reading serialized data.
27
+ *
28
+ * Note that these functions are declared inline because they will typically
29
+ * only be called from one or two locations per binary.
30
+ */
31
+
32
+ namespace torch {
33
+ namespace jit {
34
+
35
/**
 * The format of a file or data stream.
 */
enum class FileFormat {
  UnknownFileFormat = 0,
  FlatbufferFileFormat,
  ZipFileFormat,
};

/// The size of the buffer to pass to #getFileFormat(), in bytes.
constexpr size_t kFileFormatHeaderSize = 8;
constexpr size_t kMaxAlignment = 16;

/**
 * Returns the likely file format based on the magic header bytes in @p data,
 * which should contain at least the first kFileFormatHeaderSize bytes of a
 * file or data stream.
 */
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline FileFormat getFileFormat(const char* data) {
  // The size of the magic strings to look for in the buffer.
  static constexpr size_t kMagicSize = 4;
  // Bytes 4..7 of a Flatbuffer-encoded file produced by
  // `flatbuffer_serializer.h` hold "PTMF". (The first four bytes contain an
  // offset to the actual Flatbuffer data.)
  static constexpr size_t kFlatbufferMagicOffset = 4;

  const auto magic_matches = [](const char* at, const char* magic) {
    return std::memcmp(at, magic, kMagicSize) == 0;
  };

  // Check for the Flatbuffer magic first. Since the first four bytes of
  // flatbuffer data contain an offset to the root struct, it's theoretically
  // possible to construct a file whose offset looks like the ZIP magic. On the
  // other hand, bytes 4-7 of ZIP files are constrained to a small set of
  // values that do not typically cross into the printable ASCII range, so a
  // ZIP file should never have a header that looks like a Flatbuffer file.
  if (magic_matches(data + kFlatbufferMagicOffset, "PTMF")) {
    // Binary file containing a Flatbuffer-serialized mobile Module.
    return FileFormat::FlatbufferFileFormat;
  }
  if (magic_matches(data, "PK\x03\x04")) {
    // Zip file, which we use to store pickled sub-files.
    return FileFormat::ZipFileFormat;
  }
  return FileFormat::UnknownFileFormat;
}
87
+
88
+ /**
89
+ * Returns the likely file format based on the magic header bytes of @p data.
90
+ * If the stream position changes while inspecting the data, this function will
91
+ * restore the stream position to its original offset before returning.
92
+ */
93
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
94
+ static inline FileFormat getFileFormat(std::istream& data) {
95
+ FileFormat format = FileFormat::UnknownFileFormat;
96
+ std::streampos orig_pos = data.tellg();
97
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
98
+ std::array<char, kFileFormatHeaderSize> header;
99
+ data.read(header.data(), header.size());
100
+ if (data.good()) {
101
+ format = getFileFormat(header.data());
102
+ }
103
+ data.seekg(orig_pos, data.beg);
104
+ return format;
105
+ }
106
+
107
+ /**
108
+ * Returns the likely file format based on the magic header bytes of the file
109
+ * named @p filename.
110
+ */
111
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
112
+ static inline FileFormat getFileFormat(const std::string& filename) {
113
+ std::ifstream data(filename, std::ifstream::binary);
114
+ return getFileFormat(data);
115
+ }
116
+
117
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
118
+ static void file_not_found_error() {
119
+ std::stringstream message;
120
+ message << "Error while opening file: ";
121
+ if (errno == ENOENT) {
122
+ message << "no such file or directory" << std::endl;
123
+ } else {
124
+ message << "error no is: " << errno << std::endl;
125
+ }
126
+ TORCH_CHECK(false, message.str());
127
+ }
128
+
129
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
130
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_file_content(
131
+ const char* filename) {
132
+ #if defined(HAVE_MMAP)
133
+ int fd = open(filename, O_RDONLY);
134
+ if (fd < 0) {
135
+ // failed to open file, chances are it's no such file or directory.
136
+ file_not_found_error();
137
+ }
138
+ struct stat statbuf {};
139
+ fstat(fd, &statbuf);
140
+ size_t size = statbuf.st_size;
141
+ void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
142
+ close(fd);
143
+ auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); };
144
+ std::shared_ptr<char> data(reinterpret_cast<char*>(ptr), deleter);
145
+ #else
146
+ FILE* f = fopen(filename, "rb");
147
+ if (f == nullptr) {
148
+ file_not_found_error();
149
+ }
150
+ fseek(f, 0, SEEK_END);
151
+ size_t size = ftell(f);
152
+ fseek(f, 0, SEEK_SET);
153
+ // make sure buffer size is multiple of alignment
154
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
155
+ std::shared_ptr<char> data(
156
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
157
+ fread(data.get(), size, 1, f);
158
+ fclose(f);
159
+ #endif
160
+ return std::make_tuple(data, size);
161
+ }
162
+
163
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
164
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_stream_content(
165
+ std::istream& in) {
166
+ // get size of the stream and reset to orig
167
+ std::streampos orig_pos = in.tellg();
168
+ in.seekg(orig_pos, std::ios::end);
169
+ const long size = in.tellg();
170
+ in.seekg(orig_pos, in.beg);
171
+
172
+ // read stream
173
+ // NOLINT make sure buffer size is multiple of alignment
174
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
175
+ std::shared_ptr<char> data(
176
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
177
+ in.read(data.get(), size);
178
+
179
+ // reset stream to original position
180
+ in.seekg(orig_pos, in.beg);
181
+ return std::make_tuple(data, size);
182
+ }
183
+
184
// Reads the entire payload of `rai` into a freshly allocated CPU buffer whose
// size is rounded up to a multiple of kMaxAlignment.
// NOTE: unlike get_file_content()/get_stream_content(), the second tuple
// element returned here is the *padded* buffer size, not the logical payload
// size (`rai->size()`).
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
    caffe2::serialize::ReadAdapterInterface* rai) {
  // make sure buffer size is a multiple of the alignment
  size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment;
  std::shared_ptr<char> data(
      static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
  rai->read(
      0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes");
  return std::make_tuple(data, buffer_size);
}
194
+
195
+ } // namespace jit
196
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <istream>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <unordered_map>
7
+ #include <vector>
8
+
9
+ #include <ATen/core/ivalue.h>
10
+ #include <c10/core/Device.h>
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <torch/csrc/jit/mobile/module.h>
14
+
15
+ /**
16
+ * Defines the public API for loading flatbuffer-serialized mobile modules.
17
+ * Note that this header must not include or depend on flatbuffer-defined
18
+ * types, to avoid leaking those details to PyTorch clients.
19
+ */
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+
24
+ /// All non-copied data pointers provided to `parse_and_initialize_*` functions
25
+ /// must be aligned to this boundary. Since the Module will point directly into
26
+ /// the data, this alignment is necessary to ensure that certain types/structs
27
+ /// are properly aligned.
28
+ constexpr size_t kFlatbufferDataAlignmentBytes = 16;
29
+
30
+ /// Maps file names to file contents.
31
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
32
+
33
+ // On high level, to produce a Module from a file on disk, we need to go
34
+ // through the follow steps:
35
+ // 1. Read: Read the file from disk -> memory
36
+ // 2. Deserialize: Parse the bytes to produce some in memory manipulable
37
+ // structure
38
+ // 3. Module initialization: Produce mobile::Module out of the structure
39
+ // produced in 2.
40
+ // Under this context, the structure described in 2. is the flatbuffer-defined
41
+ // type mobile::serialization::Module. However, this step/type is not visible in
42
+ // the public API.
43
+
44
+ // Parse a mobile::Module from raw bytes.
45
+ //
46
+ // This function does steps 2+3 described above.
47
+ //
48
+ // Does not take ownership of `data`; if you want it to take ownership, see the
49
+ // shared_ptr overload of this function.
50
+ //
51
+ // If should_copy_tensor_memory is true, then the returned module will NOT have
52
+ // references to `data`, so `data` can be freed immediately.
53
+ //
54
+ // If should_copy_tensor_memory is false, then returned module will have tensors
55
+ // that points inside of `data`; the caller will need to make sure that `data`
56
+ // outlives the returned Module. Also, `data` must be aligned to
57
+ // kFlatbufferDataAlignmentBytes.
58
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
59
+ void* data,
60
+ size_t size, // of `data`, in bytes.
61
+ c10::optional<at::Device> device = c10::nullopt,
62
+ ExtraFilesMap* extra_files = nullptr,
63
+ bool should_copy_tensor_memory = false);
64
+
65
+ // Parse a mobile::Module from raw bytes.
66
+ //
67
+ // This function does steps 2+3 described above.
68
+ //
69
+ // The returned Module holds a reference to `data`, which must be aligned to
70
+ // kFlatbufferDataAlignmentBytes.
71
+ //
72
+ // If you do not want the Module to hold a reference to `data`, see the raw
73
+ // pointer overload of this function.
74
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
75
+ std::shared_ptr<char> data,
76
+ size_t size, // of `data`, in bytes.
77
+ c10::optional<at::Device> device = c10::nullopt,
78
+ ExtraFilesMap* extra_files = nullptr);
79
+
80
+ // Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
81
+ //
82
+ // This is the same as parse_and_initialize_mobile_module() except that it also
83
+ // extracts JIT source files and constants. Can be used to construct a
84
+ // jit::Module.
85
+ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
86
+ void* data,
87
+ size_t size, // of `data`, in bytes.
88
+ ExtraFilesMap& jit_sources,
89
+ std::vector<IValue>& jit_constants,
90
+ c10::optional<at::Device> device = c10::nullopt,
91
+ ExtraFilesMap* extra_files = nullptr);
92
+
93
+ // Load a mobile::Module from a filepath.
94
+ //
95
+ // This function does steps 1+2+3 described above.
96
+ //
97
+ // We need to have this as a convenience because Python API will need to wrap
98
+ // this. C++ clients should use one of the versions of
99
+ // parse_and_initialize_mobile_module() so they can manage the raw data more
100
+ // directly.
101
+ TORCH_API mobile::Module load_mobile_module_from_file(
102
+ const std::string& filename,
103
+ c10::optional<at::Device> device = c10::nullopt,
104
+ ExtraFilesMap* extra_files = nullptr);
105
+
106
+ TORCH_API uint64_t get_bytecode_version(std::istream& in);
107
+ TORCH_API uint64_t get_bytecode_version(const std::string& filename);
108
+ TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);
109
+
110
+ TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
111
+ char* flatbuffer_content);
112
+
113
+ // The methods below are less efficient because they need to read the stream
114
+ // in its entirety to a buffer
115
+ TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
116
+ std::istream& in,
117
+ c10::optional<at::Device> device = c10::nullopt,
118
+ ExtraFilesMap* extra_files = nullptr);
119
+
120
+ TORCH_API mobile::Module parse_flatbuffer_no_object(
121
+ std::shared_ptr<char> data,
122
+ size_t size,
123
+ c10::optional<at::Device> device);
124
+
125
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
126
+ void* data,
127
+ size_t,
128
+ c10::optional<at::Device>,
129
+ ExtraFilesMap* extra_files,
130
+ bool should_copy_tensor_memory);
131
+
132
+ // no op, TODO(qihan) delete
133
+ TORCH_API bool register_flatbuffer_loader();
134
+
135
+ } // namespace jit
136
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <ATen/core/function.h>
6
+ #include <ATen/core/function_schema.h>
7
+ #include <ATen/core/ivalue.h>
8
+ #include <torch/csrc/jit/mobile/code.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ enum OpCode : uint8_t;
13
+ struct Instruction;
14
+ struct OperatorString;
15
+
16
+ namespace mobile {
17
+
18
// A mobile (lite-interpreter) function: wraps a bytecode Code object and
// implements the torch::jit::Function interface over it.
class TORCH_API Function : public torch::jit::Function {
 public:
  explicit Function(c10::QualifiedName name);
  Function(
      c10::QualifiedName name,
      Code code,
      at::optional<c10::FunctionSchema> schema);
  // Executes this function's bytecode, consuming inputs from / leaving
  // outputs on `stack`.
  void run(Stack& stack) override;
  at::IValue operator()(Stack& stack);
  // Mobile functions are already fully defined at construction; nothing to do.
  void ensure_defined() override {}
  size_t num_inputs() const override;
  const c10::QualifiedName& qualname() const override;
  bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) override;

  // NOTE: the APIs below are dangerous: if you call append_instruction with
  // dbg_handle and then call it without, then the dbg_handles will become
  // misaligned. Therefore only use ONE variant at a time.
  void append_instruction(OpCode op, int X, int N, int64_t dbg_handle);
  void append_instruction(OpCode op, int X, int N);
  void append_operator(
      const std::string& name,
      const std::string& overload_name,
      const c10::optional<int>& num_specified_args);
  void append_constant(const c10::IValue& constant);
  void append_type(const c10::TypePtr& type);
  void append_function(mobile::Function& func);

  void set_register_size(size_t size);

  // Maps a program counter to its recorded debug handle.
  int64_t get_debug_handle(size_t pc) const;
  const Code& get_code() const;
  Code& get_code();

  torch::jit::Function& setSchema(c10::FunctionSchema schema) override;
  bool hasSchema() const;
  const c10::FunctionSchema& getSchema() const override;

  // Returns the debug handle corresponding to where the execution
  // is halted due to exception.
  // If no corresponding debug handle is found then -1 is returned.
  const std::vector<int64_t>& getExceptionDebugHandles() const;
  static Function& registerFunc(
      const std::string& qualified_name,
      const std::vector<Instruction>& instructions,
      const std::vector<c10::IValue>& constants,
      const std::vector<c10::TypePtr>& types,
      const size_t register_size);

  // If not yet initialized, initializes by loading operators.
  // Returns true if all ops were loaded; returns false if some op is not
  // found in the current runtime. The ops that were not found are recorded
  // in unsupported_op_names.
  bool initialize_operators(bool should_check_operators);

 private:
  c10::QualifiedName name_;
  Code code_;
  at::optional<c10::FunctionSchema> schema_; // (byte-code version 4+)
};
77
+
78
+ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
79
+ c10::OperatorName opname,
80
+ c10::optional<int> num_specified_args);
81
+
82
+ TORCH_API std::string operator_str(const c10::OperatorName& opname);
83
+
84
+ } // namespace mobile
85
+ } // namespace jit
86
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/module.h>
3
+ #include <torch/csrc/jit/mobile/parse_operators.h>
4
+
5
+ #include <istream>
6
+ #include <memory>
7
+
8
+ #include <caffe2/serialize/file_adapter.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ using caffe2::serialize::FileAdapter;
13
+ using caffe2::serialize::IStreamAdapter;
14
+ using caffe2::serialize::ReadAdapterInterface;
15
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
16
+
17
+ constexpr const char* kArchiveNameBytecode = "bytecode";
18
+ constexpr const char* kArchiveNameConstants = "constants";
19
+ constexpr const char* kArchiveNameVersion = "version";
20
+
21
+ // The family of methods below load a serialized Mobile Module
22
+ // into a mobile::Module object.
23
+ TORCH_API mobile::Module _load_for_mobile(
24
+ std::istream& in,
25
+ c10::optional<at::Device> device,
26
+ ExtraFilesMap& extra_file,
27
+ uint64_t module_load_options = kDefaultMobileLoadOptions);
28
+
29
+ TORCH_API mobile::Module _load_for_mobile(
30
+ const std::string& filename,
31
+ c10::optional<at::Device> device,
32
+ ExtraFilesMap& extra_files);
33
+
34
+ TORCH_API mobile::Module _load_for_mobile(
35
+ std::unique_ptr<ReadAdapterInterface> rai,
36
+ c10::optional<c10::Device> device,
37
+ ExtraFilesMap& extra_files,
38
+ uint64_t module_load_options = kDefaultMobileLoadOptions);
39
+
40
+ TORCH_API mobile::Module _load_for_mobile(
41
+ const std::string& filename,
42
+ c10::optional<at::Device> device,
43
+ ExtraFilesMap& extra_files,
44
+ uint64_t module_load_options);
45
+
46
+ TORCH_API mobile::Module _load_for_mobile(
47
+ std::istream& in,
48
+ c10::optional<at::Device> device = c10::nullopt);
49
+
50
+ TORCH_API mobile::Module _load_for_mobile(
51
+ const std::string& filename,
52
+ c10::optional<at::Device> device = c10::nullopt);
53
+
54
+ TORCH_API mobile::Module _load_for_mobile(
55
+ std::unique_ptr<ReadAdapterInterface> rai,
56
+ c10::optional<c10::Device> device = c10::nullopt);
57
+
58
+ /**
59
+ * Load only the contents of the "extra/" files whose names are
60
+ * passed in the map (extra_files). Populate the corresponding values
61
+ * with the contents of those files. Do not attempt to load the entire
62
+ * model, and stop once the extra files have been extracted.
63
+ *
64
+ * This API is needed to be able to load GPU models on linux CPU
65
+ * machines and extract only the extra files so that we can inspect
66
+ * the metadata that was added to the .ptl archive when it was
67
+ * generated.
68
+ *
69
+ */
70
+ void _load_extra_only_for_mobile(
71
+ const std::string& filename,
72
+ c10::optional<at::Device> device,
73
+ ExtraFilesMap& extra_files);
74
+
75
+ // Currently used by both mobile/import.cpp and model_compatibility.cpp.
76
+ // Should be removed after model_compatibility.cpp start using simplified
77
+ // version type_resolver and obj_loader.
78
+ at::TypePtr resolveTypeNameMobile(
79
+ const c10::QualifiedName& qn,
80
+ std::shared_ptr<CompilationUnit> compilation_unit);
81
+ c10::StrongTypePtr typeResolverMobile(
82
+ const c10::QualifiedName& qn,
83
+ const std::shared_ptr<CompilationUnit>& compilation_unit);
84
+ c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
85
+ const at::StrongTypePtr& type,
86
+ const at::IValue& input,
87
+ mobile::CompilationUnit& mobile_compilation_unit);
88
+
89
+ // Given a reader, which has access to a model file,
90
+ // return true if there exists tensors in `bytecode` archive
91
+ bool isTensorInBytecodeArchive(
92
+ caffe2::serialize::PyTorchStreamReader& stream_reader);
93
+
94
+ namespace mobile {
95
+
96
+ /**
97
+ * Given a torch::jit::mobile::Module, return a set of operator names
98
+ * (with overload name) that are used by any method in this mobile
99
+ * Mobile. This method runs through the bytecode for all methods
100
+ * in the specified model (module), and extracts all the root
101
+ * operator names. Root operators are operators that are called
102
+ * directly by the model (as opposed to non-root operators, which
103
+ * may be called transitively by the root operators).
104
+ *
105
+ */
106
+ TORCH_API std::set<std::string> _export_operator_list(
107
+ torch::jit::mobile::Module& module);
108
+
109
+ } // namespace mobile
110
+
111
+ } // namespace jit
112
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/mobile/module.h>
7
+
8
+ #include <istream>
9
+ #include <map>
10
+ #include <string>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ /**
16
+ * Loads named parameters from the serialized data in @p in.
17
+ *
18
+ * Calls #TORCH_CHECK() if the data format is not recognized.
19
+ */
20
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
21
+ std::istream& in,
22
+ c10::optional<at::Device> device = c10::nullopt);
23
+
24
+ /**
25
+ * Loads named parameters from the serialized data in @p filename.
26
+ *
27
+ * Calls #TORCH_CHECK() if the data format is not recognized.
28
+ */
29
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
30
+ const std::string& filename,
31
+ c10::optional<at::Device> device = c10::nullopt);
32
+
33
+ // NOTE: Please prefer using _load_parameters over using the function below.
34
+ TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
35
+ const mobile::Module& module);
36
+
37
+ } // namespace jit
38
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <torch/csrc/jit/mobile/function.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace mobile {
9
+
10
+ class Module;
11
+
12
+ struct TORCH_API Method {
13
+ Method(const Module* owner, Function* function);
14
+
15
+ void run(Stack& stack) const;
16
+ void run(Stack&& stack) const {
17
+ run(stack);
18
+ }
19
+
20
+ c10::IValue operator()(std::vector<c10::IValue> stack) const;
21
+
22
+ const std::string& name() const {
23
+ return function_->name();
24
+ }
25
+
26
+ int64_t get_debug_handle(size_t pc) const {
27
+ return function_->get_debug_handle(pc);
28
+ }
29
+
30
+ Function& function() const {
31
+ return *function_;
32
+ }
33
+
34
+ private:
35
+ // Methods are uniquely owned by a single module.
36
+ // This raw pointer allows referencing the module
37
+ const Module* owner_;
38
+
39
+ // Underlying unbound function
40
+ Function* function_;
41
+ };
42
+
43
+ } // namespace mobile
44
+ } // namespace jit
45
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <torch/csrc/jit/mobile/debug_info.h>
4
+ #include <torch/csrc/jit/mobile/function.h>
5
+ #include <torch/csrc/jit/mobile/method.h>
6
+ #include <torch/csrc/jit/mobile/quantization.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace mobile {
13
+ using Stack = std::vector<c10::IValue>;
14
+
15
+ // A CompilationUnit object is the one that gets executed by the lite
16
+ // interpreter.
17
+ //
18
+ // A CompilationUnit object contains a list of Method Objects. These are methods
19
+ // that appear in the original PyTorch Model. These method correspond to Python
20
+ // member functions of the Model class.
21
+ //
22
+ // Methods in turn contain a Function, and a back-pointer to the Module that
23
+ // owns this Method instance.
24
+ //
25
+ // A Function contains a Code Object (code_) which is defined in interpreter.h
26
+ //
27
+ // A Code object contains the following:
28
+ //
29
+ // std::vector<Instruction> instructions_;
30
+ // std::vector<c10::OperatorName> op_names_;
31
+ // std::vector<std::function<void(Stack&)>> operators_;
32
+ // std::vector<c10::IValue> constants_;
33
+ // std::vector<c10::TypePtr> types_;
34
+ // size_t register_size_; // Aggregated output size.
35
+ //
36
+ class CompilationUnit {
37
+ public:
38
+ void register_function(std::unique_ptr<Function> fn);
39
+ std::vector<std::unique_ptr<Function>>& methods() {
40
+ return methods_;
41
+ }
42
+ const std::vector<std::unique_ptr<Function>>& methods() const {
43
+ return methods_;
44
+ }
45
+ Function* find_function(const c10::QualifiedName& qn);
46
+ const Function* find_function(const c10::QualifiedName& qn) const;
47
+
48
+ void unsafeRemoveFunction(const int64_t index) {
49
+ methods_.erase(methods_.begin() + index);
50
+ }
51
+
52
+ private:
53
+ std::vector<std::unique_ptr<Function>> methods_;
54
+ };
55
+
56
+ // A Torch Mobile Module is a representation of the model (trained in case
57
+ // of inference). A Mobile Module contains
58
+ //
59
+ // 1. data (object_)
60
+ // 2. metadata (optional) about the model (metadata_ from the metadata.pkl
61
+ // file added after training)
62
+ // 3. Compilation Unit (cu_)
63
+ //
64
+ class TORCH_API Module {
65
+ public:
66
+ Module(
67
+ c10::intrusive_ptr<c10::ivalue::Object> object,
68
+ std::shared_ptr<CompilationUnit> cu)
69
+ : object_(std::move(object)), cu_(std::move(cu)) {}
70
+ Module() = default;
71
+ Method get_method(const std::string& method_name) const;
72
+ template <typename... Types>
73
+ c10::IValue run_method(const std::string& method_name, Types&&... args) {
74
+ return get_method(method_name)({IValue(std::forward<Types>(args))...});
75
+ }
76
+ c10::IValue forward(std::vector<c10::IValue> inputs) {
77
+ return get_method("forward")(std::move(inputs));
78
+ }
79
+ c10::optional<Method> find_method(const std::string& basename) const;
80
+
81
+ const std::string name() const {
82
+ return object_->name();
83
+ }
84
+ const std::vector<at::IValue>& slots() const {
85
+ return object_->slots();
86
+ }
87
+ const c10::intrusive_ptr<c10::ivalue::Object> _ivalue() const {
88
+ return object_;
89
+ }
90
+ const std::vector<at::Tensor> parameters() const;
91
+ const std::map<std::string, at::Tensor> named_parameters() const;
92
+ std::string get_forward_method_debug_info(int64_t debug_handle) const;
93
+ std::string getModuleHierarchy(const int64_t debug_handle) const;
94
+ std::string getCallStack(const int64_t debug_handle) const;
95
+ /// Enables "training" mode.
96
+ void train(bool on = true);
97
+ /// Calls train(false) to enable "eval" mode.
98
+ void eval() {
99
+ train(/*on=*/false);
100
+ }
101
+ /// True if the module is in training mode.
102
+ bool is_training() const;
103
+ const std::unordered_map<std::string, std::string> getMetadata() const {
104
+ return metadata_;
105
+ }
106
+ void setMetadata(
107
+ const std::unordered_map<std::string, std::string>& metadata) {
108
+ metadata_ = metadata;
109
+ }
110
+ const std::vector<Method> get_methods() const;
111
+
112
+ c10::IValue attr(const std::string& name, c10::IValue or_else) const {
113
+ if (auto r = object_->type()->findAttributeSlot(name)) {
114
+ return object_->getSlot(*r);
115
+ }
116
+ if (auto r = object_->type()->findConstantSlot(name)) {
117
+ return object_->type()->getConstant(*r);
118
+ }
119
+ return or_else;
120
+ }
121
+
122
+ void setDebugTable(MobileDebugTable&& debug_table) {
123
+ debug_table_ = std::move(debug_table);
124
+ }
125
+ const MobileDebugTable& getDebugTable() const {
126
+ return debug_table_;
127
+ }
128
+
129
+ void setHasDebugHandles(bool has_debug_handles) {
130
+ has_debug_handles_ = has_debug_handles;
131
+ }
132
+
133
+ bool hasDebugHandles() const {
134
+ return has_debug_handles_;
135
+ }
136
+
137
+ const CompilationUnit& compilation_unit() const {
138
+ return *cu_.get();
139
+ }
140
+
141
+ void set_delete_memory(std::shared_ptr<char> delete_mem) {
142
+ mem_to_delete_ = std::move(delete_mem);
143
+ }
144
+
145
+ void set_min_operator_version(int64_t version) {
146
+ min_operator_version_ = version;
147
+ }
148
+
149
+ int64_t min_operator_version() const {
150
+ return min_operator_version_;
151
+ }
152
+
153
+ void set_bytecode_version(int64_t version) {
154
+ bytecode_version_ = version;
155
+ }
156
+
157
+ int64_t bytecode_version() const {
158
+ return bytecode_version_;
159
+ }
160
+
161
+ private:
162
+ friend class quantization::PTQQuanizationHelper;
163
+
164
+ bool compareMethodSchemas(
165
+ const std::string& name_1,
166
+ const std::string& name_2);
167
+
168
+ void unsafeRemoveMethod(const std::string& basename);
169
+
170
+ void unsafeCopyMethod(
171
+ const std::string& new_method_name,
172
+ const Function& to_be_copied);
173
+
174
+ c10::intrusive_ptr<c10::ivalue::Object> object_;
175
+ std::unordered_map<std::string, std::string> metadata_;
176
+ std::shared_ptr<CompilationUnit> cu_;
177
+ MobileDebugTable debug_table_;
178
+ bool has_debug_handles_ = false;
179
+ int64_t min_operator_version_ = 4;
180
+ int64_t bytecode_version_ = 4;
181
+
182
+ // Extra handle for the module to delete when itself is deleted
183
+ std::shared_ptr<char> mem_to_delete_;
184
+ };
185
+
186
+ struct TORCH_API ModuleInfo {
187
+ uint64_t bytecode_version;
188
+ uint64_t operator_version;
189
+ std::unordered_map<std::string, int> opname_to_num_args;
190
+ std::unordered_set<std::string> function_names;
191
+ std::unordered_set<std::string> type_names;
192
+ };
193
+ TORCH_API ModuleInfo get_module_info(const mobile::Module& module);
194
+
195
+ } // namespace mobile
196
+ } // namespace jit
197
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/ThreadLocalDebugInfo.h>
4
+ #include <string>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+
10
+ class MobileDebugInfo : public c10::DebugInfoBase {
11
+ public:
12
+ const std::string& getModelName() {
13
+ return model_name_;
14
+ }
15
+
16
+ void setModelName(const std::string& model_name) {
17
+ model_name_ = model_name;
18
+ }
19
+
20
+ const std::string& getMethodName() {
21
+ return method_name_;
22
+ }
23
+
24
+ void setMethodName(const std::string& method_name) {
25
+ method_name_ = method_name;
26
+ }
27
+
28
+ size_t getOpIdx() {
29
+ return op_idx_;
30
+ }
31
+
32
+ void setOpIdx(size_t op_idx) {
33
+ op_idx_ = op_idx;
34
+ }
35
+
36
+ private:
37
+ std::string model_name_;
38
+ std::string method_name_;
39
+ // TODO: Kimish
40
+ // If we launch a thread such as for at::launch, interepter continuation
41
+ // and if the caching allocator is enabled in the base thread
42
+ // then, in order to propagate this information, that is caching allocator
43
+ // is enabled, across thread boundaries we can use the mechanism provided
44
+ // by ThreadLocalDebugInfo
45
+ // Once the thread local MobileDebugInfo is accessible in the launched
46
+ // thread, it can be accessed in that thread and that thread can set
47
+ // its own thread local CachingAllocatorInfo.
48
+ // However, we cannot expect every launched thread to extract and set
49
+ // its own thread local copy of CachingAllocatorInfo.
50
+ // But this can be done in lite interpreter, where in the run method
51
+ // it can do info =
52
+ // c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::MOBILE_RUNTIME_INFO))
53
+ // .get_caching_allocator_info();
54
+ // GetThreadLocalCachingAllocatorInfo() = info;
55
+ // Other option is to have MobileDebugInfo itself be the place where thread
56
+ // local copy of CachingAllocatorInfo is stored. Then
57
+ // DefaultMobileCPUAllocator inspects this to decide if to use
58
+ // CachingAllocator. However, current lite interpreter does not support FORK,
59
+ // thus from the run method of lite interpreter we are not really gonna launch
60
+ // another instance of lite interpreter in a different thread. So for now not
61
+ // getting bothered about passing CachingAllocatorInfo across thread
62
+ // boundaries. c10::CachingAllocatorInfo caching_allocator_info;
63
+ size_t op_idx_ = 0;
64
+ };
65
+
66
+ class MobileModuleObserver {
67
+ public:
68
+ virtual ~MobileModuleObserver() = default;
69
+
70
+ virtual void onEnterRunMethod(const int32_t) {}
71
+ virtual void onExitRunMethod(
72
+ const std::unordered_map<std::string, std::string>&,
73
+ const std::string&,
74
+ const int32_t) {}
75
+ virtual void onFailRunMethod(
76
+ const std::unordered_map<std::string, std::string>&,
77
+ const std::string&,
78
+ const int32_t,
79
+ const char*) {}
80
+ virtual void onEnterLoadModel(const int32_t) {}
81
+ virtual void onExitLoadModel(
82
+ const int32_t,
83
+ const std::unordered_map<std::string, std::string>&) {
84
+ } // key: filename, value: file content
85
+ virtual void onFailLoadModel(const int32_t, const char*) {}
86
+ virtual void onFailLoadModel(
87
+ const int32_t,
88
+ const char*,
89
+ const std::unordered_map<std::string, std::string>&) {}
90
+ virtual std::vector<std::string> getDefaultExtraFiles() = 0;
91
+ virtual std::unordered_map<std::string, std::string> processMetadataFromExtra(
92
+ const std::unordered_map<std::string, std::string>&) = 0;
93
+ };
94
+
95
+ class MobileObserverConfig {
96
+ public:
97
+ void setModuleObserver(std::unique_ptr<MobileModuleObserver> reporter) {
98
+ module_observer_ = std::move(reporter);
99
+ }
100
+ MobileModuleObserver* getModuleObserver() {
101
+ return module_observer_.get();
102
+ }
103
+
104
+ private:
105
+ std::unique_ptr<MobileModuleObserver> module_observer_;
106
+ };
107
+
108
+ MobileObserverConfig& observerConfig();
109
+
110
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <functional>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ namespace mobile {
10
+
11
+ using Stack = std::vector<c10::IValue>;
12
+
13
+ void registerPrimOpsFunction(
14
+ const std::string& name,
15
+ const std::function<void(Stack&)>& fn);
16
+
17
+ bool hasPrimOpsFn(const std::string& name);
18
+
19
+ std::function<void(Stack&)>& getPrimOpsFn(const std::string& name);
20
+
21
+ class prim_op_fn_register {
22
+ public:
23
+ prim_op_fn_register(
24
+ const std::string& name,
25
+ const std::function<void(Stack&)>& fn) {
26
+ registerPrimOpsFunction(name, fn);
27
+ }
28
+ };
29
+
30
+ } // namespace mobile
31
+ } // namespace jit
32
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/autograd/profiler_kineto.h>
3
+ #include <torch/csrc/jit/mobile/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ namespace mobile {
8
+
9
+ // If we dont have kineto available then edge profiler does not
10
+ // work since it relies on Kineto
11
+ #ifdef USE_KINETO
12
+ class TORCH_API KinetoEdgeCPUProfiler {
13
+ public:
14
+ // This profiler only profiles KINETO events
15
+ // No GPU_FALLBACK or NVTX
16
+ /*
17
+ * @param m is the instance of mobile Module which is being profiled.
18
+ * Note that this implies that KinetoEdgeCPUProfiler can be used
19
+ * to profile specific Module (see usage below), unliked ProfilerKineto
20
+ * which can profile pytorch runtime in arbitrary scope.
21
+ * @param fname is the name of the file to which chrome trace is written.
22
+ * @param report_input_shapes: whether to record shapes of op's inputs.
23
+ * @param with_stack: whether to record model's python stacktrace for the op.
24
+ * @param with_flops: whether to report flops corresponding to the op.
25
+ * @param with_modules: whether to report original python module
26
+ * hierarchy to which the op belongs.
27
+ * @param events
28
+ * @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from
29
+ * query pool to align with cpu event times
30
+ *
31
+ * Usage pattern for this profiler must be as follows:
32
+ *
33
+ * {
34
+ * KinetoEdgeCPUProfiler(m, filename, args);
35
+ * m.forward(...);
36
+ * }
37
+ *
38
+ * The reason being that KinetoEdgeCPUProfiler has a dependency on Module
39
+ * and thus it must not outlive it.
40
+ *
41
+ * Thus, when KinetoEdgeCPUProfiler is used as RAII to do profiling
42
+ * within certain scope. In that scope, the captured reference to
43
+ * Module will outlive KinetoEdgeCPUProfiler. This is gauranteed because
44
+ * KinetoEdgeCPUProfiler must be constructed later than Module, on stack.
45
+ *
46
+ * An example of the anti-pattern and wrong usage is:
47
+ *
48
+ * std::shared_ptr<KinetoMobileCPUProfiler> profiler(m, filename, args);
49
+ * m.forward(...);
50
+ *
51
+ * Since KinetoEdgeCPUProfiler object would then be constructed on heap
52
+ * with its lifetime managed manually or via smart pointers.
53
+ */
54
+ KinetoEdgeCPUProfiler(
55
+ const torch::jit::mobile::Module& m,
56
+ const std::string& fname,
57
+ const bool report_input_shapes = false,
58
+ const bool profile_memory = false,
59
+ const bool with_stack = false,
60
+ const bool with_flops = false,
61
+ const bool with_modules = false,
62
+ std::vector<std::string> events = {},
63
+ const bool adjust_vulkan_timestamps = false);
64
+
65
+ const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
66
+ disableProfiler();
67
+ const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
68
+ getProfilerResult();
69
+ void recordBackendEvent(
70
+ const int64_t start_time_us,
71
+ const int64_t end_time_us,
72
+ const int64_t debug_handle,
73
+ const std::string& event_name,
74
+ const std::string& backend_name);
75
+ void recordBackendMemoryEvent(
76
+ void* ptr,
77
+ int64_t alloc_size,
78
+ size_t total_allocated,
79
+ size_t total_reserved,
80
+ c10::Device device);
81
+
82
+ ~KinetoEdgeCPUProfiler();
83
+
84
+ private:
85
+ /*
86
+ * We store a reference to Module to make such dependency explicit, since
87
+ * a Module reference is already stored in a functor.
88
+ */
89
+ const mobile::Module& m_;
90
+ std::string trace_file_name_;
91
+ std::unique_ptr<torch::autograd::profiler::ProfilerResult> profiler_result_;
92
+ };
93
+
94
+ TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler();
95
+
96
+ #define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
97
+ start_time_us, end_time_us, debug_handle, event_name, backend_name) \
98
+ if (mobile::getCurrentEdgeProfiler()) { \
99
+ mobile::getCurrentEdgeProfiler()->recordBackendEvent( \
100
+ start_time_us, end_time_us, debug_handle, event_name, backend_name); \
101
+ }
102
+
103
+ #define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
104
+ ptr, alloc_size, total_allocated, total_reserved, device) \
105
+ if (mobile::getCurrentEdgeProfiler()) { \
106
+ mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent( \
107
+ ptr, alloc_size, total_allocated, total_reserved, device); \
108
+ }
109
+ #else
110
+
111
+ #define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
112
+ start_time_us, end_time_us, debug_handle, event_name, backend_name)
113
+
114
+ #define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
115
+ ptr, alloc_size, total_allocated, total_reserved, device)
116
+ #endif
117
+ } // namespace mobile
118
+ } // namespace jit
119
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/prim_ops_registery.h>
3
+ #include <torch/csrc/jit/mobile/register_ops_common_utils.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ void tupleIndex(Stack& stack);
9
+
10
+ void raiseException(Stack& stack);
11
+
12
+ void is(Stack& stack);
13
+
14
+ void unInitialized(Stack& stack);
15
+
16
+ void isNot(Stack& stack);
17
+
18
+ void aten_format(Stack& stack);
19
+
20
+ void size(Stack& stack);
21
+
22
+ void sym_size(Stack& stack);
23
+
24
+ void sym_size_int(Stack& stack);
25
+
26
+ void sym_stride_int(Stack& stack);
27
+
28
+ void sym_numel(Stack& stack);
29
+
30
+ void sym_storage_offset(Stack& stack);
31
+
32
+ void sym_stride(Stack& stack);
33
+
34
+ void device(Stack& stack);
35
+
36
+ void device_with_index(Stack& stack);
37
+
38
+ void dtype(Stack& stack);
39
+
40
+ void layout(Stack& stack);
41
+
42
+ void toPrimDType(Stack& stack);
43
+
44
+ void dim(Stack& stack);
45
+
46
+ void _not(Stack& stack);
47
+
48
+ void boolTensor(Stack& stack);
49
+
50
+ void toList(Stack& stack);
51
+
52
+ void numToTensorScalar(Stack& stack);
53
+
54
+ void isCuda(Stack& stack);
55
+
56
+ void numToTensorBool(Stack& stack);
57
+
58
+ void dictIndex(Stack& stack);
59
+
60
+ void raiseExceptionWithMessage(Stack& stack);
61
+
62
+ } // namespace jit
63
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <string>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace mobile {
9
+ class Module;
10
+ namespace quantization {
11
+ /*
12
+ * Device side PTQ API.
13
+ * Once the model has been prepared for quantization on server side, such model
14
+ * is sent to device. On device side the model is further trained. At the end of
15
+ * the training, before the model is readied for inference, we need to quantize
16
+ * the model.
17
+ * Usage of this API is as follows.
18
+ * PTQQuanizationHelper ptq_helper;
19
+ * ptq_helper.quantize_dynamic(m, "forward");
20
+ * Args:
21
+ * m: Captured by reference, an instance of mobile::Module. This module will be
22
+ * mutated in place to replace its <method_name> method with quantized
23
+ * equivalent. method:name: Name of the method to be quantized. AOT preparation
24
+ * for quantization must also have been done for this method. Returns: In place
25
+ * mutated `m` whose size should be smaller due to weight quantization and whose
26
+ * <method_name> method should use quantized ops
27
+ */
28
+ class TORCH_API PTQQuanizationHelper {
29
+ public:
30
+ PTQQuanizationHelper() = default;
31
+ void quantize_dynamic(
32
+ torch::jit::mobile::Module& m,
33
+ const std::string& method_name);
34
+ };
35
+ } // namespace quantization
36
+ } // namespace mobile
37
+ } // namespace jit
38
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Context.h>
4
+ #include <ATen/NativeFunctions.h>
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/jit/runtime/jit_exception.h>
8
+ #include <torch/csrc/jit/runtime/vararg_functions.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ inline void noop(Stack& n) {}
14
+
15
+ int64_t normalizeIndex(int64_t idx, int64_t list_size);
16
+
17
+ // reference function THPVariable_to in python_variable_methods.cpp
18
+ static C10_UNUSED at::Tensor to_dispatch(
19
+ at::Tensor self,
20
+ c10::optional<at::Device> device,
21
+ c10::optional<at::ScalarType> scalarType,
22
+ bool non_blocking,
23
+ bool copy) {
24
+ if (device && device->is_cuda()) {
25
+ at::globalContext().lazyInitCUDA();
26
+ }
27
+ if (!device && !scalarType && !copy) {
28
+ return self;
29
+ } else if (!device) {
30
+ return self.to(*scalarType, non_blocking, copy);
31
+ } else if (!scalarType) {
32
+ return self.to(*device, non_blocking, copy);
33
+ } else {
34
+ return self.to(*device, *scalarType, non_blocking, copy);
35
+ }
36
+ }
37
+
38
+ // Convert the tensor pointed to by \p data to a nested list. \p dim is the
39
+ // number of dimensions in the tensor and \p cur_dim is the dimension being
40
+ // processed by the current invocation. \p ty is the expected output IR type of
41
+ // the operation. \p is the scalar type of \p data. \p sizes and \p strides are
42
+ // the sizes and strides of the tensor operand and \p element_size is the size
43
+ // in bytes of one tensor element.
44
+ IValue tensorToListRecursive(
45
+ char* data,
46
+ int64_t cur_dim,
47
+ int64_t num_tensor_dims,
48
+ at::TypePtr ty,
49
+ at::ScalarType scalar_ty,
50
+ at::IntArrayRef sizes,
51
+ at::IntArrayRef strides,
52
+ size_t element_size);
53
+
54
+ } // namespace jit
55
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/dynamic_type.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <unordered_set>
6
+
7
+ namespace c10 {
8
+
9
+ class TORCH_API TypeParser {
10
+ public:
11
+ explicit TypeParser(std::string pythonStr);
12
+ explicit TypeParser(std::vector<std::string>& pythonStrs);
13
+
14
+ TypePtr parse();
15
+ std::vector<TypePtr> parseList();
16
+ static const std::unordered_set<std::string>& getNonSimpleType();
17
+ static const std::unordered_set<std::string>& getCustomType();
18
+ std::unordered_set<std::string> getContainedTypes();
19
+
20
+ private:
21
+ TypePtr parseNamedTuple(const std::string& qualified_name);
22
+ TypePtr parseCustomType();
23
+ TypePtr parseTorchbindClassType();
24
+ TypePtr parseNonSimple(const std::string& token);
25
+
26
+ void expect(const char* s);
27
+ void expectChar(char c);
28
+ template <typename T>
29
+ TypePtr parseSingleElementType();
30
+
31
+ void lex();
32
+
33
+ std::string next();
34
+ c10::string_view nextView();
35
+ void advance();
36
+ C10_NODISCARD c10::string_view cur() const;
37
+
38
+ std::string pythonStr_;
39
+ size_t start_;
40
+ c10::string_view next_token_;
41
+
42
+ // Used for parsing string list
43
+ std::vector<std::string> pythonStrs_;
44
+ std::unordered_map<std::string, c10::TypePtr> str_type_ptr_map_;
45
+
46
+ // Store all contained types when parsing a string
47
+ std::unordered_set<std::string> contained_types_;
48
+ };
49
+
50
+ TORCH_API TypePtr parseType(const std::string& pythonStr);
51
+
52
+ TORCH_API std::vector<TypePtr> parseType(std::vector<std::string>& pythonStr);
53
+
54
+ } // namespace c10
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // #include <ATen/core/ivalue.h>
4
+ #include <ATen/core/ivalue_inl.h>
5
+
6
+ #include <torch/csrc/jit/mobile/code.h>
7
+ #include <torch/csrc/jit/mobile/function.h>
8
+ #include <torch/csrc/jit/serialization/import_export_functions.h>
9
+ #include <memory>
10
+ #include <string>
11
+ #include <unordered_map>
12
+ #include <vector>
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+ struct Instruction;
17
+ struct Upgrader {
18
+ int min_version;
19
+ int max_version;
20
+ std::string upgrader_name;
21
+ int index;
22
+ };
23
+
24
+ // From operator_versions.yaml
25
+ TORCH_API const std::unordered_map<std::string, std::vector<Upgrader>>
26
+ getOperatorVersionMapForMobile();
27
+
28
+ struct OperatorString {
29
+ const std::string name;
30
+ const std::string overload_name;
31
+ const c10::optional<int> num_specified_args;
32
+ };
33
+
34
+ struct ByteCodeFunctionWithOperator {
35
+ mobile::Function& function;
36
+ std::vector<OperatorString> operators;
37
+ };
38
+
39
+ TORCH_API const std::vector<ByteCodeFunctionWithOperator>&
40
+ getUpgraderBytecodeList();
41
+
42
+ } // namespace jit
43
+ } // namespace torch
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/utils/pybind.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initJITBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <pybind11/pybind11.h>
3
+ #include <pybind11/stl.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ namespace py = pybind11;
8
+
9
+ namespace torch::jit {
10
+
11
+ inline c10::optional<Module> as_module(py::handle obj) {
12
+ static py::handle ScriptModule =
13
+ py::module::import("torch.jit").attr("ScriptModule");
14
+ if (py::isinstance(obj, ScriptModule)) {
15
+ return py::cast<Module>(obj.attr("_c"));
16
+ }
17
+ return c10::nullopt;
18
+ }
19
+
20
+ inline c10::optional<Object> as_object(py::handle obj) {
21
+ static py::handle ScriptObject =
22
+ py::module::import("torch").attr("ScriptObject");
23
+ if (py::isinstance(obj, ScriptObject)) {
24
+ return py::cast<Object>(obj);
25
+ }
26
+
27
+ static py::handle RecursiveScriptClass =
28
+ py::module::import("torch.jit").attr("RecursiveScriptClass");
29
+ if (py::isinstance(obj, RecursiveScriptClass)) {
30
+ return py::cast<Object>(obj.attr("_c"));
31
+ }
32
+ return c10::nullopt;
33
+ }
34
+
35
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/symbol.h>
7
+ #include <c10/util/irange.h>
8
+ #include <torch/csrc/DynamicTypes.h>
9
+ #include <torch/csrc/THP.h>
10
+ #include <torch/csrc/autograd/variable.h>
11
+ #include <torch/csrc/jit/frontend/tracer.h>
12
+ #include <torch/csrc/jit/python/pybind_utils.h>
13
+ #include <torch/csrc/utils/pybind.h>
14
+
15
+ #include <pybind11/functional.h>
16
+ #include <pybind11/pybind11.h>
17
+ #include <pybind11/stl.h>
18
+
19
+ namespace py = pybind11;
20
+
21
+ namespace torch::jit {
22
+
23
+ // This is a variant of shared_ptr that "sees through" a wrapper.
24
+ // We use it to convert Value, Node, Block and node to "wrapped" Python
25
+ // values. When we destruct the C++ object, the wrapper's pointer will
26
+ // be set to 0 and any future dereferencing will throw. We need this
27
+ // because the Python objects may hang around after the C++ object
28
+ // has already been destroyed.
29
+ // This also needs the magic type_caster below, which is from the
30
+ // workaround offered in https://github.com/pybind/pybind11/issues/2751
31
+ template <typename T>
32
+ class unwrapping_shared_ptr {
33
+ static_assert(
34
+ std::is_same<T, torch::jit::Value>::value ||
35
+ std::is_same<T, torch::jit::Node>::value ||
36
+ std::is_same<T, torch::jit::Block>::value,
37
+ "unwrapping type only defined for Graph object types");
38
+
39
+ private:
40
+ std::shared_ptr<torch::jit::Wrap<T>> impl;
41
+
42
+ public:
43
+ unwrapping_shared_ptr() : impl({}) {}
44
+ explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) {
45
+ impl->clear_cb = &clear_registered_instances;
46
+ }
47
+ T* get() const {
48
+ if (!impl->elem) {
49
+ throw std::logic_error("has been invalidated");
50
+ }
51
+ return impl->elem;
52
+ }
53
+ // we need to disable the overloaded & for PyBind11 < 2.3 due.
54
+ // see https://github.com/pybind/pybind11/pull/1435
55
+ #if (PYBIND11_VERSION_MAJOR > 2) || \
56
+ ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3))
57
+ T** operator&() {
58
+ if (!impl->elem) {
59
+ throw std::logic_error("has been invalidated");
60
+ }
61
+ return &(impl->elem);
62
+ }
63
+ #endif
64
+ };
65
+
66
+ } // namespace torch::jit
67
+
68
+ PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true);
69
+
70
+ namespace pybind11::detail {
71
+
72
+ #define CREATE_UNWRAPPING_CASTER(Class) \
73
+ template <> \
74
+ struct type_caster<Class> : public type_caster_base<Class> { \
75
+ public: \
76
+ using type = Class; \
77
+ using holder_type = torch::jit::unwrapping_shared_ptr<Class>; \
78
+ \
79
+ bool load(handle src, bool convert) { \
80
+ return load_impl<type_caster<Class>>(src, convert); \
81
+ } \
82
+ \
83
+ explicit operator type*() { \
84
+ return static_cast<type*>(value); \
85
+ } \
86
+ explicit operator type&() { \
87
+ return *static_cast<type*>(value); \
88
+ } \
89
+ \
90
+ protected: \
91
+ friend class type_caster_generic; \
92
+ \
93
+ bool load_value(value_and_holder&& v_h) { \
94
+ if (v_h.holder_constructed()) { \
95
+ value = v_h.template holder<holder_type>().get(); \
96
+ return true; \
97
+ } else { \
98
+ throw cast_error( \
99
+ "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \
100
+ } \
101
+ } \
102
+ }
103
+
104
+ CREATE_UNWRAPPING_CASTER(torch::jit::Node);
105
+ CREATE_UNWRAPPING_CASTER(torch::jit::Value);
106
+ CREATE_UNWRAPPING_CASTER(torch::jit::Block);
107
+
108
+ #undef CREATE_UNWRAPPING_CASTER
109
+
110
+ template <>
111
+ struct type_caster<torch::jit::IValue> {
112
+ public:
113
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
114
+ PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue"));
115
+
116
+ bool load(handle src, bool) {
117
+ try {
118
+ value = torch::jit::toTypeInferredIValue(src);
119
+ return true;
120
+ } catch (std::exception& e) {
121
+ return false;
122
+ }
123
+ }
124
+
125
+ static handle cast(
126
+ torch::jit::IValue src,
127
+ return_value_policy /* policy */,
128
+ handle /* parent */) {
129
+ return torch::jit::toPyObject(std::move(src)).release();
130
+ }
131
+ };
132
+
133
+ template <>
134
+ struct type_caster<torch::jit::Symbol> {
135
+ public:
136
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
137
+ PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol"));
138
+
139
+ bool load(handle src, bool) {
140
+ // TODO: Is there a way to py::cast that doesn't raise an exception on
141
+ // failure? Can we catch pybind11::cast_error here instead?
142
+ std::string src_str;
143
+ try {
144
+ src_str = py::cast<std::string>(src);
145
+ } catch (std::exception& e) {
146
+ return false;
147
+ }
148
+ value = torch::jit::Symbol::fromQualString(src_str);
149
+ return true;
150
+ }
151
+
152
+ static handle cast(
153
+ torch::jit::Symbol src,
154
+ return_value_policy /* policy */,
155
+ handle /* parent */) {
156
+ return py::cast(std::string(src.toQualString()), return_value_policy::copy)
157
+ .release();
158
+ }
159
+ };
160
+
161
+ template <>
162
+ struct type_caster<torch::jit::AttributeKind> {
163
+ public:
164
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
165
+ PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind"));
166
+
167
+ bool load(handle src, bool) {
168
+ return false;
169
+ }
170
+
171
+ static handle cast(
172
+ torch::jit::AttributeKind src,
173
+ return_value_policy /* policy */,
174
+ handle /* parent */) {
175
+ return py::cast(
176
+ std::string(torch::jit::toString(src)),
177
+ return_value_policy::copy)
178
+ .release();
179
+ }
180
+ };
181
+
182
+ // See https://github.com/pybind/pybind11/issues/637
183
+ using ListCasterBase = pybind11::detail::
184
+ list_caster<std::vector<torch::jit::Node*>, torch::jit::Node*>;
185
+ template <>
186
+ struct type_caster<std::vector<torch::jit::Node*>> : ListCasterBase {
187
+ static handle cast(
188
+ const std::vector<torch::jit::Node*>& src,
189
+ return_value_policy,
190
+ handle parent) {
191
+ return ListCasterBase::cast(src, return_value_policy::reference, parent);
192
+ }
193
+ static handle cast(
194
+ const std::vector<torch::jit::Node*>* src,
195
+ return_value_policy pol,
196
+ handle parent) {
197
+ return cast(*src, pol, parent);
198
+ }
199
+ };
200
+
201
+ } // namespace pybind11::detail
202
+
203
+ namespace torch::jit {
204
+
205
+ static inline py::tuple tuple_tail(const py::tuple& tup) {
206
+ py::tuple r(tup.size() - 1);
207
+ for (const auto i : c10::irange(1, tup.size())) {
208
+ r[i - 1] = tup[i];
209
+ }
210
+ return r;
211
+ }
212
+
213
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/hash.h>
4
+ #include <c10/util/irange.h>
5
+ #include <torch/csrc/autograd/variable.h>
6
+ #include <torch/csrc/jit/python/pybind.h>
7
+
8
+ #include <ATen/ATen.h>
9
+ #include <functional>
10
+ #include <tuple>
11
+ #include <vector>
12
+
13
+ namespace torch::jit::python {
14
+
15
+ struct IODescriptor {
16
+ struct VariableMetadata {
17
+ VariableMetadata(const autograd::Variable& var)
18
+ : sizes(var.sizes().vec()),
19
+ type(var.scalar_type()),
20
+ device(var.device()),
21
+ requires_grad(var.requires_grad()) {}
22
+
23
+ bool operator==(const VariableMetadata& o) const {
24
+ return std::tie(device, requires_grad, type, sizes) ==
25
+ std::tie(o.device, o.requires_grad, o.type, o.sizes);
26
+ }
27
+
28
+ static size_t hash(const VariableMetadata& m) {
29
+ return c10::get_hash(m.sizes, m.device, m.requires_grad, m.type);
30
+ }
31
+
32
+ std::vector<int64_t> sizes;
33
+ at::ScalarType type;
34
+ at::Device device;
35
+ bool requires_grad;
36
+ };
37
+
38
+ bool operator==(const IODescriptor& o) const {
39
+ return std::tie(structure, metadata, grad_enabled) ==
40
+ std::tie(o.structure, o.metadata, o.grad_enabled);
41
+ }
42
+
43
+ static size_t hash(const IODescriptor& o) {
44
+ return c10::get_hash(o.structure, o.metadata, o.grad_enabled);
45
+ }
46
+
47
+ void extend(const autograd::variable_list& list) {
48
+ metadata.reserve(metadata.size() + list.size());
49
+ for (auto& var : list)
50
+ metadata.emplace_back(var);
51
+ }
52
+
53
+ // Description of argument structure. Variables are replaced with
54
+ // different characters, depending on their flags, beginnings and
55
+ // ends of tuples and lists are denoted by a pair of parenthesis
56
+ // of their corresponding kind. They should always be paired.
57
+ // Example desc: (vv[v(v)v])
58
+ // NOTE: if extend() was ever called then metadata.size() can be
59
+ // different than the number of 'v's in structure.
60
+ std::string structure;
61
+ std::vector<std::string> strings;
62
+ std::vector<VariableMetadata> metadata;
63
+ bool grad_enabled = false;
64
+ };
65
+
66
+ static inline std::ostream& operator<<(
67
+ std::ostream& out,
68
+ const IODescriptor::VariableMetadata& meta) {
69
+ at::Device meta_device = meta.device;
70
+ auto& t = at::getDeprecatedTypeProperties(
71
+ meta_device.is_cpu() ? at::Backend::CPU : at::Backend::CUDA, meta.type);
72
+ out << t << "(requires_grad=" << meta.requires_grad;
73
+ if (meta_device.is_cuda()) {
74
+ out << ", device=" << meta_device.index();
75
+ }
76
+ out << ") {";
77
+ for (const auto i : c10::irange(meta.sizes.size())) {
78
+ if (i > 0)
79
+ out << ", ";
80
+ out << meta.sizes[i];
81
+ }
82
+ out << "}";
83
+ return out;
84
+ }
85
+
86
+ static inline std::ostream& operator<<(
87
+ std::ostream& out,
88
+ const IODescriptor& desc) {
89
+ out << desc.structure << "\n";
90
+ out << " with grad_enabled=" << desc.grad_enabled << "\n";
91
+ for (const auto i : c10::irange(desc.metadata.size())) {
92
+ out << " with v" << i << " having type " << desc.metadata[i] << "\n";
93
+ }
94
+ return out;
95
+ }
96
+
97
+ struct ParsedArgs {
98
+ // Flat vector of Variables found in arguments
99
+ autograd::variable_list vars;
100
+ // Metadata describing nesting of objects received from Python and
101
+ // metadata of vars and whether grad is enabled.
102
+ IODescriptor desc;
103
+
104
+ void extend(const autograd::variable_list& list) {
105
+ if (list.empty())
106
+ return;
107
+ vars.reserve(vars.size() + list.size());
108
+ for (auto& var : list)
109
+ vars.emplace_back(var);
110
+ desc.extend(list);
111
+ }
112
+ };
113
+
114
+ ParsedArgs flatten(py::handle obj);
115
+ PyObject* unflatten(
116
+ at::ArrayRef<autograd::Variable> vars,
117
+ const IODescriptor& structure);
118
+
119
+ } // namespace torch::jit::python
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/python/pybind_utils.h>
4
+ #include <torch/csrc/utils/pybind.h>
5
+ #include <torch/custom_class.h>
6
+
7
+ namespace torch::jit {
8
+
9
+ void initPythonCustomClassBindings(PyObject* module);
10
+
11
+ struct ScriptClass {
12
+ ScriptClass(c10::StrongTypePtr class_type)
13
+ : class_type_(std::move(class_type)) {}
14
+
15
+ py::object __call__(py::args args, py::kwargs kwargs);
16
+
17
+ c10::StrongTypePtr class_type_;
18
+ };
19
+
20
+ } // namespace torch::jit
falcon/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <pybind11/pybind11.h>
4
+ #include <torch/csrc/jit/python/pybind_utils.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ namespace py = pybind11;
9
+
10
+ namespace c10::ivalue {
11
+
12
+ // concrete ivalue Holder that hold a py::object
13
+ struct C10_EXPORT ConcretePyObjectHolder final : PyObjectHolder {
14
+ public:
15
+ static c10::intrusive_ptr<PyObjectHolder> create(py::object py_obj) {
16
+ return c10::make_intrusive<ConcretePyObjectHolder>(std::move(py_obj));
17
+ }
18
+
19
+ static c10::intrusive_ptr<PyObjectHolder> create(const py::handle& handle) {
20
+ py::gil_scoped_acquire ag;
21
+ return c10::make_intrusive<ConcretePyObjectHolder>(
22
+ handle.cast<py::object>());
23
+ }
24
+
25
+ PyObject* getPyObject() override {
26
+ return py_obj_.ptr();
27
+ }
28
+
29
+ InferredType tryToInferType() override {
30
+ pybind11::gil_scoped_acquire ag;
31
+ return torch::jit::tryToInferType(py_obj_);
32
+ }
33
+
34
+ IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt)
35
+ override {
36
+ pybind11::gil_scoped_acquire ag;
37
+ return torch::jit::toIValue(py_obj_, type, N);
38
+ }
39
+
40
+ std::string toStr() override {
41
+ pybind11::gil_scoped_acquire ag;
42
+ return py::str(py_obj_);
43
+ }
44
+
45
+ std::vector<at::Tensor> extractTensors() override {
46
+ // We could implement this entirely in C++ via pybind11 but it turns out to
47
+ // be substantially slower. Namely, the total time taken by markCompleted on
48
+ // a CUDAFuture is 21.5us with this implementation, but goes up to 58.7us
49
+ // when using C++. The reason is unclear.
50
+ try {
51
+ pybind11::gil_scoped_acquire ag;
52
+ static py::object& extractorFn = *new py::object(
53
+ py::module::import("torch._jit_internal").attr("_extract_tensors"));
54
+ return extractorFn(py_obj_).cast<std::vector<at::Tensor>>();
55
+ } catch (py::error_already_set& e) {
56
+ auto err = std::runtime_error(
57
+ c10::str("Cannot extract tensors from value: ", e.what()));
58
+ {
59
+ pybind11::gil_scoped_acquire ag;
60
+ e.restore();
61
+ PyErr_Clear();
62
+ }
63
+ throw err;
64
+ }
65
+ }
66
+
67
+ // Note [Destructing py::object]
68
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~
69
+ //
70
+ // (1) Why py_obj_ = py::none(); does not work. Because we also need to
71
+ // acquire GIL when destructing py::object of None that de-references None.
72
+ // https://docs.python.org/3/c-api/none.html#c.Py_RETURN_NONE
73
+ //
74
+ // https://stackoverflow.com/questions/15287590/why-should-py-increfpy-none-be-required-before-returning-py-none-in-c
75
+ //
76
+ // (2) Why we need to call dec_ref() explicitly. Because py::object of
77
+ // nullptr, on destruction, effectively does nothing because of it calls
78
+ // Py_XDECREF(NULL) underlying.
79
+ // https://docs.python.org/3/c-api/refcounting.html#c.Py_XDECREF
80
+ ~ConcretePyObjectHolder() override {
81
+ pybind11::gil_scoped_acquire ag;
82
+ py_obj_.dec_ref();
83
+ // explicitly setting PyObject* to nullptr to prevent py::object's dtor to
84
+ // decref on the PyObject again.
85
+ py_obj_.ptr() = nullptr;
86
+ }
87
+
88
+ // explicit construction to avoid errornous implicit conversion and
89
+ // copy-initialization
90
+ explicit ConcretePyObjectHolder(py::object py_obj)
91
+ : py_obj_(std::move(py_obj)) {}
92
+
93
+ private:
94
+ py::object py_obj_;
95
+ };
96
+
97
+ } // namespace c10::ivalue