ZTWHHH commited on
Commit
cd31685
·
verified ·
1 Parent(s): a36e569

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h +351 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h +194 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h +81 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h +685 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h +200 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h +136 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h +53 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h +112 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h +38 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h +23 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h +30 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h +45 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h +197 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h +25 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h +32 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h +63 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h +38 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h +54 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h +9 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h +213 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h +1115 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h +50 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h +45 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h +9 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h +6 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h +6 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h +94 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h +69 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h +33 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h +142 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h +113 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h +147 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h +158 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h +38 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h +346 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h +77 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h +205 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h +12 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h +69 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h +12 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h +41 -0
  43. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/__init__.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_codeblock.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_fusion_interface.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_fusion_op.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_fusion_optimization.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_gufuncs.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_ufuncs.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/README.md +5 -0
.gitattributes CHANGED
@@ -1705,3 +1705,5 @@ parrot/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython
1705
  parrot/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1706
  vllm/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1707
  valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
 
 
 
1705
  parrot/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1706
  vllm/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1707
  valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
1708
+ vllm/lib/python3.10/site-packages/cupy/_util.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1709
+ vllm/lib/python3.10/site-packages/cupy/cuda/common.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/function.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <torch/csrc/jit/api/function_impl.h>
5
+ #include <torch/csrc/jit/frontend/name_mangler.h>
6
+ #include <torch/csrc/jit/frontend/source_range.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+ #include <torch/csrc/jit/runtime/graph_executor.h>
9
+
10
+ #include <torch/csrc/Export.h>
11
+
12
+ #include <ATen/core/function_schema.h>
13
+ #include <ATen/core/qualified_name.h>
14
+ #include <c10/util/ArrayRef.h>
15
+ #include <c10/util/Optional.h>
16
+
17
+ #include <functional>
18
+ #include <memory>
19
+ #include <mutex>
20
+ #include <ostream>
21
+ #include <string>
22
+ #include <unordered_map>
23
+ #include <vector>
24
+
25
+ namespace torch::jit {
26
+
27
+ struct Def;
28
+ struct Property;
29
+ struct ClassDef;
30
+ struct SugaredValue;
31
+ struct Resolver;
32
+
33
+ using ResolverPtr = std::shared_ptr<Resolver>;
34
+ struct Self {
35
+ virtual ~Self() = default;
36
+ virtual std::shared_ptr<SugaredValue> makeSugared(Value* v) const = 0;
37
+ virtual ClassTypePtr getClassType() const = 0;
38
+ };
39
+
40
+ // A CompilationUnit is a list of named Functions
41
+ // with helper methods to iterate the list or invoke the function.
42
+ // Classes have a CompilationUnit holding the class methods,
43
+ // and Modules have a CompilationUnit holding the Functions that
44
+ // are used to implement their Methods
45
+
46
+ struct TORCH_API CompilationUnit {
47
+ enum class FunctionType { Method, Hook, PreHook };
48
+ // constructor that takes a set of functions to compile using the native
49
+ // resolver
50
+ explicit CompilationUnit(const std::string& source);
51
+ CompilationUnit() = default;
52
+
53
+ CompilationUnit& operator=(CompilationUnit&&) = default;
54
+ CompilationUnit(CompilationUnit&&) = default;
55
+ CompilationUnit& operator=(const CompilationUnit&) = delete;
56
+ CompilationUnit(const CompilationUnit&) = delete;
57
+
58
+ Function* find_function(const c10::QualifiedName& name) const {
59
+ auto it = dict_.find(name);
60
+ if (it == dict_.end()) {
61
+ return nullptr;
62
+ }
63
+ return functions_[it->second].get();
64
+ }
65
+
66
+ Function& get_function(const c10::QualifiedName& name) const {
67
+ if (auto r = find_function(name)) {
68
+ return *r;
69
+ }
70
+ TORCH_CHECK(false, "attempted to get undefined function ", name.name());
71
+ }
72
+
73
+ void set_optimized(bool o) {
74
+ TORCH_WARN(
75
+ "CompilationUnit::set_optimized() is deprecated and has no effect. "
76
+ "Please use setGraphExecutorOptimize()");
77
+ }
78
+
79
+ bool is_optimized() const {
80
+ TORCH_WARN(
81
+ "CompilationUnit::is_optimized() is deprecated and always returns true. "
82
+ "Please use getGraphExecutorOptimize()");
83
+ return true;
84
+ }
85
+
86
+ // for historic reasons, these are defined in ir_emitter.cpp
87
+ // Returns the list of Functions just defined.
88
+ std::vector<Function*> define(
89
+ const c10::optional<c10::QualifiedName>& prefix,
90
+ const std::vector<Property>& properties,
91
+ const std::vector<ResolverPtr>& propResolvers,
92
+ const std::vector<Def>& definitions,
93
+ const std::vector<ResolverPtr>&
94
+ defResolvers, /* determines how we handle free
95
+ variables in each definition*/
96
+ // if non-null, the first argument to each def, is bound to this value
97
+ const Self* self,
98
+ // see [name mangling]
99
+ bool shouldMangle = false,
100
+ c10::optional<size_t> operator_set_version = c10::nullopt);
101
+
102
+ void define_hooks(
103
+ const c10::optional<c10::QualifiedName>& prefix,
104
+ const std::vector<Def>& hookDefs,
105
+ const std::vector<ResolverPtr>& hookResolvers,
106
+ const std::vector<Def>& preHookDefs,
107
+ const std::vector<ResolverPtr>& preHookResolvers,
108
+ const Self* self,
109
+ bool shouldMangle = false);
110
+
111
+ // same as above but parse the definitions from source
112
+ // Returns the list of Functions just defined.
113
+ std::vector<Function*> define(
114
+ // prefix namespace to put all the defined functions into
115
+ const c10::optional<c10::QualifiedName>& prefix,
116
+ const std::string& source,
117
+ const ResolverPtr& resolver,
118
+ const Self* self);
119
+
120
+ void define_interface(
121
+ const c10::QualifiedName& qualifiedName,
122
+ const ClassDef& classDef,
123
+ ResolverPtr rcb,
124
+ bool is_module = false);
125
+
126
+ Function* create_function(
127
+ c10::QualifiedName name,
128
+ std::shared_ptr<Graph> graph,
129
+ bool shouldMangle = false) {
130
+ if (shouldMangle) {
131
+ name = mangle(name);
132
+ }
133
+ auto fn = std::make_unique<GraphFunction>(
134
+ std::move(name), std::move(graph), nullptr);
135
+ auto ret = fn.get();
136
+ register_function(std::move(fn));
137
+ return ret;
138
+ }
139
+
140
+ std::vector<Function*> get_functions() const {
141
+ return fmap(functions_, [](const std::unique_ptr<Function>& fn) {
142
+ return fn.get();
143
+ });
144
+ }
145
+
146
+ /// Run a method from this compilation.
147
+ ///
148
+ /// For example:
149
+ /// @code
150
+ /// IValue output = module->run("relu_script", a, b);
151
+ /// @endcode
152
+ ///
153
+ /// To get a compile a module from a source string, see torch::jit::compile
154
+ ///
155
+ /// @param method_name The name of the method to run
156
+ /// @param args Arguments to be passed to the method
157
+ /// @return An IValue containing the return value (or values if it is a tuple)
158
+ /// from the method
159
+ template <typename... Types>
160
+ IValue run_method(const c10::QualifiedName& method_name, Types&&... args) {
161
+ return get_function(method_name)({IValue(std::forward<Types>(args))...});
162
+ }
163
+
164
+ void drop_all_functions() {
165
+ dict_.clear();
166
+ functions_.clear();
167
+ }
168
+
169
+ /**
170
+ * Register a class as being owned by this compilation unit.
171
+ */
172
+ void register_type(c10::NamedTypePtr namedType) {
173
+ // TODO: class types cannot be redefined because we have no way right now
174
+ // of invalidating their methods. NamedTuples are fine though, since they
175
+ // don't have methods.
176
+ TORCH_CHECK(
177
+ 0 == classDict_.count(*namedType->name()),
178
+ "class '",
179
+ namedType->name()->qualifiedName(),
180
+ "' already defined.");
181
+ classes_.push_back(std::move(namedType));
182
+ classDict_[*classes_.back()->name()] = classes_.size() - 1;
183
+ };
184
+
185
+ c10::ClassTypePtr get_class(const c10::QualifiedName& name) const {
186
+ auto type = get_type(name);
187
+ if (!type) {
188
+ return nullptr;
189
+ }
190
+ return type->cast<c10::ClassType>();
191
+ }
192
+
193
+ c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const {
194
+ auto type = get_type(name);
195
+ if (!type) {
196
+ return nullptr;
197
+ }
198
+ return type->cast<c10::InterfaceType>();
199
+ }
200
+
201
+ c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const {
202
+ for (const auto& cls : classes_) {
203
+ if (cls->name()->qualifiedName() == name.qualifiedName()) {
204
+ return cls->expect<TupleType>();
205
+ }
206
+ }
207
+ return nullptr;
208
+ }
209
+
210
+ c10::NamedTypePtr get_type(const c10::QualifiedName& name) const {
211
+ auto it = classDict_.find(name);
212
+ if (it == classDict_.end()) {
213
+ return nullptr;
214
+ }
215
+ return classes_[it->second];
216
+ }
217
+
218
+ // For testing: clear all Python-defined classes to ensure that unit tests
219
+ // have isolation.
220
+ void _clear_python_cu() {
221
+ // Delete all the associated class methods
222
+ for (const auto& type : classes_) {
223
+ if (auto cls = type->cast<ClassType>()) {
224
+ for (auto method : cls->methods()) {
225
+ // Tombstone the method in the compilation unit.
226
+ // Don't erase because the dict_
227
+ auto it = dict_.find(method->qualname());
228
+ if (it != dict_.end()) {
229
+ functions_[it->second] = nullptr;
230
+ // Erase in our big lookup table
231
+ dict_.erase(it);
232
+ }
233
+ }
234
+ // Classes can have multiple pointers to the same hook,
235
+ // need to make sure to not delete it twice
236
+ std::unordered_set<Function*> hooks_to_delete;
237
+ for (const auto& hook : cls->getForwardHooks()) {
238
+ hooks_to_delete.insert(hook);
239
+ }
240
+ for (const auto& pre_hook : cls->getForwardPreHooks()) {
241
+ hooks_to_delete.insert(pre_hook);
242
+ }
243
+ for (const auto& hook : hooks_to_delete) {
244
+ // Tombstone the hook in the compilation unit.
245
+ auto it = dict_.find(hook->qualname());
246
+ if (it != dict_.end()) {
247
+ functions_[it->second] = nullptr;
248
+ // Erase in our big lookup table
249
+ dict_.erase(it);
250
+ }
251
+ }
252
+ }
253
+ }
254
+ classes_.clear();
255
+ classDict_.clear();
256
+ }
257
+
258
+ // [Internal Only] Remove method.
259
+ // Note Used for freezing.
260
+ void unsafeRemoveMethod(const c10::QualifiedName& method_name) {
261
+ auto it = dict_.find(method_name);
262
+ TORCH_CHECK(
263
+ it != dict_.end(),
264
+ "method '",
265
+ method_name.qualifiedName(),
266
+ "' does not exist.");
267
+ functions_[it->second] = nullptr;
268
+ dict_.erase(it);
269
+ }
270
+
271
+ // [name mangling] All code objects must have a unique qualified name in a
272
+ // CompilationUnit. In Python, sometimes functions won't have unique qualified
273
+ // name (for example, nested functions). So we mangle Python functions to
274
+ // ensure that they are uniquely named.
275
+ //
276
+ // We also use mangling to distinguish different Module instances. Since each
277
+ // Module is a singleton class instance, different instances of the same
278
+ // Python Module will have different types but the same qualified name.
279
+ c10::QualifiedName mangle(const c10::QualifiedName& name) const {
280
+ auto mangled = name;
281
+ while (get_type(mangled) || find_function(mangled)) {
282
+ mangled = mangler_.mangle(mangled);
283
+ }
284
+ return mangled;
285
+ }
286
+
287
+ private:
288
+ std::unique_ptr<Function> define(
289
+ const c10::optional<c10::QualifiedName>& prefix,
290
+ const Def& def,
291
+ const ResolverPtr& resolver,
292
+ const Self* self,
293
+ const std::unordered_map<std::string, Function*>& function_table,
294
+ bool shouldMangle = false,
295
+ FunctionType type = FunctionType::Method,
296
+ c10::optional<size_t> version = c10::nullopt) const;
297
+
298
+ // Define a property on \p self.
299
+ struct PropertyPair;
300
+ PropertyPair define_property(
301
+ const c10::optional<c10::QualifiedName>& prefix,
302
+ const Property& prop,
303
+ const ResolverPtr& resolver,
304
+ const Self* self,
305
+ const std::unordered_map<std::string, Function*>& function_table,
306
+ bool shouldMangle = false) const;
307
+
308
+ Function& register_function(std::unique_ptr<Function> fn) {
309
+ TORCH_CHECK(
310
+ 0 == dict_.count(fn->qualname().qualifiedName()),
311
+ "method '",
312
+ fn->qualname().qualifiedName(),
313
+ "' already defined.");
314
+ functions_.emplace_back(std::move(fn));
315
+ dict_[functions_.back()->qualname()] = functions_.size() - 1;
316
+ return *functions_.back();
317
+ }
318
+ std::vector<std::unique_ptr<Function>> functions_;
319
+ // for fast lookup
320
+ std::unordered_map<c10::QualifiedName, size_t> dict_;
321
+ std::unordered_map<c10::QualifiedName, size_t> classDict_;
322
+
323
+ // [class ownership] Right now there are two relationships between classes
324
+ // and compilation units:
325
+ // 1. Classes have compilation units internally that hold their methods.
326
+ // 2. On load, the TypePtrs of any imported classes are owned by the main
327
+ // module's compilation unit.
328
+ std::vector<c10::NamedTypePtr> classes_;
329
+
330
+ mutable NameMangler mangler_;
331
+ };
332
+
333
+ // An owning pointer to a Function. Just a pair of a raw Function ptr and it's
334
+ // owning CU. We need this because pybind requires a ref-counted way to refer to
335
+ // Functions.
336
+ struct StrongFunctionPtr {
337
+ StrongFunctionPtr(std::shared_ptr<CompilationUnit> cu, Function* function)
338
+ : cu_(std::move(cu)), function_(function) {
339
+ TORCH_INTERNAL_ASSERT(cu_);
340
+ TORCH_INTERNAL_ASSERT(function_);
341
+ }
342
+ std::shared_ptr<CompilationUnit> cu_;
343
+ Function* function_;
344
+ };
345
+
346
+ namespace script {
347
+ // We once had a `script::` namespace that was deleted. This is for backcompat
348
+ // of the public API; new code should not use this type alias.
349
+ using CompilationUnit = ::torch::jit::CompilationUnit;
350
+ } // namespace script
351
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/runtime/graph_executor.h>
6
+
7
+ namespace torch::jit {
8
+
9
+ struct TORCH_API GraphFunction : public Function {
10
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
11
+ GraphFunction(
12
+ c10::QualifiedName name,
13
+ std::shared_ptr<Graph> graph,
14
+ std::function<void(GraphFunction&)> function_creator,
15
+ c10::optional<ExecutorExecutionMode> executor_execution_mode =
16
+ c10::nullopt)
17
+ : name_(std::move(name)),
18
+ graph_(std::move(graph)),
19
+ executor_execution_mode_(executor_execution_mode),
20
+ function_creator_(std::move(function_creator)) {}
21
+
22
+ bool isGraphFunction() const override {
23
+ return true;
24
+ }
25
+
26
+ void run(Stack& stack) override;
27
+
28
+ std::function<void(GraphFunction&)> function_creator() const {
29
+ return function_creator_;
30
+ }
31
+
32
+ c10::intrusive_ptr<c10::ivalue::Future> runAsync(
33
+ Stack& stack,
34
+ TaskLauncher taskLauncher = at::launch) override;
35
+
36
+ std::shared_ptr<Graph> graph() const {
37
+ return graph_;
38
+ }
39
+
40
+ std::shared_ptr<Graph> optimized_graph() const {
41
+ std::lock_guard<std::recursive_mutex> lock(compile_mutex);
42
+ auto& optimized_graph = optimized_graphs_[currentSpecialization()];
43
+ if (optimized_graph) {
44
+ return *optimized_graph;
45
+ }
46
+ optimized_graph = graph_->copy();
47
+ if (getGraphExecutorOptimize()) {
48
+ preoptimizeGraph(*optimized_graph, force_no_amp_);
49
+ }
50
+ return *optimized_graph;
51
+ }
52
+
53
+ const c10::QualifiedName& qualname() const override {
54
+ return name_;
55
+ }
56
+
57
+ // private/unstable api. sets the initial execution mode
58
+ // will not affect executor if there is an existing executor
59
+ // created for this function
60
+ void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) {
61
+ executor_execution_mode_ = mode;
62
+ }
63
+ // private/unstable api. sets flag of whether or not to ignore amp.
64
+ // will not affect executor if there is an existing executor
65
+ // created for this function
66
+ void _set_ignore_amp(bool ignore_amp) {
67
+ force_no_amp_ = ignore_amp;
68
+ }
69
+
70
+ // if this isn't yet defined, run its method_creator function
71
+ void ensure_defined() override;
72
+
73
+ size_t num_inputs() const override {
74
+ return graph()->inputs().size();
75
+ }
76
+
77
+ Function& setSchema(FunctionSchema schema) override {
78
+ schema_ = std::make_unique<FunctionSchema>(std::move(schema));
79
+ return *this;
80
+ }
81
+
82
+ const FunctionSchema& getSchema() const override;
83
+
84
+ GraphExecutorState getDebugState() {
85
+ return get_executor().getDebugState();
86
+ }
87
+
88
+ bool is_optimized() const {
89
+ TORCH_WARN(
90
+ "GraphFunction::is_optimized() is deprecated and always returns true. "
91
+ "Please use getGraphExecutorOptimize()");
92
+ return true;
93
+ }
94
+
95
+ void check_single_output() {
96
+ TORCH_CHECK(
97
+ graph()->outputs().size() == 1,
98
+ "Method (but not graphs in general) require a single output. Use None/Tuple for 0 or 2+ outputs");
99
+ }
100
+
101
+ GraphExecutor& get_executor() {
102
+ ensure_defined();
103
+ std::lock_guard<std::recursive_mutex> lock(compile_mutex);
104
+ auto& executor = executors_[currentSpecialization()];
105
+ if (executor) {
106
+ return *executor;
107
+ }
108
+ check_single_output();
109
+ const std::string& name = name_.name();
110
+ std::shared_ptr<Graph> opt_graph = optimized_graph();
111
+ if (!executor_execution_mode_) {
112
+ executor = GraphExecutor(opt_graph, name);
113
+ } else {
114
+ executor = GraphExecutor(opt_graph, name, *executor_execution_mode_);
115
+ }
116
+ return *executor;
117
+ }
118
+
119
+ using Function::call;
120
+ bool call(
121
+ Stack& stack,
122
+ c10::optional<size_t> bailOut,
123
+ c10::function_ref<void(const Code&)> f) override {
124
+ f(get_executor().getPlanFor(stack, bailOut).code);
125
+ return true;
126
+ }
127
+
128
+ void clear_optimized_graphs() {
129
+ optimized_graphs_.fill(c10::nullopt);
130
+ }
131
+
132
+ private:
133
+ enum SpecializationKey {
134
+ AutocastOff,
135
+ CpuAutocastOn,
136
+ GpuAutocastOn,
137
+ CpuGpuAutocastOn,
138
+
139
+ // This provides the number of specializations
140
+ // (Must be last entry)
141
+ TotalCount
142
+ };
143
+
144
+ SpecializationKey currentSpecialization() const;
145
+
146
+ private:
147
+ c10::QualifiedName name_;
148
+ // The original, non-optimized graph
149
+ std::shared_ptr<Graph> graph_; // for debugging and for inlining
150
+
151
+ // allows users to specify Simple/Profiling Executor for function
152
+ // TODO: add more executors
153
+ mutable c10::optional<ExecutorExecutionMode> executor_execution_mode_;
154
+
155
+ // if invoked on a graph that has already traced through amp
156
+ // don't invoke amp pass
157
+ mutable bool force_no_amp_ = false;
158
+ // Optimized graph, computed lazily. Used for inlining.
159
+ mutable std::array<
160
+ c10::optional<std::shared_ptr<Graph>>,
161
+ SpecializationKey::TotalCount>
162
+ optimized_graphs_;
163
+
164
+ // GraphFunctions are invokable from multiple threads, so this lock needs to
165
+ // be held when we're initializing graph executor for the first time or
166
+ // computing the optimized graph. We're using reentrant mutex so that we don't
167
+ // need to worry about causing a deadlock by calling one method from another
168
+ // (e.g. optimized_graph() from get_executor()).
169
+ mutable std::recursive_mutex compile_mutex;
170
+
171
+ // executor_[0] - autocast off
172
+ // executor_[1] - autocast cpu on
173
+ // executor_[2] - autocast gpu on
174
+ // executor_[3] - autocast cpu & gpu on
175
+ std::array<c10::optional<GraphExecutor>, SpecializationKey::TotalCount>
176
+ executors_;
177
+
178
+ // an optional function that actually creates the method when
179
+ // ensure_defined() is called. This is used by the compiler so
180
+ // that it can construct methods out of order
181
+ std::function<void(GraphFunction&)> function_creator_;
182
+
183
+ // if absent, then we generate a default schema based on the graph
184
+ // mutable because getSchema caches the default schema if one is requested
185
+ // before a call to setSchema
186
+ mutable std::unique_ptr<FunctionSchema> schema_;
187
+ };
188
+
189
+ // Short hands for dynamic_cast<GraphFunction*>.
190
+ TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
191
+ TORCH_API GraphFunction& toGraphFunction(Function&);
192
+ TORCH_API const GraphFunction& toGraphFunction(const Function&);
193
+
194
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <torch/csrc/api/include/torch/imethod.h>
7
+ #include <torch/csrc/jit/api/function_impl.h>
8
+
9
+ namespace torch::jit {
10
+
11
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
12
+
13
+ // A method in a module, e.g. f in:
14
+ //
15
+ // class M(ScriptModule):
16
+ // @script_method
17
+ // def f(self, x):
18
+ // ...
19
+ // Note: because Method/Module are exposed to python these
20
+ // classes use python method naming conventions
21
+ struct TORCH_API Method : public torch::IMethod {
22
+ Method(ObjectPtr owner, Function* function);
23
+
24
+ // the module that contains this method.
25
+ Module owner() const;
26
+ void run(Stack& stack);
27
+ void run(Stack&& stack) {
28
+ run(stack);
29
+ }
30
+
31
+ c10::IValue operator()(
32
+ std::vector<c10::IValue> stack,
33
+ const Kwargs& kwargs = Kwargs()) const override;
34
+
35
+ // Run method async. Invocation on this function would invokes a JIT
36
+ // interpreter that executes ops inline, one by one, on caller's thread. A
37
+ // model can utilize async op, i.e. `fork`, to launch an asynchronous task
38
+ // which will be launched on provided `taskLauncher`.
39
+ c10::intrusive_ptr<c10::ivalue::Future> run_async(
40
+ std::vector<c10::IValue> stack,
41
+ const Kwargs& kwargs = Kwargs(),
42
+ TaskLauncher taskLauncher = at::launch);
43
+
44
+ std::shared_ptr<Graph> graph() const {
45
+ return toGraphFunction(*function_).graph();
46
+ }
47
+
48
+ const std::string& name() const override {
49
+ return function_->name();
50
+ }
51
+
52
+ size_t num_inputs() const {
53
+ return function_->num_inputs();
54
+ }
55
+
56
+ GraphExecutor& get_executor() {
57
+ return toGraphFunction(*function_).get_executor();
58
+ }
59
+
60
+ Function& function() const {
61
+ return *function_;
62
+ }
63
+
64
+ private:
65
+ void setArgumentNames(std::vector<std::string>&) const override;
66
+
67
+ // Methods are uniqued onwed by a single module. This raw pointer allows
68
+ // looking up the module.
69
+ ObjectPtr owner_;
70
+
71
+ // Underlying unbound function
72
+ Function* function_;
73
+ };
74
+
75
+ namespace script {
76
+ // We once had a `script::` namespace that was deleted. This is for backcompat
77
+ // of the public API; new code should not use this type alias.
78
+ using Method = ::torch::jit::Method;
79
+ } // namespace script
80
+
81
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h ADDED
@@ -0,0 +1,685 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <torch/csrc/autograd/variable.h>
4
+ #include <torch/csrc/jit/api/object.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+ #include <torch/csrc/jit/ir/named_value.h>
8
+ #include <torch/csrc/jit/runtime/argument_spec.h>
9
+ #include <torch/csrc/jit/runtime/graph_executor.h>
10
+
11
+ #include <torch/csrc/Export.h>
12
+ #include <torch/csrc/api/include/torch/ordered_dict.h>
13
+ #include <torch/csrc/jit/api/compilation_unit.h>
14
+
15
+ #include <ATen/core/function_schema.h>
16
+ #include <ATen/core/qualified_name.h>
17
+ #include <c10/util/ArrayRef.h>
18
+ #include <c10/util/Optional.h>
19
+ #include <c10/util/irange.h>
20
+
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <mutex>
24
+ #include <ostream>
25
+ #include <string>
26
+ #include <unordered_map>
27
+ #include <unordered_set>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ // This file contains classes which assist in desugaring Python style
32
+ // modules and their methods into flattened graphs which don't have any
33
+ // function calls.
34
+
35
+ namespace torch::jit {
36
+
37
+ using ::c10::Argument;
38
+ using ::c10::FunctionSchema;
39
+ using ::c10::QualifiedName;
40
+ // Map which stores filename to content.
41
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
42
+
43
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
44
+
45
+ struct Module;
46
+
47
+ template <typename T>
48
+ struct slot_list_impl;
49
+
50
+ template <typename T>
51
+ struct Named {
52
+ std::string name;
53
+ T value;
54
+ };
55
+
56
+ using NameModule = Named<Module>;
57
+ using NameValue = Named<IValue>;
58
+ using NameTensor = Named<at::Tensor>;
59
+
60
+ namespace detail {
61
+ struct TORCH_API ModulePolicy;
62
+ struct TORCH_API ParameterPolicy;
63
+ struct TORCH_API AttributePolicy;
64
+ struct TORCH_API BufferPolicy;
65
+ template <typename P>
66
+ struct NamedPolicy;
67
+ } // namespace detail
68
+
69
+ using module_list = slot_list_impl<detail::ModulePolicy>;
70
+ using named_module_list =
71
+ slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>;
72
+
73
+ using parameter_list = slot_list_impl<detail::ParameterPolicy>;
74
+ using named_parameter_list =
75
+ slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>;
76
+
77
+ using attribute_list = slot_list_impl<detail::AttributePolicy>;
78
+ using named_attribute_list =
79
+ slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>;
80
+
81
+ using buffer_list = slot_list_impl<detail::BufferPolicy>;
82
+ using named_buffer_list =
83
+ slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>;
84
+
85
+ using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;
86
+
87
+ struct TORCH_API Module : public Object {
88
+ explicit Module(c10::QualifiedName class_name);
89
+ Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
90
+ Module() = default;
91
+ Module(const Module&) = default;
92
+ Module& operator=(const Module&) = default;
93
+ Module(Module&&) noexcept = default;
94
+ Module& operator=(Module&&) noexcept = default;
95
+ Module(
96
+ c10::QualifiedName,
97
+ std::shared_ptr<CompilationUnit> cu,
98
+ bool shouldMangle = false);
99
+ Module(ModulePtr module_value) : Object(std::move(module_value)) {}
100
+ ~Module() = default;
101
+
102
+ void set_optimized(bool o) {
103
+ TORCH_WARN(
104
+ "Module::set_optimized() is deprecated and has no effect. "
105
+ "Please use setGraphExecutorOptimize()");
106
+ }
107
+
108
+ bool is_optimized() const {
109
+ TORCH_WARN(
110
+ "Module::is_optimized() is deprecated and always returns true. "
111
+ "Please use getGraphExecutorOptimize()");
112
+ return true;
113
+ }
114
+
115
+ IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) {
116
+ return get_method("forward")(std::move(inputs), kwargs);
117
+ }
118
+
119
+ // In script modules, buffers are Tensors attribute that are _not_ registered
120
+ // as parameters. This is different than in nn.Module where there is a special
121
+ // register_buffer method. With this simplification, we only need to track
122
+ // whether a slot is a parameter to be able to classify it.
123
+ void register_buffer(const std::string& name, at::Tensor v) {
124
+ bool is_param = false;
125
+ bool is_buffer = true;
126
+ std::lock_guard<std::mutex> lock(*register_mutex_);
127
+ type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer);
128
+ _ivalue()->setAttr(name, std::move(v));
129
+ }
130
+
131
+ void register_parameter(
132
+ const std::string& name,
133
+ at::Tensor v,
134
+ bool is_buffer) {
135
+ std::lock_guard<std::mutex> lock(*register_mutex_);
136
+ type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer);
137
+ _ivalue()->setAttr(name, std::move(v));
138
+ }
139
+
140
+ void register_attribute(
141
+ const std::string& name,
142
+ const TypePtr& t,
143
+ IValue v,
144
+ bool is_param = false,
145
+ bool is_buffer = false) {
146
+ type()->addOrCheckAttribute(name, t, is_param, is_buffer);
147
+ _ivalue()->setAttr(name, std::move(v));
148
+ }
149
+
150
+ void register_module(const std::string& name, const Module& module) {
151
+ type()->addOrCheckAttribute(name, module.type());
152
+ _ivalue()->setAttr(name, module._ivalue());
153
+ }
154
+
155
+ void apply(const std::function<void(Module&)>& fn);
156
+
157
+ buffer_list buffers(bool recurse = true) const;
158
+ named_buffer_list named_buffers(bool recurse = true) const;
159
+
160
+ module_list children() const; // direct modules
161
+ named_module_list named_children() const;
162
+ module_list modules() const; // all modules, including this one, recursively
163
+ named_module_list named_modules() const;
164
+
165
+ // all tensors involved in gradient optimization
166
+ parameter_list parameters(bool recurse = true) const;
167
+ named_parameter_list named_parameters(bool recurse = true) const;
168
+
169
+ // all members of the object, similar to iterating over dir(obj) in python
170
+ attribute_list attributes(bool recurse = true) const;
171
+ named_attribute_list named_attributes(bool recurse = true) const;
172
+
173
+ void dump(
174
+ bool print_method_bodies,
175
+ bool print_attr_values,
176
+ bool print_param_values) const;
177
+
178
+ std::string dump_to_str(
179
+ bool print_method_bodies,
180
+ bool print_attr_values,
181
+ bool print_param_values) const;
182
+
183
+ /// Enables "training" mode.
184
+ void train(bool on = true);
185
+ /// Calls train(false) to enable "eval" mode.
186
+ /// Do not override this method, override `train()` instead.
187
+ void eval() {
188
+ train(/*on=*/false);
189
+ }
190
+ /// True if the module is in training mode.
191
+ bool is_training() const {
192
+ return attr("training", true).toBool();
193
+ }
194
+
195
+ /// Recursively casts all parameters to the given `dtype` and `device`.
196
+ ///
197
+ /// If `non_blocking` is true and the source is in pinned memory and
198
+ /// destination is on the GPU or vice versa, the copy is performed
199
+ /// asynchronously with respect to the host. Otherwise, the argument has no
200
+ /// effect.
201
+ void to(at::Device device, at::ScalarType dtype, bool non_blocking = false);
202
+
203
+ /// Recursively casts all parameters to the given dtype.
204
+ ///
205
+ /// If `non_blocking` is true and the source is in pinned memory and
206
+ /// destination is on the GPU or vice versa, the copy is performed
207
+ /// asynchronously with respect to the host. Otherwise, the argument has no
208
+ /// effect.
209
+ void to(at::ScalarType dtype, bool non_blocking = false);
210
+
211
+ /// Recursively moves all parameters to the given device.
212
+ ///
213
+ /// If `non_blocking` is true and the source is in pinned memory and
214
+ /// destination is on the GPU or vice versa, the copy is performed
215
+ /// asynchronously with respect to the host. Otherwise, the argument has no
216
+ /// effect.
217
+ void to(at::Device device, bool non_blocking = false);
218
+
219
+ void save(
220
+ std::ostream& out,
221
+ const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
222
+
223
+ void save(
224
+ const std::string& filename,
225
+ const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
226
+
227
+ void _save_for_mobile(
228
+ std::ostream& out,
229
+ const ExtraFilesMap& extra_files = ExtraFilesMap(),
230
+ bool save_mobile_debug_info = false,
231
+ bool use_flatbuffer = false) const;
232
+
233
+ void _save_for_mobile(
234
+ const std::string& filename,
235
+ const ExtraFilesMap& extra_files = ExtraFilesMap(),
236
+ bool save_mobile_debug_info = false,
237
+ bool use_flatbuffer = false) const;
238
+
239
+ Module copy() const;
240
+
241
+ Module deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
242
+
243
+ // Clones both the underlying `ClassType` and the module instance(data), this
244
+ // function creates a new `ClassType` and returns a new instance that has the
245
+ // same data as the current instance but with the new type, shared ClassType
246
+ // will be preserved as well
247
+ Module clone(bool inplace = false) const;
248
+
249
+ // Clones both the underlying `ClassType` and the module instance(data), this
250
+ // function creates a new `ClassType` and returns a new instance that has the
251
+ // same data as the current instance but with the new type, shared ClassType
252
+ // will be preserved as well. Also allows the caller to specify a set of
253
+ // method and attribute names to not clone.
254
+ Module clone(
255
+ bool inplace,
256
+ const std::unordered_set<std::string>& ignored_method,
257
+ const std::unordered_set<std::string>& ignored_attributes) const;
258
+
259
+ void clone_method(const Module& orig, const std::string& name);
260
+
261
+ IValue operator()(std::vector<IValue> inputs);
262
+
263
+ template <typename... Types>
264
+ IValue create_class(const c10::QualifiedName& name, Types&&... args) const {
265
+ return create_class(name, {IValue(std::forward<Types>(args))...});
266
+ }
267
+
268
+ IValue create_class(const c10::QualifiedName& name, Stack stack) const;
269
+
270
+ inline bool operator==(const Module& y) const noexcept {
271
+ return _ivalue() == y._ivalue();
272
+ }
273
+
274
+ void set_delete_memory(std::shared_ptr<char> delete_mem) {
275
+ mem_to_delete_ = std::move(delete_mem);
276
+ }
277
+
278
+ // A set of functions to maintain input shapes through torch.jit.save and
279
+ // torch.jit.load. It only works on tensors and lists/dicts of tensors
280
+ // because tracing is only supported by these types.
281
+ void store_traced_inputs(std::string func_name, std::vector<IValue> inputs) {
282
+ if (inputs.size() == 0) {
283
+ return;
284
+ }
285
+ auto c10_inputs = c10::impl::GenericList(AnyType::get());
286
+ for (IValue& value : inputs) {
287
+ // Not checking whether this is traceable type as that is already checked
288
+ // higher up in the stack and changing that would require a larger
289
+ // restructuring.
290
+ c10_inputs.emplace_back(std::move(value));
291
+ }
292
+ traced_inputs_.insert_or_assign(func_name, c10_inputs);
293
+ }
294
+
295
+ c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs()
296
+ const {
297
+ return traced_inputs_;
298
+ }
299
+
300
+ private:
301
+ Module clone_impl(
302
+ std::unordered_map<TypePtr, TypePtr>& type_remap,
303
+ bool inplace,
304
+ IValue::HashAliasedIValueMap memo,
305
+ const std::unordered_set<std::string>& ignored_methods,
306
+ const std::unordered_set<std::string>& ignored_attributes) const;
307
+
308
+ void clone_method(
309
+ const Module& orig,
310
+ const Function& method,
311
+ const std::unordered_map<TypePtr, TypePtr>& type_remap);
312
+
313
+ c10::QualifiedName getNameForMethod(std::string basename) const {
314
+ return QualifiedName(*type()->name(), std::move(basename));
315
+ }
316
+
317
+ void to_impl(
318
+ const c10::optional<at::Device>& device,
319
+ const c10::optional<at::ScalarType>& dtype,
320
+ bool non_blocking);
321
+
322
+ // Extra handle for the module to delete when itself is deleted
323
+ std::shared_ptr<char> mem_to_delete_;
324
+
325
+ // Map of function names to the traced inputs that they have been traced with
326
+ c10::Dict<std::string, c10::impl::GenericList> traced_inputs_;
327
+
328
+ // Mutex to keep registring buffer or parameter thread safe.
329
+ std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>();
330
+ };
331
+
332
+ // C++ equivalent api of `torch.jit.freeze`. See documentation there for
333
+ // details.
334
+ TORCH_API Module freeze(
335
+ const Module& module,
336
+ const c10::optional<std::vector<std::string>>& preserved_attrs =
337
+ c10::nullopt,
338
+ bool optimize_numerics = true);
339
+
340
+ // C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation
341
+ // there for details.
342
+ TORCH_API Module optimize_for_inference(
343
+ Module& module,
344
+ const std::vector<std::string>& other_methods = {});
345
+
346
+ enum class FusionBehavior { STATIC, DYNAMIC };
347
+
348
+ using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>;
349
+ // clang-format off
350
+ /*
351
+ Sets the type and number of specializations that can occur during fusion.
352
+
353
+ Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC
354
+ and depth is an integer.
355
+
356
+ Behavior - static vs dynamic:
357
+ In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
358
+ based on some initial profiling runs.
359
+ In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
360
+ shapes are possible.
361
+
362
+ In both cases, we also recompile on new striding behavior, device, or dtype.
363
+
364
+ Behavior - fallback functions & depth:
365
+ When an input doesn't match the format required by the specialized compiled op, it will run
366
+ a fallback function. Fallback functions are recursively be compiled and specialized based
367
+ on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
368
+ limit the number of specializations that can be compiled, before giving up on recompiling and
369
+ falling back to a completely un-fused, un-specialized implementation.
370
+
371
+ The list of (type, depth) pairs controls the type of specializations and the number of
372
+ specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first
373
+ two specializations will use static fusions, the following two specializations will use
374
+ dynamic fusion, and any inputs that satisfy none of the 4 options will run an
375
+ unfused implementation.
376
+
377
+ NB: in the future, if more as more fusion backends are added there may be more granular
378
+ apis for specific fusers.
379
+ */
380
+ // clang-format on
381
+ TORCH_API FusionStrategy getFusionStrategy();
382
+ // returns previous strategy
383
+ TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
384
+
385
+ namespace detail {
386
+
387
+ struct TORCH_API SlotCursor {
388
+ Module module_;
389
+ int64_t i_; // slot offset, -1 indicates the module itself
390
+ };
391
+
392
+ } // namespace detail
393
+
394
+ // This iterator allows the (optionally recursive) enumeration of
395
+ // the members of a Module. It performs a depth-first pre-order
396
+ // traversal of the module. The Policy template parameter determines
397
+ // which slots of the object should be included. For instance,
398
+ // when iterating parameters, we return the parameter tensors,
399
+ // but skip modules, buffers, and other attributes.
400
+ // See ModulePolicy for comments about Policy object's API.
401
+ template <typename Policy>
402
+ struct slot_iterator_impl {
403
+ using SlotCursor = detail::SlotCursor;
404
+ using value_type = typename Policy::value_type;
405
+ slot_iterator_impl(
406
+ Module root,
407
+ bool recurse, // if true, do a depth-first search, otherwise, just look at
408
+ // slots of root
409
+ bool return_module) // if true include root itself as the first thing
410
+ // visited (used in modules())
411
+ : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
412
+ recurse_(recurse) {
413
+ // advance iterator to first valid element (or the end, if empty)
414
+ while_not_valid_next();
415
+ }
416
+ // empty cursors_, represents end of iteration
417
+ slot_iterator_impl() : recurse_(false) {}
418
+ value_type operator*() const {
419
+ return Policy::create(cursors_, cur());
420
+ }
421
+ value_type operator->() const {
422
+ return **this;
423
+ }
424
+ slot_iterator_impl& operator++() {
425
+ next_valid();
426
+ return *this;
427
+ }
428
+ slot_iterator_impl operator++(int) {
429
+ // this is really expensive, should we delete it so people don't use it
430
+ // instead of prefix?
431
+ slot_iterator_impl old = *this;
432
+ ++(*this);
433
+ return old;
434
+ }
435
+
436
+ private:
437
+ // return_module() is a corner case where instead of returning a submodule
438
+ // of root, we are returning root itself, because we are iterating modules(),
439
+ // which contains the root module itself.
440
+ // It is represented with a single SlotCursor whose index is -1.
441
+ bool return_module() const {
442
+ return top().i_ == -1;
443
+ }
444
+ const SlotCursor& top() const {
445
+ return cursors_.back();
446
+ }
447
+ SlotCursor& top() {
448
+ return cursors_.back();
449
+ }
450
+ IValue cur() const {
451
+ return return_module() ? top().module_._ivalue()
452
+ : top().module_._ivalue()->getSlot(top().i_);
453
+ }
454
+
455
+ // advance to the next slot in a depth first pre-order traversal of the
456
+ // modules slots. This function does not guarantee the next slot is a
457
+ // valid element of the iteration. That is done by valid().
458
+ // invariant: !cursors_.empty()
459
+ void next() {
460
+ // we just returned the module itself, advance i_ to 0 so we are now
461
+ // at the first slot of the module.
462
+ if (return_module()) {
463
+ ++top().i_;
464
+ return;
465
+ }
466
+ // the last traversal action advanced beyond the number of slots in the
467
+ // module so continue the iteration in the parent.
468
+ if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) {
469
+ cursors_.pop_back();
470
+ if (!cursors_.empty()) {
471
+ ++top().i_;
472
+ }
473
+ return;
474
+ }
475
+ // if the current thing is a module, we have to scan it for recursive
476
+ // traversals. We do this by adding a new SlotCursor to track the traversal.
477
+ if (recurse_ &&
478
+ top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) {
479
+ cursors_.emplace_back(SlotCursor{cur().toModule(), 0});
480
+ return;
481
+ }
482
+ // common case: advance to the next slot.
483
+ ++top().i_;
484
+ }
485
+ // is the current position of the iterator a valid one?
486
+ // otherwise, we have to continue advancing.
487
+ bool valid() const {
488
+ return top().i_ <
489
+ int64_t(top().module_._ivalue()->type()->numAttributes()) &&
490
+ Policy::valid(
491
+ top().module_._ivalue()->type(),
492
+ top().i_,
493
+ top().module_._ivalue()->getSlot(top().i_));
494
+ }
495
+ void while_not_valid_next() {
496
+ // advance iteration until we are either at the end (cursors_.empty())
497
+ // or in a valid state. return_module() is a special case,
498
+ // and is always considered valid, regardless of Policy, because it is
499
+ // it is only true when we are iterating modules.
500
+ while (!cursors_.empty() && !return_module() && !valid()) {
501
+ next();
502
+ }
503
+ }
504
+ void next_valid() {
505
+ // avoid crashing if this is empty
506
+ if (cursors_.empty()) {
507
+ return;
508
+ }
509
+ // advance to next element, which is maybe not valid
510
+ next();
511
+ while_not_valid_next();
512
+ }
513
+
514
+ std::vector<SlotCursor> cursors_;
515
+ bool recurse_;
516
+
517
+ friend inline bool operator!=(
518
+ const slot_iterator_impl<Policy>& a,
519
+ const slot_iterator_impl<Policy>& b) {
520
+ // we are finished iteration when we have no more iteration SlotCursors.
521
+ // end is always an empty iterator with no cursors.
522
+ return (a.cursors_.empty() != b.cursors_.empty());
523
+ }
524
+ };
525
+
526
+ // This type represents lists of parameters, attributes, and
527
+ // submodules contained in the module. It is abstract because
528
+ // they are not stored directly in std::vectors but inside the
529
+ // module's IValue object itself.
530
+ template <typename Policy>
531
+ struct slot_list_impl {
532
+ using iterator = slot_iterator_impl<Policy>;
533
+ using const_iterator = slot_iterator_impl<Policy>;
534
+ using value_type = typename iterator::value_type;
535
+ slot_iterator_impl<Policy> begin() const {
536
+ return slot_iterator_impl<Policy>(module_, recurse_, return_module_);
537
+ }
538
+ slot_iterator_impl<Policy> end() const {
539
+ return slot_iterator_impl<Policy>();
540
+ }
541
+ size_t size() const {
542
+ if (!size_) {
543
+ size_ = size_t(0);
544
+ // NOLINTNEXTLINE(clang-diagnostic-unused-variable)
545
+ for (const value_type& s : *(this)) {
546
+ (void)s; // Suppress unused variable warning
547
+ ++*size_;
548
+ }
549
+ }
550
+ return *size_;
551
+ }
552
+
553
+ slot_list_impl(Module module, bool recurse, bool return_module)
554
+ : module_(std::move(module)),
555
+ recurse_(recurse),
556
+ return_module_(return_module),
557
+ size_(c10::nullopt) {
558
+ if (!recurse && !return_module && Policy::all_slots) {
559
+ size_ = module_.num_slots();
560
+ }
561
+ }
562
+
563
+ private:
564
+ Module module_;
565
+ bool recurse_;
566
+ bool return_module_;
567
+ // size of this list, cached on first request
568
+ // when we need to filter the slot list
569
+ mutable c10::optional<size_t> size_;
570
+ friend struct Module;
571
+ };
572
+
573
+ namespace detail {
574
+
575
+ // slot_iterator_impl always iterate over all the slots in a module,
576
+ // the Policy template argument determines slots should be returned and their
577
+ // types
578
+ struct TORCH_API ModulePolicy {
579
+ // the type of the value being returned
580
+ using value_type = Module;
581
+
582
+ // the logic for creating the type being returned, given the raw IValue
583
+ // of that object.
584
+ static value_type create(
585
+ const std::vector<detail::SlotCursor>& cursors,
586
+ IValue v) {
587
+ return Module(std::move(v).toObject());
588
+ }
589
+ // is slot i in typ something that this iterator should return, otherwise,
590
+ // we skip it.
591
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
592
+ return typ->getAttribute(i)->is_module();
593
+ }
594
+ // are we going to return everything? If so, we can optimize the calculate
595
+ // of the size of the list.
596
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
597
+ };
598
+
599
+ struct TORCH_API ParameterPolicy {
600
+ using value_type = at::Tensor;
601
+ static value_type create(
602
+ const std::vector<detail::SlotCursor>& cursors,
603
+ IValue v) {
604
+ return std::move(v).toTensor();
605
+ }
606
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
607
+ return typ->is_parameter(i) && v.isTensor();
608
+ }
609
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
610
+ };
611
+
612
+ struct TORCH_API BufferPolicy {
613
+ using value_type = at::Tensor;
614
+ static value_type create(
615
+ const std::vector<detail::SlotCursor>& cursors,
616
+ IValue v) {
617
+ return std::move(v).toTensor();
618
+ }
619
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
620
+ return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
621
+ typ->is_buffer(i);
622
+ }
623
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
624
+ };
625
+
626
+ struct TORCH_API AttributePolicy {
627
+ using value_type = IValue;
628
+ static value_type create(
629
+ const std::vector<detail::SlotCursor>& cursors,
630
+ IValue v) {
631
+ return v;
632
+ }
633
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
634
+ return true;
635
+ }
636
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
637
+ };
638
+
639
+ // take a Policy object, and make a version of it that returns the slot.
640
+ // along with the fully qualified name of that slot. This is used for the named_
641
+ // variants like named_parameters().
642
+ template <typename Policy>
643
+ struct NamedPolicy {
644
+ using value_type = Named<typename Policy::value_type>;
645
+ static value_type create(
646
+ const std::vector<detail::SlotCursor>& cursors,
647
+ IValue v) {
648
+ std::string name;
649
+ if (cursors.size() == 1) {
650
+ name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back());
651
+ } else {
652
+ std::ostringstream ss;
653
+ for (const auto i : c10::irange(cursors.size())) {
654
+ if (i > 0) {
655
+ ss << ".";
656
+ }
657
+ ss << nameFragment(cursors[i]);
658
+ }
659
+ name = ss.str();
660
+ }
661
+ return value_type{std::move(name), Policy::create(cursors, std::move(v))};
662
+ }
663
+ static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) {
664
+ return Policy::valid(t, i, v);
665
+ }
666
+ static constexpr bool all_slots = Policy::all_slots;
667
+
668
+ private:
669
+ static std::string nameFragment(const detail::SlotCursor& f) {
670
+ return f.module_.type()->getAttributeName(f.i_);
671
+ }
672
+ };
673
+
674
+ } // namespace detail
675
+
676
+ TORCH_API bool& getInlineEverythingMode();
677
+
678
+ namespace script {
679
+ // We once had a `script::` namespace that was deleted. This is for backcompat
680
+ // of the public API; new code should not use this type alias.
681
+ using Module = ::torch::jit::Module;
682
+ using ExtraFilesMap = ::torch::jit::ExtraFilesMap;
683
+ } // namespace script
684
+
685
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/functional.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/api/method.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch::jit {
11
+
12
+ struct Resolver;
13
+ using ResolverPtr = std::shared_ptr<Resolver>;
14
+
15
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
16
+
17
+ // Throw this in C++ land if `attr` fails. This will be converted to a Python
18
+ // AttributeError by the Python binding code
19
+ class ObjectAttributeError : public std::runtime_error {
20
+ public:
21
+ ObjectAttributeError(const std::string& what) : std::runtime_error(what) {}
22
+ };
23
+
24
+ struct TORCH_API Object {
25
+ Object() = default;
26
+ Object(const Object&) = default;
27
+ Object& operator=(const Object&) = default;
28
+ Object(Object&&) noexcept = default;
29
+ Object& operator=(Object&&) noexcept = default;
30
+ Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
31
+ Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
32
+ Object(
33
+ c10::QualifiedName,
34
+ std::shared_ptr<CompilationUnit> cu,
35
+ bool shouldMangle = false);
36
+
37
+ ObjectPtr _ivalue() const {
38
+ TORCH_INTERNAL_ASSERT(_ivalue_);
39
+ return _ivalue_;
40
+ }
41
+
42
+ c10::ClassTypePtr type() const {
43
+ return _ivalue()->type();
44
+ }
45
+
46
+ struct Property {
47
+ std::string name;
48
+ Method getter_func;
49
+ c10::optional<Method> setter_func;
50
+ };
51
+
52
+ void setattr(const std::string& name, c10::IValue v) {
53
+ if (_ivalue()->type()->hasConstant(name)) {
54
+ TORCH_CHECK(
55
+ false,
56
+ "Can't set constant '",
57
+ name,
58
+ "' which has value:",
59
+ _ivalue()->type()->getConstant(name));
60
+ } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) {
61
+ const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot);
62
+ TORCH_CHECK(
63
+ v.type()->isSubtypeOf(*expected),
64
+ "Expected a value of type '",
65
+ expected->repr_str(),
66
+ "' for field '",
67
+ name,
68
+ "', but found '",
69
+ v.type()->repr_str(),
70
+ "'");
71
+ _ivalue()->setSlot(*slot, std::move(v));
72
+ } else {
73
+ TORCH_CHECK(false, "Module has no attribute '", name, "'");
74
+ }
75
+ }
76
+
77
+ c10::IValue attr(const std::string& name) const {
78
+ if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
79
+ return _ivalue()->getSlot(*r);
80
+ }
81
+ if (auto r = _ivalue()->type()->findConstantSlot(name)) {
82
+ return _ivalue()->type()->getConstant(*r);
83
+ }
84
+ std::stringstream err;
85
+ err << _ivalue()->type()->repr_str() << " does not have a field with name '"
86
+ << name.c_str() << "'";
87
+ throw ObjectAttributeError(err.str());
88
+ }
89
+
90
+ c10::IValue attr(const std::string& name, c10::IValue or_else) const {
91
+ if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
92
+ return _ivalue()->getSlot(*r);
93
+ }
94
+ if (auto r = _ivalue()->type()->findConstantSlot(name)) {
95
+ return _ivalue()->type()->getConstant(*r);
96
+ }
97
+ return or_else;
98
+ }
99
+
100
+ bool hasattr(const std::string& name) const {
101
+ return _ivalue()->type()->hasAttribute(name) ||
102
+ _ivalue()->type()->hasConstant(name);
103
+ }
104
+
105
+ // each object owns its methods. The reference returned here
106
+ // is guaranteed to stay valid until this module has been destroyed
107
+ Method get_method(const std::string& name) const {
108
+ if (auto method = find_method(name)) {
109
+ return *method;
110
+ }
111
+ AT_ERROR("Method '", name, "' is not defined.");
112
+ }
113
+
114
+ const std::vector<Method> get_methods() const {
115
+ return c10::fmap(type()->methods(), [&](Function* func) {
116
+ return Method(_ivalue(), func);
117
+ });
118
+ }
119
+
120
+ bool has_property(const std::string& name) const {
121
+ for (const auto& prop : type()->properties()) {
122
+ if (prop.name == name) {
123
+ return true;
124
+ }
125
+ }
126
+ return false;
127
+ }
128
+
129
+ const Property get_property(const std::string& name) const {
130
+ for (const auto& prop : type()->properties()) {
131
+ if (prop.name == name) {
132
+ c10::optional<Method> setter = c10::nullopt;
133
+ if (prop.setter) {
134
+ setter = Method(_ivalue(), prop.setter);
135
+ }
136
+ return Property{
137
+ prop.name, Method(_ivalue(), prop.getter), std::move(setter)};
138
+ }
139
+ }
140
+ AT_ERROR("Property '", name, "' is not defined.");
141
+ }
142
+
143
+ const std::vector<Property> get_properties() const {
144
+ return c10::fmap(type()->properties(), [&](ClassType::Property prop) {
145
+ c10::optional<Method> setter = c10::nullopt;
146
+ if (prop.setter) {
147
+ setter = Method(_ivalue(), prop.setter);
148
+ }
149
+ return Property{
150
+ std::move(prop.name),
151
+ Method(_ivalue(), prop.getter),
152
+ std::move(setter)};
153
+ });
154
+ }
155
+
156
+ c10::optional<Method> find_method(const std::string& basename) const;
157
+
158
+ /// Run a method from this module.
159
+ ///
160
+ /// For example:
161
+ /// @code
162
+ /// IValue output = module->run("relu_script", a, b);
163
+ /// @endcode
164
+ ///
165
+ /// To get a compile a module from a source string, see torch::jit::compile
166
+ ///
167
+ /// @param method_name The name of the method to run
168
+ /// @param args Arguments to be passed to the method
169
+ /// @return An IValue containing the return value (or values if it is a tuple)
170
+ /// from the method
171
+ template <typename... Types>
172
+ IValue run_method(const std::string& method_name, Types&&... args) {
173
+ return get_method(method_name)({IValue(std::forward<Types>(args))...});
174
+ }
175
+
176
+ // so that C++ users can easily add methods
177
+ void define(const std::string& src, const ResolverPtr& resolver = nullptr);
178
+
179
+ size_t num_slots() const {
180
+ return _ivalue()->slots().size();
181
+ }
182
+
183
+ // shallow copy the object
184
+ Object copy() const;
185
+
186
+ // Copies all the attributes of the object recursively without creating new
187
+ // `ClassType`, including deepcopy of Tensors
188
+ Object deepcopy() const;
189
+
190
+ private:
191
+ // mutable be we lazily initialize in module_object.
192
+ mutable ObjectPtr _ivalue_;
193
+ };
194
+
195
+ namespace script {
196
+ // We once had a `script::` namespace that was deleted. This is for backcompat
197
+ // of the public API; new code should not use this type alias.
198
+ using Object = ::torch::jit::Object;
199
+ } // namespace script
200
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <istream>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <unordered_map>
7
+ #include <vector>
8
+
9
+ #include <ATen/core/ivalue.h>
10
+ #include <c10/core/Device.h>
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <torch/csrc/jit/mobile/module.h>
14
+
15
+ /**
16
+ * Defines the public API for loading flatbuffer-serialized mobile modules.
17
+ * Note that this header must not include or depend on flatbuffer-defined
18
+ * types, to avoid leaking those details to PyTorch clients.
19
+ */
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+
24
+ /// All non-copied data pointers provided to `parse_and_initialize_*` functions
25
+ /// must be aligned to this boundary. Since the Module will point directly into
26
+ /// the data, this alignment is necessary to ensure that certain types/structs
27
+ /// are properly aligned.
28
+ constexpr size_t kFlatbufferDataAlignmentBytes = 16;
29
+
30
+ /// Maps file names to file contents.
31
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
32
+
33
+ // On high level, to produce a Module from a file on disk, we need to go
34
+ // through the follow steps:
35
+ // 1. Read: Read the file from disk -> memory
36
+ // 2. Deserialize: Parse the bytes to produce some in memory manipulable
37
+ // structure
38
+ // 3. Module initialization: Produce mobile::Module out of the structure
39
+ // produced in 2.
40
+ // Under this context, the structure described in 2. is the flatbuffer-defined
41
+ // type mobile::serialization::Module. However, this step/type is not visible in
42
+ // the public API.
43
+
44
+ // Parse a mobile::Module from raw bytes.
45
+ //
46
+ // This function does steps 2+3 described above.
47
+ //
48
+ // Does not take ownership of `data`; if you want it to take ownership, see the
49
+ // shared_ptr overload of this function.
50
+ //
51
+ // If should_copy_tensor_memory is true, then the returned module will NOT have
52
+ // refences to `data`, so `data` can be freed immediately.
53
+ //
54
+ // If should_copy_tensor_memory is false, then returned module will have tensors
55
+ // that points inside of `data`; the caller will need to make sure that `data`
56
+ // outlives the returned Module. Also, `data` must be aligned to
57
+ // kFlatbufferDataAlignmentBytes.
58
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
59
+ void* data,
60
+ size_t size, // of `data`, in bytes.
61
+ c10::optional<at::Device> device = c10::nullopt,
62
+ ExtraFilesMap* extra_files = nullptr,
63
+ bool should_copy_tensor_memory = false);
64
+
65
+ // Parse a mobile::Module from raw bytes.
66
+ //
67
+ // This function does steps 2+3 described above.
68
+ //
69
+ // The returned Module holds a reference to `data`, which must be aligned to
70
+ // kFlatbufferDataAlignmentBytes.
71
+ //
72
+ // If you do not want the Module to hold a reference to `data`, see the raw
73
+ // pointer overload of this function.
74
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
75
+ std::shared_ptr<char> data,
76
+ size_t size, // of `data`, in bytes.
77
+ c10::optional<at::Device> device = c10::nullopt,
78
+ ExtraFilesMap* extra_files = nullptr);
79
+
80
+ // Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
81
+ //
82
+ // This is the same as parse_and_initialize_mobile_module() except that it also
83
+ // extracts JIT source files and constants. Can be used to construct a
84
+ // jit::Module.
85
+ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
86
+ void* data,
87
+ size_t size, // of `data`, in bytes.
88
+ ExtraFilesMap& jit_sources,
89
+ std::vector<IValue>& jit_constants,
90
+ c10::optional<at::Device> device = c10::nullopt,
91
+ ExtraFilesMap* extra_files = nullptr);
92
+
93
+ // Load a mobile::Module from a filepath.
94
+ //
95
+ // This function does steps 1+2+3 described above.
96
+ //
97
+ // We need to have this as a convienience because Python API will need to wrap
98
+ // this. C++ clients should use one of the versions of
99
+ // parse_and_initialize_mobile_module() so they can manage the raw data more
100
+ // directly.
101
+ TORCH_API mobile::Module load_mobile_module_from_file(
102
+ const std::string& filename,
103
+ c10::optional<at::Device> device = c10::nullopt,
104
+ ExtraFilesMap* extra_files = nullptr);
105
+
106
+ TORCH_API uint64_t get_bytecode_version(std::istream& in);
107
+ TORCH_API uint64_t get_bytecode_version(const std::string& filename);
108
+ TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);
109
+
110
+ TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
111
+ char* flatbuffer_content);
112
+
113
+ // The methods below are less efficient because it need to read the stream in
114
+ // its entirity to a buffer
115
+ TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
116
+ std::istream& in,
117
+ c10::optional<at::Device> device = c10::nullopt,
118
+ ExtraFilesMap* extra_files = nullptr);
119
+
120
+ TORCH_API mobile::Module parse_flatbuffer_no_object(
121
+ std::shared_ptr<char> data,
122
+ size_t size,
123
+ c10::optional<at::Device> device);
124
+
125
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
126
+ void* data,
127
+ size_t,
128
+ c10::optional<at::Device>,
129
+ ExtraFilesMap* extra_files,
130
+ bool should_copy_tensor_memory);
131
+
132
+ // no op, TODO(qihan) delete
133
+ TORCH_API bool register_flatbuffer_loader();
134
+
135
+ } // namespace jit
136
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/mobile/code.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ class Frame {
13
+ public:
14
+ explicit Frame(const Code& code) : code_(code) {}
15
+ const Code& getCode() const {
16
+ return code_;
17
+ }
18
+
19
+ void step() {
20
+ pc_++;
21
+ }
22
+
23
+ void jump(size_t n) {
24
+ pc_ += n;
25
+ }
26
+
27
+ size_t getPC() const {
28
+ return pc_;
29
+ }
30
+
31
+ const Instruction& getInstruction() const {
32
+ return code_.instructions_.at(pc_);
33
+ }
34
+
35
+ c10::optional<int64_t> getDebugHandle() const {
36
+ return getDebugHandle(pc_);
37
+ }
38
+
39
+ c10::optional<int64_t> getDebugHandle(size_t pc) const {
40
+ if (pc >= code_.debug_handles_.size()) {
41
+ return {};
42
+ }
43
+ return code_.debug_handles_[pc];
44
+ }
45
+
46
+ private:
47
+ const Code& code_;
48
+ size_t pc_{0};
49
+ };
50
+
51
+ } // namespace mobile
52
+ } // namespace jit
53
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/module.h>
3
+ #include <torch/csrc/jit/mobile/parse_operators.h>
4
+
5
+ #include <istream>
6
+ #include <memory>
7
+
8
+ #include <caffe2/serialize/file_adapter.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ using caffe2::serialize::FileAdapter;
13
+ using caffe2::serialize::IStreamAdapter;
14
+ using caffe2::serialize::ReadAdapterInterface;
15
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
16
+
17
+ constexpr const char* kArchiveNameBytecode = "bytecode";
18
+ constexpr const char* kArchiveNameConstants = "constants";
19
+ constexpr const char* kArchiveNameVersion = "version";
20
+
21
+ // The family of methods below load a serialized Mobile Module
22
+ // into a mobile::Module object.
23
+ TORCH_API mobile::Module _load_for_mobile(
24
+ std::istream& in,
25
+ c10::optional<at::Device> device,
26
+ ExtraFilesMap& extra_file,
27
+ uint64_t module_load_options = kDefaultMobileLoadOptions);
28
+
29
+ TORCH_API mobile::Module _load_for_mobile(
30
+ const std::string& filename,
31
+ c10::optional<at::Device> device,
32
+ ExtraFilesMap& extra_files);
33
+
34
+ TORCH_API mobile::Module _load_for_mobile(
35
+ std::unique_ptr<ReadAdapterInterface> rai,
36
+ c10::optional<c10::Device> device,
37
+ ExtraFilesMap& extra_files,
38
+ uint64_t module_load_options = kDefaultMobileLoadOptions);
39
+
40
+ TORCH_API mobile::Module _load_for_mobile(
41
+ const std::string& filename,
42
+ c10::optional<at::Device> device,
43
+ ExtraFilesMap& extra_files,
44
+ uint64_t module_load_options);
45
+
46
+ TORCH_API mobile::Module _load_for_mobile(
47
+ std::istream& in,
48
+ c10::optional<at::Device> device = c10::nullopt);
49
+
50
+ TORCH_API mobile::Module _load_for_mobile(
51
+ const std::string& filename,
52
+ c10::optional<at::Device> device = c10::nullopt);
53
+
54
+ TORCH_API mobile::Module _load_for_mobile(
55
+ std::unique_ptr<ReadAdapterInterface> rai,
56
+ c10::optional<c10::Device> device = c10::nullopt);
57
+
58
+ /**
59
+ * Load only the contents of the "extra/" files whose names are
60
+ * passed in the map (extra_files). Populate the corresponding values
61
+ * with the contents of those files. Do not attempt to load the entire
62
+ * model, and stop once the extra files have been extracted.
63
+ *
64
+ * This API is needed to be able to load GPU models on linux CPU
65
+ * machines and extract only the extra files so that we can inspect
66
+ * the metadata that was added to the .ptl archive when it was
67
+ * generated.
68
+ *
69
+ */
70
+ void _load_extra_only_for_mobile(
71
+ const std::string& filename,
72
+ c10::optional<at::Device> device,
73
+ ExtraFilesMap& extra_files);
74
+
75
+ // Currently used by both mobile/import.cpp and model_compatibility.cpp.
76
+ // Should be removed after model_compatibility.cpp start using simplified
77
+ // version type_resolver and obj_loader.
78
+ at::TypePtr resolveTypeNameMobile(
79
+ const c10::QualifiedName& qn,
80
+ std::shared_ptr<CompilationUnit> compilation_unit);
81
+ c10::StrongTypePtr typeResolverMobile(
82
+ const c10::QualifiedName& qn,
83
+ const std::shared_ptr<CompilationUnit>& compilation_unit);
84
+ c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
85
+ const at::StrongTypePtr& type,
86
+ const at::IValue& input,
87
+ mobile::CompilationUnit& mobile_compilation_unit);
88
+
89
+ // Given a reader, which has access to a model file,
90
+ // return true if there exists tensors in `bytecode` archive
91
+ bool isTensorInBytecodeArchive(
92
+ caffe2::serialize::PyTorchStreamReader& stream_reader);
93
+
94
+ namespace mobile {
95
+
96
+ /**
97
+ * Given a torch::jit::mobile::Module, return a set of operator names
98
+ * (with overload name) that are used by any method in this mobile
99
+ * Mobile. This method runs through the bytecode for all methods
100
+ * in the specified model (module), and extracts all the root
101
+ * operator names. Root operators are operators that are called
102
+ * directly by the model (as opposed to non-root operators, which
103
+ * may be called transitively by the root operators).
104
+ *
105
+ */
106
+ TORCH_API std::set<std::string> _export_operator_list(
107
+ torch::jit::mobile::Module& module);
108
+
109
+ } // namespace mobile
110
+
111
+ } // namespace jit
112
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/mobile/module.h>
7
+
8
+ #include <istream>
9
+ #include <map>
10
+ #include <string>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ /**
16
+ * Loads named parameters from the serialized data in @p in.
17
+ *
18
+ * Calls #TORCH_CHECK() if the data format is not recognized.
19
+ */
20
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
21
+ std::istream& in,
22
+ c10::optional<at::Device> device = c10::nullopt);
23
+
24
+ /**
25
+ * Loads named parameters from the serialized data in @p filename.
26
+ *
27
+ * Calls #TORCH_CHECK() if the data format is not recognized.
28
+ */
29
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
30
+ const std::string& filename,
31
+ c10::optional<at::Device> device = c10::nullopt);
32
+
33
+ // NOTE: Please prefer using _load_parameters over using the function below.
34
+ TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
35
+ const mobile::Module& module);
36
+
37
+ } // namespace jit
38
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /**
4
+ * @file
5
+ * Declarations shared between import_data.cpp and export_data.cpp
6
+ */
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ namespace internal {
13
+ /**
14
+ * The name of the mobile::Module attribute which contains saved parameters, as
15
+ * a Dict of names to Tensors. Only used for Flatbuffer serialization.
16
+ */
17
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
18
+ constexpr char kSavedParametersAttributeName[] = "data";
19
+ } // namespace internal
20
+
21
+ } // namespace mobile
22
+ } // namespace jit
23
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <torch/csrc/jit/mobile/code.h>
6
+ #include <torch/csrc/jit/mobile/frame.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ struct InterpreterState {
13
+ TORCH_API explicit InterpreterState(const Code& code);
14
+ TORCH_API bool run(Stack& stack);
15
+
16
+ private:
17
+ void enterFrame(const Code&);
18
+ void leaveFrame();
19
+ void saveExceptionDebugHandles();
20
+ void callFunction(torch::jit::Function& f, Stack& stack);
21
+
22
+ c10::IValue& reg(size_t reg);
23
+ std::vector<c10::IValue> registers_;
24
+ std::vector<Frame> frames_;
25
+ };
26
+
27
+ const std::vector<DebugHandle>& getInterpretersExceptionDebugHandles();
28
+ } // namespace mobile
29
+ } // namespace jit
30
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <torch/csrc/jit/mobile/function.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace mobile {
9
+
10
+ class Module;
11
+
12
+ struct TORCH_API Method {
13
+ Method(const Module* owner, Function* function);
14
+
15
+ void run(Stack& stack) const;
16
+ void run(Stack&& stack) const {
17
+ run(stack);
18
+ }
19
+
20
+ c10::IValue operator()(std::vector<c10::IValue> stack) const;
21
+
22
+ const std::string& name() const {
23
+ return function_->name();
24
+ }
25
+
26
+ int64_t get_debug_handle(size_t pc) const {
27
+ return function_->get_debug_handle(pc);
28
+ }
29
+
30
+ Function& function() const {
31
+ return *function_;
32
+ }
33
+
34
+ private:
35
+ // Methods are uniquely owned by a single module.
36
+ // This raw pointer allows referencing the module
37
+ const Module* owner_;
38
+
39
+ // Underlying unbound function
40
+ Function* function_;
41
+ };
42
+
43
+ } // namespace mobile
44
+ } // namespace jit
45
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <torch/csrc/jit/mobile/debug_info.h>
4
+ #include <torch/csrc/jit/mobile/function.h>
5
+ #include <torch/csrc/jit/mobile/method.h>
6
+ #include <torch/csrc/jit/mobile/quantization.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ namespace mobile {
13
+ using Stack = std::vector<c10::IValue>;
14
+
15
+ // A CompilationUnit object is the one that gets executed by the lite
16
+ // interpreter.
17
+ //
18
+ // A CompilationUnit object contains a list of Method Objects. These are methods
19
+ // that appear in the original PyTorch Model. These method correspond to Python
20
+ // member functions of the Model class.
21
+ //
22
+ // Methods in turn contain a Function, and a back-pointer to the Module that
23
+ // owns this Method instance.
24
+ //
25
+ // A Function contains a Code Object (code_) which is defined in interpreter.h
26
+ //
27
+ // A Code object contains the following:
28
+ //
29
+ // std::vector<Instruction> instructions_;
30
+ // std::vector<c10::OperatorName> op_names_;
31
+ // std::vector<std::function<void(Stack&)>> operators_;
32
+ // std::vector<c10::IValue> constants_;
33
+ // std::vector<c10::TypePtr> types_;
34
+ // size_t register_size_; // Aggregated output size.
35
+ //
36
+ class CompilationUnit {
37
+ public:
38
+ void register_function(std::unique_ptr<Function> fn);
39
+ std::vector<std::unique_ptr<Function>>& methods() {
40
+ return methods_;
41
+ }
42
+ const std::vector<std::unique_ptr<Function>>& methods() const {
43
+ return methods_;
44
+ }
45
+ Function* find_function(const c10::QualifiedName& qn);
46
+ const Function* find_function(const c10::QualifiedName& qn) const;
47
+
48
+ void unsafeRemoveFunction(const int64_t index) {
49
+ methods_.erase(methods_.begin() + index);
50
+ }
51
+
52
+ private:
53
+ std::vector<std::unique_ptr<Function>> methods_;
54
+ };
55
+
56
+ // A Torch Mobile Module is a representation of the model (trained in case
57
+ // of inference). A Mobile Module contains
58
+ //
59
+ // 1. data (object_)
60
+ // 2. metadata (optional) about the model (metadata_ from the metadata.pkl
61
+ // file added after training)
62
+ // 3. Compilation Unit (cu_)
63
+ //
64
+ class TORCH_API Module {
65
+ public:
66
+ Module(
67
+ c10::intrusive_ptr<c10::ivalue::Object> object,
68
+ std::shared_ptr<CompilationUnit> cu)
69
+ : object_(std::move(object)), cu_(std::move(cu)) {}
70
+ Module() = default;
71
+ Method get_method(const std::string& method_name) const;
72
+ template <typename... Types>
73
+ c10::IValue run_method(const std::string& method_name, Types&&... args) {
74
+ return get_method(method_name)({IValue(std::forward<Types>(args))...});
75
+ }
76
+ c10::IValue forward(std::vector<c10::IValue> inputs) {
77
+ return get_method("forward")(std::move(inputs));
78
+ }
79
+ c10::optional<Method> find_method(const std::string& basename) const;
80
+
81
+ const std::string name() const {
82
+ return object_->name();
83
+ }
84
+ const std::vector<at::IValue>& slots() const {
85
+ return object_->slots();
86
+ }
87
+ const c10::intrusive_ptr<c10::ivalue::Object> _ivalue() const {
88
+ return object_;
89
+ }
90
+ const std::vector<at::Tensor> parameters() const;
91
+ const std::map<std::string, at::Tensor> named_parameters() const;
92
+ std::string get_forward_method_debug_info(int64_t debug_handle) const;
93
+ std::string getModuleHierarchy(const int64_t debug_handle) const;
94
+ std::string getCallStack(const int64_t debug_handle) const;
95
+ /// Enables "training" mode.
96
+ void train(bool on = true);
97
+ /// Calls train(false) to enable "eval" mode.
98
+ void eval() {
99
+ train(/*on=*/false);
100
+ }
101
+ /// True if the module is in training mode.
102
+ bool is_training() const;
103
+ const std::unordered_map<std::string, std::string> getMetadata() const {
104
+ return metadata_;
105
+ }
106
+ void setMetadata(
107
+ const std::unordered_map<std::string, std::string>& metadata) {
108
+ metadata_ = metadata;
109
+ }
110
+ const std::vector<Method> get_methods() const;
111
+
112
+ c10::IValue attr(const std::string& name, c10::IValue or_else) const {
113
+ if (auto r = object_->type()->findAttributeSlot(name)) {
114
+ return object_->getSlot(*r);
115
+ }
116
+ if (auto r = object_->type()->findConstantSlot(name)) {
117
+ return object_->type()->getConstant(*r);
118
+ }
119
+ return or_else;
120
+ }
121
+
122
+ void setDebugTable(MobileDebugTable&& debug_table) {
123
+ debug_table_ = std::move(debug_table);
124
+ }
125
+ const MobileDebugTable& getDebugTable() const {
126
+ return debug_table_;
127
+ }
128
+
129
+ void setHasDebugHandles(bool has_debug_handles) {
130
+ has_debug_handles_ = has_debug_handles;
131
+ }
132
+
133
+ bool hasDebugHandles() const {
134
+ return has_debug_handles_;
135
+ }
136
+
137
+ const CompilationUnit& compilation_unit() const {
138
+ return *cu_.get();
139
+ }
140
+
141
+ void set_delete_memory(std::shared_ptr<char> delete_mem) {
142
+ mem_to_delete_ = std::move(delete_mem);
143
+ }
144
+
145
+ void set_min_operator_version(int64_t version) {
146
+ min_operator_version_ = version;
147
+ }
148
+
149
+ int64_t min_operator_version() const {
150
+ return min_operator_version_;
151
+ }
152
+
153
+ void set_bytecode_version(int64_t version) {
154
+ bytecode_version_ = version;
155
+ }
156
+
157
+ int64_t bytecode_version() const {
158
+ return bytecode_version_;
159
+ }
160
+
161
+ private:
162
+ friend class quantization::PTQQuanizationHelper;
163
+
164
+ bool compareMethodSchemas(
165
+ const std::string& name_1,
166
+ const std::string& name_2);
167
+
168
+ void unsafeRemoveMethod(const std::string& basename);
169
+
170
+ void unsafeCopyMethod(
171
+ const std::string& new_method_name,
172
+ const Function& to_be_copied);
173
+
174
+ c10::intrusive_ptr<c10::ivalue::Object> object_;
175
+ std::unordered_map<std::string, std::string> metadata_;
176
+ std::shared_ptr<CompilationUnit> cu_;
177
+ MobileDebugTable debug_table_;
178
+ bool has_debug_handles_ = false;
179
+ int64_t min_operator_version_ = 4;
180
+ int64_t bytecode_version_ = 4;
181
+
182
+ // Extra handle for the module to delete when itself is deleted
183
+ std::shared_ptr<char> mem_to_delete_;
184
+ };
185
+
186
+ struct TORCH_API ModuleInfo {
187
+ uint64_t bytecode_version;
188
+ uint64_t operator_version;
189
+ std::unordered_map<std::string, int> opname_to_num_args;
190
+ std::unordered_set<std::string> function_names;
191
+ std::unordered_set<std::string> type_names;
192
+ };
193
+ TORCH_API ModuleInfo get_module_info(const mobile::Module& module);
194
+
195
+ } // namespace mobile
196
+ } // namespace jit
197
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/function.h>
3
+
4
+ namespace torch {
5
+ namespace jit {
6
+ namespace mobile {
7
+ using c10::IValue;
8
+ TORCH_API void parseInstructions(
9
+ const std::string& function_name,
10
+ c10::ivalue::TupleElements&& ins_list,
11
+ c10::ivalue::TupleElements& debug_handles_m_tuple,
12
+ mobile::Function* function);
13
+ TORCH_API void parseConstants(
14
+ const c10::ivalue::TupleElements& consts_list,
15
+ mobile::Function* function);
16
+ TORCH_API void parseTypes(
17
+ const c10::ivalue::TupleElements& types_list,
18
+ mobile::Function* function);
19
+ TORCH_API void parseRegisterSize(size_t rsize, mobile::Function* function);
20
+ TORCH_API void applyUpgrader(
21
+ mobile::Function* function,
22
+ uint64_t operator_version);
23
+ } // namespace mobile
24
+ } // namespace jit
25
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <functional>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ namespace mobile {
10
+
11
+ using Stack = std::vector<c10::IValue>;
12
+
13
+ void registerPrimOpsFunction(
14
+ const std::string& name,
15
+ const std::function<void(Stack&)>& fn);
16
+
17
+ bool hasPrimOpsFn(const std::string& name);
18
+
19
+ std::function<void(Stack&)>& getPrimOpsFn(const std::string& name);
20
+
21
+ class prim_op_fn_register {
22
+ public:
23
+ prim_op_fn_register(
24
+ const std::string& name,
25
+ const std::function<void(Stack&)>& fn) {
26
+ registerPrimOpsFunction(name, fn);
27
+ }
28
+ };
29
+
30
+ } // namespace mobile
31
+ } // namespace jit
32
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/mobile/prim_ops_registery.h>
3
+ #include <torch/csrc/jit/mobile/register_ops_common_utils.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ void tupleIndex(Stack& stack);
9
+
10
+ void raiseException(Stack& stack);
11
+
12
+ void is(Stack& stack);
13
+
14
+ void unInitialized(Stack& stack);
15
+
16
+ void isNot(Stack& stack);
17
+
18
+ void aten_format(Stack& stack);
19
+
20
+ void size(Stack& stack);
21
+
22
+ void sym_size(Stack& stack);
23
+
24
+ void sym_size_int(Stack& stack);
25
+
26
+ void sym_stride_int(Stack& stack);
27
+
28
+ void sym_numel(Stack& stack);
29
+
30
+ void sym_storage_offset(Stack& stack);
31
+
32
+ void sym_stride(Stack& stack);
33
+
34
+ void device(Stack& stack);
35
+
36
+ void device_with_index(Stack& stack);
37
+
38
+ void dtype(Stack& stack);
39
+
40
+ void layout(Stack& stack);
41
+
42
+ void toPrimDType(Stack& stack);
43
+
44
+ void dim(Stack& stack);
45
+
46
+ void _not(Stack& stack);
47
+
48
+ void boolTensor(Stack& stack);
49
+
50
+ void toList(Stack& stack);
51
+
52
+ void numToTensorScalar(Stack& stack);
53
+
54
+ void isCuda(Stack& stack);
55
+
56
+ void numToTensorBool(Stack& stack);
57
+
58
+ void dictIndex(Stack& stack);
59
+
60
+ void raiseExceptionWithMessage(Stack& stack);
61
+
62
+ } // namespace jit
63
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <string>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ namespace mobile {
9
+ class Module;
10
+ namespace quantization {
11
+ /*
12
+ * Device side PTQ API.
13
+ * Once the model has been prepared for quantization on server side, such model
14
+ * is sent to device. On device side the model is further trained. At the end of
15
+ * the training, before the model is readied for inference, we need to quantize
16
+ * the model.
17
+ * Usage of this API is as follows.
18
+ * PTQQuanizationHelper ptq_helper;
19
+ * ptq_helper.quantize_dynamic(m, "forward");
20
+ * Args:
21
+ * m: Captured by reference, an instance of mobile::Module. This module will be
22
+ * mutated in place to replace its <method_name> method with quantized
23
+ * equivalent. method:name: Name of the method to be quantized. AOT preparation
24
+ * for quantization must also have been done for this method. Returns: In place
25
+ * mutated `m` whose size should be smaller due to weight quantization and whose
26
+ * <method_name> method should use quantized ops
27
+ */
28
+ class TORCH_API PTQQuanizationHelper {
29
+ public:
30
+ PTQQuanizationHelper() = default;
31
+ void quantize_dynamic(
32
+ torch::jit::mobile::Module& m,
33
+ const std::string& method_name);
34
+ };
35
+ } // namespace quantization
36
+ } // namespace mobile
37
+ } // namespace jit
38
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/dynamic_type.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <unordered_set>
6
+
7
+ namespace c10 {
8
+
9
+ class TORCH_API TypeParser {
10
+ public:
11
+ explicit TypeParser(std::string pythonStr);
12
+ explicit TypeParser(std::vector<std::string>& pythonStrs);
13
+
14
+ TypePtr parse();
15
+ std::vector<TypePtr> parseList();
16
+ static const std::unordered_set<std::string>& getNonSimpleType();
17
+ static const std::unordered_set<std::string>& getCustomType();
18
+ std::unordered_set<std::string> getContainedTypes();
19
+
20
+ private:
21
+ TypePtr parseNamedTuple(const std::string& qualified_name);
22
+ TypePtr parseCustomType();
23
+ TypePtr parseTorchbindClassType();
24
+ TypePtr parseNonSimple(const std::string& token);
25
+
26
+ void expect(const char* s);
27
+ void expectChar(char c);
28
+ template <typename T>
29
+ TypePtr parseSingleElementType();
30
+
31
+ void lex();
32
+
33
+ std::string next();
34
+ c10::string_view nextView();
35
+ void advance();
36
+ C10_NODISCARD c10::string_view cur() const;
37
+
38
+ std::string pythonStr_;
39
+ size_t start_;
40
+ c10::string_view next_token_;
41
+
42
+ // Used for parsing string list
43
+ std::vector<std::string> pythonStrs_;
44
+ std::unordered_map<std::string, c10::TypePtr> str_type_ptr_map_;
45
+
46
+ // Store all contained types when parsing a string
47
+ std::unordered_set<std::string> contained_types_;
48
+ };
49
+
50
+ TORCH_API TypePtr parseType(const std::string& pythonStr);
51
+
52
+ TORCH_API std::vector<TypePtr> parseType(std::vector<std::string>& pythonStr);
53
+
54
+ } // namespace c10
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/utils/pybind.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initJITBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/symbol.h>
7
+ #include <c10/util/irange.h>
8
+ #include <torch/csrc/DynamicTypes.h>
9
+ #include <torch/csrc/THP.h>
10
+ #include <torch/csrc/autograd/variable.h>
11
+ #include <torch/csrc/jit/frontend/tracer.h>
12
+ #include <torch/csrc/jit/python/pybind_utils.h>
13
+ #include <torch/csrc/utils/pybind.h>
14
+
15
+ #include <pybind11/functional.h>
16
+ #include <pybind11/pybind11.h>
17
+ #include <pybind11/stl.h>
18
+
19
+ namespace py = pybind11;
20
+
21
namespace torch::jit {

// This is a variant of shared_ptr that "sees through" a wrapper.
// We use it to convert Value, Node, Block and node to "wrapped" Python
// values. When we destruct the C++ object, the wrapper's pointer will
// be set to 0 and any future dereferencing will throw. We need this
// because the Python objects may hang around after the C++ object
// has already been destroyed.
// This also needs the magic type_caster below, which is from the
// workaround offered in https://github.com/pybind/pybind11/issues/2751
template <typename T>
class unwrapping_shared_ptr {
  // Only IR graph objects participate in the Wrap<T> invalidation scheme.
  static_assert(
      std::is_same<T, torch::jit::Value>::value ||
          std::is_same<T, torch::jit::Node>::value ||
          std::is_same<T, torch::jit::Block>::value,
      "unwrapping type only defined for Graph object types");

 private:
  // Shared handle to the wrapper; wrapper->elem is nulled out when the
  // underlying C++ object dies.
  std::shared_ptr<torch::jit::Wrap<T>> impl;

 public:
  unwrapping_shared_ptr() : impl({}) {}
  explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) {
    // When the wrapper is invalidated, also purge pybind's instance cache
    // so stale Python objects are not handed out.
    impl->clear_cb = &clear_registered_instances;
  }
  // Returns the wrapped pointer; throws if the C++ object has been destroyed.
  T* get() const {
    if (!impl->elem) {
      throw std::logic_error("has been invalidated");
    }
    return impl->elem;
  }
  // we need to disable the overloaded & for PyBind11 < 2.3 due.
  // see https://github.com/pybind/pybind11/pull/1435
#if (PYBIND11_VERSION_MAJOR > 2) || \
    ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3))
  T** operator&() {
    if (!impl->elem) {
      throw std::logic_error("has been invalidated");
    }
    return &(impl->elem);
  }
#endif
};

} // namespace torch::jit
67
+
68
// Tell pybind11 that unwrapping_shared_ptr<T> is a holder type. The trailing
// `true` marks it constructible from a raw pointer.
PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true);

namespace pybind11::detail {

// Generates a type_caster specialization that loads Class only through its
// unwrapping_shared_ptr holder, so invalidated graph objects throw instead of
// dangling (see unwrapping_shared_ptr above and pybind/pybind11#2751).
// NOTE: no comments inside the macro body — they would break the
// line-continuation backslashes.
#define CREATE_UNWRAPPING_CASTER(Class)                                 \
  template <>                                                           \
  struct type_caster<Class> : public type_caster_base<Class> {          \
   public:                                                              \
    using type = Class;                                                 \
    using holder_type = torch::jit::unwrapping_shared_ptr<Class>;       \
                                                                        \
    bool load(handle src, bool convert) {                               \
      return load_impl<type_caster<Class>>(src, convert);               \
    }                                                                   \
                                                                        \
    explicit operator type*() {                                         \
      return static_cast<type*>(value);                                 \
    }                                                                   \
    explicit operator type&() {                                         \
      return *static_cast<type*>(value);                                \
    }                                                                   \
                                                                        \
   protected:                                                           \
    friend class type_caster_generic;                                   \
                                                                        \
    bool load_value(value_and_holder&& v_h) {                           \
      if (v_h.holder_constructed()) {                                   \
        value = v_h.template holder<holder_type>().get();               \
        return true;                                                    \
      } else {                                                          \
        throw cast_error(                                               \
            "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \
      }                                                                 \
    }                                                                   \
  }

CREATE_UNWRAPPING_CASTER(torch::jit::Node);
CREATE_UNWRAPPING_CASTER(torch::jit::Value);
CREATE_UNWRAPPING_CASTER(torch::jit::Block);

#undef CREATE_UNWRAPPING_CASTER
109
+
110
// Converts between arbitrary Python objects and IValue by inferring the
// IValue's type from the Python value (toTypeInferredIValue / toPyObject).
template <>
struct type_caster<torch::jit::IValue> {
 public:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue"));

  // Python -> C++: returns false (not a hard error) when type inference or
  // conversion fails, so pybind can try other overloads.
  bool load(handle src, bool) {
    try {
      value = torch::jit::toTypeInferredIValue(src);
      return true;
    } catch (std::exception& e) {
      return false;
    }
  }

  // C++ -> Python.
  static handle cast(
      torch::jit::IValue src,
      return_value_policy /* policy */,
      handle /* parent */) {
    return torch::jit::toPyObject(std::move(src)).release();
  }
};
132
+
133
// Converts torch::jit::Symbol to/from its qualified-string form
// (e.g. "aten::add").
template <>
struct type_caster<torch::jit::Symbol> {
 public:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol"));

  bool load(handle src, bool) {
    // TODO: Is there a way to py::cast that doesn't raise an exception on
    // failure? Can we catch pybind11::cast_error here instead?
    std::string src_str;
    try {
      src_str = py::cast<std::string>(src);
    } catch (std::exception& e) {
      return false;
    }
    value = torch::jit::Symbol::fromQualString(src_str);
    return true;
  }

  static handle cast(
      torch::jit::Symbol src,
      return_value_policy /* policy */,
      handle /* parent */) {
    // Always hand Python its own copy of the qualified name string.
    return py::cast(std::string(src.toQualString()), return_value_policy::copy)
        .release();
  }
};
160
+
161
// One-way caster: AttributeKind is only ever exposed to Python as its string
// name (load() always fails, so Python cannot pass one in).
template <>
struct type_caster<torch::jit::AttributeKind> {
 public:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind"));

  bool load(handle src, bool) {
    return false;
  }

  static handle cast(
      torch::jit::AttributeKind src,
      return_value_policy /* policy */,
      handle /* parent */) {
    return py::cast(
               std::string(torch::jit::toString(src)),
               return_value_policy::copy)
        .release();
  }
};
181
+
182
+ // See https://github.com/pybind/pybind11/issues/637
183
+ using ListCasterBase = pybind11::detail::
184
+ list_caster<std::vector<torch::jit::Node*>, torch::jit::Node*>;
185
+ template <>
186
+ struct type_caster<std::vector<torch::jit::Node*>> : ListCasterBase {
187
+ static handle cast(
188
+ const std::vector<torch::jit::Node*>& src,
189
+ return_value_policy,
190
+ handle parent) {
191
+ return ListCasterBase::cast(src, return_value_policy::reference, parent);
192
+ }
193
+ static handle cast(
194
+ const std::vector<torch::jit::Node*>* src,
195
+ return_value_policy pol,
196
+ handle parent) {
197
+ return cast(*src, pol, parent);
198
+ }
199
+ };
200
+
201
+ } // namespace pybind11::detail
202
+
203
+ namespace torch::jit {
204
+
205
+ static inline py::tuple tuple_tail(const py::tuple& tup) {
206
+ py::tuple r(tup.size() - 1);
207
+ for (const auto i : c10::irange(1, tup.size())) {
208
+ r[i - 1] = tup[i];
209
+ }
210
+ return r;
211
+ }
212
+
213
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h ADDED
@@ -0,0 +1,1115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <ATen/core/qualified_name.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <pybind11/complex.h>
8
+ #include <pybind11/pybind11.h>
9
+ #include <pybind11/pytypes.h>
10
+ #include <torch/csrc/Device.h>
11
+ #include <torch/csrc/Dtype.h>
12
+ #include <torch/csrc/Export.h>
13
+ #include <torch/csrc/Layout.h>
14
+ #include <torch/csrc/QScheme.h>
15
+ #include <torch/csrc/Stream.h>
16
+ #include <torch/csrc/jit/api/module.h>
17
+ #include <torch/csrc/jit/frontend/schema_matching.h>
18
+ #include <torch/csrc/jit/frontend/tracer.h>
19
+ #include <torch/csrc/jit/python/module_python.h>
20
+ #include <torch/csrc/jit/python/python_custom_class.h>
21
+ #include <torch/csrc/jit/python/python_tracer.h>
22
+ #include <torch/csrc/jit/resource_guard.h>
23
+ #include <torch/csrc/jit/runtime/operator.h>
24
+ #include <torch/csrc/utils/pybind.h>
25
+ #include <torch/csrc/utils/python_arg_parser.h>
26
+ #include <torch/csrc/utils/six.h>
27
+ #ifdef USE_DISTRIBUTED
28
+ #include <torch/csrc/distributed/rpc/py_rref.h>
29
+ #include <torch/csrc/distributed/rpc/rref_impl.h>
30
+ #endif
31
+
32
+ #include <ATen/core/function_schema.h>
33
+ #include <c10/core/Stream.h>
34
+ #ifdef USE_C10D_NCCL
35
+ #include <c10/cuda/CUDACachingAllocator.h>
36
+ #include <c10/cuda/CUDAStream.h>
37
+ #endif
38
+ #include <c10/util/Exception.h>
39
+ #include <c10/util/Optional.h>
40
+ #include <c10/util/irange.h>
41
+
42
+ #include <algorithm>
43
+ #include <cstddef>
44
+ #include <string>
45
+ #include <utility>
46
+ #include <vector>
47
+
48
+ // The visibility attribute is to avoid a warning about storing a field in the
49
+ // struct that has a different visibility (from pybind) than the struct.
50
+ #ifdef _WIN32
51
+ #define VISIBILITY_HIDDEN
52
+ #else
53
+ #define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
54
+ #endif
55
+
56
namespace torch::jit {

// Callback mapping a name string to the Python object it denotes —
// presumably used by the script frontend to resolve names at compile time;
// confirm against callers.
using ResolutionCallback = std::function<py::object(std::string)>;

// Drops pybind's cached Python instance(s) for the C++ object at `ptr`.
// Installed as the clear_cb of unwrapping_shared_ptr (see pybind.h) so stale
// Python wrappers are purged when the C++ object dies.
void clear_registered_instances(void* ptr);

// Convert a Python object to an IValue of the given static `type`.
// NOTE(review): `N` looks like an expected fixed length for list-like types —
// confirm in the implementation.
TORCH_PYTHON_API IValue toIValue(
    py::handle obj,
    const TypePtr& type,
    c10::optional<int32_t> N = c10::nullopt);

// Convert an IValue back to a Python object.
TORCH_PYTHON_API py::object toPyObject(IValue ivalue);
68
+
69
// Hack to overload the behavior of toIValue to accept Python
// numbers in places where a Tensor is expected
// See also torch::should_allow_numbers_as_tensors
//
// RAII guard: constructor records the previous setting in old_ and enables
// (or disables) the behavior; destructor restores the saved value.
class ToIValueAllowNumbersAsTensors {
  bool old_; // previous flag value, restored on destruction

 public:
  ToIValueAllowNumbersAsTensors(bool enable);
  ~ToIValueAllowNumbersAsTensors();
};
79
+
80
// Wrap Python function to guard deref
// NB: Need VISIBILITY_HIDDEN for silencing compiler error,
// 'torch::jit::PythonFunctionGuard' declared with greater visibility than the
// type of its field 'torch::jit::PythonFunctionGuard::func_'
//
// Holds a py::function and guarantees its refcount is dropped while the GIL
// is held, even if this guard is destroyed from a thread that does not own
// the GIL (e.g. a Future callback thread).
struct VISIBILITY_HIDDEN PythonFunctionGuard {
  explicit PythonFunctionGuard(py::function func) : func_(std::move(func)) {}

  ~PythonFunctionGuard() {
    // Acquire the GIL before touching the Python refcount.
    pybind11::gil_scoped_acquire ag;
    func_.dec_ref();
    // explicitly setting PyObject* to nullptr to prevent py::object's dtor to
    // decref on the PyObject again.
    // See Note [Destructing py::object] in python_ivalue.h
    func_.ptr() = nullptr;
  }

  py::function func_;
};
98
+
99
// The PythonFutureWrapper for ivalue::Future
//
// NB: VISIBILITY_HIDDEN is for silencing compiling error,
// "error: 'torch::jit::PythonFutureWrapper' declared with greater visibility
// than the type of its field 'torch::jit::PythonFutureWrapper::unwrap_func'
// [-Werror=attributes]"
//
// NB: inherit from enable_shared_from_this because then(py::function) needs to
// get a shared_ptr from this pointer.
struct VISIBILITY_HIDDEN PythonFutureWrapper
    : std::enable_shared_from_this<PythonFutureWrapper> {
  using UnwrapFunc = std::function<void(py::object)>;

  explicit PythonFutureWrapper(
      c10::intrusive_ptr<c10::ivalue::Future> fut,
      c10::optional<UnwrapFunc> unwrap_func = c10::nullopt)
      : fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {}

  // Non-copyable: callbacks capture shared ownership of the wrapped Future.
  explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete;
  PythonFutureWrapper& operator=(const PythonFutureWrapper&) = delete;

  // True once the underlying Future has a value (or an error).
  bool done() {
    return fut->completed();
  }

  // Returns the Future's value as a Python object; the Future must already be
  // completed.
  py::object value() {
    // acquiring GIL as toPyObject creates new py::object
    // without grabbing the GIL.
    py::gil_scoped_acquire acquire;
    py::object py_obj = toPyObject(fut->value());
    // unwrap_func is a general compositional function that takes in a
    // py::object and executes some python function. It is currently mostly used
    // to throw python exceptions.
    if (unwrap_func) {
      (*unwrap_func)(py_obj);
    }
    return py_obj;
  }

  // Blocks until the Future completes, then returns its value. When tracing,
  // also records an aten::wait node so the graph captures the dependency.
  py::object wait() {
    fut->wait();
    if (jit::tracer::isTracing()) {
      auto graph = jit::tracer::getTracingState()->graph;

      Value* fut_val = jit::tracer::getValueTrace(fut);
      auto output = graph->insert(aten::wait, {fut_val});
      jit::tracer::setValueTrace(fut->value(), output);
    }
    return value();
  }

  // The py::function cb arg must take a std::shared_ptr<PythonFutureWrapper>
  // (i.e., torch._C.Future) as the only argument. If the type mismatches, an
  // error will be thrown when waiting for the value of this returned Future.
  std::shared_ptr<PythonFutureWrapper> then(py::function cb) {
    // We need this an additional layer of wrapper here to guard the
    // destruction of the py::function object. Because, the
    // Future owns a reference to the py::function in its callback
    // vector, but Future does not acquire GIL on destruction.
    auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));

    return std::make_shared<jit::PythonFutureWrapper>(fut->then(
        // Capture a copy of the ivalue::Future instead of the `this` pointer
        // because the PythonFutureWrapper object could have been deleted
        // when the callbacks are fired. For example, RPC only captures the
        // ivalue::Future instead of PythonFutureWrapper in JitFuture's
        // callback functions. Hence, if user code does not hold a reference to
        // this PythonFutureWrapper object, there is no guarantee that the
        // PythonFutureWrapper is still valid when running the callback.
        [pyFut(this->getPtr()),
         pf(std::move(pf))](c10::ivalue::Future& /* unused */) -> IValue {
          try {
            pybind11::gil_scoped_acquire ag;
            return toIValue(pf->func_(pyFut), PyObjectType::get());
          } catch (py::error_already_set& e) {
            auto err = std::runtime_error(c10::str(
                "Got the following error when running the callback: ",
                e.what()));
            {
              pybind11::gil_scoped_acquire ag;
              // Release ownership on py::objects and also restore Python
              // Error Indicator.
              e.restore();
              // Clear the Python Error Indicator as we have recorded the
              // exception in the response message.
              PyErr_Clear();
            }

            throw err;
          }
        },
        PyObjectType::get()));
  }

  // Like then(), but the callback's return value is discarded and exceptions
  // it raises are logged rather than propagated.
  void add_done_callback(py::function cb) {
    auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
    // NOLINTNEXTLINE(modernize-avoid-bind)
    fut->addCallback(std::bind(
        [pyFut(this->getPtr())](std::shared_ptr<PythonFunctionGuard> pf) {
          try {
            pybind11::gil_scoped_acquire ag;
            pf->func_(pyFut);
          } catch (py::error_already_set& e) {
            {
              pybind11::gil_scoped_acquire ag;
              // Release ownership on py::objects and also restore Python
              // Error Indicator.
              e.restore();
              // Clear the Python Error Indicator as we have recorded the
              // exception in the response message.
              PyErr_Clear();
            }
            // Log and ignore exceptions raised through the callback
            LOG(ERROR) << "Got the following error when running the callback: "
                       << e.what();

          } catch (const std::exception& e) {
            // Log and ignore exceptions raised through the callback
            LOG(ERROR) << "Got the following error when running the callback: "
                       << e.what();
          }
        },
        std::move(pf)));
  }

  // Completes the Future with the given Python value. Caller must hold the
  // GIL; it is released before marking the Future completed.
  void markCompleted(const py::object& pyValue) {
    DCHECK(PyGILState_Check());
    IValue value = toIValue(pyValue, PyObjectType::get());

    py::gil_scoped_release release;
    fut->markCompleted(std::move(value));
  }

  c10::intrusive_ptr<c10::ivalue::Future> fut;
  // unwrap_func works like a callback for the value returned by
  // PythonFutureWrapper::wait().
  c10::optional<UnwrapFunc> unwrap_func;

 private:
  std::shared_ptr<PythonFutureWrapper> getPtr() {
    return shared_from_this();
  }
};
242
+
243
// The PythonAwaitWrapper for ivalue::Await
//
// Expresses delayed function execution with Lazy semantic.
// i.e. Await[W] in eager mode can be used as W.
// When the attribute of W type is requested, Await[W] will return the
// attribute of W, transparently calling wait() beforehand.
// No Lazy semantic for script, explicit wait(Await[W]) -> W must be called to
// convert to type W.
//
// The Await object takes shared ownership of specified function and the
// arguments. After first call for wait() it owns the result. Deliberately no
// type inference for eager mode.
struct VISIBILITY_HIDDEN PythonAwaitWrapper
    : std::enable_shared_from_this<PythonAwaitWrapper> {
  explicit PythonAwaitWrapper(c10::intrusive_ptr<c10::ivalue::Await> aw)
      : aw_(std::move(aw)) {}
  // "nowait" form: wraps an already-available value; no function to run.
  explicit PythonAwaitWrapper(py::handle input) {
    args_ = py::tuple(1u);
    args_[0] = input;
    auto type = PyObjectType::get();
    aw_ = c10::make_intrusive<c10::ivalue::Await>(type);
    aw_->markCompleted(toIValue(input, type));
  }

  // Lazy form: runs pf(*args) under the GIL on first wait().
  explicit PythonAwaitWrapper(py::function pf, py::tuple args) {
    pyfg_ = std::make_shared<torch::jit::PythonFunctionGuard>(std::move(pf));
    args_ = std::move(args);
    // NOTE: the lambda captures args_ by reference, so it must not outlive
    // this wrapper; the Await shares ownership of the function via fg.
    std::function<IValue()> f = [fg(pyfg_), &args(args_)]() {
      pybind11::gil_scoped_acquire ag;
      return toIValue(fg->func_(*args), PyObjectType::get());
    };
    aw_ = c10::make_intrusive<c10::ivalue::Await>(
        PyObjectType::get(), std::move(f));
  }

  explicit PythonAwaitWrapper(const PythonAwaitWrapper&) = delete;
  PythonAwaitWrapper& operator=(const PythonAwaitWrapper&) = delete;

  // Forces evaluation (if needed) and returns the result as a Python object.
  py::object wait() {
    py::gil_scoped_acquire acquire;
    return toPyObject(aw_->wait());
  }

  // Nowait semantic means trivial case when Await is constructed from the
  // result
  bool is_nowait() {
    return pyfg_ == nullptr;
  }

  const py::function fn() {
    TORCH_CHECK(
        pyfg_, "Await constructed as awaitable_nowait does not have fn");
    return pyfg_->func_;
  }

  const py::tuple args() {
    return args_;
  }

  TypePtr type() {
    return aw_->type();
  }

  c10::intrusive_ptr<c10::ivalue::Await> aw_;
  std::shared_ptr<torch::jit::PythonFunctionGuard> pyfg_;
  py::tuple args_;

 private:
  std::shared_ptr<PythonAwaitWrapper> getPtr() {
    return shared_from_this();
  }
};
315
+
316
+ // error reporting: when reporting user-caused errors, these functions should
317
+ // not use AT_ERROR macros, since these macros add stack trace information
318
+ // that is confusing to display to the end user since it always reports
319
+ // locations in libtorch code rather than user code.
320
+
321
+ inline std::shared_ptr<CompilationUnit> get_python_cu() {
322
+ return py::module::import("torch.jit._state")
323
+ .attr("_python_cu")
324
+ .cast<std::shared_ptr<CompilationUnit>>();
325
+ }
326
+
327
// A (value, type) pair with named accessors; inherits std::pair's
// constructors.
struct TypedIValue : public std::pair<IValue, TypePtr> {
  using pair::pair;

  IValue& ivalue() {
    return this->first;
  }
  TypePtr& type() {
    return this->second;
  }
};
337
+
338
+ inline TypedIValue toDictKeyIValue(py::handle key) {
339
+ if (py::isinstance<py::str>(key)) {
340
+ return TypedIValue(
341
+ ConstantString::create(py::cast<std::string>(key)), StringType::get());
342
+ } else if (py::isinstance<py::int_>(key)) {
343
+ return TypedIValue(py::cast<int64_t>(key), IntType::get());
344
+ } else if (py::isinstance<py::float_>(key)) {
345
+ return TypedIValue(py::cast<double>(key), FloatType::get());
346
+ } else {
347
+ AT_ERROR("Dictionary inputs may only have string, int, or float keys");
348
+ }
349
+ }
350
+
351
+ inline c10::optional<TypePtr> unifyOrInitializeType(
352
+ const TypePtr& accum,
353
+ const TypePtr& unify) {
354
+ if (!accum) {
355
+ return unify;
356
+ }
357
+ return unifyTypes(accum, unify);
358
+ }
359
+
360
+ using InferredType = c10::InferredType;
361
+
362
+ InferredType tryToInferContainerType(py::handle input);
363
+
364
+ // Try to infer the type of a Python object
365
+ // The type cannot be inferred if:
366
+ // input is an empty container (list, dict)
367
+ // input is an list with element types that cannot be unified
368
+ // input is an dict with key or value types that cannot be unified
369
+ inline InferredType tryToInferType(py::handle input) {
370
+ // Try tensor types
371
+ if (THPVariable_Check(input.ptr())) {
372
+ return InferredType(TensorType::get());
373
+ }
374
+
375
+ if (input.is_none()) {
376
+ return InferredType(NoneType::get());
377
+ }
378
+
379
+ if (py::isinstance<StrongFunctionPtr>(input)) {
380
+ auto fn = py::cast<StrongFunctionPtr>(input).function_;
381
+ return InferredType(FunctionType::create(fn));
382
+ }
383
+
384
+ // Try basic types first
385
+ if (py::isinstance<py::bool_>(input)) {
386
+ return InferredType(BoolType::get());
387
+ // NOLINTNEXTLINE(bugprone-branch-clone)
388
+ } else if (py::isinstance<py::int_>(input)) {
389
+ return InferredType(IntType::get());
390
+ } else if (py::isinstance<py::float_>(input)) {
391
+ return InferredType(FloatType::get());
392
+ } else if (PyComplex_CheckExact(input.ptr())) {
393
+ return InferredType(ComplexType::get());
394
+ } else if (py::isinstance<py::str>(input)) {
395
+ return InferredType(StringType::get());
396
+ } else if (THPLayout_Check(input.ptr())) {
397
+ return InferredType(IntType::get());
398
+ } else if (THPDevice_Check(input.ptr())) {
399
+ return InferredType(DeviceObjType::get());
400
+ } else if (THPGenerator_Check(input.ptr())) {
401
+ return InferredType(GeneratorType::get());
402
+ } else if (THPStream_Check(input.ptr())) {
403
+ return InferredType(StreamObjType::get());
404
+ } else if (THPDtype_Check(input.ptr())) {
405
+ return InferredType(IntType::get());
406
+ } else if (THPQScheme_Check(input.ptr())) {
407
+ return InferredType(IntType::get());
408
+ } else if (THPLayout_Check(input.ptr())) {
409
+ return InferredType(IntType::get());
410
+ }
411
+
412
+ auto enum_type = py::module::import("enum").attr("Enum");
413
+ py::bool_ isEnumValue = py::isinstance(input, enum_type);
414
+ if (py::cast<bool>(isEnumValue)) {
415
+ auto enum_class = input.attr("__class__");
416
+ auto enum_type = py::cast<TypePtr>(
417
+ py::module::import("torch.jit.annotations")
418
+ .attr("try_ann_to_type")(enum_class, SourceRange()));
419
+ return InferredType(std::move(enum_type));
420
+ }
421
+
422
+ py::bool_ isClass =
423
+ py::module::import("inspect").attr("isclass")(input.get_type());
424
+ if (py::cast<bool>(isClass)) {
425
+ // Assume that the class is compiled already or will compile. Invalidate
426
+ // this later if needed.
427
+ bool class_compiled = true;
428
+
429
+ // Check if the type is already compiled.
430
+ py::object existing_ty = py::module::import("torch.jit._state")
431
+ .attr("_get_script_class")(input.get_type());
432
+
433
+ if (existing_ty.is_none()) {
434
+ // If not, try to compile it.
435
+ py::bool_ can_compile = py::module::import("torch._jit_internal")
436
+ .attr("can_compile_class")(input.get_type());
437
+
438
+ if (py::cast<bool>(can_compile)) {
439
+ // Try to compile the class. This is wrapped in a try-catch because
440
+ // compilation of class types can raise an Exception and in that case,
441
+ // we want to defer to other attempts at type inference below rather
442
+ // than fail compilation altogether.
443
+ try {
444
+ py::module::import("torch.jit._script")
445
+ .attr("_recursive_compile_class")(
446
+ input.get_type(), SourceRange());
447
+ } catch (...) {
448
+ // Invalidate the assumption that the class compiled so that we don't
449
+ // look up and return its JIT type as the type for the input.
450
+ class_compiled = false;
451
+ }
452
+ }
453
+ }
454
+
455
+ // If the class compiled successfully, look up the existing JIT type by
456
+ // qualified name and return it.
457
+ if (class_compiled) {
458
+ auto script_class = py::module::import("torch.jit._state")
459
+ .attr("_get_script_class")(input.get_type());
460
+
461
+ if (!script_class.is_none()) {
462
+ auto class_type = py::cast<ClassTypePtr>(script_class);
463
+
464
+ if (class_type && !class_type->is_module()) {
465
+ return InferredType(std::move(class_type));
466
+ }
467
+ }
468
+ }
469
+ }
470
+
471
+ if (py::isinstance<Object>(input)) {
472
+ auto object = py::cast<Object>(input);
473
+ return InferredType(object.type());
474
+ #ifdef USE_RPC
475
+ } else if (py::isinstance<torch::distributed::rpc::PyRRef>(input)) {
476
+ auto rref_ivalue = input.cast<torch::distributed::rpc::PyRRef>().toIValue();
477
+ return InferredType(rref_ivalue.type());
478
+ #endif
479
+ }
480
+
481
+ auto await_type = py::module::import("torch._awaits").attr("_Await");
482
+ py::bool_ is_await = py::isinstance(input, await_type);
483
+ if (py::cast<bool>(is_await)) {
484
+ auto awptr = input.cast<std::shared_ptr<PythonAwaitWrapper>>();
485
+ return InferredType(AwaitType::create(awptr->aw_->elementType()));
486
+ }
487
+
488
+ if (as_module(py::cast<py::object>(input))) {
489
+ return InferredType("Cannot infer type of ScriptModule");
490
+ }
491
+
492
+ auto module_type = py::module::import("torch.nn").attr("Module");
493
+ py::bool_ is_module = py::isinstance(input, module_type);
494
+ if (py::cast<bool>(is_module)) {
495
+ return InferredType("Cannot infer concrete type of torch.nn.Module");
496
+ }
497
+
498
+ // Try container types
499
+ return tryToInferContainerType(input);
500
+ }
501
+
502
// Infers a JIT type for Python containers (tuple, dict, list). Tuples keep
// per-element types; dicts and lists must be non-empty and have unifiable
// key/value/element types, otherwise an error InferredType is returned.
inline InferredType tryToInferContainerType(py::handle input) {
  if (six::isTuple(input)) {
    py::tuple tuple = py::cast<py::tuple>(input);
    std::vector<TypePtr> element_types;
    element_types.reserve(tuple.size());

    for (py::handle elem : tuple) {
      auto type_match = tryToInferType(elem);
      if (type_match.success()) {
        element_types.push_back(type_match.type());
      } else {
        // Forward error message along
        return type_match.reason();
      }
    }
    return InferredType(TupleType::create(std::move(element_types)));
  } else if (PyDict_Check(input.ptr())) {
    // Check to make sure we can generate useful input/output types
    auto dict = py::cast<py::dict>(input);
    size_t len = py::len(dict);
    if (!len) {
      return InferredType("Dictionary inputs must have entries");
    }

    // Running unification of all key types / all value types.
    TypePtr key_type = nullptr;
    TypePtr value_type = nullptr;

    for (auto entry : dict) {
      // Try to infer the key type and unify it with the existing one
      auto entry_key_type_match = tryToInferType(entry.first);
      if (!entry_key_type_match.success()) {
        return entry_key_type_match.reason();
      }
      auto unified_key =
          unifyOrInitializeType(key_type, entry_key_type_match.type());
      if (!unified_key) {
        return InferredType(c10::str(
            "Dictionary inputs to traced functions must have consistent type. Found ",
            key_type->repr_str(),
            " and ",
            (entry_key_type_match.type())->repr_str()));
      }

      // Try to infer the value type and unify it with the existing one
      auto entry_value_type_match = tryToInferType(entry.second);
      if (!entry_value_type_match.success()) {
        return entry_value_type_match.reason();
      }
      auto unified_value =
          unifyOrInitializeType(value_type, entry_value_type_match.type());
      if (!unified_value) {
        return InferredType(c10::str(
            "Dictionary inputs to traced functions must have consistent type. Found ",
            value_type->repr_str(),
            " and ",
            (entry_value_type_match.type())->repr_str()));
      }

      key_type = *unified_key;
      value_type = *unified_value;
    }
    return InferredType(
        DictType::create(std::move(key_type), std::move(value_type)));
  } else if (PyList_Check(input.ptr())) {
    auto list = py::cast<py::list>(input);
    size_t len = py::len(list);
    if (!len) {
      return InferredType("List trace inputs must have elements");
    }

    // Running unification of all element types.
    TypePtr element_type = nullptr;
    for (auto elem : list) {
      auto element_type_match = tryToInferType(elem);
      if (!element_type_match.success()) {
        return InferredType(c10::str(
            "Could not infer type of list element: ",
            element_type_match.reason()));
      }
      auto unified_type =
          unifyOrInitializeType(element_type, element_type_match.type());
      if (!unified_type) {
        return InferredType(c10::str(
            "List inputs to traced functions must have consistent element type. Found ",
            element_type->repr_str(),
            " and ",
            (element_type_match.type())->repr_str()));
      }
      element_type = *unified_type;
    }
    return InferredType(ListType::create(element_type));
  } else {
    // TODO: this message is not correct anymore, since this InferredType is
    // used from a bunch of circumstances unrelated to tracing. We can re-use
    // this instead of the attribute_failure stuff in concreteType
    return InferredType(c10::str(
        "Only tensors and (possibly nested) tuples of tensors, lists, or dicts",
        "are supported ",
        "as inputs or outputs of traced functions",
        ", but instead got value of type ",
        py::str(input.get_type().attr("__name__")),
        "."));
  }
}
605
+
606
+ inline bool isTraceableType(const TypePtr& type) {
607
+ if (type->isSubtypeOf(*TensorType::get())) {
608
+ return true;
609
+ }
610
+
611
+ if (auto list_type = type->cast<ListType>()) {
612
+ return isTraceableType(list_type->getElementType());
613
+ }
614
+
615
+ if (auto tuple_type = type->cast<TupleType>()) {
616
+ return std::all_of(
617
+ tuple_type->elements().begin(),
618
+ tuple_type->elements().end(),
619
+ [](const TypePtr& element_type) {
620
+ return isTraceableType(element_type);
621
+ });
622
+ }
623
+
624
+ if (auto dict_type = type->cast<DictType>()) {
625
+ return isTraceableType(dict_type->getValueType());
626
+ }
627
+
628
+ return false;
629
+ }
630
+
631
+ inline IValue toTypeInferredIValue(py::handle input) {
632
+ auto match = tryToInferType(input);
633
+ if (!match.success()) {
634
+ auto object = py::cast<py::object>(input);
635
+ if (auto mod = as_module(object)) {
636
+ // if obj is already a ScriptModule, just return its ivalue
637
+ auto ptr = mod.value()._ivalue();
638
+ // explict copy semantics for strong ownership of the resource.
639
+ return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
640
+ ptr.release());
641
+ }
642
+
643
+ // Check if the obj is a ScriptObject.
644
+ if (auto script_obj = as_object(object)) {
645
+ auto ptr = script_obj.value()._ivalue();
646
+ return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
647
+ ptr.release());
648
+ }
649
+ AT_ERROR(
650
+ "Tracer cannot infer type of ", py::str(input), "\n:", match.reason());
651
+ }
652
+ return toIValue(input, match.type());
653
+ }
654
+
655
+ inline Stack toTraceableStack(const py::tuple& inputs) {
656
+ auto info = toTypeInferredIValue(inputs);
657
+ TORCH_CHECK(
658
+ isTraceableType(info.type()),
659
+ "Type '",
660
+ info.type()->repr_str(),
661
+ "' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and"
662
+ " Tuples of Tensors can be traced");
663
+ return info.toTupleRef().elements().vec();
664
+ }
665
+
666
+ // Serialize the python dictionary into a traceable stack.
667
+ inline Stack toTraceableStack(const py::dict& inputs) {
668
+ Stack res;
669
+ for (auto it = inputs.begin(); it != inputs.end(); it++) {
670
+ if (THPVariable_Check(it->second.ptr())) {
671
+ res.push_back(toIValue(it->second, tryToInferType(it->second).type()));
672
+ }
673
+ }
674
+ return res;
675
+ }
676
+
677
+ inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) {
678
+ auto elems = c10::impl::GenericList(elem_type);
679
+ for (auto elem : obj) {
680
+ elems.push_back(toIValue(elem, elem_type));
681
+ }
682
+ return IValue(elems);
683
+ }
684
+
685
+ inline IValue createGenericDict(
686
+ const py::dict& obj,
687
+ const TypePtr& key_type,
688
+ const TypePtr& value_type) {
689
+ c10::impl::GenericDict elems(key_type, value_type);
690
+ elems.reserve(py::len(obj));
691
+ for (auto& entry : obj) {
692
+ elems.insert(
693
+ toIValue(entry.first, key_type), toIValue(entry.second, value_type));
694
+ }
695
+ return IValue(elems);
696
+ }
697
+
698
+ template <class T>
699
+ inline void guardAgainstNamedTensor(const T& var) {
700
+ TORCH_CHECK(
701
+ !var.has_names(),
702
+ "NYI: Named tensors are currently unsupported in TorchScript. As a "
703
+ "workaround please drop names via `tensor = tensor.rename(None)`.");
704
+ }
705
+
706
+ // Extract custom class registered with torchbind
707
+ template <typename T>
708
+ c10::intrusive_ptr<T> toCustomClass(py::handle obj) {
709
+ static_assert(
710
+ std::is_base_of<CustomClassHolder, T>::value, "T is not a CustomClass");
711
+ const auto& type = c10::getCustomClassType<c10::intrusive_ptr<T>>();
712
+ c10::IValue ivalue = toIValue(obj, type);
713
+ return std::move(ivalue).toCustomClass<T>();
714
+ }
715
+
716
+ // Small wrapper around getting the type name string from Python to make
717
+ // types easier to interpret, e.g. give the structural type for a NamedTuple
718
+ inline std::string friendlyTypeName(py::handle obj) {
719
+ if (py::isinstance<py::tuple>(obj) && py::hasattr(obj, "_fields")) {
720
+ auto field_names =
721
+ py::cast<std::vector<std::string>>(py::getattr(obj, "_fields"));
722
+ std::stringstream ss;
723
+ ss << py::str(obj.get_type().attr("__name__"));
724
+ ss << " (aka NamedTuple(";
725
+ bool first = true;
726
+ for (auto& field_name : field_names) {
727
+ if (!first) {
728
+ ss << ", ";
729
+ }
730
+ ss << field_name;
731
+ first = false;
732
+ }
733
+ ss << "))";
734
+ return ss.str();
735
+ } else {
736
+ return py::str(obj.get_type().attr("__name__"));
737
+ }
738
+ }
739
+
740
+ // Thrown when trying to create a schema for a list of python
741
+ // arguments that cannot be converted.
742
+ // Can be caught by the caller to attempt to use other schema
743
+ // when there is an overloaded operator.
744
+ struct schema_match_error : public std::runtime_error {
745
+ using std::runtime_error::runtime_error;
746
+ };
747
+
748
+ inline IValue argumentToIValue(
749
+ const FunctionSchema& schema,
750
+ size_t argumentPosition,
751
+ py::handle object) {
752
+ const auto& argument = schema.arguments().at(argumentPosition);
753
+ try {
754
+ return toIValue(object, argument.real_type(), argument.N());
755
+ } catch (const py::cast_error& error) {
756
+ throw schema_match_error(c10::str(
757
+ schema.formatTypeMismatchMsg(
758
+ argument,
759
+ friendlyTypeName(object),
760
+ argumentPosition,
761
+ py::repr(object)),
762
+ "\nCast error details: ",
763
+ error.what()));
764
+ } catch (const py::error_already_set& error) {
765
+ throw schema_match_error(c10::str(
766
+ schema.formatTypeMismatchMsg(
767
+ argument,
768
+ friendlyTypeName(object),
769
+ argumentPosition,
770
+ py::repr(object)),
771
+ "\n Python error details: ",
772
+ error.what()));
773
+ }
774
+ }
775
+
776
+ inline IValue returnToIValue(const TypePtr& type, py::handle object) {
777
+ try {
778
+ return toIValue(object, type);
779
+ } catch (const py::cast_error& error) {
780
+ throw std::runtime_error(c10::str(
781
+ " expected value of type ",
782
+ type->str(),
783
+ " for return value but instead got value of type ",
784
+ py::str(object.get_type().attr("__name__")),
785
+ ".",
786
+ "\nValue: ",
787
+ py::repr(object),
788
+ "\nCast error details: ",
789
+ error.what()));
790
+ }
791
+ }
792
+
793
+ inline py::object getScriptedClassOrError(const c10::NamedTypePtr& classType) {
794
+ auto py_class =
795
+ py::module::import("torch.jit._state")
796
+ .attr("_get_python_class")(classType->name()->qualifiedName());
797
+ if (py_class.is_none()) {
798
+ std::stringstream err;
799
+ err << "Unknown reference to ScriptClass ";
800
+ err << classType->name()->qualifiedName();
801
+ err << ". (Did you forget to import it?)";
802
+ throw std::runtime_error(err.str());
803
+ }
804
+ return py_class;
805
+ }
806
+
807
+ struct VISIBILITY_HIDDEN tuple_slice {
808
+ /*implicit*/ tuple_slice(py::tuple tup_)
809
+ : tup(std::move(tup_)), b(0), e(tup.size()) {}
810
+ tuple_slice(py::tuple tup_, int64_t b_)
811
+ : tup(std::move(tup_)), b(b_), e(tup.size()) {}
812
+ tuple_slice(py::tuple tup_, int64_t b_, int64_t e_)
813
+ : tup(std::move(tup_)), b(b_), e(e_) {}
814
+ py::detail::tuple_iterator begin() const {
815
+ return {tup, static_cast<pybind11::ssize_t>(b)};
816
+ }
817
+ py::detail::tuple_iterator end() const {
818
+ return {tup, static_cast<pybind11::ssize_t>(e)};
819
+ }
820
+ size_t size() const {
821
+ return e - b;
822
+ }
823
+ py::detail::tuple_accessor operator[](size_t index) const {
824
+ return {tup, static_cast<size_t>(b + index)};
825
+ }
826
+
827
+ private:
828
+ py::tuple tup;
829
+ int64_t b;
830
+ int64_t e;
831
+ };
832
+
833
+ inline Stack createStackForSchema(
834
+ const FunctionSchema& schema,
835
+ const tuple_slice& args,
836
+ const py::kwargs& kwargs,
837
+ c10::optional<IValue> self) {
838
+ size_t all_arguments = (self ? 1 : 0) + args.size() + kwargs.size();
839
+ if (all_arguments > schema.arguments().size()) {
840
+ throw schema_match_error(c10::str(
841
+ schema.name(),
842
+ "() expected at most ",
843
+ schema.arguments().size(),
844
+ " argument(s) but received ",
845
+ all_arguments,
846
+ " argument(s). Declaration: ",
847
+ schema));
848
+ }
849
+ Stack stack;
850
+ stack.reserve(schema.arguments().size());
851
+
852
+ int64_t arg_idx = 0;
853
+ if (self) {
854
+ push(stack, std::move(*self));
855
+ arg_idx++;
856
+ }
857
+ // First push all positional args.
858
+ for (const auto& arg : args) {
859
+ // ...but refuse to do it if the schema says that this was supposed
860
+ // to be keyword only
861
+ if (schema.arguments()[arg_idx].kwarg_only()) {
862
+ throw schema_match_error(c10::str(
863
+ schema.name(),
864
+ "() takes ",
865
+ arg_idx,
866
+ " positional argument(s) but ",
867
+ self ? 1 + args.size() : args.size(),
868
+ " was/were given. Declaration: ",
869
+ schema));
870
+ }
871
+ // Use the type information from the schema to convert the PyObject.
872
+ push(stack, argumentToIValue(schema, stack.size(), arg));
873
+ arg_idx++;
874
+ }
875
+
876
+ // Now for every remaining non-positional argument in the schema, look for it
877
+ // in the kwargs dict and push it if found, or use its default value if it
878
+ // has one.
879
+ size_t consumed_kwargs = 0;
880
+ for (size_t i = stack.size(); i < schema.arguments().size(); ++i) {
881
+ const auto& arg = schema.arguments()[i];
882
+ if (kwargs.contains(arg.name().c_str())) {
883
+ push(stack, argumentToIValue(schema, i, kwargs[arg.name().c_str()]));
884
+ consumed_kwargs += 1;
885
+ } else if (arg.default_value()) {
886
+ push(stack, *arg.default_value());
887
+ } else {
888
+ throw schema_match_error(c10::str(
889
+ schema.name(),
890
+ "() is missing value for argument '",
891
+ arg.name(),
892
+ "'. Declaration: ",
893
+ schema));
894
+ }
895
+ }
896
+
897
+ if (consumed_kwargs != kwargs.size()) {
898
+ std::vector<std::string> names;
899
+ for (const auto& kwarg : kwargs) {
900
+ names.emplace_back(py::cast<std::string>(kwarg.first));
901
+ }
902
+ throw schema_match_error(schema.findErrorInKwargs(names));
903
+ }
904
+
905
+ return stack;
906
+ }
907
+
908
+ inline py::object createPyObjectForStack(Stack&& stack) {
909
+ if (stack.empty()) {
910
+ return py::none();
911
+ }
912
+
913
+ // Return a simple value and not a single-element tuple if there is only one
914
+ // return value.
915
+ if (stack.size() == 1) {
916
+ return toPyObject(std::move(stack[0]));
917
+ }
918
+
919
+ // If there is more than one return value, pop them into a py::tuple.
920
+ py::tuple return_values(stack.size());
921
+ for (const auto ret : c10::irange(return_values.size())) {
922
+ return_values[ret] = toPyObject(std::move(stack[ret]));
923
+ }
924
+
925
+ return std::move(return_values);
926
+ }
927
+
928
+ // TODO: Remove once we clean up the GraphExecutor usage.
929
+ inline Stack evilDeprecatedBadCreateStackDoNotUse(
930
+ const py::tuple& tuple,
931
+ at::ArrayRef<Value*> inputs,
932
+ size_t reserve_extra_space = 0) {
933
+ if (tuple.size() != inputs.size()) {
934
+ AT_ERROR(
935
+ "expected " + std::to_string(inputs.size()) + " inputs, but got " +
936
+ std::to_string(tuple.size()));
937
+ }
938
+ Stack result;
939
+ result.reserve(tuple.size() + reserve_extra_space);
940
+ for (const auto i : c10::irange(inputs.size())) {
941
+ result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type()));
942
+ }
943
+ return result;
944
+ }
945
+
946
+ // Run `callee`, potentially inserting a CallFunction/CallMethod node into the
947
+ // tracing graph.
948
+ inline py::object runAndInsertCall(
949
+ Function& callee,
950
+ const tuple_slice& args,
951
+ const py::kwargs& kwargs,
952
+ c10::optional<IValue> self,
953
+ // Lambda that tells this function how to insert `callee` into the graph if
954
+ // we're tracing.
955
+ const std::function<Value*(Graph&, const MatchedSchema& match)>&
956
+ callInserter) {
957
+ auto stack =
958
+ createStackForSchema(callee.getSchema(), args, kwargs, std::move(self));
959
+ const auto& tracing_state = tracer::getTracingState();
960
+ if (!tracing_state) {
961
+ pybind11::gil_scoped_release no_gil_guard;
962
+ // If we're not tracing, just run the callee as normal.
963
+ callee.run(stack);
964
+ } else {
965
+ // If we are tracing, insert the appropriate CallFunction or CallMethod node
966
+ // and then run the callee with tracing disabled.
967
+
968
+ // Get the graph `Value`s that represent the input IValues
969
+ auto inputs = last(stack, callee.num_inputs());
970
+ auto input_values =
971
+ fmap(inputs, [](const IValue& v) { return tracer::getValueTrace(v); });
972
+ TORCH_INTERNAL_ASSERT(callee.getSchema().returns().size() == 1)
973
+ auto return_type = callee.getSchema().returns().at(0).type();
974
+ auto graph = tracing_state->graph;
975
+ std::vector<NamedValue> named_values;
976
+ named_values.reserve(input_values.size());
977
+ for (Value* v : input_values) {
978
+ named_values.emplace_back(v);
979
+ }
980
+
981
+ // Add a call node.
982
+ MatchedSchema match = matchSchema(
983
+ callee.getSchema(),
984
+ tracer::getPythonInterpreterSourceRange(),
985
+ *graph,
986
+ named_values,
987
+ {});
988
+ auto output_value = callInserter(*graph, match);
989
+
990
+ // Actually run the callee. Pause the tracer so that we don't double-add the
991
+ // callee nodes.
992
+ {
993
+ pybind11::gil_scoped_release no_gil_guard;
994
+ ResourceGuard guard(tracer::pauseTracing());
995
+ callee.run(stack);
996
+ }
997
+
998
+ // Associate the output IValues with the output `Value`s in the graph
999
+ tracer::setValueTrace(stack.back(), output_value);
1000
+ }
1001
+
1002
+ TORCH_CHECK(
1003
+ !stack.empty(),
1004
+ "Expected values in the stack after execution but found none");
1005
+ return toPyObject(std::move(stack.back()));
1006
+ }
1007
+
1008
+ inline c10::optional<py::object> maybeTorchFunctionDispatch(
1009
+ const py::object& callee,
1010
+ const tuple_slice& args_no_self,
1011
+ const py::kwargs& kwargs,
1012
+ const c10::QualifiedName qualname) {
1013
+ std::vector<py::handle> args_vec;
1014
+ for (const auto& arg : args_no_self) {
1015
+ args_vec.push_back(arg);
1016
+ }
1017
+ py::tuple args = py::cast(args_vec);
1018
+
1019
+ // Handle __torch_function__ dispatch
1020
+ std::vector<PyObject*> overloaded_args;
1021
+ size_t total_arg_num = args.size() + kwargs.size();
1022
+ for (const auto& arg : args) {
1023
+ is_tensor_and_append_overloaded(arg.ptr(), &overloaded_args);
1024
+ is_tensor_list_and_append_overloaded(
1025
+ arg.ptr(),
1026
+ &overloaded_args,
1027
+ static_cast<int>(total_arg_num),
1028
+ false /* throw_error */);
1029
+ }
1030
+ // NB: for kwargs, we cannot guarantee the order of appending
1031
+ // is the same as the argument order in operator's schema.
1032
+ // This is suboptimal, but should be fine. Later when we have
1033
+ // better schema matching and argument parsing, we could
1034
+ // match the operator in `operations` first, then the order will
1035
+ // be guaranteed.
1036
+ for (auto item : kwargs) {
1037
+ is_tensor_and_append_overloaded(item.second.ptr(), &overloaded_args);
1038
+ is_tensor_list_and_append_overloaded(
1039
+ item.second.ptr(),
1040
+ &overloaded_args,
1041
+ total_arg_num,
1042
+ false /* throw_error */);
1043
+ }
1044
+ if (!overloaded_args.empty()) {
1045
+ return pybind11::reinterpret_steal<py::object>(
1046
+ handle_torch_function_no_python_arg_parser(
1047
+ /*overloaded_args=*/overloaded_args,
1048
+ /*args=*/args.ptr(),
1049
+ /*kwargs=*/kwargs.ptr(),
1050
+ /*func_name=*/qualname.name().c_str(),
1051
+ /*torch_api_function=*/callee.ptr(),
1052
+ /*module_name=*/qualname.prefix().c_str()));
1053
+ }
1054
+
1055
+ return c10::nullopt;
1056
+ }
1057
+
1058
+ inline py::object invokeScriptFunctionFromPython(
1059
+ Function& callee,
1060
+ const tuple_slice& args,
1061
+ const py::kwargs& kwargs) {
1062
+ // TODO: we could add __torch_function__ dispatch here but I don't know
1063
+ // the implications of doing so
1064
+
1065
+ return runAndInsertCall(
1066
+ callee,
1067
+ args,
1068
+ kwargs,
1069
+ /*self=*/c10::nullopt,
1070
+ [&](Graph& graph, const MatchedSchema& match) {
1071
+ return graph.insertFunctionCall(&callee, match);
1072
+ });
1073
+ }
1074
+
1075
+ inline py::object invokeScriptMethodFromPython(
1076
+ Method& callee,
1077
+ const tuple_slice& args,
1078
+ const py::kwargs& kwargs) {
1079
+ auto self = callee.owner()._ivalue();
1080
+
1081
+ if (auto torch_fn_result = maybeTorchFunctionDispatch(
1082
+ py::cast(callee), args, kwargs, callee.name())) {
1083
+ return *torch_fn_result;
1084
+ }
1085
+
1086
+ return runAndInsertCall(
1087
+ callee.function(),
1088
+ args,
1089
+ kwargs,
1090
+ self,
1091
+ [&](Graph& graph, const MatchedSchema& match) {
1092
+ return graph.insertMethodCall(callee.name(), match);
1093
+ });
1094
+ }
1095
+
1096
+ TORCH_PYTHON_API std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack(
1097
+ const std::vector<std::shared_ptr<Operator>>& operations,
1098
+ py::args args,
1099
+ const py::kwargs& kwargs);
1100
+
1101
+ TORCH_PYTHON_API py::object invokeOperatorFromPython(
1102
+ const std::vector<std::shared_ptr<Operator>>& operations,
1103
+ py::args args,
1104
+ const py::kwargs& kwargs,
1105
+ c10::optional<c10::DispatchKey> dk = c10::nullopt);
1106
+
1107
+ TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet(
1108
+ const std::vector<std::shared_ptr<Operator>>& operations,
1109
+ Symbol symbol,
1110
+ py::args args,
1111
+ const py::kwargs& kwargs,
1112
+ bool is_overload,
1113
+ c10::optional<c10::DispatchKey> dk = c10::nullopt);
1114
+
1115
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/utils/object_ptr.h>
5
+
6
+ namespace torch::jit {
7
+
8
+ void initPythonIRBindings(PyObject* module);
9
+
10
+ // execute a Python function, used for Ops we can't optimize but that we want to
11
+ // optimize around
12
+ struct ConcretePythonOp : public PythonOp {
13
+ static Symbol Kind;
14
+
15
+ ConcretePythonOp(Graph* graph) : PythonOp(graph, ::c10::prim::PythonOp) {}
16
+ ConcretePythonOp* init(
17
+ THPObjectPtr&& pyobj,
18
+ const std::string& cconv,
19
+ pyobj_list&& scalar_args) {
20
+ this->pyobj = std::move(pyobj);
21
+ this->scalar_args = std::move(scalar_args);
22
+ this->cconv = cconv;
23
+ return this;
24
+ }
25
+ // The Python object which contains the implementation of this function.
26
+ // This is either a class (non-legacy) or an object (legacy). See
27
+ // TraceInterpreterState for execution semantics.
28
+ THPObjectPtr pyobj;
29
+ // The calling convention for the Python function.
30
+ // 'c' -- constant argument
31
+ // 'd' -- dynamic argument
32
+ std::string cconv;
33
+ // Scalar arguments to the Python function. Not necessarily passed to
34
+ // the function in this order; see cconv for the correct order.
35
+ std::vector<THPObjectPtr> scalar_args;
36
+
37
+ std::string name() const override;
38
+ void cloneFrom(Node* other_) override;
39
+ Node* allocNewInstance(Graph* g) override {
40
+ return new ConcretePythonOp(g);
41
+ }
42
+ // recover the autograd.Function instance, if this PythonOp's function
43
+ // was originally SomeFunction.apply
44
+ // used in ONNX for discovering symbolics
45
+ c10::optional<THPObjectPtr> autogradFunction() const override;
46
+ void writeScalars(std::ostream& out) const override;
47
+ void lint_python() const override;
48
+ };
49
+
50
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/frontend/source_range.h>
4
+ #include <torch/csrc/jit/frontend/tracer.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ #include <memory>
9
+ #include <string>
10
+
11
+ namespace torch::jit {
12
+
13
+ struct Module;
14
+
15
+ namespace tracer {
16
+ void initPythonTracerBindings(PyObject* module);
17
+
18
+ SourceRange getPythonInterpreterSourceRange();
19
+
20
+ Node* preRecordPythonTrace(
21
+ THPObjectPtr pyobj,
22
+ const std::string& arg_types,
23
+ at::ArrayRef<autograd::Variable> inputs,
24
+ std::vector<THPObjectPtr> scalar_args);
25
+
26
+ std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracingWithDict(
27
+ const py::function& func,
28
+ const py::dict& inputs_dict,
29
+ Stack inputs,
30
+ const py::function& var_name_lookup_fn,
31
+ bool strict,
32
+ bool force_outplace,
33
+ Module* self = nullptr,
34
+ const std::vector<std::string>& argument_names = {});
35
+
36
+ std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracing(
37
+ const py::function& func,
38
+ Stack inputs,
39
+ const py::function& var_name_lookup_fn,
40
+ bool strict,
41
+ bool force_outplace,
42
+ Module* self = nullptr,
43
+ const std::vector<std::string>& argument_names = {});
44
+ } // namespace tracer
45
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ namespace torch::jit {
6
+
7
+ void initTreeViewBindings(PyObject* module);
8
+
9
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ namespace torch::jit {
4
+ TORCH_API void setGraphExecutorOptimize(bool o);
5
+ TORCH_API bool getGraphExecutorOptimize();
6
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ namespace torch::jit {
4
+ TORCH_API void setUTF8DecodingIgnore(bool o);
5
+ TORCH_API bool getUTF8DecodingIgnore();
6
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ #include <memory>
7
+ #include <vector>
8
+
9
+ namespace torch::jit {
10
+
11
+ using value_list = std::vector<Value*>;
12
+ // clang-format off
13
+ // Example showcasing how Gradient is constructed:
14
+ //
15
+ // Let's assume we have a function f, `m` and `n` do not require grad
16
+ // (`n` can depend only on `m`):
17
+ // y, n = f(x, m)
18
+ //
19
+ // Now, let's assume that the reverse of f (called f') needs to use values of `x`, `t` and `y`.
20
+ // `t` is an intermediate value produced in the body of f, and let's assume that it requires
21
+ // grad too.
22
+ //
23
+ // In this case differentiate(f) will return this:
24
+ // y, n, t = f(x, m) // `t` is appended to the output list
25
+ // dx = f'(dy, dt, x, t, y) // No `dm` or `dn` because they do not require gradient
26
+ // // All needed values from f are prepended to the input list
27
+ //
28
+ // f_real_outputs = 2 // Only first two outputs were present in f originally
29
+ // df_input_vjps = {0, 2} // i.e. connect grad_fn of y and t variables produced by f,
30
+ // y t // with y's output_nr = 0 and t's output_nr = 1
31
+ // df_input_captures = {I0, O2, O0} // Order matches the prefix of inputs to df
32
+ // x t y
33
+ // df_output_vjps = {0} // i.e. connect next_edge[0] of grad_fn to x's (grad_fn, output_nr).
34
+ //
35
+ // Terminology: vjp = vector-jacobian product
36
+ // clang-format on
37
+
38
+ struct Gradient {
39
+ explicit operator bool() const {
40
+ return df != nullptr;
41
+ }
42
+ std::shared_ptr<Graph> f;
43
+ std::shared_ptr<Graph> df;
44
+
45
+ // Describes how to construct outputs of f from what its graph will return.
46
+ // This is necessary because some trailing outputs are intermediates produced
47
+ // only to be saved for df (and should be ignored).
48
+ size_t f_real_outputs = 0; // initialized for safety.
49
+
50
+ // df inputs are split into two sections: vjps (aka grad_outputs) and
51
+ // captures. VJPs are "seeds" for the gradient computation given for each
52
+ // input capture of an Output kind. Captures are values the need to be saved
53
+ // when f is run. We handle inputs specially, because this allows us to avoid
54
+ // adding extra vjps as df inputs.
55
+
56
+ std::vector<size_t> df_input_vjps; // Offsets into f's outputs.
57
+ // capture can come from inputs or outputs
58
+ std::vector<size_t> df_input_captured_inputs; // Offsets into f's inputs
59
+ std::vector<size_t> df_input_captured_outputs; // Offsets into f's outputs
60
+
61
+ // df will produce vjps for a subset of inputs of f that required grad.
62
+ // df_output_vjps[idx] == inp_idx means that idx-th output of df produces a
63
+ // vjp for inp_idx-th input of f.
64
+ std::vector<size_t> df_output_vjps; // Offsets into f's inputs.
65
+
66
+ // How to use gradient to implement a differentiable autograd function:
67
+ // When running f:
68
+ // - Unwrap input Variables
69
+ // - Run f's graph
70
+ // - Create grad_fn
71
+ // - Wrap outputs in Variables (assume we have a tensor_outputs array):
72
+ // outputs = map(Variable, tensor_output)
73
+ // for i, offset in enumerate(df_input_vjps):
74
+ // outputs[offset].set_grad_fn(grad_fn, output_nr=i)
75
+ // - Use df_output_vjps to connect next_edges of grad_fn:
76
+ // for idx in df_output_vjps:
77
+ // grad_fn.add_next_edge(inputs[idx].gradient_edge())
78
+ // - Save captures for df (care needs to be taken to use SavedVariables for
79
+ // inputs and outputs that we will actually return)
80
+ // - Return outputs[:f_real_outputs]
81
+ //
82
+ // When running df:
83
+ // - Concatenate received vjps and captured Variables
84
+ // - Interpret df
85
+ // - Wrap outputs of df into Variables (that don't require grad)
86
+ };
87
+ TORCH_API Gradient differentiate(std::shared_ptr<Graph>& graph);
88
+
89
+ // can we take a derivative of this node symbolically?
90
+ TORCH_API bool isDifferentiable(const Node* n);
91
+ TORCH_API bool isDifferentiable(Graph& g);
92
+ TORCH_API bool isZero(Value* v);
93
+
94
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/frontend/schema_matching.h>
5
+ #include <cstddef>
6
+
7
+ namespace torch::jit {
8
+
9
+ // Calculates the number of args that need to be passed in.
10
+ // Less args may be needed if defaults are provided.
11
+ // Returns: {number args needed, number of out args}
12
+ inline std::pair<int64_t, int64_t> CalculateNecessaryArgs(
13
+ const std::vector<Argument>& schema_args,
14
+ at::ArrayRef<Value*> actual_inputs,
15
+ bool allow_trailing_out_args) {
16
+ if (schema_args.empty()) {
17
+ return std::make_pair(0, 0);
18
+ }
19
+
20
+ // count number of out arguments
21
+ int64_t schema_idx = static_cast<int64_t>(schema_args.size()) - 1;
22
+ if (allow_trailing_out_args) {
23
+ // skip over out arguments in the end.
24
+ while (schema_idx >= 0) {
25
+ const auto& current_arg = schema_args.at(schema_idx);
26
+ if (!current_arg.is_out()) {
27
+ break;
28
+ }
29
+ schema_idx--;
30
+ }
31
+ }
32
+
33
+ int64_t num_out = static_cast<int64_t>(schema_args.size()) - schema_idx - 1;
34
+
35
+ if (schema_args.size() < actual_inputs.size()) {
36
+ return std::make_pair(actual_inputs.size(), num_out);
37
+ }
38
+
39
+ // if it is the default args, we reset the index to the last element
40
+ if (!allow_trailing_out_args) {
41
+ schema_idx = schema_args.size() - 1;
42
+ }
43
+ // keeps track of trailing unnecessary args
44
+ while (schema_idx >= 0) {
45
+ // this means it is not default argument, so it is necessary
46
+ if (!schema_args.at(schema_idx).default_value().has_value()) {
47
+ return std::make_pair(schema_idx + 1, num_out);
48
+ } else {
49
+ auto schema_value =
50
+ schema_args.at(schema_idx).default_value().value().toIValue();
51
+ // non-const value will become nullptr here, so will be marked necessary
52
+ // non-const would include prim::ListConstruct, prim::DictConstruct as
53
+ // well.
54
+ auto actual_value = toIValue(actual_inputs[schema_idx]);
55
+ if (!actual_value.has_value()) {
56
+ return std::make_pair(schema_idx + 1, num_out);
57
+ }
58
+ // if the IR has same value as default value of the schema,
59
+ // it is not necessary argument.
60
+ if (schema_value != actual_value.value()) {
61
+ return std::make_pair(schema_idx + 1, num_out);
62
+ }
63
+ }
64
+ schema_idx--;
65
+ }
66
+ return std::make_pair(0, num_out);
67
+ }
68
+
69
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // This file is temporary until native_functions.yaml and derivatives.yaml are
3
+ // merged. Ideally this should all go into native_functions.yaml
4
+
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+
8
+ namespace torch::jit {
9
+
10
+ TORCH_API c10::optional<std::shared_ptr<Graph>> GetDecomposition(
11
+ const FunctionSchema& schema);
12
+
13
+ TORCH_API void RegisterDecomposition(
14
+ const FunctionSchema& schema,
15
+ std::shared_ptr<Graph> g);
16
+
17
+ TORCH_API void RunDecompositions(std::shared_ptr<Graph> g);
18
+
19
+ TORCH_API c10::optional<GraphFunction*> GetDecompositionFunction(
20
+ const FunctionSchema& schema);
21
+
22
+ // For invocation in C++, recommended is to assign to static local variable
23
+ TORCH_API Function* GetDecompositionExecutor(const char* schema_literal);
24
+
25
+ TORCH_API Function* GetDecompositionExecutor(const FunctionSchema& schema);
26
+
27
+ TORCH_API void run_jit_decomposition(
28
+ const c10::OperatorHandle& op,
29
+ torch::jit::Stack* stack);
30
+
31
+ TORCH_API bool has_jit_decomposition(const FunctionSchema& schema);
32
+
33
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <atomic>
#include <memory>

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/python/update_graph_executor_opt.h>
#include <torch/csrc/jit/runtime/argument_spec.h>
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/runtime/variable_tensor_list.h>

C10_DECLARE_bool(torch_jit_enable_new_executor);

namespace torch::jit {
struct GraphExecutorState;
struct Code;

// Which executor implementation backs a GraphExecutor: SIMPLE runs the
// graph without profiling-based specialization; PROFILING enables the
// profile/recompile pipeline.
enum ExecutorExecutionMode {
  SIMPLE,
  PROFILING,
};

// A runnable compilation product: interpreter bytecode (`code`) paired
// with the graph it was generated from.
struct ExecutionPlan {
  ExecutionPlan() = default;
  // NOTE: `code` is declared before `graph`, so `code(graph, ...)` is
  // initialized before the second initializer moves from `graph`. The
  // member declaration order below must therefore not be changed.
  ExecutionPlan(std::shared_ptr<Graph> graph, std::string function_name)
      : code(graph, std::move(function_name)), graph(std::move(graph)) {}

  // True iff this plan actually holds a compiled graph.
  operator bool() const {
    return static_cast<bool>(graph);
  }

  Code code;
  std::shared_ptr<Graph> graph;
};

// Notice that those structs don't manage lifetime of their members.
// They are only valid only right after you call getDebugState() and should
// never be used again once another GraphExecutor function is called.

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct GraphExecutorState {
  const Graph* graph = nullptr;
  ExecutionPlan fallback; // XXX: members of this field are optional
  std::unordered_map<ArgumentSpec, ExecutionPlan> execution_plans;
};

// RAII guard that switches the executor into profiling mode; the saved
// old_* flags are restored by the destructor (definitions in the .cpp).
struct TORCH_API EnableProfilingGuard {
  EnableProfilingGuard();
  ~EnableProfilingGuard();

 private:
  bool old_executor_mode = false;
  bool old_get_optimize = false;
};

struct GraphExecutorImplBase;

// Thin, copyable handle (shared pImpl) around a graph-executing backend.
struct TORCH_API GraphExecutor {
  GraphExecutor() = default;
  GraphExecutor(const std::shared_ptr<Graph>& graph, std::string function_name);

  GraphExecutor(
      const std::shared_ptr<Graph>& graph,
      std::string function_name,
      ExecutorExecutionMode executor_mode);

  void run(Stack& inputs);
  c10::intrusive_ptr<Future> runAsync(
      Stack& stack,
      TaskLauncher taskLauncher = at::launch);

  // `remaining_bailout_depth` stands for the maximum number of profiled and
  // specialized recompilations allowed for the current `GraphExecutor`. if
  // remaining_bailout_depth is equal to 0, `GraphExecutor` won't perform any
  // profiling and specialization. This is also equivalent to the
  // SIMPLE_EXECUTOR mode. if remaining_bailout_depth is greater than 0,
  // `GraphExecutor` will profile and specialize its input graph based on the
  // profiled information whenever a bailout check is failed/triggered, a new
  // `GraphExecutor` will be created. This new `GraphExecutor`'s
  // remaining_bailout_depth will be reduced by 1.
  // If no bailout depth is passed, the depth will be initialized from the
  // current global fusion strategy settings.
  const ExecutionPlan& getPlanFor(
      Stack& inputs,
      c10::optional<size_t> remaining_bailout_depth = c10::nullopt);
  GraphExecutorState getDebugState();

  void debugFlushCompilationCache();

  bool isOptimized() const;

 private:
  std::shared_ptr<GraphExecutorImplBase> pImpl;
};

TORCH_API Node* replaceBlockWithFallbackGraph(
    Block* b,
    ArrayRef<Value*> inputs);

// These passes need to run before it is valid to pass to the interpreter
// regardless of whether sizes have been specialized or not.
TORCH_API void runRequiredPasses(const std::shared_ptr<Graph>& g);

TORCH_API void debugSetFusionGroupInlining(bool state);
TORCH_API bool getFusionGroupInlining();

TORCH_API void debugSetAutodiffSubgraphInlining(bool state);
TORCH_API std::shared_ptr<Graph> lastExecutedOptimizedGraph();

// Global executor configuration knobs (process-wide atomics).
TORCH_API std::atomic<bool>& getProfilingMode();
TORCH_API std::atomic<bool>& getExecutorMode();
TORCH_API std::atomic<size_t>& getNumProfiledRuns();
TORCH_API size_t getBailoutDepth();
TORCH_API bool IsNewExecutorEnabled();

// RAII guard: sets the global graph-executor optimize flag for its scope
// and restores the previous value on destruction.
struct TORCH_API GraphOptimizerEnabledGuard {
  GraphOptimizerEnabledGuard(bool state)
      : old_state_(getGraphExecutorOptimize()) {
    setGraphExecutorOptimize(state);
  }

  ~GraphOptimizerEnabledGuard() {
    setGraphExecutorOptimize(old_state_);
  }

  bool old_state_;
};

namespace detail {

GraphExecutor* getGradExecutor(Operation& op);

GraphExecutor* getDifferentiableGraphOpExecutor(Operation& op);

// for debugging information we expose a way to get the last actually
// run graph. Previous approaches allowed querying the GraphExecutor
// for what graph it would run in certain circumstances (graphFor), but
// this is fragile because we sometimes change how these decisions are made.
// This interface still allows our tests to look at optimized graphs, but
// with less plumbing.
} // namespace detail

} // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <torch/csrc/jit/runtime/graph_executor.h>

#include <ATen/core/ivalue.h>
#include <c10/util/Exception.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/frontend/tracer.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/shape_analysis.h>
#include <torch/csrc/jit/resource_guard.h>
#include <torch/csrc/jit/runtime/argument_spec.h>
#include <torch/csrc/jit/runtime/autodiff.h>
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/runtime/profiling_record.h>

#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/jit/frontend/ir_emitter.h>
#include <torch/csrc/jit/runtime/logging.h>

#include <cstdint>
#include <iterator>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

namespace torch::jit {

// Internal helpers shared by the executor implementations
// (definitions live in graph_executor.cpp).
void packGradient(const Gradient& gradient, Node* dnode);
bool needsGradient(const std::shared_ptr<const Graph>& graph);
void runOptimization(
    std::shared_ptr<Graph>& graph,
    bool unroll_non_constant_loops = true,
    bool const_prop_user_classes = true);
void runNondiffOptimization(
    std::shared_ptr<Graph>& graph,
    bool strict_fuser_check = false);
void debugSetAutodiffSubgraphInlining(bool state);
bool TORCH_API getAutodiffSubgraphInlining();

void debugSetFusionGroupInlining(bool state);
bool getFusionGroupInlining();

// Tunable parameters for deciding when to create/keep subgraphs of
// differentiable code
const size_t autodiffSubgraphNodeThreshold = 2;
const size_t autodiffSubgraphInlineThreshold = 5;

// a Graph can be created via tracing, or via a language-based frontend
// GraphExecutor runs it. It can run the same graph on many different sizes
// and different requires_grad states, and handles specializations for each
// situation. GraphExecutor is completely unaware of tracing or module
// parameters to keep the tracing concerns separated.
struct GraphExecutorImplBase {
  // Copies `graph` and erases its shape information so the executor
  // starts from an unspecialized graph it can re-specialize per input.
  static std::shared_ptr<Graph> prepareGraph(
      const std::shared_ptr<Graph>& graph) {
    auto copy = graph->copy();
    EraseShapeInformation(copy);
    return copy;
  }

  GraphExecutorImplBase(
      const std::shared_ptr<Graph>& graph,
      std::string function_name)
      : graph(prepareGraph(graph)),
        function_name_(std::move(function_name)),
        num_inputs(this->graph->inputs().size()),
        num_outputs(this->graph->outputs().size()) {}

  // entry point where execution begins
  void run(Stack& stack);
  c10::intrusive_ptr<Future> runAsync(
      Stack& stack,
      TaskLauncher taskLauncher = at::launch);

  // Returns (compiling if necessary) the plan to execute for the argument
  // types currently on `stack`; see GraphExecutor::getPlanFor for the
  // semantics of `remaining_bailout_depth`.
  virtual const ExecutionPlan& getPlanFor(
      Stack& stack,
      c10::optional<size_t> remaining_bailout_depth = c10::nullopt) = 0;
  virtual GraphExecutorState getDebugState() = 0;
  virtual ~GraphExecutorImplBase() = default;

  virtual bool isOptimized() const {
    return false;
  }

 protected:
  friend struct GraphExecutor;

  // The unoptimized starting graph. This field is effectively const, but we
  // can't make it so because Graph::copy() is not const (and making it const is
  // not that easy at this point).
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::shared_ptr<Graph> graph;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::string function_name_;

  // If false, we'll run the graph as we get it, without any optimizations.
  // Useful for debugging.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const size_t num_inputs;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const size_t num_outputs;

  // GraphExecutors can be accessed from multiple threads, so this mutex needs
  // to be held every time we access the fallback or plan_cache.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::mutex compile_mutex;
};

} // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <torch/csrc/jit/ir/ir.h>
2
+
3
+ namespace torch::jit {
4
+
5
+ // This class facilitates depth-first iteration over all nodes in a graph.
6
+ class DepthFirstGraphNodeIterator {
7
+ Node* current_;
8
+
9
+ public:
10
+ // Constructor.
11
+ explicit DepthFirstGraphNodeIterator(std::shared_ptr<Graph>& graph)
12
+ : current_(*(graph->block()->nodes().begin())) {}
13
+
14
+ // Moves up and to the next node (may move up recursively).
15
+ void move_up() {
16
+ if (current_ == nullptr) {
17
+ return;
18
+ }
19
+ // Basically we start from the child block (which is current_)
20
+ // and we try to find the block that owns it. Now we need to check
21
+ // if that block is the graph root block, or if it is an If/Loop/etc
22
+ // block.
23
+ //
24
+ // If it's the graph root block we can stop because there is no "up"
25
+ // but if it is a node (e.g. If/Loop/etc) we need to apply logic
26
+ // based on where we are coming from to move to the next block.
27
+ // This might mean that we need to traverse up again (e.g. if we've
28
+ // reached the end of the else clause in an if block we need to go)
29
+ // up to the parent block that contains the if.
30
+ //
31
+ // Similarly if we've reached the end of the parent block containing
32
+ // the else clause we might need to go up again so this is a recursive
33
+ // function.
34
+ //
35
+ // BlockNode (if/loop/with)
36
+ // |
37
+ // [Block1] ... [Block2]
38
+ // |
39
+ // [ Node1, Node2, Node3, FromNode]
40
+ //
41
+ auto parent_block = current_->owningBlock();
42
+ TORCH_INTERNAL_ASSERT(parent_block, "Every node must be owned by a block");
43
+
44
+ // Get the node that owns the parent block. This node has to be an if,
45
+ // loop, or with.
46
+ auto parent_node = parent_block->owningNode();
47
+ if (parent_node == nullptr) {
48
+ // If there's no node that owns this current block then we're at the
49
+ // top of the graph and since we're trying to move up we have reached
50
+ // the end of the traversal.
51
+ current_ = nullptr;
52
+ return;
53
+ }
54
+
55
+ // Check the type of node this root is.
56
+ if (parent_node->kind() == prim::If) {
57
+ // Need to check if we came from the `then` branch or the `else` branch.
58
+ auto* then_block = parent_node->blocks().at(0);
59
+ auto* else_block = parent_node->blocks().at(1);
60
+
61
+ if (parent_block == else_block) {
62
+ // If else block then we move to the next node in the parent block.
63
+ current_ = parent_node->next();
64
+ if (current_->kind() == prim::Return) {
65
+ move_up();
66
+ }
67
+ } else {
68
+ // If then block then move to the else block if it is not empty.
69
+ TORCH_INTERNAL_ASSERT(parent_block == then_block);
70
+ bool else_block_empty =
71
+ else_block->nodes().begin() == else_block->nodes().end();
72
+
73
+ if (!else_block_empty) {
74
+ current_ = *(else_block->nodes().begin());
75
+ } else {
76
+ // Since it's empty we move to the next node.
77
+ current_ = parent_node->next();
78
+ if (current_->kind() == prim::Return) {
79
+ move_up();
80
+ }
81
+ }
82
+ }
83
+ } else if (
84
+ parent_node->kind() == prim::Loop ||
85
+ parent_node->kind() == prim::With) {
86
+ current_ = parent_node->next();
87
+ if (current_->kind() == prim::Return) {
88
+ move_up();
89
+ }
90
+ } else {
91
+ TORCH_INTERNAL_ASSERT(
92
+ false, "Only if/loop/with nodes should have child blocks");
93
+ }
94
+ }
95
+
96
+ // Moves to the next adjacent node or up in to the parent if that is not
97
+ // possible.
98
+ void move_next() {
99
+ if (current_ == nullptr) {
100
+ return;
101
+ }
102
+
103
+ // Increment to the next node in the current block.
104
+ current_ = current_->next();
105
+
106
+ // Check if we're at the end of the block. If so we need
107
+ // to move upwards (if it makes sense to).
108
+ if (current_->kind() == prim::Return) {
109
+ move_up();
110
+ }
111
+ }
112
+
113
+ // Moves to the next node in the graph into children if it can.
114
+ void move_into() {
115
+ if (current_ == nullptr) {
116
+ return;
117
+ }
118
+
119
+ // Check if we're currently on a node that contains sub-nodes.
120
+ if (current_->kind() == prim::If || current_->kind() == prim::Loop ||
121
+ current_->kind() == prim::With) {
122
+ auto* first_block = current_->blocks().at(0);
123
+ current_ = first_block->param_node();
124
+ // Move next will move up and out of the current node if the block is
125
+ // empty. `move_up` which is called by `move_next` will handle the
126
+ // difference between If, Loop, and With blocks appropriately.
127
+ move_next();
128
+ } else {
129
+ move_next();
130
+ }
131
+ }
132
+
133
+ // Get the next Node in the graph. \returns nullptr if there are no nodes
134
+ // left.
135
+ Node* next() {
136
+ auto result = current_;
137
+
138
+ // Try move into the existing node to set the next node to be returned.
139
+ // This will move to the next node if not possible, or move upwards and
140
+ // to the next.
141
+ move_into();
142
+
143
+ return result;
144
+ }
145
+ };
146
+
147
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <c10/util/Optional.h>
#include <memory>
#include <vector>

#include <ATen/ThreadLocalState.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/source_range.h>

C10_DECLARE_bool(torch_jit_disable_warning_prints);
C10_DECLARE_bool(torch_jit_enable_rethrow_caught_exception);

namespace at {
class Tensor;
TORCH_API void launch(std::function<void()> func);
} // namespace at
namespace c10 {
struct IValue;
struct OperatorName;
} // namespace c10

namespace torch::jit {

// The interpreter runs Graphs with Tensor inputs and Tensor outputs;
// a separate component in the autograd handles unwrapping and wrapping
// variable objects for use in the interpreter.
namespace interpreter {
struct CodeImpl;
}

struct Node;
struct GraphExecutor;
struct InterpreterStateImpl;
struct Graph;
struct Node;
struct Instruction;
using Stack = std::vector<c10::IValue>;
using c10::ivalue::Future;
using TaskLauncher = std::function<void(std::function<void()>)>;

// Compiled interpreter bytecode for a Graph. Cheap to copy: state is a
// shared pImpl; a default-constructed Code is falsy (see operator bool).
struct TORCH_API Code {
  Code() = default;
  explicit Code(interpreter::CodeImpl* pImpl);
  // remaining_bailout_depth is irrelevant in a `Code` object unless the `Code`
  // is directly created by `GraphExecutor` in which case it's likely to contain
  // `prim::BailOut`s to control the maximum depth of bailout chains
  explicit Code(
      const std::shared_ptr<Graph>& graph,
      std::string function_name,
      size_t remaining_bailout_depth = 0);

  const std::vector<GraphExecutor*>& grad_executors();
  const std::vector<GraphExecutor*>& diff_graph_op_executors();

  // True iff this Code holds compiled bytecode.
  explicit operator bool() const {
    return pImpl != nullptr;
  }
  size_t num_inputs() const;
  size_t num_outputs() const;
  size_t num_bailouts() const;
  const std::vector<c10::IValue>& constant_table() const;
  const std::vector<c10::TypePtr>& type_table() const;
  const std::vector<Instruction>& instructions() const;
  const std::unordered_map<std::string, size_t>& op_to_num_specified_args()
      const;
  // One source Node per instruction, aligned with instructions().
  const std::vector<Node*>& instructions_source() const;
  void request_bailout(size_t index);
  size_t register_size() const;

 private:
  std::shared_ptr<interpreter::CodeImpl> pImpl;
  friend struct InterpreterStateImpl;
  friend std::ostream& operator<<(std::ostream& out, const Code& code);
};

// Code variant used for mobile/lite-interpreter export; the extra flags
// control bytecode emission details for backward compatibility.
struct TORCH_API MobileCode : Code {
  explicit MobileCode(
      const std::shared_ptr<Graph>& graph,
      std::string function_name,
      bool emit_default_input_instructions = true,
      bool support_default_args_before_out = true,
      bool emit_promoted_ops = true,
      size_t remaining_bailout_depth = 0);
};

// A single execution of a Code object; holds registers and program state.
struct InterpreterState {
  TORCH_API InterpreterState(
      const Code& code,
      TaskLauncher taskLauncher = at::launch);
  TORCH_API void run(Stack& stack);
  TORCH_API c10::intrusive_ptr<Future> runAsync(Stack& stack);
  c10::intrusive_ptr<Future> getFuture();

 private:
  InterpreterState(c10::intrusive_ptr<c10::intrusive_ptr_target> pImpl);
  // Ideally we should use c10::intrusive_ptr<InterpreterStateImpl> for pImpl;
  // but intrusive_ptr requires full definition of InterpreterStateImpl,
  // which we need to hide in the header.
  c10::intrusive_ptr<c10::intrusive_ptr_target> pImpl;
  friend struct InterpreterStateImpl;
};

// Created by wait()
struct Suspend : public std::exception {
  const char* what() const noexcept override {
    return "Suspend";
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  explicit Suspend(c10::intrusive_ptr<Future> future_)
      : future(std::move(future_)) {}

  c10::intrusive_ptr<Future> future;
};

// InterpreterContinuation propagates dist_autograd_context_id
// through (and only through) the forward pass manually, other
// thread local settings are propagated with ThreadLocalState
struct InterpreterContinuation {
  InterpreterContinuation(
      InterpreterState state_,
      Stack stack_,
      int64_t dist_autograd_context_id = 0,
      c10::optional<at::ThreadLocalState> tls_state = c10::nullopt)
      : state(std::move(state_)),
        stack(std::move(stack_)),
        tls_state_(std::move(tls_state))
#ifdef USE_DISTRIBUTED
        ,
        dist_autograd_context_id_(dist_autograd_context_id)
#endif
  {
  }

  // Resumes the suspended interpreter run.
  void operator()();

 private:
  InterpreterState state;
  Stack stack;
  c10::optional<at::ThreadLocalState> tls_state_ = c10::nullopt;
#ifdef USE_DISTRIBUTED
  int64_t dist_autograd_context_id_;
#endif
};

// what is the tensors type, including state from the current execution context
// that modifies how the tensor behaves. For instance if no_grad is enabled
// this will cause the TensorType to have requires_grad=False.
TORCH_API at::TensorTypePtr tensorTypeInCurrentExecutionContext(
    const at::Tensor& t);

// current (TLS) TorchScript interpreter callstack
TORCH_API std::vector<StackEntry> currentCallstack();
TORCH_API std::vector<std::string> currentModuleHierarchy();

} // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <stdexcept>

#include <c10/util/Optional.h>
#include <torch/csrc/Export.h>
#include <string>

namespace torch::jit {

// Exception raised from TorchScript code (e.g. a `raise` statement).
// Optionally records the originating Python exception's class name and
// its unmodified message so callers can re-raise faithfully.
struct TORCH_API JITException : public std::runtime_error {
  explicit JITException(
      const std::string& msg,
      c10::optional<std::string> python_class_name = c10::nullopt,
      c10::optional<std::string> original_msg = c10::nullopt);

  c10::optional<std::string> getPythonClassName() const {
    return python_class_name_;
  }

  // the original msg if this is from a python exception. The interpreter has
  // changed the original message by adding "The following operation failed in
  // the TorchScript interpreter." in front of it in the handleError function.
  c10::optional<std::string> getOriginalMsg() const {
    return original_msg_;
  }

  // Thread-local stash for the most recently caught JITException's
  // original message / Python class name (setters used by the catch site).
  static const std::string& getCaughtOriginalMsg();
  static const std::string& getCaughtPythonClassName();
  static void setCaughtOriginalMsg(const std::string& msg);
  static void setCaughtPythonClassName(const std::string& pythonClassName);

 private:
  c10::optional<std::string> python_class_name_;
  c10::optional<std::string> original_msg_;
};

} // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // in memory description of all ATen Ops similar to Caffe2 schema
2
+ // once C10 exists this can be removed, or stubbed out, but we need
3
+ // it now to implement correct semantic checking for script
4
+ #pragma once
5
+
6
+ #include <ATen/core/dispatch/Dispatcher.h>
7
+ #include <ATen/core/dispatch/OperatorOptions.h>
8
+ #include <ATen/core/op_registration/op_allowlist.h>
9
+ #include <ATen/core/stack.h>
10
+ #include <c10/util/Exception.h>
11
+ #include <c10/util/overloaded.h>
12
+ #include <torch/csrc/jit/frontend/function_schema_parser.h>
13
+ #include <torch/csrc/jit/runtime/operator_options.h>
14
+ #include <torch/library.h>
15
+
16
+ #include <ATen/core/function_schema.h>
17
+ #include <ATen/core/symbol.h>
18
+
19
+ #include <functional>
20
+ #include <initializer_list>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <unordered_map>
24
+ #include <utility>
25
+ #include <variant>
26
+ #include <vector>
27
+
28
+ namespace torch::jit {
29
+
30
+ struct Node;
31
+ using ::c10::Argument;
32
+ using ::c10::FunctionSchema;
33
+ using ::c10::Symbol;
34
+
35
+ using OperationCreator = Operation (*)(const Node*);
36
+
37
+ namespace {
38
+ const std::array<at::Tag, 1> kJitOnlyOperatorTags = {
39
+ at::Tag::pt2_compliant_tag};
40
+ }
41
+
42
+ /*
43
+ * Note: JIT relies on Operator instances having static lifetime, because
44
+ * it for example stores a non-owning FunctionSchema* pointer in the Node class,
45
+ * which points to the function schema stored in the Operator instance.
46
+ * Also, jit::Operator is meant to store more operator related information like
47
+ * symbolic derivatives, which also requires them to have static lifetime
48
+ * so that changes to symbolic derivatives are remembered.
49
+ *
50
+ * Currently, the JIT operator library contains a jit::Operator instance
51
+ * with a wrapper for each c10 operator. The c10 operator library registers
52
+ * those wrappers using listeners in register_c10_ops.cpp.
53
+ * TODO Instead of doing it this way, we should only have pure-jit ops in
54
+ * the jit library but have the JIT operator lookup look into the c10 library
55
+ * too.
56
+ */
57
+
58
+ // An Operator is a thin wrapper around either a pure JIT operator (e.g. prim
59
+ // ops) or a c10 operator, allowing some common operations and abstracting away
60
+ // the concrete operator nature.
61
+ struct TORCH_API Operator {
62
+ private:
63
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
64
+ struct C10Operator final {
65
+ c10::OperatorHandle handle_;
66
+ Operation op_;
67
+ };
68
+ struct UnparsedFunctionSchema final {
69
+ std::string schema_string_;
70
+ mutable c10::optional<c10::AliasAnalysisKind> alias_analysis_;
71
+ };
72
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
73
+ struct JitOnlyOperator final {
74
+ // The only valid transition for schema_ is from right->left, i.e.
75
+ // when the schema gets parsed.
76
+ mutable std::variant<FunctionSchema, UnparsedFunctionSchema> schema_;
77
+
78
+ std::variant<Operation, OperationCreator> op_;
79
+ };
80
+
81
+ public:
82
+ Operator(c10::OperatorHandle opHandle, Operation operation)
83
+ : op_(C10Operator(
84
+ C10Operator{std::move(opHandle), std::move(operation)})) {}
85
+
86
+ Operator(
87
+ std::string schema,
88
+ Operation op,
89
+ c10::AliasAnalysisKind alias_analysis)
90
+ : op_(JitOnlyOperator{
91
+ UnparsedFunctionSchema{std::move(schema), alias_analysis},
92
+ Operation(std::move(op))}) {}
93
+
94
+ Operator(
95
+ std::string name,
96
+ std::string overload_name,
97
+ std::vector<Argument> arguments,
98
+ std::vector<Argument> returns,
99
+ Operation op,
100
+ c10::AliasAnalysisKind alias_analysis)
101
+ : op_(JitOnlyOperator{
102
+ FunctionSchema(varArgSchemaWithName(
103
+ std::move(name),
104
+ std::move(overload_name),
105
+ std::move(arguments),
106
+ std::move(returns),
107
+ alias_analysis)),
108
+ std::move(op)}) {}
109
+
110
+ Operator(
111
+ std::string schema,
112
+ OperationCreator op_creator,
113
+ c10::AliasAnalysisKind alias_analysis)
114
+ : op_(JitOnlyOperator{
115
+ UnparsedFunctionSchema{std::move(schema), alias_analysis},
116
+ op_creator}) {}
117
+
118
+ // Helper constructor to register `op` to run
119
+ // run for _every_ IR Node where n.kind() == name, regardless of arguments.
120
+ // This is accomplished by marking the schema varargs and having no required
121
+ // arguments.
122
+ Operator(
123
+ Symbol name,
124
+ OperationCreator op_creator,
125
+ c10::AliasAnalysisKind alias_analysis)
126
+ : op_(JitOnlyOperator{
127
+ FunctionSchema(varArgSchemaWithName(name, alias_analysis)),
128
+ op_creator}) {}
129
+
130
+ Operation getOperation(const Node* node = nullptr) const {
131
+ return std::visit(
132
+ c10::overloaded(
133
+ [](const C10Operator& op) { return op.op_; },
134
+ [node](const JitOnlyOperator& op) {
135
+ return std::visit(
136
+ c10::overloaded(
137
+ [](const Operation& op) { return op; },
138
+ [node](const OperationCreator& op_creator) {
139
+ return op_creator(node);
140
+ }),
141
+ op.op_);
142
+ }),
143
+ op_);
144
+ }
145
+
146
+ Operation getOperationForDispatchKey(c10::DispatchKey dk) const {
147
+ // TODO: some sort of caching mechanism?
148
+ return std::visit(
149
+ c10::overloaded(
150
+ [dk](const C10Operator& op) {
151
+ return Operation([op, dk](Stack& stack) {
152
+ op.handle_.callBoxedForDispatchKey(dk, stack);
153
+ });
154
+ },
155
+ [](const JitOnlyOperator& op) {
156
+ TORCH_CHECK(
157
+ false,
158
+ "calling a JIT operator for dispatch key is not supported");
159
+ return Operation(nullptr);
160
+ }),
161
+ op_);
162
+ }
163
+
164
+ const FunctionSchema& schema() const {
165
+ return std::visit(
166
+ c10::overloaded(
167
+ [](const C10Operator& op) -> const FunctionSchema& {
168
+ return op.handle_.schema();
169
+ },
170
+ [](const JitOnlyOperator& op) -> const FunctionSchema& {
171
+ // we lazily parse schema initialized from strings so that
172
+ // we do less work during static operator registration
173
+ if (op.schema_.index() == 1) {
174
+ auto& unmaterializedSchema =
175
+ std::get<UnparsedFunctionSchema>(op.schema_);
176
+ FunctionSchema schema =
177
+ parseSchema(unmaterializedSchema.schema_string_);
178
+ if (unmaterializedSchema.alias_analysis_.has_value()) {
179
+ // TODO What if it gets set later?
180
+ schema.setAliasAnalysis(
181
+ *unmaterializedSchema.alias_analysis_);
182
+ }
183
+ op.schema_ = std::move(schema);
184
+ }
185
+ return std::get<FunctionSchema>(op.schema_);
186
+ }),
187
+ op_);
188
+ }
189
+
190
+ c10::ArrayRef<at::Tag> getTags() const {
191
+ return std::visit(
192
+ c10::overloaded(
193
+ [](const C10Operator& op) { return op.handle_.getTags(); },
194
+ [](const JitOnlyOperator& op) {
195
+ // JitOnlyOperators don't have an c10::OperatorHandle or a way to
196
+ // specify tags. We're grandfathering them all into
197
+ // pt2_compliant_tag, but for anything else, please just stop
198
+ // using JitOnlyOperator.
199
+ return c10::ArrayRef<at::Tag>(kJitOnlyOperatorTags);
200
+ }),
201
+ op_);
202
+ }
203
+
204
+ bool isC10Op() const {
205
+ return op_.index() == 0;
206
+ }
207
+
208
+ c10::AliasAnalysisKind aliasAnalysisKind() const {
209
+ const FunctionSchema& schemaRef = schema();
210
+ c10::AliasAnalysisKind alias_analysis = schemaRef.aliasAnalysis();
211
+
212
+ TORCH_CHECK(
213
+ alias_analysis == AliasAnalysisKind::FROM_SCHEMA ||
214
+ !schemaRef.hasAnyAliasInfo(),
215
+ "In operator registration: Tried to register operator ",
216
+ schemaRef,
217
+ " with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA.");
218
+ return alias_analysis;
219
+ }
220
+
221
+ bool hasOperation() const {
222
+ return std::visit(
223
+ c10::overloaded(
224
+ [](const C10Operator&) { return true; },
225
+ [](const JitOnlyOperator& op) { return op.op_.index() == 0; }),
226
+ op_);
227
+ }
228
+
229
+ private:
230
+ static FunctionSchema varArgSchemaWithName(
231
+ Symbol name,
232
+ AliasAnalysisKind alias_analysis) {
233
+ auto result = FunctionSchema(
234
+ name,
235
+ "",
236
+ {},
237
+ {},
238
+ /*is_vararg*/ true,
239
+ /*is_varret*/ true);
240
+ result.setAliasAnalysis(alias_analysis);
241
+ return result;
242
+ }
243
+
244
+ static FunctionSchema varArgSchemaWithName(
245
+ std::string name,
246
+ std::string overload_name,
247
+ std::vector<Argument> arguments,
248
+ std::vector<Argument> returns,
249
+ AliasAnalysisKind alias_analysis) {
250
+ auto result = FunctionSchema(
251
+ std::move(name),
252
+ std::move(overload_name),
253
+ std::move(arguments),
254
+ std::move(returns),
255
+ /*is_vararg*/ false,
256
+ /*is_varret*/ false);
257
+ result.setAliasAnalysis(alias_analysis);
258
+ return result;
259
+ }
260
+
261
+ std::variant<C10Operator, JitOnlyOperator> op_;
262
+ };
263
+
264
+ TORCH_API std::string canonicalSchemaString(const FunctionSchema& schema);
265
+
266
+ TORCH_API const std::vector<std::shared_ptr<Operator>> getAllOperators();
267
+ TORCH_API const std::vector<std::shared_ptr<Operator>>& getAllOperatorsFor(
268
+ Symbol name);
269
+ // Returns operators in the order which OpOverloadPacket resolves them.
270
+ TORCH_API std::vector<std::shared_ptr<Operator>> getAllSortedOperatorsFor(
271
+ Symbol name);
272
+
273
+ // given a operator with an overload name, find the specific operator related to
274
+ // it, may return nullptr if no operator exists.
275
+ TORCH_API std::shared_ptr<Operator> findOperatorFor(
276
+ const c10::OperatorName& full_name);
277
+
278
+ TORCH_API std::vector<Symbol> findSimilarOperators(Symbol input_op);
279
+
280
+ TORCH_API void registerOperator(Operator&& op);
281
+ TORCH_API void deregisterOperator(const FunctionSchema& schema);
282
+
283
+ // XXX: this function is meant to be used with string literals only!
284
+ TORCH_API std::shared_ptr<Operator> getOperatorForLiteral(
285
+ const char* signature);
286
+
287
+ // Ensure the thing that registers c10 ops is defined.
288
+ // Otherwise, our registry will not have c10 ops. You can run into this
289
+ // scenario if you're querying registered ops during static init.
290
+ //
291
+ // This fn is defined in register_c10_ops.cpp
292
+ TORCH_API void ensure_c10_registerer_defined();
293
+
294
+ // Used to assert that unschematized operators have an analysis method written
295
+ TORCH_API bool aliasAnalysisHasSpecialCaseFor(c10::Symbol sym);
296
+
297
+ // A factory function to generate an optional operator. It has two
298
+ // instantiations depending on the template bool arg value. The arg can be a
299
+ // compile-time function for the selective op registration based on schema
300
+ // string.
301
+ template <typename Func>
302
+ c10::optional<Operator> OperatorGenerator(
303
+ const char* schema_str,
304
+ Func&& op,
305
+ AliasAnalysisKind alias_analysis) {
306
+ return c10::optional<Operator>(Operator(
307
+ std::string(schema_str), std::forward<Func>(op), alias_analysis));
308
+ }
309
+
310
+ template <typename Func>
311
+ c10::optional<Operator> OperatorGenerator(
312
+ torch::detail::SelectiveStr<true> schema_str,
313
+ Func&& op,
314
+ AliasAnalysisKind alias_analysis) {
315
+ return OperatorGenerator(
316
+ static_cast<const char*>(schema_str),
317
+ std::forward<Func>(op),
318
+ alias_analysis);
319
+ }
320
+
321
+ template <typename Func>
322
+ c10::optional<Operator> OperatorGenerator(
323
+ torch::detail::SelectiveStr<false> schema_str,
324
+ Func&& op,
325
+ AliasAnalysisKind alias_analysis) {
326
+ return c10::nullopt;
327
+ }
328
+
329
+ template <typename Func>
330
+ c10::optional<Operator> OperatorGenerator(
331
+ const std::string name,
332
+ const std::string overload_name,
333
+ const std::vector<c10::Argument> arguments,
334
+ const std::vector<c10::Argument> returns,
335
+ Func&& op,
336
+ AliasAnalysisKind alias_analysis) {
337
+ return c10::optional<Operator>(Operator(
338
+ name,
339
+ overload_name,
340
+ arguments,
341
+ returns,
342
+ std::forward<Func>(op),
343
+ alias_analysis));
344
+ }
345
+
346
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/Flags.h>
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/runtime/graph_executor_impl.h>
5
+
6
+ C10_DECLARE_bool(torch_jit_static_then_dynamic);
7
+
8
+ C10_DECLARE_bool(torch_jit_always_dynamic);
9
+
10
+ namespace torch::jit {
11
+
12
+ TORCH_API void runNooptPassPipeline(std::shared_ptr<Graph>& graph);
13
+
14
+ struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase {
15
+ ProfilingGraphExecutorImpl(
16
+ const std::shared_ptr<Graph>& graph,
17
+ std::string function_name);
18
+
19
+ const ExecutionPlan& getPlanFor(
20
+ Stack& stack,
21
+ c10::optional<size_t> remaining_bailout_depth) override;
22
+ GraphExecutorState getDebugState() override;
23
+ ~ProfilingGraphExecutorImpl() override = default;
24
+
25
+ void debugFlushCompilationCache() {
26
+ std::lock_guard<std::mutex> lock(compile_mutex);
27
+ pr_.reset();
28
+ fallback_plan_.reset();
29
+ profiling_plan_.reset();
30
+ optimized_plan_.reset();
31
+ // prevent memory leaks
32
+ fallback_functions_.clear();
33
+ remaining_bailout_depth_.reset();
34
+ // TODO - would be nice to have it initialized in subsequent use
35
+ fusion_strategy_ = getFusionStrategy();
36
+ }
37
+
38
+ bool isOptimized() const override {
39
+ return optimized_plan_.has_value();
40
+ }
41
+
42
+ private:
43
+ const ExecutionPlan& getOptimizedPlanFor(
44
+ Stack& stack,
45
+ c10::optional<size_t> remaining_bailout_depth);
46
+ void runProfilingInsensitiveOptimizations(std::shared_ptr<Graph>& graph);
47
+ void runProfilingOptimizations(
48
+ std::shared_ptr<Graph>& graph,
49
+ size_t remaining_depth);
50
+ void replaceFallbackGraphWithFallbackFunction(Block* b);
51
+ FusionBehavior getCurrentBehavior(size_t remaining_depth);
52
+ size_t getInstantiatedBailoutDepth();
53
+ void runNoGradOptimizations(
54
+ std::shared_ptr<Graph>& graph,
55
+ size_t remaining_bailout_depth);
56
+ void runFinalOptimizations(std::shared_ptr<Graph>& graph);
57
+ std::unique_ptr<ProfilingRecord> pr_;
58
+ c10::optional<ExecutionPlan>
59
+ profiling_plan_; // plan to run in order to profiling the code
60
+ c10::optional<ExecutionPlan> optimized_plan_;
61
+ FusionStrategy fusion_strategy_;
62
+
63
+ // this plan is used if getGraphExecutorOptimize is unset
64
+ c10::optional<ExecutionPlan> fallback_plan_;
65
+ // fallback functions are inserted for tensorexpr fusion groups
66
+ // and by specialize_autogradzero. Whenever, at runtime, input
67
+ // tensor don't match profiled properties, fallback functions are called
68
+ // They are the deoptimized version of the logic in fusion groups
69
+ // and/or autograd.
70
+ // The fallback functions are owned by a GraphExecutor instance
71
+ // They only exist in the optimized graph which is a private property
72
+ // of the GraphExecutor and only shared with InterpreterState
73
+ std::vector<std::unique_ptr<Function>> fallback_functions_;
74
+ c10::optional<size_t> remaining_bailout_depth_;
75
+ };
76
+
77
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <map>
12
+ #include <unordered_map>
13
+ #include <vector>
14
+
15
+ // We would like to assign each position/axis of a tensor an abstract size
16
+ // * For each `tensor` we have a profiled `Value` of a `TensorType` describing
17
+ // the properties of the `tensor`.
18
+ // * `TensorType` has a property called `symbolic_sizes_` to describe observed
19
+ // `tensor.sizes()`
20
+ // * `symbolic_sizes_` is a vector of abstract sizes (or
21
+ // `std::vector<ShapeSymbol>`) where
22
+ // * `ShapeSymbol`at `symbolic_sizes_[i]` describes the size value
23
+ // (`Dimension`) at `tensor.sizes()[i]`
24
+ // * We may see the same `Dimension` at different positions `i` in
25
+ // `tensor.sizes()` or even in different `tensor`
26
+ // * First, we would like associate the same `ShapeSymbol` to the same
27
+ // `Dimension` across **one** profiling execution or run of a TorchScript
28
+ // function.
29
+ // * The same `ShapeSymbol`s in different positions of `symbolic_shapes_` in
30
+ // possibly different `TensorType`s (i.e. `TensorType`s for different
31
+ // profiled values) form an implicit set. The elements of such a set are
32
+ // called *dimension locations*.
33
+ // * These sets allow us to track how the shapes of input arguments of some
34
+ // operation relate to operation's output shapes as the input and output
35
+ // shapes might share the same `ShapeSymbol`s
36
+ // * For **every** profiling run, we would like to maintain the invariant that
37
+ // *the same `ShapeSymbol` is always associated with the same `Dimension`*.
38
+ // * To maintain this invariant we merge the profiling information from all
39
+ // profiling runs,
40
+ // * For every two runs, we iterate over all `symbic_shapes_` and compare
41
+ // their `ShapeSymbol`s in the same position.
42
+ // * if we observe that for every dimension location that has
43
+ // the`ShapeSymbol S1` in run #1 there is **only one** `ShapeSymbol S2` in
44
+ // the same dimension location in run #2, we conclude that the invariant
45
+ // holds.
46
+ // * However, if we observe some dimension locations in run #2 have
47
+ // `ShapeSymbol S2` and the other ones have `ShapeSymbol S3` we would like
48
+ // to partition the virtual set of dimension locations associated with
49
+ // `ShapeSymbol S1` into two new subsets, so the invariant holds.
50
+ // * The partitioning works by assigning a new symbol to the dimension
51
+ // locations (associated with `ShapeSymbol S1`) that have `ShapeSymbol S2`
52
+ // and another new symbol to the dimension locations that have `ShapeSymbol
53
+ // S3`. In other words,
54
+ // * Subset #1 will consist of the dimension locations that in run #2 have
55
+ // `ShapeSymbol S2` and will have `ShapeSymbol S4` in those dimension
56
+ // locations
57
+ // * Subset #2 will consist of the dimension locations that in run #2 have
58
+ // `ShapeSymbol S4` and will have `ShapeSymbol S5` in those dimension
59
+ // locations
60
+ // * The effective result of merging the profiling information from two runs
61
+ // is new `TensorTypes` whose `symbolic_sizes_` /dimension locations have
62
+ // either `ShapeSymbol S4` or `ShapeSymbol S5`.
63
+ // * Partitioning can be done even before we have seen all the dimension
64
+ // locations associated with `ShapeSymbol S1`
65
+ // * We use `getSymbolInSet` of `ShapeSymbolTable` to remember all
66
+ // `ShapeSymbols` from run #2 we observed in the dimension locations
67
+ // associated with `ShapeSymbol S1` .
68
+ // * For every `ShapeSymbol` from run #2 in the dimension location
69
+ // associated with `ShapeSymbol S1` `getSymbolInSet` returns a symbol
70
+ // that we assign to the dimension location in a new TensorType.
71
+ // * It's important to point out that the same `ShapeSymbol S2` from run
72
+ // #2 in two dimension locations that have different `ShapeSymbol`s in
73
+ // run #1 are different! These dimension locations will belong to
74
+ // different subsets and have different `ShapeSymbol`s after merge.
75
+ // * On the other hand, for the same `ShapeSymbol S2` in two dimension
76
+ // locations that have `ShapeSymbol S1` in run #1`getSymbolInSet` will
77
+ // return the same symbol.
78
+
79
+ namespace torch::jit {
80
+
81
+ using ::c10::TensorTypePtr;
82
+ using Dimension = int64_t;
83
+
84
+ TORCH_API void RegisterProfilingNode(const std::function<bool(const Node*)>&);
85
+
86
+ struct ProfilingRecord;
87
+
88
+ // `SetPartitioningHelper` is used to maintain the following invariant:
89
+ // For **every** profiling run, *the same `ShapeSymbol` is always associated
90
+ // with the same `Dimension`*.
91
+ // while merging the profiling information from multiple runs.
92
+ struct SetPartitioningHelper {
93
+ std::map<c10::ShapeSymbol, std::map<Dimension, c10::ShapeSymbol>>
94
+ sets2subsets_;
95
+
96
+ // `partitionSetByDimension` partitions a virtual set
97
+ // of dimension locations associated with ShapeSymbol `symbol` into subsets.
98
+ // Partitioning is equivalent to giving (or renaming) a particular
99
+ // dimension location a new `ShapeSymbol`.
100
+ // The same `Dimension` value in different dimension locations
101
+ // that used to have `symbol` will receive the same
102
+ // new `ShapeSymbol`, effectively forming a new set.
103
+ c10::ShapeSymbol partitionSetByDimension(
104
+ Dimension new_size,
105
+ c10::ShapeSymbol symbol) {
106
+ auto& dims2symbols = getSetForSymbol(symbol);
107
+
108
+ if (dims2symbols.count(new_size) == 0) {
109
+ auto new_sym = c10::ShapeSymbol::newSymbol();
110
+ dims2symbols[new_size] = new_sym;
111
+ return new_sym;
112
+ }
113
+
114
+ return dims2symbols[new_size];
115
+ }
116
+
117
+ private:
118
+ std::map<Dimension, c10::ShapeSymbol>& getSetForSymbol(c10::ShapeSymbol s) {
119
+ auto& set = sets2subsets_[s];
120
+ // N.B. adding a mapping { s.static_size(), s }
121
+ // makes sure we preserve the fact that
122
+ // some dimension values remain the same
123
+ // across all profiled runs
124
+ if (s.is_static()) {
125
+ set.insert({s.static_size(), s});
126
+ }
127
+ return set;
128
+ }
129
+ };
130
+
131
+ // ShapeSymbolTable is used by Interpreter
132
+ // to assign dimension values to ShapeSymbols
133
+ // and fail a guard if the same symbol
134
+ // is assigned more than one dimension value.
135
+ struct ShapeSymbolTable {
136
+ // N.B. we treat static symbols as always assigned
137
+ // to themselves
138
+ bool isBound(c10::ShapeSymbol s) {
139
+ if (s.is_static()) {
140
+ return true;
141
+ }
142
+ return data_.count(s) != 0;
143
+ }
144
+
145
+ // N.B. we treat static symbols as always assigned
146
+ // to themselves
147
+ Dimension getValue(c10::ShapeSymbol s) {
148
+ if (s.is_static()) {
149
+ return s.static_size();
150
+ }
151
+ return data_[s];
152
+ }
153
+ void assign(c10::ShapeSymbol s, Dimension v) {
154
+ TORCH_INTERNAL_ASSERT(!s.is_static());
155
+ data_[s] = v;
156
+ }
157
+ std::map<c10::ShapeSymbol, Dimension> data_;
158
+ // Tries to assign dimension values from `new_sizes` to
159
+ // `ShapeSymbol`s `sym_shapes`.
160
+ // Returns `true` if every dimension value from `new_sizes`
161
+ // can be assigned to the corresponding `ShapeSymbol` from
162
+ // `sym_shapes`
163
+ // A dimension value can be assigned to a `ShapeSymbol`
164
+ // * if the symbol isn't assigned yet any dimension value
165
+ // * if the symbol is assigned and its value is equal to
166
+ // the dimension value from `new_sizes`
167
+ bool bindSymbolicShapes(
168
+ at::IntArrayRef new_sizes,
169
+ const c10::SymbolicShape& sym_shapes);
170
+ };
171
+
172
+ struct ProfilingRecord {
173
+ // N.B. ProfilingRecord's copy and move c-tor are disabled, so we won't
174
+ // end up accidentally copying or moving ProfilingRecords whose addresses
175
+ // are captured in callbacks_
176
+ ProfilingRecord(const ProfilingRecord&) = delete;
177
+ ProfilingRecord(ProfilingRecord&&) noexcept = delete;
178
+ TORCH_API static std::unique_ptr<ProfilingRecord> instrumentGraph(
179
+ const std::shared_ptr<Graph>& graph);
180
+ TORCH_API static void removeProfilingNodes(Block* b);
181
+ TORCH_API static void removeProfileCounter(Block* b);
182
+
183
+ std::shared_ptr<Graph> profiled_graph_;
184
+ mutable std::mutex mutex_;
185
+ size_t profiling_count_;
186
+
187
+ bool ready() const;
188
+
189
+ std::shared_ptr<Graph> graph() const {
190
+ return profiled_graph_;
191
+ }
192
+
193
+ TORCH_API ProfileIValueOp* createProfileIValueNode(Value* in_val);
194
+ TORCH_API ProfileIValueOp* createProfileIValueNode(ArrayRef<Value*> inputs);
195
+
196
+ private:
197
+ ProfileOp* createProfileNode(
198
+ const std::function<void(Stack&)>& fp,
199
+ at::ArrayRef<Value*> inputs);
200
+ void instrumentBlock(Block* block);
201
+ void insertShapeProfile(Node* n, size_t offset, const TypePtr& input_type);
202
+ ProfilingRecord(std::shared_ptr<Graph> g);
203
+ };
204
+
205
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch::jit {
7
+
8
+ TORCH_API const std::string& GetSerializedFuncs();
9
+
10
+ TORCH_API const OperatorMap<std::string>& GetFuncMapping();
11
+
12
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // This file is temporary until native_functions.yaml and derivatives.yaml are
3
+ // merged. Ideally this should all go into native_functions.yaml
4
+
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+
8
+ namespace torch::jit {
9
+
10
+ /*
11
+ ADDING A NEW SHAPE GRAPH:
12
+ - For one node schema, there is one corresponding registered shape compute
13
+ graph. The schema of the graph should be the same except for Tensor arguments.
14
+ For every Tensor input in operator schema, there should be a List[int]
15
+ corresponding to that Tensor's shape. For example: "aten::linear(Tensor input,
16
+ Tensor weight, Tensor? bias=None) -> Tensor" ==> def linear(input: List[int],
17
+ weight: List[int], bias: Optional[List[int]])
18
+
19
+ Additionally, arguments which are unused at the end of the schema may be left
20
+ off. This allows sharing a single graph for multiple function schemas, such as
21
+ unary operators with different trailing arguments that do not affect the output
22
+ shape.
23
+
24
+ The shape graph should return a new, unaliased List[int] (or tuple of lists for
25
+ multiple returns) and should not modify any input lists. This allows the shape
26
+ graphs to be composed and executed.
27
+
28
+ The shape analysis (particularly for non-complete, or symbolic shapes) works by
29
+ partially evaluating the JIT IR. It may be possible for a Graph to be registered
30
+ that we cannot currently partially evaluate. If this happens, please file an
31
+ issue. There are lints registered to avoid particular known patterns (continue
32
+ or break or early return in a loop). Those may be improved in the future, please
33
+ file an issue if necessary.
34
+
35
+ To debug (and write initially) the recommended flow is to define these functions
36
+ in python and iterate there. Functions should be added to
37
+ torch/jit/_shape_functions.
38
+
39
+ To test operators, the preferred flow is through OpInfos, with
40
+ `assert_jit_shape_analysis=True`. If this is not feasible, you can look at tests
41
+ in `test_symbolic_shape_analysis.py` such as `test_adaptive_avg_pool2d`.
42
+
43
+ Operators which take in a list of tensors, such as concat, are not yet
44
+ supported. Concat has been special cased and could be generalized as needed.
45
+ Please file an issue.
46
+ */
47
+
48
+ struct BoundedShapeGraphs {
49
+ std::shared_ptr<Graph> lower_bound;
50
+ std::shared_ptr<Graph> upper_bound;
51
+ };
52
+
53
+ TORCH_API void RegisterShapeComputeGraphForSchema(
54
+ const FunctionSchema& schema,
55
+ std::shared_ptr<Graph> g);
56
+
57
+ TORCH_API c10::optional<std::shared_ptr<Graph>> shapeComputeGraphForSchema(
58
+ const FunctionSchema& schema);
59
+
60
+ TORCH_API c10::optional<BoundedShapeGraphs> boundedGraphsForSchema(
61
+ const FunctionSchema& schema);
62
+
63
+ TORCH_API std::vector<const FunctionSchema*> RegisteredShapeComputeSchemas();
64
+
65
+ TORCH_API void LintShapeComputeGraph(
66
+ const FunctionSchema* schema,
67
+ const std::shared_ptr<Graph>& graph);
68
+
69
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // This file is temporary until native_functions.yaml and derivatives.yaml are
3
+ // merged. Ideally this should all go into native_functions.yaml
4
+
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+
8
+ namespace torch::jit {
9
+
10
+ TORCH_API const OperatorMap<std::string>& get_tensorexpr_elementwise_set();
11
+
12
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/List.h>
3
+ #include <ATen/core/functional.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+
8
+ namespace torch::jit {
9
+
10
+ void tupleUnpack(Stack& stack);
11
+
12
+ void format(Stack& stack, size_t num_inputs);
13
+
14
+ void einsum(Stack& stack, size_t num_inputs);
15
+
16
+ void percentFormat(Stack& stack, size_t num_inputs);
17
+
18
+ void listUnpack(Stack& stack, size_t num_outputs);
19
+
20
+ void tupleConstruct(Stack& stack, size_t num_inputs);
21
+
22
+ void namedTupleConstruct(Stack& stack, c10::TypePtr type, size_t num_inputs);
23
+
24
+ void listConstruct(Stack& stack, const c10::Type& list_type, size_t num_inputs);
25
+
26
+ void dictConstruct(Stack& stack, const c10::Type& type, size_t num_inputs);
27
+
28
+ // as weak_ref will create a Object with a non-owning CompilationUnit reference,
29
+ // for use as a constant in the Graph to avoid a reference cycle
30
+ void createObject(
31
+ Stack& stack,
32
+ const at::ClassTypePtr& type,
33
+ bool as_weak_ref = false);
34
+
35
+ void isinstance(Stack& stack, at::ArrayRef<at::TypePtr> types);
36
+
37
+ void tupleSlice(Stack& stack, size_t begin, size_t end);
38
+
39
+ void dequantize(Stack& stack);
40
+
41
+ } // namespace torch::jit
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.96 kB). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_codeblock.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_fusion_interface.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_fusion_op.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_fusion_optimization.cpython-310.pyc ADDED
Binary file (2.51 kB). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_gufuncs.cpython-310.pyc ADDED
Binary file (23.2 kB). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/__pycache__/_ufuncs.cpython-310.pyc ADDED
Binary file (408 Bytes). View file
 
vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/README.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # "include" directory
2
+
3
+ All files and directories in this directory will be copied to the distribution (sdist and wheel).
4
+ Note that items starting with `.` (e.g., `.git`) are excluded.
5
+ See `setup.py` for details.