ZTWHHH commited on
Commit
07db713
·
verified ·
1 Parent(s): 9219467

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model_container.h +200 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h +47 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h +59 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h +238 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h +72 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h +52 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h +298 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h +150 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h +47 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h +286 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h +62 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h +41 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h +43 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h +80 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h +124 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h +62 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h +79 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h +56 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h +85 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h +52 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h +27 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h +152 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h +17 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h +19 -0
  26. vllm/lib/python3.10/site-packages/sympy/logic/__init__.py +12 -0
  27. vllm/lib/python3.10/site-packages/sympy/logic/__pycache__/__init__.cpython-310.pyc +0 -0
  28. vllm/lib/python3.10/site-packages/sympy/logic/__pycache__/inference.cpython-310.pyc +0 -0
  29. vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__init__.py +0 -0
  30. vllm/lib/python3.10/site-packages/sympy/logic/algorithms/z3_wrapper.py +115 -0
  31. vllm/lib/python3.10/site-packages/sympy/logic/boolalg.py +0 -0
  32. vllm/lib/python3.10/site-packages/sympy/logic/inference.py +340 -0
  33. vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  34. vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc +0 -0
  35. vllm/lib/python3.10/site-packages/sympy/logic/utilities/__init__.py +3 -0
  36. vllm/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc +0 -0
  38. vllm/lib/python3.10/site-packages/sympy/logic/utilities/dimacs.py +70 -0
  39. vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/__init__.cpython-310.pyc +0 -0
  40. vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-310.pyc +0 -0
  41. vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-310.pyc +0 -0
  42. vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-310.pyc +0 -0
  43. vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png +3 -0
  44. vllm/lib/python3.10/site-packages/sympy/series/__init__.py +23 -0
  45. vllm/lib/python3.10/site-packages/sympy/series/__pycache__/acceleration.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/sympy/series/__pycache__/approximants.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/sympy/series/__pycache__/fourier.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/sympy/series/__pycache__/gruntz.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/sympy/series/__pycache__/kauers.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/sympy/series/__pycache__/limits.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1687,3 +1687,7 @@ parrot/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-li
1687
  parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1688
  vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1689
  vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
1687
  parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1688
  vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1689
  vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1690
+ vllm/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1691
+ vllm/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1692
+ vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1693
+ vllm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model_container.h ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <deque>
5
+ #include <future>
6
+ #include <mutex>
7
+ #include <shared_mutex>
8
+
9
+ // WARNING: Be careful when adding new includes here. This header will be used
10
+ // in model.so, and should not refer to any aten/c10 headers except the stable
11
+ // C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule
12
+ // applies to other files under torch/csrc/inductor/aoti_runtime/.
13
+ #include <torch/csrc/inductor/aoti_runtime/model.h>
14
+
15
+ namespace torch {
16
+ namespace aot_inductor {
17
+
18
+ class AOTInductorModelContainer {
19
+ public:
20
+ AOTInductorModelContainer(
21
+ size_t num_models,
22
+ bool is_cpu = false,
23
+ std::optional<std::string> cubin_dir = std::nullopt) {
24
+ constants_ = std::make_shared<ConstantMap>();
25
+ models_.reserve(num_models);
26
+ available_models_.reserve(num_models);
27
+ for (size_t i = 0; i < num_models; ++i) {
28
+ models_.push_back(AOTInductorModel::Create(constants_, cubin_dir));
29
+ available_models_.push_back(models_.back().get());
30
+ }
31
+
32
+ // Note that the all following fields (input_names_, output_names,
33
+ // etc) can be filled in by the AOT
34
+ // codegen. However, we choose to query such information from
35
+ // the owned AOTInductorModel for a couple of reasons:
36
+ // * simplify the codegen templates
37
+ // * reduce information fragmentation and duplication
38
+ // * the initialization process below is done only once when the container
39
+ // is constructed, so it would have little performance impact
40
+ auto* model = available_models_[0];
41
+ size_t num_inputs = model->num_inputs();
42
+ input_names_.reserve(num_inputs);
43
+ for (size_t i = 0; i < num_inputs; i++) {
44
+ input_names_.push_back(model->input_name(i));
45
+ }
46
+
47
+ size_t num_outputs = model->num_outputs();
48
+ output_names_.reserve(num_outputs);
49
+ for (size_t i = 0; i < num_outputs; i++) {
50
+ output_names_.push_back(model->output_name(i));
51
+ }
52
+
53
+ model->load_constants(is_cpu);
54
+ #ifdef USE_CUDA
55
+ constant_blob_ = model->release_constant_blob();
56
+ #endif
57
+
58
+ for (auto& model : models_) {
59
+ model->update_constants_map(constants_);
60
+ }
61
+
62
+ in_spec_ = model->get_in_spec();
63
+ out_spec_ = model->get_out_spec();
64
+ }
65
+
66
+ void run(
67
+ AtenTensorHandle*
68
+ input_handles, // array of input AtenTensorHandle; handles
69
+ // are stolen; the array itself is borrowed
70
+ AtenTensorHandle*
71
+ output_handles, // array for writing output AtenTensorHandle; handles
72
+ // will be stolen by the caller; the array itself is
73
+ // borrowed
74
+ DeviceStreamType stream,
75
+ AOTIProxyExecutorHandle proxy_executor) {
76
+ auto* model = get_available_model();
77
+ try {
78
+ model->run(input_handles, output_handles, stream, proxy_executor);
79
+ } catch (...) {
80
+ std::lock_guard lk(models_mutex_);
81
+ available_models_.push_back(model);
82
+ throw;
83
+ }
84
+
85
+ {
86
+ std::lock_guard lk(models_mutex_);
87
+ pending_models_.push_back(model);
88
+ }
89
+ pending_models_available_.notify_one();
90
+ }
91
+
92
+ size_t num_inputs() const {
93
+ return input_names_.size();
94
+ }
95
+
96
+ size_t num_outputs() const {
97
+ return output_names_.size();
98
+ }
99
+
100
+ const char* input_name(size_t idx) const {
101
+ return input_names_.at(idx).c_str();
102
+ }
103
+
104
+ const char* output_name(size_t idx) const {
105
+ return output_names_.at(idx).c_str();
106
+ }
107
+
108
+ size_t num_models() const {
109
+ return models_.size();
110
+ }
111
+
112
+ const char* get_in_spec() const {
113
+ return in_spec_;
114
+ }
115
+
116
+ const char* get_out_spec() const {
117
+ return out_spec_;
118
+ }
119
+
120
+ private:
121
+ std::vector<std::string> input_names_;
122
+ std::vector<std::string> output_names_;
123
+ const char* in_spec_;
124
+ const char* out_spec_;
125
+
126
+ #ifdef USE_CUDA
127
+ // Holds the blob storage for constants' at::Tensor for CUDA.
128
+ CUDAPtr constant_blob_;
129
+ #endif // USE_CUDA
130
+
131
+ // Holds the mapping of constants to at::Tensor.
132
+ // The underlying data of at::Tensor is in either constant_blob_ (for CUDA).
133
+ // or _binary_constants_bin_start (for CPU).
134
+ std::shared_ptr<ConstantMap> constants_;
135
+
136
+ // Holds all the AOTInductorModel instances owned by this container.
137
+ std::vector<std::unique_ptr<AOTInductorModel>> models_;
138
+
139
+ // Holds the AOTInductorModel instances available for inference.
140
+ std::vector<AOTInductorModel*> available_models_;
141
+
142
+ // Holds the AOTInductorModel instances that have started running
143
+ // inference and can be placed onto available_models_ upon their
144
+ // completion.
145
+ std::deque<AOTInductorModel*> pending_models_;
146
+
147
+ // Protects available_models_ and pending_models_.
148
+ std::mutex models_mutex_;
149
+
150
+ // Notified whenever a model is placed onto pending_models_.
151
+ std::condition_variable pending_models_available_;
152
+
153
+ AOTInductorModel* get_available_model() {
154
+ std::unique_lock lk(models_mutex_);
155
+ if (available_models_.empty()) {
156
+ reclaim_finished_models(lk);
157
+ }
158
+ auto* result = available_models_.back();
159
+ available_models_.pop_back();
160
+ return result;
161
+ }
162
+
163
+ void reclaim_finished_models(std::unique_lock<std::mutex>& lk) {
164
+ // push finished model instances to the end of pending_models_
165
+ auto it = std::stable_partition(
166
+ pending_models_.begin(),
167
+ pending_models_.end(),
168
+ [](AOTInductorModel* m) { return !m->is_finished(); });
169
+
170
+ if (it != pending_models_.end()) {
171
+ // We have finished model instances that can be pushed into
172
+ // available_models_ so that we don't have to be blocked on waiting
173
+ // the pending_models_available_ condition.
174
+ available_models_.insert(
175
+ available_models_.end(), it, pending_models_.end());
176
+ pending_models_.erase(it, pending_models_.end());
177
+ return;
178
+ }
179
+
180
+ pending_models_available_.wait(
181
+ lk, [this]() { return !pending_models_.empty(); });
182
+ // Let's make the schedule simple first. We always wait on the first
183
+ // pending_models_ to be complete.
184
+ auto* model = pending_models_.front();
185
+ pending_models_.pop_front();
186
+ lk.unlock();
187
+ try {
188
+ model->wait_for_completion();
189
+ } catch (...) {
190
+ lk.lock();
191
+ available_models_.push_back(model);
192
+ throw;
193
+ }
194
+ lk.lock();
195
+ available_models_.push_back(model);
196
+ }
197
+ };
198
+
199
+ } // namespace aot_inductor
200
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/lazy/core/tensor.h>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ TORCH_API std::function<std::vector<SourceLocation>()>&
12
+ GetPythonFramesFunction();
13
+
14
+ TORCH_API std::string GetFirstUserFrameInPython();
15
+
16
+ class TORCH_API DebugUtil {
17
+ public:
18
+ enum GraphFormat {
19
+ kText,
20
+ kDot,
21
+ kBackend,
22
+ };
23
+
24
+ static GraphFormat GetDefaultGraphFormat();
25
+
26
+ // Dumps the current Python frame and the IR Graph whose roots are the IR
27
+ // values held at the tensors. If indices is not nullptr, it selects the
28
+ // indices of the tensors whose graph will be emitted.
29
+ static std::string GetTensorsGraphInfo(
30
+ c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
31
+ const std::vector<size_t>* indices,
32
+ GraphFormat format = GetDefaultGraphFormat());
33
+
34
+ // If the environment variable LTC_SAVE_TENSORS_FILE is set to the proper
35
+ // output path, an instance of the report returned by GetTensorsGraphInfo() is
36
+ // saved.
37
+ static void SaveTensorsGraphInfo(
38
+ const char* name,
39
+ c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
40
+ const std::vector<size_t>* indices,
41
+ GraphFormat format = GetDefaultGraphFormat());
42
+
43
+ static bool ExperimentEnabled(const std::string& name);
44
+ };
45
+
46
+ } // namespace lazy
47
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/Flags.h>
16
+ #include <torch/csrc/lazy/core/hash.h>
17
+ #include <torch/csrc/lazy/core/ir.h>
18
+ #include <torch/csrc/lazy/core/ir_metadata.h>
19
+ #include <torch/csrc/lazy/ts_backend/ts_node.h>
20
+
21
+ namespace torch {
22
+ namespace lazy {
23
+
24
+ /**
25
+ * The goal of "dynamic" Nodes is to patch a hole in our tracing.
26
+ * Previously, if a user called `sizes` on a Tensor, it would leak out
27
+ * of our tracing system, as `sizes` returns a torch.Size or an int. To
28
+ * prevent this from happening, we introduce DimensionNode, a new type
29
+ * of Node that abstracts the operation of getting the dimensions of a
30
+ * Tensor.
31
+ *
32
+ * Consider the following example:
33
+ * ```
34
+ * numel = x.shape()[0] * x.shape()[1]
35
+ * ```
36
+ *
37
+ * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
38
+ * and the multiplication of the two SizeNodes will be represented by
39
+ * a SizeMul (also a subclass of DimensionNode). Through this, we can
40
+ * prevent `numel` from being represented as a Python int and thus
41
+ * burned into the Graph.
42
+ */
43
+
44
+ class TORCH_API DimensionNode {
45
+ public:
46
+ virtual bool isSymbolic() const {
47
+ return false;
48
+ };
49
+ virtual int64_t getDynamicValue() const {
50
+ TORCH_CHECK(false, "NYI");
51
+ };
52
+ virtual int64_t getStaticValue() const {
53
+ TORCH_CHECK(false, "NYI");
54
+ };
55
+ virtual ~DimensionNode() = default;
56
+ };
57
+
58
+ } // namespace lazy
59
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Hash utils in this file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/e0e5f937a0ba8d904f9608137dc8c51ba439df2d/third_party/xla_client/util.h
4
+ */
5
+ #pragma once
6
+
7
+ #include <ATen/Tensor.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <c10/util/int128.h>
10
+ #include <torch/csrc/Export.h>
11
+ #include <cstring>
12
+ #include <set>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ using size_t = std::size_t;
20
+
21
+ class TORCH_API hash_t : public c10::uint128 {
22
+ public:
23
+ // Swich from typedef hash_t = uint128 to provide explicit casters
24
+ hash_t(int8_t val) : uint128(static_cast<uint32_t>(val)) {}
25
+ hash_t(int16_t val) : uint128(static_cast<uint32_t>(val)) {}
26
+ hash_t(int32_t val) : uint128(static_cast<uint32_t>(val)) {}
27
+ hash_t(int64_t val) : uint128(static_cast<uint64_t>(val)) {}
28
+ hash_t(uint32_t val) : uint128(val) {}
29
+ hash_t(uint64_t val) : uint128(val) {}
30
+ hash_t(uint128 val) : uint128(val) {}
31
+ hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
32
+ hash_t() : uint128() {}
33
+ };
34
+
35
+ // Std* functions use 64-bit hash
36
+ size_t TORCH_API StdDataHash(const void* data, size_t size);
37
+
38
+ size_t TORCH_API StdHashCombine(uintmax_t a, uintmax_t b);
39
+
40
+ // Other functions are all 128-bit
41
+ hash_t TORCH_API HashBlock(const void* data, size_t n, const hash_t& seed);
42
+
43
+ hash_t TORCH_API DataHash(const void* data, size_t size);
44
+
45
+ hash_t TORCH_API HashCombine(const hash_t& a, const hash_t& b);
46
+
47
+ size_t TORCH_API HashReduce(const hash_t& a);
48
+
49
+ // Returns a string representation of a hash
50
+ std::string TORCH_API HashToString(const hash_t& a);
51
+
52
+ struct HashReducer {
53
+ size_t operator()(const hash_t& value) const {
54
+ return HashReduce(value);
55
+ }
56
+ };
57
+
58
+ static inline hash_t StringHash(const char* data) {
59
+ return DataHash(data, std::strlen(data));
60
+ }
61
+
62
+ // Automatic templated implementation for 'arithmetic' types
63
+ template <
64
+ typename T,
65
+ typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr>
66
+ hash_t Hash(const T& value) {
67
+ return DataHash(&value, sizeof(value));
68
+ }
69
+
70
+ // added because on macos builds the vector<bool> specialization
71
+ // breaks falling through to the templated arithmetic types above
72
+ hash_t TORCH_API Hash(const std::vector<bool>& value);
73
+
74
+ // Specialiazed implementations for proprietary types
75
+ static inline hash_t Hash(const c10::ScalarType& value) {
76
+ return DataHash(&value, sizeof(value));
77
+ }
78
+
79
+ static inline hash_t Hash(const c10::MemoryFormat& value) {
80
+ return DataHash(&value, sizeof(value));
81
+ }
82
+
83
+ static inline hash_t Hash(const c10::DeviceType& value) {
84
+ return DataHash(&value, sizeof(value));
85
+ }
86
+
87
+ static inline hash_t Hash(const c10::Device& value) {
88
+ return HashCombine(Hash(value.type()), Hash(value.index()));
89
+ }
90
+
91
+ static inline hash_t Hash(const c10::Layout& value) {
92
+ return DataHash(&value, sizeof(value));
93
+ }
94
+
95
+ static inline hash_t Hash(const c10::Scalar& value) {
96
+ switch (value.type()) {
97
+ case c10::ScalarType::ComplexDouble:
98
+ return Hash(value.toComplexDouble());
99
+ case c10::ScalarType::Double:
100
+ return Hash(value.toDouble());
101
+ case c10::ScalarType::Long:
102
+ return Hash(value.toLong());
103
+ case c10::ScalarType::Bool:
104
+ return Hash(value.toBool());
105
+ default:
106
+ TORCH_INTERNAL_ASSERT(false, "Unknown scalar type.", value.type());
107
+ }
108
+ }
109
+
110
+ static inline hash_t TensorHash(const at::Tensor& tensor) {
111
+ at::Tensor ctensor = tensor.contiguous();
112
+ int64_t size = ctensor.numel() * ctensor.element_size();
113
+ switch (ctensor.scalar_type()) {
114
+ case at::ScalarType::Bool:
115
+ return DataHash(ctensor.const_data_ptr<bool>(), size);
116
+ case at::ScalarType::Byte:
117
+ return DataHash(ctensor.const_data_ptr<uint8_t>(), size);
118
+ case at::ScalarType::Char:
119
+ return DataHash(ctensor.const_data_ptr<int8_t>(), size);
120
+ case at::ScalarType::Short:
121
+ return DataHash(ctensor.const_data_ptr<int16_t>(), size);
122
+ case at::ScalarType::Int:
123
+ return DataHash(ctensor.const_data_ptr<int32_t>(), size);
124
+ case at::ScalarType::Long:
125
+ return DataHash(ctensor.const_data_ptr<int64_t>(), size);
126
+ case at::ScalarType::Float:
127
+ return DataHash(ctensor.const_data_ptr<float>(), size);
128
+ case at::ScalarType::Double:
129
+ return DataHash(ctensor.const_data_ptr<double>(), size);
130
+ case at::ScalarType::BFloat16:
131
+ return DataHash(ctensor.const_data_ptr<at::BFloat16>(), size);
132
+ case at::ScalarType::Half:
133
+ return DataHash(ctensor.const_data_ptr<at::Half>(), size);
134
+ case at::ScalarType::ComplexFloat:
135
+ return DataHash(ctensor.const_data_ptr<c10::complex<float>>(), size);
136
+ case at::ScalarType::ComplexDouble:
137
+ return DataHash(ctensor.const_data_ptr<c10::complex<double>>(), size);
138
+ default:
139
+ TORCH_INTERNAL_ASSERT(
140
+ false, "Unsupported scalar type:", ctensor.scalar_type());
141
+ }
142
+ }
143
+
144
+ static inline hash_t Hash(const std::string& value) {
145
+ return DataHash(value.data(), value.size());
146
+ }
147
+
148
+ static inline hash_t Hash(const c10::string_view& value) {
149
+ return DataHash(value.data(), value.size());
150
+ }
151
+
152
+ static inline hash_t Hash(const at::Generator& value) {
153
+ return TensorHash(value.get_state());
154
+ }
155
+
156
+ // Taken from glibc's implementation of hashing optionals,
157
+ // we want to include a contribution to the hash to distinguish
158
+ // cases where one or another option was null, but we hope it doesn't
159
+ // collide with an actually scalar value.
160
+ //
161
+ // Use an arbitrary randomly-selected 64-bit integer rather than a
162
+ // small constant that we then hash at runtime so we don't have to
163
+ // repeatedly hash a constant at runtime.
164
+ static const int64_t kNullOpt = 0x8655d738f3678dda;
165
+
166
+ // Hashing for c10::optional types contributes to hash
167
+ // for optionals with null value, important to distinguish
168
+ // between <nullopt, non-nullopt> and <non-nullopt, nullopt> cases
169
+ template <typename T>
170
+ hash_t Hash(const c10::optional<T>& value) {
171
+ if (value.has_value()) {
172
+ return Hash(value.value());
173
+ } else {
174
+ return kNullOpt;
175
+ }
176
+ }
177
+
178
+ // Hashing of containers
179
+ // Forward declare to allow hashes of vectors of vectors to work.
180
+ template <typename T>
181
+ hash_t ContainerHash(const T& values);
182
+
183
+ template <typename T>
184
+ hash_t Hash(const std::vector<T>& values) {
185
+ return ContainerHash(values);
186
+ }
187
+
188
+ // Need a special case for optional<container>?
189
+ template <typename T>
190
+ hash_t Hash(const c10::optional<std::vector<T>>& value) {
191
+ if (value.has_value()) {
192
+ return ContainerHash(value.value());
193
+ } else {
194
+ return kNullOpt;
195
+ }
196
+ }
197
+
198
+ template <typename T>
199
+ hash_t Hash(const std::set<T>& values) {
200
+ return ContainerHash(values);
201
+ }
202
+
203
+ template <typename T, typename S>
204
+ hash_t Hash(const std::pair<T, S>& values) {
205
+ return HashCombine(Hash(values.first), Hash(values.second));
206
+ }
207
+
208
+ static inline hash_t Hash(const hash_t& value) {
209
+ return value;
210
+ }
211
+
212
+ template <typename T>
213
+ hash_t Hash(c10::ArrayRef<T> values) {
214
+ return ContainerHash(values);
215
+ }
216
+
217
+ template <typename T>
218
+ hash_t ContainerHash(const T& values) {
219
+ hash_t h(static_cast<uint64_t>(0x85ebca77c2b2ae63));
220
+ for (const auto& value : values) {
221
+ h = HashCombine(h, Hash(value));
222
+ }
223
+ return h;
224
+ }
225
+
226
+ // Varargs hashing
227
+ template <typename T = void>
228
+ hash_t MHash() {
229
+ return hash_t(static_cast<uint64_t>(0x165667b19e3779f9));
230
+ }
231
+
232
+ template <typename T, typename... Targs>
233
+ hash_t MHash(T value, Targs... Fargs) {
234
+ return HashCombine(Hash(value), MHash(Fargs...));
235
+ }
236
+
237
+ } // namespace lazy
238
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Scalar.h>
4
+ #include <c10/util/BFloat16.h>
5
+ #include <c10/util/Half.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <torch/csrc/lazy/core/permutation_util.h>
8
+ #include <torch/csrc/lazy/core/shape.h>
9
+ #include <torch/csrc/lazy/core/util.h>
10
+
11
+ #include <complex>
12
+ #include <functional>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+ // TODO: Consolidate this file with util.h
17
+
18
+ namespace torch {
19
+ namespace lazy {
20
+
21
+ // Converts an iterable container to a vector of int64's.
22
+ template <typename S>
23
+ static std::vector<int64_t> ToI64Vector(const S& input) {
24
+ return ToVector<int64_t>(input);
25
+ }
26
+
27
+ // Creates a set of dimension by dropping the drop_dims ones.
28
+ TORCH_API std::vector<int64_t> DropDimensions(
29
+ c10::ArrayRef<int64_t> sizes,
30
+ c10::ArrayRef<int64_t> drop_dims);
31
+
32
+ // Get the canonical dimension index in the [0, rank) interval. Negative
33
+ // indices are interpreted as follows: -1 is rank-1, -2 is rank-2 etc.
34
+ TORCH_API int64_t GetCanonicalDimensionIndex(int64_t dim, int64_t rank);
35
+
36
+ // Same as above, for multiple dimensions.
37
+ TORCH_API std::vector<int64_t> GetCanonicalDimensionIndices(
38
+ c10::ArrayRef<int64_t> dimensions,
39
+ int64_t rank);
40
+
41
+ // Returns the canonical position in the dim dimension, handling negative
42
+ // values for the position.
43
+ TORCH_API int64_t GetCanonicalPosition(
44
+ c10::ArrayRef<int64_t> dimensions,
45
+ int64_t dim,
46
+ int64_t pos);
47
+
48
+ // Creates a transposition from the given input and dimensions.
49
+ TORCH_API std::vector<int64_t> MakeTransposePermutation(
50
+ int64_t dim0,
51
+ int64_t dim1,
52
+ int64_t rank);
53
+
54
+ // Calculates the protomoted shape to which the input shapes should be
55
+ // broadcasted for an elementwise operation. The size of the common dimensions
56
+ // (2,3,4 for shape1, and 0,1,2 for shape2) must either match, or either one
57
+ // of the two be 1.
58
+ // Example:
59
+ // shape1 = [9, 7, 6, 1, 2]
60
+ // shape2 = [6, 5, 2]
61
+ // result_shape = [9, 7, 6, 5, 2]
62
+ TORCH_API std::vector<int64_t> GetPromotedShape(
63
+ c10::ArrayRef<int64_t> shape1_dims,
64
+ c10::ArrayRef<int64_t> shape2_dims);
65
+
66
+ TORCH_API Shape
67
+ GetPromotedBinaryOpShape(const Shape& shape1, const Shape& shape2);
68
+
69
+ TORCH_API std::vector<std::string> StrSplit(c10::string_view text, char delim);
70
+
71
+ } // namespace lazy
72
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/ir.h>
4
+
5
+ #include <c10/util/CallOnce.h>
6
+
7
+ #include <mutex>
8
+ #include <string>
9
+
10
+ namespace torch {
11
+ namespace lazy {
12
+
13
+ class TORCH_API OpKindWrapper {
14
+ public:
15
+ explicit OpKindWrapper(const char* name) : name_(name) {}
16
+
17
+ const OpKind& operator*() const {
18
+ return get();
19
+ }
20
+
21
+ operator OpKind() const {
22
+ return get();
23
+ }
24
+
25
+ private:
26
+ const OpKind& get() const {
27
+ c10::call_once(once_, [this]() { op_kind_ = OpKind::Get(name_); });
28
+ return op_kind_;
29
+ }
30
+
31
+ const char* name_;
32
+ mutable OpKind op_kind_;
33
+ mutable c10::once_flag once_;
34
+ };
35
+
36
+ const OpKindWrapper ltc_all_to_all("lazy_tensors::all_to_all");
37
+ const OpKindWrapper ltc_cast("lazy_tensors::cast");
38
+ const OpKindWrapper ltc_collective_permute("lazy_tensors::collective_permute");
39
+ const OpKindWrapper ltc_cross_replica_sum("lazy_tensors::cross_replica_sum");
40
+ const OpKindWrapper ltc_device_data("lazy_tensors::device_data");
41
+ const OpKindWrapper ltc_get_dimensions_size(
42
+ "lazy_tensors::ltc_get_dimensions_size");
43
+ const OpKindWrapper ltc_moving_average("lazy_tensors::moving_average");
44
+ const OpKindWrapper ltc_nms("lazy_tensors::nms");
45
+ const OpKindWrapper ltc_not_supported("lazy_tensors::not_supported");
46
+ const OpKindWrapper ltc_replication_pad("lazy_tensors::replication_pad");
47
+ const OpKindWrapper ltc_replication_pad_backward(
48
+ "lazy_tensors::replication_pad_backward");
49
+ const OpKindWrapper ltc_tensor_data("lazy_tensors::tensor_data");
50
+
51
+ } // namespace lazy
52
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/ArrayRef.h>
16
+ #include <c10/util/Flags.h>
17
+ #include <torch/csrc/lazy/core/hash.h>
18
+ #include <torch/csrc/lazy/core/ir_metadata.h>
19
+ #include <torch/csrc/lazy/core/shape.h>
20
+
21
+ C10_DECLARE_bool(ltc_enable_dynamic_shapes);
22
+
23
+ namespace torch {
24
+ namespace lazy {
25
+
26
+ static const hash_t kHashSeed(static_cast<uint32_t>(0x5a2d296e9));
27
+
28
+ class Node;
29
+ struct Output;
30
+ struct Value;
31
+
32
+ using NodePtr = std::shared_ptr<Node>;
33
+
34
+ // The Kind of operation a Node can be associated to.
35
+ struct TORCH_API OpKind {
36
+ OpKind() = default;
37
+ explicit OpKind(c10::Symbol op) : op(op) {}
38
+
39
+ bool operator==(const OpKind& rhs) const {
40
+ return op == rhs.op;
41
+ }
42
+ bool operator!=(const OpKind& rhs) const {
43
+ return !operator==(rhs);
44
+ }
45
+ bool operator<(const OpKind& rhs) const {
46
+ return c10::unique_t(op) < c10::unique_t(rhs.op);
47
+ }
48
+
49
+ hash_t hash() const;
50
+
51
+ std::string ToString() const {
52
+ return op.toQualString();
53
+ }
54
+
55
+ // Retrieves an existing operation object, or creates a new one. Operations
56
+ // that are specific to lazy tensors, should live within the 'lazy_tensors::'
57
+ // namespace.
58
+ static OpKind Get(const std::string& name);
59
+
60
+ c10::Symbol op;
61
+ };
62
+
63
+ inline std::ostream& operator<<(std::ostream& stream, const OpKind& op) {
64
+ stream << op.ToString();
65
+ return stream;
66
+ }
67
+
68
+ using OpList = c10::ArrayRef<Value>;
69
+
70
+ hash_t OperandHashes(
71
+ const OpList& operands,
72
+ const hash_t& seed,
73
+ bool bakeInSizes);
74
+ // A node in the graph. Nodes for operations which require extra data to be
75
+ // stored for lowering should inherit from this class and add an operation
76
+ // specific member there. For example, a constant might create a new
77
+ // NodeConstant class (inheriting from Node) with an extra lazy_tensors::Literal
78
+ // field, or a tensor value might create a new NodeTensor with a computation
79
+ // client data handle in it.
80
+ class TORCH_API Node {
81
+ public:
82
+ static bool enableDynamicShape();
83
+
84
+ // Creates a new node with the given op name. The op is a unique identifier
85
+ // for the operation. The num_outputs tells how many outputs a given operation
86
+ // generates.
87
+ //
88
+ // None leaf node's node_hash does not contains shape information always.
89
+ // So we pass in the hash value rather than a function.
90
+ Node(OpKind op, size_t num_outputs);
91
+
92
+ // Construct node with operands and shapes
93
+ Node(
94
+ OpKind op,
95
+ OpList operands,
96
+ std::vector<Shape>&& shapes,
97
+ size_t num_outputs = 1);
98
+
99
+ // Construct node with operands and shape generated from a function
100
+ Node(
101
+ OpKind op,
102
+ OpList operands,
103
+ const std::function<Shape()>& shape_fn,
104
+ size_t num_outputs = 1);
105
+
106
+ // Construct node with operands and no shape
107
+ Node(OpKind op, OpList operands, size_t num_outputs = 1);
108
+
109
+ // Construct node with shape and no operands
110
+ Node(OpKind op, Shape shape, size_t num_outputs = 1);
111
+
112
+ virtual ~Node();
113
+
114
+ const OpKind& op() const {
115
+ return op_;
116
+ }
117
+
118
+ size_t num_outputs() const {
119
+ return num_outputs_;
120
+ }
121
+
122
+ // Retrieves the full shape of the IR Node.
123
+ virtual c10::ArrayRef<Shape> shapes() const;
124
+
125
+ virtual const Shape& shape(size_t output_index = 0) const;
126
+
127
+ // Add the shape computed by the shape_fn
128
+ void addComputedShape(const std::function<Shape()>& shape_fn);
129
+
130
+ // Compute the shape using the provided shape_fn if not previously cached
131
+ Shape computeShape(const std::function<Shape()>& shape_fn);
132
+
133
+ virtual const std::vector<Output>& operands() const;
134
+
135
+ virtual const Output& operand(size_t i) const;
136
+
137
+ // Gets operand at index i if index is valid, or kNullOutput otherwise.
138
+ virtual const Output& nullable_operand(size_t i) const;
139
+
140
+ // Returns the hash of the dag used to look up the compiled graph
141
+ virtual hash_t hash() const = 0;
142
+
143
+ // Returns the hash of the dag used to for shape caching
144
+ virtual hash_t shapeHash() const = 0;
145
+
146
+ const MetaData& metadata() const {
147
+ return metadata_;
148
+ }
149
+
150
+ UserMetaData* user_metadata() const {
151
+ return user_metadata_.get();
152
+ }
153
+
154
+ std::shared_ptr<UserMetaData> SetUserMetadata(
155
+ std::shared_ptr<UserMetaData> user_meta) {
156
+ std::swap(user_metadata_, user_meta);
157
+ return user_meta;
158
+ }
159
+
160
+ virtual std::string ToString() const;
161
+
162
+ private:
163
+ // The ID of the operation captured by this node.
164
+ OpKind op_;
165
+ size_t num_outputs_ = 1;
166
+
167
+ // The IR specific metadata attached to the IR node.
168
+ MetaData metadata_;
169
+ // The IR framework user can attach a user defined metadata object deriving
170
+ // from UserMetaData.
171
+ std::shared_ptr<UserMetaData> user_metadata_;
172
+
173
+ protected:
174
+ // Adds node's index output number as operand.
175
+ void AddOperand(NodePtr node, size_t index = 0);
176
+
177
+ std::vector<Shape> shapes_;
178
+ // A node holds a real reference to its operands.
179
+ std::vector<NodePtr> operands_;
180
+ // Outputs do not hold references on the nodes, and neither do the uses, since
181
+ // otherwise we get into circular reference counting.
182
+ std::vector<Output> operands_as_outputs_;
183
+ };
184
+
185
+ inline std::ostream& operator<<(std::ostream& stream, const Node& node) {
186
+ stream << node.ToString();
187
+ return stream;
188
+ }
189
+
190
+ // Note: Keep this version of NodeCast for smooth PyTorch/XLA migration, and
191
+ // clean up once the migration is done.
192
+ template <typename T>
193
+ const T* NodeCast(const Node* node, OpKind op) {
194
+ if (op != node->op()) {
195
+ return nullptr;
196
+ }
197
+ #ifdef NDEBUG
198
+ return static_cast<const T*>(node);
199
+ #else
200
+ return &dynamic_cast<const T&>(*node);
201
+ #endif
202
+ }
203
+
204
+ template <typename T>
205
+ const T* NodeCast(const Node* node) {
206
+ if (T::ClassOpKind() != node->op()) {
207
+ return nullptr;
208
+ }
209
+ // TODO: Some IR classes share the same opkind, such as Mean and MeanDim, so
210
+ // static_cast is not safe here. Unless we have opkind unique for each class,
211
+ // we have to use dynamic_cast here.
212
+ return dynamic_cast<const T*>(node);
213
+ }
214
+
215
+ // Represents a specific output produced by a node. Since the output of a node
216
+ // can be composed by multiple outputs, the node+index coordinates fully qualify
217
+ // each single output.
218
+ struct TORCH_API Output {
219
+ struct Hasher {
220
+ size_t operator()(const Output& output) const;
221
+ };
222
+
223
+ Output() = default;
224
+ explicit Output(const Node* node, size_t index = 0)
225
+ : node(node), index(index) {}
226
+
227
+ hash_t hash() const;
228
+ hash_t shapeHash() const;
229
+
230
+ bool operator==(const Output& rhs) const {
231
+ return node == rhs.node && index == rhs.index;
232
+ }
233
+
234
+ // To compare the operands of to-be-constructed node and to-be-reused node
235
+ bool operator==(const Value& rhs) const;
236
+
237
+ bool operator!=(const Output& rhs) const {
238
+ return !operator==(rhs);
239
+ }
240
+
241
+ const Shape& shape() const {
242
+ return node->shape(index);
243
+ }
244
+
245
+ std::string ToString() const;
246
+
247
+ // The node providing the output.
248
+ const Node* node{nullptr};
249
+ // The index in the node's output this output refers to.
250
+ size_t index{0};
251
+ };
252
+
253
+ inline std::ostream& operator<<(std::ostream& stream, const Output& output) {
254
+ stream << output.ToString();
255
+ return stream;
256
+ }
257
+
258
+ template <typename T>
259
+ using OutputMap = std::unordered_map<Output, T, Output::Hasher>;
260
+
261
+ // Represents an input/operand for a Node object.
262
+ struct TORCH_API Value {
263
+ Value() = default;
264
+ /* implicit */ Value(NodePtr&& node, size_t index = 0)
265
+ : node(std::move(node)), index(index) {}
266
+ /* implicit */ Value(const NodePtr& node, size_t index = 0)
267
+ : node(node), index(index) {}
268
+
269
+ hash_t hash() const;
270
+ hash_t shapeHash() const;
271
+
272
+ operator bool() const {
273
+ return node != nullptr;
274
+ }
275
+
276
+ operator Output() const {
277
+ return Output(node.get(), index);
278
+ }
279
+
280
+ const Shape& shape() const {
281
+ return node->shape(index);
282
+ }
283
+
284
+ Node* operator->() const {
285
+ return node.get();
286
+ }
287
+
288
+ NodePtr node;
289
+ size_t index = 0;
290
+ };
291
+
292
+ } // namespace lazy
293
+ } // namespace torch
294
+
295
+ namespace c10 {
296
+ // Explicit template instantiation to make ArrayRef<Value> work
297
+ template class at::ArrayRef<torch::lazy::Value>;
298
+ } // namespace c10
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <torch/csrc/lazy/backend/backend_interface.h>
6
+ #include <torch/csrc/lazy/core/config.h>
7
+ #include <torch/csrc/lazy/core/ir.h>
8
+ #include <torch/csrc/lazy/core/tensor.h>
9
+ #include <torch/csrc/lazy/core/trie.h>
10
+ #include <vector>
11
+
12
+ // This file is part of the backend interface. So, ops shouldn't be added or
13
+ // removed without due process The exception to this being the view ops which
14
+ // will be removed soon pending functionalization
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ template <typename T, typename... Args>
20
+ NodePtr ReuseNode(Args&&... args) {
21
+ if (FLAGS_torch_lazy_reuse_ir) {
22
+ return LookupNodeFromTrieCache<T>(std::forward<Args>(args)...);
23
+ }
24
+ return nullptr;
25
+ }
26
+
27
+ // Caching an IR node into TrieCache
28
+ static inline void CacheNode(NodePtr node) {
29
+ if (FLAGS_torch_lazy_reuse_ir) {
30
+ TrieCache::Get()->Insert(std::move(node));
31
+ }
32
+ }
33
+
34
+ template <typename T, typename... Args>
35
+ NodePtr MakeNode(Args&&... args) {
36
+ return std::make_shared<T>(std::forward<Args>(args)...);
37
+ }
38
+
39
+ // op is passed in for a more efficient node casting, see the implementation of
40
+ // NodeCast
41
+ template <typename T, typename... Args>
42
+ NodePtr ReuseOrMakeNode(Args&&... args) {
43
+ NodePtr node = ReuseNode<T>(std::forward<Args>(args)...);
44
+ if (!node) {
45
+ node = MakeNode<T>(std::forward<Args>(args)...);
46
+ CacheNode(node);
47
+ }
48
+ return node;
49
+ }
50
+
51
+ struct IrBuilder {
52
+ virtual NodePtr MakeDeviceData(
53
+ const std::shared_ptr<BackendData>& data) const = 0;
54
+ virtual NodePtr MakeScalar(
55
+ const at::Scalar& value,
56
+ const at::ScalarType& type) const = 0;
57
+ virtual NodePtr MakeExpand(
58
+ const Value& input0,
59
+ const std::vector<int64_t>& size,
60
+ const bool& is_scalar_expand) const = 0;
61
+ virtual NodePtr MakeCast(
62
+ const Value& input0,
63
+ const at::ScalarType& dtype,
64
+ const c10::optional<at::ScalarType>& stype = c10::nullopt) const = 0;
65
+ virtual NodePtr MakeTensorList(const OpList& inputs) const = 0;
66
+ virtual NodePtr MakeGeneric(
67
+ const OpKind& op,
68
+ const OpList& operands,
69
+ const Shape& shape,
70
+ const size_t& num_outputs = 1,
71
+ const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) const = 0;
72
+
73
+ // dynamic ir nodes
74
+ virtual NodePtr MakeSizeNode(const Value& input, size_t dim) const = 0;
75
+ virtual NodePtr MakeSizeAdd(const Value& a, const Value& b) const = 0;
76
+ virtual NodePtr MakeSizeMul(const Value& a, const Value& b) const = 0;
77
+ virtual NodePtr MakeSizeDiv(const Value& a, const Value& b) const = 0;
78
+
79
+ virtual ~IrBuilder() = default;
80
+ };
81
+
82
+ static inline NodePtr MakeDeviceData(const std::shared_ptr<BackendData>& data) {
83
+ return getIrBuilder()->MakeDeviceData(data);
84
+ }
85
+ static inline NodePtr MakeScalar(
86
+ const at::Scalar& value,
87
+ const at::ScalarType& type) {
88
+ return getIrBuilder()->MakeScalar(value, type);
89
+ }
90
+ static inline NodePtr MakeExpand(
91
+ const Value& input0,
92
+ const std::vector<int64_t>& size,
93
+ const bool& is_scalar_expand) {
94
+ return getIrBuilder()->MakeExpand(input0, size, is_scalar_expand);
95
+ }
96
+ static inline NodePtr MakeCast(
97
+ const Value& input0,
98
+ const at::ScalarType& dtype,
99
+ const c10::optional<at::ScalarType>& stype = c10::nullopt) {
100
+ return getIrBuilder()->MakeCast(input0, dtype, stype);
101
+ }
102
+ static inline NodePtr MakeTensorList(const OpList& inputs) {
103
+ return getIrBuilder()->MakeTensorList(inputs);
104
+ }
105
+ static inline NodePtr MakeGeneric(
106
+ const OpKind& op,
107
+ const OpList& operands,
108
+ const Shape& shape,
109
+ const size_t& num_outputs = 1,
110
+ const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) {
111
+ return getIrBuilder()->MakeGeneric(
112
+ op, operands, shape, num_outputs, hash_seed);
113
+ }
114
+
115
+ // dynamic ir nodes
116
+ static inline NodePtr MakeSizeNode(const Value& input, size_t dim) {
117
+ return getIrBuilder()->MakeSizeNode(input, dim);
118
+ }
119
+ static inline NodePtr MakeSizeAdd(const Value& a, const Value& b) {
120
+ return getIrBuilder()->MakeSizeAdd(a, b);
121
+ }
122
+ static inline NodePtr MakeSizeMul(const Value& a, const Value& b) {
123
+ // Bug fix: this wrapper previously forwarded to MakeSizeAdd, so a
+ // dynamic-shape size *multiplication* silently produced a size *sum*.
+ // Delegate to the backend IrBuilder's MakeSizeMul, matching the sibling
+ // MakeSizeAdd/MakeSizeDiv wrappers above and below.
+ return getIrBuilder()->MakeSizeMul(a, b);
124
+ }
125
+ static inline NodePtr MakeSizeDiv(const Value& a, const Value& b) {
126
+ return getIrBuilder()->MakeSizeDiv(a, b);
127
+ }
128
+
129
+ inline Value GetSymIntValue(c10::SymInt a) {
130
+ if (auto ma = a.maybe_as_int()) {
131
+ return Value(MakeScalar(*ma, at::kLong), 0);
132
+ } else {
133
+ return Value(
134
+ dynamic_cast<torch::lazy::SymNodeImpl*>(a.toSymNodeImplUnowned())
135
+ ->node_,
136
+ 0);
137
+ }
138
+ }
139
+
140
+ // TODO: this should return Value
141
+ inline std::vector<int64_t> GetSymIntArrayRefValue(c10::SymIntArrayRef arr) {
142
+ std::vector<int64_t> r;
143
+ for (const auto& a : arr) {
144
+ r.emplace_back(a.guard_int(__FILE__, __LINE__));
145
+ }
146
+ return r;
147
+ }
148
+
149
+ } // namespace lazy
150
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <unordered_map>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/lazy/core/ir.h>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ class TORCH_API Util {
12
+ public:
13
+ // Tracks the emission status of the nodes during the post-order generation.
14
+ // It helps tracking loops within the computation graphs.
15
+ enum EmitStatus {
16
+ kNotEmitted,
17
+ kEmitting,
18
+ kEmitted,
19
+ };
20
+
21
+ using EmissionMap = std::unordered_map<const Node*, EmitStatus>;
22
+
23
+ // Computes the post order from the given node, without using recursion. The
24
+ // emission map can be used as saved state, for multiple separate calls to
25
+ // this API. The returned post-order can be empty if the node has already been
26
+ // emitted inside the emission map. An error is generated if a loop is
27
+ // detected.
28
+ static std::vector<const Node*> ComputePostOrder(
29
+ const Node* node,
30
+ EmissionMap* emap);
31
+
32
+ static std::vector<const Node*> ComputePostOrder(
33
+ c10::ArrayRef<const Node*> nodes,
34
+ EmissionMap* emap);
35
+
36
+ // Same as above, but computes the post order on the set of nodes specified as
37
+ // argument.
38
+ static std::vector<const Node*> ComputePostOrder(
39
+ c10::ArrayRef<const Node*> nodes);
40
+
41
+ // Retrieves the number of nodes within the graph whose sink are passed in the
42
+ // nodes argument.
43
+ static size_t GetGraphSize(c10::ArrayRef<const Node*> nodes);
44
+ };
45
+
46
+ } // namespace lazy
47
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * This file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <atomic>
9
+ #include <functional>
10
+ #include <map>
11
+ #include <memory>
12
+ #include <mutex>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ #include <c10/macros/Export.h>
17
+
18
+ namespace torch {
19
+ namespace lazy {
20
+
21
+ struct TORCH_API Sample {
22
+ Sample() = default;
23
+ Sample(int64_t timestamp_ns, double value)
24
+ : timestamp_ns(timestamp_ns), value(value) {}
25
+
26
+ int64_t timestamp_ns = 0;
27
+ double value = 0;
28
+ };
29
+
30
+ using MetricReprFn = std::function<std::string(double)>;
31
+
32
+ // Class used to collect time-stamped numeric samples. The samples are stored in
33
+ // a circular buffer whose size can be configured at constructor time.
34
+ class TORCH_API MetricData {
35
+ public:
36
+ // Creates a new MetricData object with the internal circular buffer storing
37
+ // max_samples samples. The repr_fn argument allow to specify a function which
38
+ // pretty-prints a sample value.
39
+ MetricData(MetricReprFn repr_fn, size_t max_samples);
40
+
41
+ // Returns the total values of all the samples being posted to this metric.
42
+ double Accumulator() const;
43
+
44
+ size_t TotalSamples() const;
45
+
46
+ void AddSample(int64_t timestamp_ns, double value);
47
+
48
+ // Returns a vector with all the current samples, from the oldest to the
49
+ // newer. If accumulator is not nullptr, it will receive the current value of
50
+ // the metrics' accumulator (the sum of all posted values). If total_samples
51
+ // is not nullptr, it will receive the count of the posted values.
52
+ std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
53
+
54
+ std::string Repr(double value) const {
55
+ return repr_fn_(value);
56
+ }
57
+
58
+ void Reset();
59
+
60
+ bool IsValid() const {
61
+ return TotalSamples() > 0;
62
+ }
63
+
64
+ private:
65
+ mutable std::mutex lock_;
66
+ MetricReprFn repr_fn_;
67
+ size_t count_ = 0;
68
+ std::vector<Sample> samples_;
69
+ double accumulator_ = 0.0;
70
+ };
71
+
72
+ // Counters are a very lightweight form of metrics which do not need to track
73
+ // sample time.
74
+ class TORCH_API CounterData {
75
+ public:
76
+ CounterData() : value_(0) {}
77
+
78
+ void AddValue(int64_t value) {
79
+ value_ += value;
80
+ }
81
+
82
+ int64_t Value() const {
83
+ return value_;
84
+ }
85
+
86
+ void Reset() {
87
+ value_ = 0;
88
+ }
89
+
90
+ bool IsValid() const {
91
+ return value_ > 0;
92
+ }
93
+
94
+ private:
95
+ std::atomic<int64_t> value_;
96
+ };
97
+
98
+ class TORCH_API MetricsArena {
99
+ public:
100
+ static MetricsArena* Get();
101
+
102
+ void ResetCounters();
103
+ void ResetMetrics();
104
+
105
+ // Registers a new metric in the global arena.
106
+ void RegisterMetric(
107
+ const std::string& name,
108
+ MetricReprFn repr_fn,
109
+ size_t max_samples,
110
+ std::shared_ptr<MetricData>* data);
111
+
112
+ void RegisterCounter(
113
+ const std::string& name,
114
+ std::shared_ptr<CounterData>* data);
115
+
116
+ void ForEachMetric(
117
+ const std::function<void(const std::string&, MetricData*)>& metric_func);
118
+
119
+ void ForEachCounter(
120
+ const std::function<void(const std::string&, CounterData*)>&
121
+ counter_func);
122
+
123
+ std::vector<std::string> GetMetricNames();
124
+
125
+ MetricData* GetMetric(const std::string& name);
126
+
127
+ std::vector<std::string> GetCounterNames();
128
+
129
+ CounterData* GetCounter(const std::string& name);
130
+
131
+ private:
132
+ std::mutex lock_;
133
+ std::map<std::string, std::shared_ptr<MetricData>> metrics_;
134
+ std::map<std::string, std::shared_ptr<CounterData>> counters_;
135
+ };
136
+
137
+ // Emits the value in a to_string() conversion.
138
+ TORCH_API std::string MetricFnValue(double value);
139
+ // Emits the value in a humanized bytes representation.
140
+ TORCH_API std::string MetricFnBytes(double value);
141
+ // Emits the value in a humanized time representation. The value is expressed in
142
+ // nanoseconds EPOCH time.
143
+ TORCH_API std::string MetricFnTime(double value);
144
+
145
+ // The typical use of a Metric is one in which it gets created either in a
146
+ // global scope context:
147
+ // static Metric* metric = new Metric("RpcCount");
148
+ // Or within a function scope:
149
+ // void MyFunction(...) {
150
+ // static Metric* metric = new Metric("RpcCount");
151
+ // ...
152
+ // metric->AddSample(ts_nanos, some_value);
153
+ // }
154
+ class TORCH_API Metric {
155
+ public:
156
+ explicit Metric(
157
+ std::string name,
158
+ MetricReprFn repr_fn = MetricFnValue,
159
+ size_t max_samples = 0);
160
+
161
+ const std::string& Name() const {
162
+ return name_;
163
+ }
164
+
165
+ double Accumulator() const;
166
+
167
+ void AddSample(int64_t timestamp_ns, double value);
168
+
169
+ void AddSample(double value);
170
+
171
+ std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
172
+
173
+ std::string Repr(double value) const;
174
+
175
+ private:
176
+ MetricData* GetData() const;
177
+
178
+ std::string name_;
179
+ MetricReprFn repr_fn_;
180
+ size_t max_samples_;
181
+ mutable std::shared_ptr<MetricData> data_ptr_;
182
+ mutable std::atomic<MetricData*> data_;
183
+ };
184
+
185
+ // A Counter is a lightweight form of metric which tracks an integer value which
186
+ // can increase or decrease.
187
+ // A typical use is as:
188
+ // static Counter* counter = new Counter("MyCounter");
189
+ // ...
190
+ // counter->AddValue(+1);
191
+ class TORCH_API Counter {
192
+ public:
193
+ explicit Counter(std::string name);
194
+
195
+ void AddValue(int64_t value) {
196
+ GetData()->AddValue(value);
197
+ }
198
+
199
+ int64_t Value() const {
200
+ return GetData()->Value();
201
+ }
202
+
203
+ private:
204
+ CounterData* GetData() const;
205
+
206
+ std::string name_;
207
+ mutable std::shared_ptr<CounterData> data_ptr_;
208
+ mutable std::atomic<CounterData*> data_;
209
+ };
210
+
211
+ #define TORCH_LAZY_COUNTER(name, value) \
212
+ do { \
213
+ static ::torch::lazy::Counter* __counter = \
214
+ new ::torch::lazy::Counter(name); \
215
+ __counter->AddValue(value); \
216
+ } while (0)
217
+
218
+ #define TORCH_LAZY_FN_COUNTER(ns) TORCH_LAZY_COUNTER(c10::str(ns, __func__), 1)
219
+
220
+ #define TORCH_LAZY_VALUE_METRIC(name, value) \
221
+ do { \
222
+ static ::torch::lazy::Metric* __metric = \
223
+ new ::torch::lazy::Metric(name, torch::lazy::MetricFnValue); \
224
+ __metric->AddSample(value); \
225
+ } while (0)
226
+
227
+ // Creates a report with the current metrics statistics.
228
+ TORCH_API std::string CreateMetricReport();
229
+
230
+ // Creates a report with the selected metrics statistics.
231
+ TORCH_API std::string CreateMetricReport(
232
+ const std::vector<std::string>& counter_names,
233
+ const std::vector<std::string>& metric_names);
234
+
235
+ // Returns the currently registered metric names. Note that the list can grow
236
+ // since metrics are usually function intialized (they are static function
237
+ // variables).
238
+ TORCH_API std::vector<std::string> GetMetricNames();
239
+
240
+ // Retrieves the metric data of a given metric, or nullptr if such metric does
241
+ // not exist.
242
+ TORCH_API MetricData* GetMetric(const std::string& name);
243
+
244
+ // Returns the currently registered counter names. Note that the list can grow
245
+ // since counters are usually function intialized (they are static function
246
+ // variables).
247
+ TORCH_API std::vector<std::string> GetCounterNames();
248
+
249
+ // Retrieves the counter data of a given counter, or nullptr if such counter
250
+ // does not exist.
251
+ TORCH_API CounterData* GetCounter(const std::string& name);
252
+
253
+ // Retrieves the current EPOCH time in nanoseconds.
254
+ TORCH_API int64_t NowNs();
255
+
256
+ // Scope based utility class TORCH_API to measure the time the code takes within
257
+ // a given C++ scope.
258
+ class TORCH_API TimedSection {
259
+ public:
260
+ explicit TimedSection(Metric* metric) : metric_(metric), start_(NowNs()) {}
261
+
262
+ ~TimedSection() {
263
+ int64_t now = NowNs();
264
+ metric_->AddSample(now, now - start_);
265
+ }
266
+
267
+ double Elapsed() const {
268
+ return 1e-9 * static_cast<double>(NowNs() - start_);
269
+ }
270
+
271
+ private:
272
+ Metric* metric_;
273
+ int64_t start_;
274
+ };
275
+
276
+ #define TORCH_LAZY_TIMED(name) \
277
+ static torch::lazy::Metric* timed_metric = \
278
+ new torch::lazy::Metric(name, torch::lazy::MetricFnTime); \
279
+ torch::lazy::TimedSection timed_section(timed_metric)
280
+
281
+ #define TORCH_LAZY_FN_COUNTER_TIMED_TRACING(ns) \
282
+ TORCH_LAZY_FN_COUNTER(ns); \
283
+ TORCH_LAZY_TIMED("LazyTracing")
284
+
285
+ } // namespace lazy
286
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * This file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/multi_wait.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <condition_variable>
9
+ #include <exception>
10
+ #include <functional>
11
+ #include <memory>
12
+ #include <mutex>
13
+
14
+ #include <c10/macros/Export.h>
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ // Support waiting for a number of tasks to complete.
20
+ class TORCH_API MultiWait {
21
+ public:
22
+ explicit MultiWait(size_t count) : count_(count) {}
23
+
24
+ // Signal the completion of a single task.
25
+ void Done();
26
+
27
+ // Waits until at least count (passed as constructor value) completions
28
+ // happened.
29
+ void Wait();
30
+
31
+ // Same as above, but waits up to wait_seconds.
32
+ void Wait(double wait_seconds);
33
+
34
+ // Resets the threshold counter for the MultiWait object. The completed count
35
+ // is also reset to zero.
36
+ void Reset(size_t count);
37
+
38
+ // Creates a completer functor which signals the multi wait object once func
39
+ // has completed. Handles exceptions by signaling the multi wait with the
40
+ // proper status value. This API returns a function which captures a MultiWait
41
+ // reference, so care must be taken such that the reference remains valid for
42
+ // the whole lifetime of the returned function.
43
+ std::function<void()> Completer(std::function<void()> func);
44
+
45
+ // Similar as the above API, but with explicit capture of the MultiWait shared
46
+ // pointer.
47
+ static std::function<void()> Completer(
48
+ std::shared_ptr<MultiWait> mwait,
49
+ std::function<void()> func);
50
+
51
+ private:
52
+ void Complete(const std::function<void()>& func);
53
+
54
+ std::mutex mutex_;
55
+ std::condition_variable cv_;
56
+ size_t count_ = 0;
57
+ size_t completed_count_ = 0;
58
+ std::exception_ptr exptr_;
59
+ };
60
+
61
+ } // namespace lazy
62
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <vector>
2
+
3
+ #include <torch/csrc/lazy/core/tensor_util.h>
4
+ #include <torch/csrc/lazy/core/util.h>
5
+
6
+ namespace torch {
7
+ namespace lazy {
8
+
9
+ TORCH_API bool StrideIsSupported(c10::ArrayRef<int64_t> stride);
10
+
11
+ TORCH_API std::vector<int64_t> GetArrayStridePermutation(
12
+ c10::ArrayRef<int64_t> stride);
13
+
14
+ TORCH_API Shape MakeDiagonalShape(
15
+ const Shape& shape,
16
+ int64_t offset,
17
+ int64_t dim1,
18
+ int64_t dim2);
19
+
20
+ TORCH_API Shape
21
+ MakePermuteShape(const Shape& source_shape, c10::ArrayRef<int64_t> permutation);
22
+
23
+ TORCH_API Shape MakeSelectShape(
24
+ const Shape& shape,
25
+ int64_t dim,
26
+ int64_t start,
27
+ int64_t end,
28
+ int64_t stride);
29
+
30
+ TORCH_API int64_t GetStride(int64_t start, int64_t end, int64_t stride);
31
+
32
+ TORCH_API std::vector<int64_t> BuildSqueezedDimensions(
33
+ c10::ArrayRef<int64_t> dimensions,
34
+ int64_t squeeze_dim);
35
+
36
+ TORCH_API std::vector<int64_t> BuildUnsqueezedDimensions(
37
+ c10::ArrayRef<int64_t> dimensions,
38
+ int64_t squeeze_dim);
39
+
40
+ } // namespace lazy
41
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/ArrayRef.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ #include <vector>
8
+
9
+ namespace torch {
10
+ namespace lazy {
11
+
12
+ TORCH_API std::vector<int64_t> InversePermutation(
13
+ c10::ArrayRef<int64_t> input_permutation);
14
+
15
+ TORCH_API bool IsPermutation(c10::ArrayRef<int64_t> permutation);
16
+
17
+ // Gathers the input using the order specified by the permutation. For each i,
18
+ // output[i] = dimensions[permutation[i]]. The given permutation must be the
19
+ // same size as the input.
20
+ template <typename Container>
21
+ std::vector<typename Container::value_type> PermuteDimensions(
22
+ c10::ArrayRef<int64_t> permutation,
23
+ const Container& dimensions) {
24
+ using T = typename Container::value_type;
25
+ TORCH_CHECK(
26
+ dimensions.size() == permutation.size(),
27
+ "Invalid permutation specified. dimensions.size() != permutation.size() (",
28
+ dimensions.size(),
29
+ " vs. ",
30
+ permutation.size(),
31
+ ")");
32
+ TORCH_CHECK(
33
+ IsPermutation(permutation),
34
+ "Invalid permutation specified. Permutation is not permutation");
35
+ std::vector<T> output(dimensions.size());
36
+ for (const auto i : c10::irange(permutation.size())) {
37
+ output[i] = dimensions[permutation[i]];
38
+ }
39
+ return output;
40
+ }
41
+
42
+ } // namespace lazy
43
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ostream>
4
+ #include <vector>
5
+
6
+ #include <c10/core/Scalar.h>
7
+ #include <torch/csrc/jit/passes/symbolic_shape_analysis.h>
8
+ #include <torch/csrc/lazy/core/hash.h>
9
+
10
+ C10_DECLARE_bool(ltc_enable_symbolic_shapes);
11
+
12
+ namespace torch {
13
+ namespace lazy {
14
+
15
+ class TORCH_API Shape {
16
+ public:
17
+ Shape() = default;
18
+
19
+ Shape(
20
+ at::ScalarType scalar_type,
21
+ c10::ArrayRef<int64_t> sizes,
22
+ c10::optional<std::vector<bool>> is_symbolic = c10::nullopt);
23
+
24
+ std::string to_string() const;
25
+
26
+ c10::ScalarType scalar_type() const {
27
+ return scalar_type_;
28
+ }
29
+ void set_scalar_type(at::ScalarType value) {
30
+ scalar_type_ = value;
31
+ }
32
+
33
+ int64_t dim() const {
34
+ return sizes_.size();
35
+ }
36
+ c10::ArrayRef<int64_t> sizes() const {
37
+ return sizes_;
38
+ }
39
+ int64_t size(int64_t dim) const {
40
+ return sizes_.at(dim);
41
+ }
42
+ void set_size(int64_t dim, int64_t size) {
43
+ sizes_.at(dim) = size;
44
+ }
45
+
46
+ const c10::optional<std::vector<bool>>& is_symbolic() const {
47
+ return is_symbolic_;
48
+ }
49
+
50
+ // Makes a copy with symbolic dims applied
51
+ Shape with_symbolic_dims(
52
+ c10::optional<std::vector<bool>> symbolic_dims) const;
53
+
54
+ size_t numel() const;
55
+ hash_t hash(bool bakeInSizes) const;
56
+
57
+ bool operator==(const Shape& other) const;
58
+
59
+ private:
60
+ c10::ScalarType scalar_type_{c10::ScalarType::Undefined};
61
+
62
+ // Sizes are the upper bound sizes for a tensor, used by XLA.
63
+ std::vector<int64_t> sizes_;
64
+ // Stores which dimmensions are symbolic
65
+ // If nullopt, either it hasn't been initialized or the symbolic
66
+ // dimmensions are not calculatable
67
+ c10::optional<std::vector<bool>> is_symbolic_ = c10::nullopt;
68
+ };
69
+
70
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Shape& shape);
71
+
72
+ TORCH_API bool symbolicShapeEnabled();
73
+ // Calculate and applies symbolic shapes onto the
74
+ // Shape objects passed to result_shapes
75
+ TORCH_API void applySymbolicShapesOnLT(
76
+ const char* schema_str,
77
+ std::vector<c10::IValue> args,
78
+ std::vector<Shape>& result_shapes);
79
+ } // namespace lazy
80
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/ScalarType.h>
5
+ #include <c10/core/SymInt.h>
6
+ #include <c10/core/SymIntArrayRef.h>
7
+ #include <c10/core/SymNodeImpl.h>
8
+ #include <c10/macros/Export.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <torch/csrc/lazy/backend/backend_data.h>
11
+ #include <torch/csrc/lazy/core/ir.h>
12
+ #include <torch/csrc/lazy/core/shape.h>
13
+ #include <torch/csrc/lazy/core/tensor.h>
14
+ #include <vector>
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+ // Turn clang-format off, as we rely on the whole signature being on one line
19
+ // for codegen.
20
+ // clang-format off
21
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
22
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self);
23
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
24
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
25
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
26
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
27
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator);
28
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator);
29
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
30
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
31
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);
32
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_cholesky(const at::Tensor & self, bool upper);
33
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_clamp_min(const at::Tensor & self, const at::Scalar & min);
34
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format);
35
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value);
36
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
37
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
38
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse);
39
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
40
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, at::IntArrayRef size, bool implicit);
41
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
42
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_flip(const at::Tensor & self, at::IntArrayRef dims);
43
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
44
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
45
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
46
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
47
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index);
48
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_inverse(const at::Tensor & self);
49
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_isnan(const at::Tensor & self);
50
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
51
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_log_sigmoid_forward(const at::Tensor & self);
52
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logdet(const at::Tensor & self);
53
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_and(const at::Tensor & self, const at::Tensor & other);
54
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_not(const at::Tensor & self);
55
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_or(const at::Tensor & self, const at::Tensor & other);
56
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_xor(const at::Tensor & self, const at::Tensor & other);
57
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value);
58
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value);
59
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_max(const at::Tensor & self);
60
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype);
61
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_min(const at::Tensor & self);
62
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_mv(const at::Tensor & self, const at::Tensor & vec);
63
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps);
64
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
65
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout(const at::Tensor & input, double p, c10::optional<bool> train);
66
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale);
67
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps);
68
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
69
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
70
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
71
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
72
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_nonzero(const at::Tensor & self);
73
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
74
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, c10::optional<at::Generator> generator);
75
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator);
76
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator);
77
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
78
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
79
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);
80
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta);
81
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_sort(const at::Tensor & self, int64_t dim, bool descending);
82
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_stack(at::TensorList tensors, int64_t dim);
83
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, bool unbiased);
84
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
85
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim);
86
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype);
87
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format);
88
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_take(const at::Tensor & self, const at::Tensor & index);
89
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_trace(const at::Tensor & self);
90
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_zero(const at::Tensor & self);
91
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_narrow_copy_symint(const at::Tensor & self, int64_t dim, int64_t start, c10::SymInt length);
92
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish(const at::Tensor & self);
93
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self);
94
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_selu(const at::Tensor & self);
95
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
96
+
97
+ // Non-Native ops
98
+ TORCH_API std::vector<Shape> compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type);
99
+ TORCH_API std::vector<Shape> compute_shape_expand(const Output& input0, const std::vector<int64_t>& size, const bool& is_scalar_expand);
100
+ TORCH_API std::vector<Shape> compute_shape_view(const Output& input0, const std::vector<int64_t>& output_sizes);
101
+ TORCH_API std::vector<Shape> compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const c10::optional<at::ScalarType>& stype);
102
+
103
+ // View Ops
104
+ // (Now that functionalization pass is used, we should kill these in a later PR)
105
+ TORCH_API std::vector<Shape> compute_shape_as_strided_view_update(const Output& target, const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
106
+ TORCH_API std::vector<Shape> compute_shape_as_strided(const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
107
+ TORCH_API std::vector<Shape> compute_shape_diagonal_view_update(const Output& target, const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
108
+ TORCH_API std::vector<Shape> compute_shape_diagonal(const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
109
+ TORCH_API std::vector<Shape> compute_shape_narrow_view_update(const Output& input, const Output& source, const std::vector<int64_t>& base_indices);
110
+ TORCH_API std::vector<Shape> compute_shape_narrow(const Output& input, const std::vector<int64_t>& base_indices, const std::vector<int64_t>& sizes);
111
+ TORCH_API std::vector<Shape> compute_shape_permute(const Output& input, const std::vector<int64_t>& dims);
112
+ TORCH_API std::vector<Shape> compute_shape_resize(const Output& input, const std::vector<int64_t>& size);
113
+ TORCH_API std::vector<Shape> compute_shape_select_view_update(const Output& target, const Output& source, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
114
+ TORCH_API std::vector<Shape> compute_shape_select(const Output& input, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
115
+ TORCH_API std::vector<Shape> compute_shape_squeeze(const Output& input, const int& dim);
116
+ TORCH_API std::vector<Shape> compute_shape_unsqueeze(const Output& input, const int& dim);
117
+
118
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index);
119
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2);
120
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step);
121
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
122
+ // clang-format on
123
+ } // namespace lazy
124
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/SymIntArrayRef.h>
5
+ #include <c10/core/TensorImpl.h>
6
+
7
+ #include <torch/csrc/lazy/core/tensor.h>
8
+
9
+ namespace torch {
10
+ namespace lazy {
11
+
12
+ // Tensor implementation class used to be fed to the at::Tensor.
13
+ // Its scope is just to handle an LazyTensor.
14
+ class TORCH_API LTCTensorImpl final : public c10::TensorImpl {
15
+ public:
16
+ explicit LTCTensorImpl(const LazyTensorPtr& tensor);
17
+ explicit LTCTensorImpl(const LazyTensor& tensor);
18
+ explicit LTCTensorImpl(LazyTensor&& tensor);
19
+
20
+ LazyTensorPtr tensor() {
21
+ return tensor_;
22
+ }
23
+
24
+ void set_tensor(const LazyTensorPtr& lazy_tensor);
25
+
26
+ void force_refresh_sizes() {
27
+ generation_ = 0;
28
+ }
29
+
30
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
31
+ const c10::VariableVersion& version_counter,
32
+ bool allow_tensor_metadata_change) const override;
33
+
34
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
35
+ c10::VariableVersion&& version_counter,
36
+ bool allow_tensor_metadata_change) const override;
37
+
38
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
39
+
40
+ at::IntArrayRef sizes_custom() const override;
41
+ at::IntArrayRef strides_custom() const override;
42
+ int64_t numel_custom() const override;
43
+ int64_t storage_offset_custom() const override;
44
+ int64_t dim_custom() const override;
45
+ bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
46
+ bool is_strides_like_custom(at::MemoryFormat memory_format) const override;
47
+ bool is_non_overlapping_and_dense_custom() const override;
48
+
49
+ c10::SymIntArrayRef sym_sizes_custom() const override;
50
+ c10::SymIntArrayRef sym_strides_custom() const override;
51
+ c10::SymInt sym_numel_custom() const override;
52
+
53
+ private:
54
+ void setup_size_properties();
55
+
56
+ LazyTensorPtr tensor_;
57
+ mutable c10::optional<std::vector<c10::SymInt>> sym_sizes_;
58
+ size_t generation_{0};
59
+ };
60
+
61
+ } // namespace lazy
62
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <list>
5
+
6
+ #include <c10/core/ScalarType.h>
7
+ #include <torch/csrc/lazy/core/ir.h>
8
+ #include <torch/csrc/lazy/core/metrics.h>
9
+
10
+ namespace torch {
11
+ namespace lazy {
12
+
13
+ struct TORCH_API TrieNode {
14
+ static size_t GetNextUniqueId() {
15
+ static thread_local size_t id_generator = 0;
16
+ return id_generator++;
17
+ }
18
+
19
+ size_t unique_id;
20
+ size_t hit_counter;
21
+ NodePtr ir_node;
22
+ std::list<std::shared_ptr<TrieNode>> successors;
23
+
24
+ TrieNode() : unique_id(GetNextUniqueId()), hit_counter(0), ir_node(nullptr) {}
25
+ explicit TrieNode(NodePtr node)
26
+ : unique_id(GetNextUniqueId()),
27
+ hit_counter(0),
28
+ ir_node(std::move(node)) {}
29
+ };
30
+
31
+ class TORCH_API TrieCache {
32
+ public:
33
+ static TrieCache* Get();
34
+
35
+ TrieNode* Current() const;
36
+ // Take an iterator as the input because we want to move the corresponding
37
+ // node in the successor list to achieve a LRU caching effect
38
+ void SetCurrent(std::list<std::shared_ptr<TrieNode>>::iterator& iter);
39
+ // Used in MarkStep to indicate the end of one tracing
40
+ void ResetCurrent();
41
+
42
+ // Create a new TrieNode for ir_node and insert into the TrieCache
43
+ void Insert(NodePtr ir_node);
44
+
45
+ // Clear all TrieCache nodes
46
+ // TODO: Because we don't expect user to explicitly call this function via
47
+ // a Python API, we may need to introduce a threshold on the size of the cache
48
+ // to avoid holding tensors for too long.
49
+ void Clear();
50
+
51
+ void DumpToDotFile(const std::string& file_name);
52
+
53
+ private:
54
+ TrieCache();
55
+
56
+ std::shared_ptr<TrieNode> root_;
57
+ TrieNode* current_;
58
+ };
59
+
60
+ template <typename T, typename... Args>
61
+ NodePtr LookupNodeFromTrieCache(Args&&... args) {
62
+ auto& successors = TrieCache::Get()->Current()->successors;
63
+ for (auto it = successors.begin(); it != successors.end(); it++) {
64
+ NodePtr ir_node = (*it)->ir_node;
65
+ const T* concrete_node = NodeCast<T>(ir_node.get());
66
+ if (concrete_node &&
67
+ concrete_node->CanBeReused(std::forward<Args>(args)...)) {
68
+ TORCH_LAZY_COUNTER(
69
+ "IrNodeReused_" + c10::demangle((typeid(T).name())), 1);
70
+ (*it)->hit_counter++;
71
+ TrieCache::Get()->SetCurrent(it);
72
+ return ir_node;
73
+ }
74
+ }
75
+ return nullptr;
76
+ }
77
+
78
+ } // namespace lazy
79
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * Unique in this file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/unique.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <c10/util/Optional.h>
9
+
10
+ #include <functional>
11
+ #include <set>
12
+
13
+ namespace torch {
14
+ namespace lazy {
15
+
16
+ // Helper class to allow tracking zero or more things, which should be forcibly
17
+ // be one only thing.
18
+ template <typename T, typename C = std::equal_to<T>>
19
+ class Unique {
20
+ public:
21
+ std::pair<bool, const T&> set(const T& value) {
22
+ if (value_) {
23
+ TORCH_CHECK(C()(*value_, value), "'", *value_, "' vs '", value);
24
+ return std::pair<bool, const T&>(false, *value_);
25
+ }
26
+ value_ = value;
27
+ return std::pair<bool, const T&>(true, *value_);
28
+ }
29
+
30
+ operator bool() const {
31
+ return value_.has_value();
32
+ }
33
+ operator const T&() const {
34
+ return *value_;
35
+ }
36
+ const T& operator*() const {
37
+ return *value_;
38
+ }
39
+ const T* operator->() const {
40
+ return value_.operator->();
41
+ }
42
+
43
+ std::set<T> AsSet() const {
44
+ std::set<T> vset;
45
+ if (value_.has_value()) {
46
+ vset.insert(*value_);
47
+ }
48
+ return vset;
49
+ }
50
+
51
+ private:
52
+ c10::optional<T> value_;
53
+ };
54
+
55
+ } // namespace lazy
56
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/Flags.h>
16
+ #include <torch/csrc/lazy/core/dynamic_ir.h>
17
+ #include <torch/csrc/lazy/core/hash.h>
18
+ #include <torch/csrc/lazy/core/ir.h>
19
+ #include <torch/csrc/lazy/core/ir_metadata.h>
20
+ #include <torch/csrc/lazy/ts_backend/ts_node.h>
21
+
22
+ C10_DECLARE_bool(ltc_enable_dynamic_shapes);
23
+
24
+ namespace torch {
25
+ namespace lazy {
26
+
27
+ /**
28
+ * The goal of "dynamic" Nodes is to patch a hole in our tracing.
29
+ * Previously, if a user called `sizes` on a Tensor, it would leak out
30
+ * of our tracing system, as `sizes` returns a torch.Size or an int. To
31
+ * prevent this from happening, we introduce DimensionNode, a new type
32
+ * of Node that abstracts the operation of getting the dimensions of a
33
+ * Tensor.
34
+ *
35
+ * Consider the following example:
36
+ * ```
37
+ * numel = x.shape()[0] * x.shape()[1]
38
+ * ```
39
+ *
40
+ * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
41
+ * and the multiplication of the two SizeNodes will be represented by
42
+ * a SizeMul (also a subclass of DimensionNode). Through this, we can
43
+ * prevent `numel` from being represented as a Python int and thus
44
+ * burned into the Graph.
45
+ */
46
+
47
+ // Represents the result of calling `size` on a Tensor
48
+ class TORCH_API SizeNode : public TsNode, public DimensionNode {
49
+ public:
50
+ SizeNode(Value input, size_t dim);
51
+ int64_t getStaticValue() const override;
52
+ bool isSymbolic() const override;
53
+ std::string ToString() const override;
54
+ size_t dim_ = 0;
55
+ torch::lazy::TSOpVector Lower(
56
+ std::shared_ptr<torch::jit::GraphFunction> function,
57
+ TSLoweringContext* loctx) const override;
58
+ };
59
+
60
+ class TORCH_API SizeAdd : public TsNode, public DimensionNode {
61
+ public:
62
+ SizeAdd(Value a, Value b);
63
+ int64_t getStaticValue() const override;
64
+ bool isSymbolic() const override;
65
+ std::string ToString() const override;
66
+ };
67
+
68
+ class TORCH_API SizeMul : public TsNode, public DimensionNode {
69
+ public:
70
+ SizeMul(Value a, Value b);
71
+ int64_t getStaticValue() const override;
72
+ bool isSymbolic() const override;
73
+ std::string ToString() const override;
74
+ };
75
+
76
+ class TORCH_API SizeDiv : public TsNode, public DimensionNode {
77
+ public:
78
+ SizeDiv(Value a, Value b);
79
+ int64_t getStaticValue() const override;
80
+ bool isSymbolic() const override;
81
+ std::string ToString() const override;
82
+ };
83
+
84
+ } // namespace lazy
85
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/backend/backend_interface.h>
4
+
5
+ namespace torch {
6
+ namespace lazy {
7
+
8
+ class TORCH_API TSData : public torch::lazy::BackendData {
9
+ public:
10
+ TSData(const at::Scalar& scalar, const torch::lazy::BackendDevice& device)
11
+ : torch::lazy::BackendData(device, torch::lazy::Shape(scalar.type(), {})),
12
+ scalar(scalar) {}
13
+
14
+ TSData(
15
+ const at::Tensor& data,
16
+ const torch::lazy::Shape& shape,
17
+ const torch::lazy::BackendDevice& device)
18
+ : torch::lazy::BackendData(device, shape), data_(data) {}
19
+
20
+ TSData(
21
+ const torch::lazy::Shape& shape,
22
+ const torch::lazy::BackendDevice& device)
23
+ : torch::lazy::BackendData(device, shape) {}
24
+
25
+ Handle GetHandle() override {
26
+ return reinterpret_cast<int64_t>(this);
27
+ }
28
+
29
+ void Assign(const torch::lazy::BackendData& data) override {
30
+ data_ = static_cast<const TSData&>(data).data_;
31
+ }
32
+
33
+ bool HasValue() const override {
34
+ return data_.defined();
35
+ }
36
+
37
+ at::Tensor data() {
38
+ return data_;
39
+ }
40
+
41
+ c10::optional<at::Scalar> scalar;
42
+
43
+ private:
44
+ at::Tensor data_;
45
+ };
46
+
47
+ TORCH_API torch::lazy::BackendImplInterface* GetTSBackendImpl();
48
+
49
+ TORCH_API void InitTorchScriptBackend();
50
+
51
+ } // namespace lazy
52
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/dispatch/Dispatcher.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <functional>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ bool force_eager_fallback(c10::Symbol op);
12
+ void ltc_eager_fallback(
13
+ const c10::OperatorHandle& op,
14
+ torch::jit::Stack* stack);
15
+
16
+ void ts_eager_fallback(
17
+ const c10::OperatorHandle& op,
18
+ torch::jit::Stack* stack,
19
+ c10::DeviceType device_type);
20
+
21
+ // The TorchScript backend does not register itself with pytorch dispatcher
22
+ // until it is explicitly initialized. This function should only be called
23
+ // by the main Torchscript backend init function.
24
+ void register_ts_ltc_eager_fallback();
25
+
26
+ } // namespace lazy
27
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <sstream>
4
+
5
+ #include <torch/csrc/api/include/torch/jit.h>
6
+ #include <torch/csrc/jit/runtime/graph_executor.h>
7
+ #include <torch/csrc/lazy/backend/lowering_context.h>
8
+ #include <torch/csrc/lazy/core/ir.h>
9
+ #include <torch/csrc/lazy/ts_backend/ts_node_lowering.h>
10
+
11
+ namespace torch {
12
+ namespace lazy {
13
+
14
+ using TSOpVector = std::vector<torch::jit::Value*>;
15
+
16
// A Computation backed by TorchScript: wraps a torch::jit::Graph together
// with the GraphExecutor used to run it.
class TORCH_API TSComputation : public Computation {
 public:
  TSComputation(const std::shared_ptr<torch::jit::Graph>& graph)
      : graph_(graph), graph_executor_(graph, "") {
    // Parameter names come straight from the graph's input debug names.
    for (torch::jit::Value* input : graph_->inputs()) {
      parameter_names_.push_back(input->debugName());
    }
  }

  int parameters_size() const override {
    return parameter_names_.size();
  }

  // Not implemented for the TS backend; always throws (see TODO below).
  const std::vector<Shape>& parameter_shapes() const override {
    throw std::runtime_error(
        "TODO(whc) implement TS computation shapes or change interface");
    return parameter_shapes_;
  }

  const std::vector<std::string>& parameter_names() const override {
    return parameter_names_;
  }

  // Not implemented for the TS backend; always throws (see TODO below).
  const Shape& result_shape() const override {
    throw std::runtime_error(
        "TODO(whc) implement TS computation shapes or change interface");
    return result_shape_;
  }

  // Human-readable dump of the underlying TorchScript graph.
  const std::string to_string() const override {
    std::ostringstream oss;
    oss << *graph_;
    return oss.str();
  }

  std::shared_ptr<torch::jit::Graph> graph() const {
    return graph_;
  }

  torch::jit::GraphExecutor& graph_executor() {
    return graph_executor_;
  }

 private:
  std::shared_ptr<torch::jit::Graph> graph_;
  torch::jit::GraphExecutor graph_executor_;
  std::vector<std::string> parameter_names_;
  // parameter_shapes_ / result_shape_ are never populated here; they exist
  // only so the throwing accessors above have something to "return".
  std::vector<Shape> parameter_shapes_;
  Shape result_shape_;
};
66
+
67
// Lowering context for the TorchScript backend: incrementally lowers lazy
// IR Nodes into a torch::jit::Graph, tracking emitted outputs, graph
// parameters and the final root tuple.
class TORCH_API TSLoweringContext : public LoweringContext {
 public:
  TSLoweringContext(const std::string& name, const BackendDevice device);

  TSLoweringContext(
      const std::string& name,
      BackendDevice device,
      c10::ArrayRef<const Node*> post_order,
      Util::EmissionMap emit_status);

  // Registers the lowered value for `output` as a graph result; returns its
  // position in the root tuple.
  size_t AddResult(const Output& output) override {
    return AddResult(GetOutputOp(output));
  }

  void AddParameter(
      const torch::lazy::Output& output,
      size_t index,
      const Shape& shape,
      const std::string& name) override {
    TORCH_INTERNAL_ASSERT(false, "not implemented");
  }

  // Lowers a single IR node into the graph being built.
  void Lower(const Node* node);

  // Finalizes the graph (registering all root-tuple values as block
  // outputs) and wraps it in a TSComputation.
  ComputationPtr Build() override {
    for (torch::jit::Value* output : root_tuple_) {
      graph_->block()->registerOutput(output);
    }
    return std::shared_ptr<Computation>(new TSComputation(graph_));
  }

  // Retrieves the lowered operation for an output. If the requested output is
  // not available yet, the graph behind the output's Node is lowered, and the
  // corresponding TS operation returned.
  torch::jit::Value* GetOutputOp(const Output& output) {
    auto it = emitted_outputs_.find(output);
    if (it == emitted_outputs_.end()) {
      // Lazily lower the producing subgraph in post-order; emit_status_
      // keeps track of what has already been emitted.
      auto post_order = Util::ComputePostOrder(output.node, &emit_status_);
      for (auto node : post_order) {
        Lower(node);
      }
      // At this point the output better be present, otherwise there is an issue
      // with the lowering code.
      it = emitted_outputs_.find(output);
      TORCH_CHECK(
          it != emitted_outputs_.end(),
          "No TS operation emitted for output: ",
          output.ToString());
    }
    return it->second;
  }

  // Assigns the given TS operation to the specified output. As outputs are
  // lowered in a post-order fashion, later nodes should always find their
  // operands among the emitted outputs.
  void AssignOutputOp(const Output& output, torch::jit::Value* op);

  // If a parameter associated with data has already been declared, it will be
  // returned. Otherwise a new one will be created, associated with the tensor
  // held in data.
  torch::jit::Value* GetParameter(BackendDataPtr data);

  std::shared_ptr<torch::jit::Graph> graph() const {
    return graph_;
  }

 private:
  // A graph parameter plus its position in the parameter list.
  struct Parameter {
    torch::jit::Value* param{nullptr};
    size_t index = 0;
  };

  // Appends `op` to the root tuple and returns its index.
  size_t AddResult(torch::jit::Value* op) {
    root_tuple_.push_back(std::move(op));
    return root_tuple_.size() - 1;
  }

  std::shared_ptr<torch::jit::Graph> graph_;
  std::shared_ptr<torch::jit::GraphFunction> function_;
  // Maps backend data handles to already-declared graph parameters so each
  // piece of data is only turned into one parameter.
  std::unordered_map<BackendData::Handle, Parameter> parameters_map_;
  std::vector<torch::jit::Value*> root_tuple_;
  // Cache of IR outputs that have already been lowered to jit Values.
  OutputMap<torch::jit::Value*> emitted_outputs_;
};
150
+
151
+ } // namespace lazy
152
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/api/include/torch/jit.h>
#include <torch/csrc/lazy/backend/lowering_context.h>

namespace torch {
namespace lazy {
// A lowered node may expand to several jit Values.
using TSOpVector = std::vector<torch::jit::Value*>;

// Emits a call to a TorchScript builtin `sym` into `function`'s graph with
// the given positional and keyword arguments; returns the produced Values.
TORCH_API TSOpVector LowerTSBuiltin(
    std::shared_ptr<torch::jit::GraphFunction> function,
    c10::Symbol sym,
    const std::vector<torch::jit::NamedValue>& arguments,
    const std::vector<torch::jit::NamedValue>& kwarguments = {});

} // namespace lazy
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <onnx/onnx_pb.h>

namespace torch::onnx {

// The following constants are defined here to avoid breaking Meta's internal
// usage of ONNX which pre-dates ONNX 1.14 and thus does not support FLOAT8:
// cf. https://github.com/pytorch/pytorch/pull/106379#issuecomment-1675189340
// -abock, 2023-08-25
//
// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FN
// (17 is the enum value the named constant carries in ONNX >= 1.14.)
constexpr auto TensorProto_DataType_FLOAT8E4M3FN =
    static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(17);
// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2
// (19 is the enum value the named constant carries in ONNX >= 1.14.)
constexpr auto TensorProto_DataType_FLOAT8E5M2 =
    static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(19);

} // namespace torch::onnx
vllm/lib/python3.10/site-packages/sympy/logic/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .boolalg import (to_cnf, to_dnf, to_nnf, And, Or, Not, Xor, Nand, Nor, Implies,
2
+ Equivalent, ITE, POSform, SOPform, simplify_logic, bool_map, true, false,
3
+ gateinputcount)
4
+ from .inference import satisfiable
5
+
6
+ __all__ = [
7
+ 'to_cnf', 'to_dnf', 'to_nnf', 'And', 'Or', 'Not', 'Xor', 'Nand', 'Nor',
8
+ 'Implies', 'Equivalent', 'ITE', 'POSform', 'SOPform', 'simplify_logic',
9
+ 'bool_map', 'true', 'false', 'gateinputcount',
10
+
11
+ 'satisfiable',
12
+ ]
vllm/lib/python3.10/site-packages/sympy/logic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (672 Bytes). View file
 
vllm/lib/python3.10/site-packages/sympy/logic/__pycache__/inference.cpython-310.pyc ADDED
Binary file (9.15 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/sympy/logic/algorithms/z3_wrapper.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.printing.smtlib import smtlib_code
2
+ from sympy.assumptions.assume import AppliedPredicate
3
+ from sympy.assumptions.cnf import EncodedCNF
4
+ from sympy.assumptions.ask import Q
5
+
6
+ from sympy.core import Add, Mul
7
+ from sympy.core.relational import Equality, LessThan, GreaterThan, StrictLessThan, StrictGreaterThan
8
+ from sympy.functions.elementary.complexes import Abs
9
+ from sympy.functions.elementary.exponential import Pow
10
+ from sympy.functions.elementary.miscellaneous import Min, Max
11
+ from sympy.logic.boolalg import And, Or, Xor, Implies
12
+ from sympy.logic.boolalg import Not, ITE
13
+ from sympy.assumptions.relation.equality import StrictGreaterThanPredicate, StrictLessThanPredicate, GreaterThanPredicate, LessThanPredicate, EqualityPredicate
14
+ from sympy.external import import_module
15
+
16
def z3_satisfiable(expr, all_models=False):
    """Check satisfiability of *expr* with the z3 SMT solver.

    Returns a SymPy-keyed model dict when satisfiable, ``False`` when
    unsatisfiable, and ``None`` when z3 reports "unknown".
    Raises ImportError if the optional z3 module is missing.
    """
    # Normalize a plain boolean expression into an EncodedCNF container.
    if not isinstance(expr, EncodedCNF):
        encoded = EncodedCNF()
        encoded.add_prop(expr)
        expr = encoded

    z3 = import_module("z3")
    if z3 is None:
        raise ImportError("z3 is not installed")

    solver = encoded_cnf_to_z3_solver(expr, z3)

    outcome = str(solver.check())
    if outcome == "sat":
        return z3_model_to_sympy_model(solver.model(), expr)
    if outcome == "unsat":
        return False
    return None
35
+
36
+
37
def z3_model_to_sympy_model(z3_model, enc_cnf):
    """Translate a z3 model back into a SymPy assignment.

    z3 variables are named ``d<enc>``; the encoding map of *enc_cnf* is
    inverted to recover the original SymPy predicate for each variable.
    """
    decode = {}
    for pred, code in enc_cnf.encoding.items():
        decode[code] = pred
    assignment = {}
    for var in z3_model:
        code = int(var.name()[1:])  # strip the leading 'd'
        assignment[decode[code]] = bool(z3_model[var])
    return assignment
40
+
41
+
42
def clause_to_assertion(clause):
    """Render one CNF clause (list of signed ints) as an SMT-LIB assert.

    Positive literal ``n`` becomes ``dn``; negative becomes ``(not dn)``.
    """
    rendered = []
    for lit in clause:
        atom = "d%d" % abs(lit)
        rendered.append(atom if lit > 0 else "(not %s)" % atom)
    return "(assert (or %s))" % " ".join(rendered)
45
+
46
+
47
def encoded_cnf_to_z3_solver(enc_cnf, z3):
    """Build a z3 Solver from an EncodedCNF.

    Boolean selector variables ``d<enc>`` encode the CNF clause structure;
    each relational/sign predicate additionally gets an implication tying
    its selector to an SMT-LIB rendering of the predicate over Real
    constants.
    """
    # NOTE(review): the first ``return False`` makes the rest of this inner
    # function unreachable, and ``dummify_bool`` is never called here —
    # this looks like leftover dead code.
    def dummify_bool(pred):
        return False
        assert isinstance(pred, AppliedPredicate)

        if pred.function in [Q.positive, Q.negative, Q.zero]:
            return pred
        else:
            return False

    s = z3.Solver()

    # One boolean per CNF variable, one assertion per CNF clause.
    declarations = [f"(declare-const d{var} Bool)" for var in enc_cnf.variables]
    assertions = [clause_to_assertion(clause) for clause in enc_cnf.data]

    symbols = set()
    for pred, enc in enc_cnf.encoding.items():
        # Only predicates with an SMT-LIB translation get the implication.
        if not isinstance(pred, AppliedPredicate):
            continue
        if pred.function not in (Q.gt, Q.lt, Q.ge, Q.le, Q.ne, Q.eq, Q.positive, Q.negative, Q.extended_negative, Q.extended_positive, Q.zero, Q.nonzero, Q.nonnegative, Q.nonpositive, Q.extended_nonzero, Q.extended_nonnegative, Q.extended_nonpositive):
            continue

        pred_str = smtlib_code(pred, auto_declare=False, auto_assert=False, known_functions=known_functions)

        symbols |= pred.free_symbols
        pred = pred_str
        # d<enc> being true forces the predicate itself to hold.
        clause = f"(implies d{enc} {pred})"
        assertion = "(assert " + clause + ")"
        assertions.append(assertion)

    # Every free symbol appearing in a translated predicate is a Real.
    for sym in symbols:
        declarations.append(f"(declare-const {sym} Real)")

    declarations = "\n".join(declarations)
    assertions = "\n".join(assertions)
    # Declarations must be loaded before the assertions that use them.
    s.from_string(declarations)
    s.from_string(assertions)

    return s
86
+
87
+
88
# Mapping from SymPy operations / relational predicates to their SMT-LIB
# operator names, passed to smtlib_code when translating predicates for z3.
known_functions = {
    Add: '+',
    Mul: '*',

    Equality: '=',
    LessThan: '<=',
    GreaterThan: '>=',
    StrictLessThan: '<',
    StrictGreaterThan: '>',

    # Predicate singletons (Q.eq, Q.le, ...) map to the same operators as
    # their Relational counterparts above.
    EqualityPredicate(): '=',
    LessThanPredicate(): '<=',
    GreaterThanPredicate(): '>=',
    StrictLessThanPredicate(): '<',
    StrictGreaterThanPredicate(): '>',

    Abs: 'abs',
    Min: 'min',
    Max: 'max',
    Pow: '^',

    And: 'and',
    Or: 'or',
    Xor: 'xor',
    Not: 'not',
    ITE: 'ite',
    Implies: '=>',
}
vllm/lib/python3.10/site-packages/sympy/logic/boolalg.py ADDED
The diff for this file is too large to render. See raw diff
 
vllm/lib/python3.10/site-packages/sympy/logic/inference.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Inference in propositional logic"""
2
+
3
+ from sympy.logic.boolalg import And, Not, conjuncts, to_cnf, BooleanFunction
4
+ from sympy.core.sorting import ordered
5
+ from sympy.core.sympify import sympify
6
+ from sympy.external.importtools import import_module
7
+
8
+
9
def literal_symbol(literal):
    """
    The symbol in this literal (without the negation).

    Examples
    ========

    >>> from sympy.abc import A
    >>> from sympy.logic.inference import literal_symbol
    >>> literal_symbol(A)
    A
    >>> literal_symbol(~A)
    A

    """
    # Guard-clause style: bools pass through, symbols are returned as-is,
    # negations recurse into their argument.
    if literal is True or literal is False:
        return literal
    if literal.is_Symbol:
        return literal
    if literal.is_Not:
        return literal_symbol(literal.args[0])
    raise ValueError("Argument must be a boolean literal.")
33
+
34
+
35
def satisfiable(expr, algorithm=None, all_models=False, minimal=False, use_lra_theory=False):
    """
    Check satisfiability of a propositional sentence.
    Returns a model when it succeeds.
    Returns {true: true} for trivially true expressions.

    On setting all_models to True, if given expr is satisfiable then
    returns a generator of models. However, if expr is unsatisfiable
    then returns a generator containing the single element False.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import satisfiable
    >>> satisfiable(A & ~B)
    {A: True, B: False}
    >>> satisfiable(A & ~A)
    False
    >>> satisfiable(True)
    {True: True}
    >>> next(satisfiable(A & ~A, all_models=True))
    False
    >>> models = satisfiable((A >> B) & B, all_models=True)
    >>> next(models)
    {A: False, B: True}
    >>> next(models)
    {A: True, B: True}
    >>> def use_models(models):
    ...     for model in models:
    ...         if model:
    ...             # Do something with the model.
    ...             print(model)
    ...         else:
    ...             # Given expr is unsatisfiable.
    ...             print("UNSAT")
    >>> use_models(satisfiable(A >> ~A, all_models=True))
    {A: False}
    >>> use_models(satisfiable(A ^ A, all_models=True))
    UNSAT

    """
    # LRA (linear real arithmetic) theory is only supported by dpll2.
    if use_lra_theory:
        if algorithm is not None and algorithm != "dpll2":
            raise ValueError(f"Currently only dpll2 can handle using lra theory. {algorithm} is not handled.")
        algorithm = "dpll2"

    # Default algorithm selection: prefer pycosat when available.
    if algorithm is None or algorithm == "pycosat":
        pycosat = import_module('pycosat')
        if pycosat is not None:
            algorithm = "pycosat"
        else:
            # An explicit request for pycosat is an error when it is absent...
            if algorithm == "pycosat":
                raise ImportError("pycosat module is not present")
            # Silently fall back to dpll2 if pycosat
            # is not installed
            algorithm = "dpll2"

    # For the other optional backends, quietly fall back to the pure-Python
    # dpll2 solver when the third-party module is missing.
    if algorithm=="minisat22":
        pysat = import_module('pysat')
        if pysat is None:
            algorithm = "dpll2"

    if algorithm=="z3":
        z3 = import_module('z3')
        if z3 is None:
            algorithm = "dpll2"

    # Dispatch; solver modules are imported lazily so optional backends are
    # only loaded when actually used.
    if algorithm == "dpll":
        from sympy.logic.algorithms.dpll import dpll_satisfiable
        return dpll_satisfiable(expr)
    elif algorithm == "dpll2":
        from sympy.logic.algorithms.dpll2 import dpll_satisfiable
        return dpll_satisfiable(expr, all_models, use_lra_theory=use_lra_theory)
    elif algorithm == "pycosat":
        from sympy.logic.algorithms.pycosat_wrapper import pycosat_satisfiable
        return pycosat_satisfiable(expr, all_models)
    elif algorithm == "minisat22":
        from sympy.logic.algorithms.minisat22_wrapper import minisat22_satisfiable
        return minisat22_satisfiable(expr, all_models, minimal)
    elif algorithm == "z3":
        from sympy.logic.algorithms.z3_wrapper import z3_satisfiable
        return z3_satisfiable(expr, all_models)

    # Unrecognized algorithm name.
    raise NotImplementedError
120
+
121
+
122
def valid(expr):
    """
    Check validity of a propositional sentence.
    A valid propositional sentence is True under every assignment.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import valid
    >>> valid(A | ~A)
    True
    >>> valid(A | B)
    False

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Validity

    """
    # expr is valid iff its negation is unsatisfiable.
    return not satisfiable(Not(expr))
144
+
145
+
146
def pl_true(expr, model=None, deep=False):
    """
    Returns whether the given assignment is a model or not.

    If the assignment does not specify the value for every proposition,
    this may return None to indicate 'not obvious'.

    Parameters
    ==========

    model : dict, optional, default: {}
        Mapping of symbols to boolean values to indicate assignment.
    deep: boolean, optional, default: False
        Gives the value of the expression under partial assignments
        correctly. May still return None to indicate 'not obvious'.


    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import pl_true
    >>> pl_true( A & B, {A: True, B: True})
    True
    >>> pl_true(A & B, {A: False})
    False
    >>> pl_true(A & B, {A: True})
    >>> pl_true(A & B, {A: True}, deep=True)
    >>> pl_true(A >> (B >> A))
    >>> pl_true(A >> (B >> A), deep=True)
    True
    >>> pl_true(A & ~A)
    >>> pl_true(A & ~A, deep=True)
    False
    >>> pl_true(A & B & (~A | ~B), {A: True})
    >>> pl_true(A & B & (~A | ~B), {A: True}, deep=True)
    False

    """

    from sympy.core.symbol import Symbol

    boolean = (True, False)

    def _validate(expr):
        # Accept only symbols, True/False, and boolean functions built
        # recursively from them.
        if isinstance(expr, Symbol) or expr in boolean:
            return True
        if not isinstance(expr, BooleanFunction):
            return False
        return all(_validate(arg) for arg in expr.args)

    # Trivial case: a bare True/False evaluates to itself.
    if expr in boolean:
        return expr
    expr = sympify(expr)
    if not _validate(expr):
        raise ValueError("%s is not a valid boolean expression" % expr)
    if not model:
        model = {}
    # Drop non-boolean assignments; they cannot contribute to evaluation.
    model = {k: v for k, v in model.items() if v in boolean}
    result = expr.subs(model)
    # Substitution fully decided the expression.
    if result in boolean:
        return bool(result)
    if deep:
        # Partial assignment left free atoms: decide via validity /
        # satisfiability of the residual expression.
        model = dict.fromkeys(result.atoms(), True)
        if pl_true(result, model):
            if valid(result):
                return True
        else:
            if not satisfiable(result):
                return False
    # 'Not obvious' under this (possibly partial) assignment.
    return None
217
+
218
+
219
def entails(expr, formula_set=None):
    """
    Check whether the given expr_set entail an expr.
    If formula_set is empty then it returns the validity of expr.

    Examples
    ========

    >>> from sympy.abc import A, B, C
    >>> from sympy.logic.inference import entails
    >>> entails(A, [A >> B, B >> C])
    False
    >>> entails(C, [A >> B, B >> C, A])
    True
    >>> entails(A >> B)
    False
    >>> entails(A >> (B >> A))
    True

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Logical_consequence

    """
    # Entailment by refutation: the premises together with ~expr must be
    # jointly unsatisfiable.
    clauses = list(formula_set) if formula_set else []
    clauses.append(Not(expr))
    return not satisfiable(And(*clauses))
250
+
251
+
252
class KB:
    """Base class for all knowledge bases.

    Stores clauses in a set; subclasses supply the actual tell/ask/retract
    behavior.
    """

    def __init__(self, sentence=None):
        # A set so duplicate clauses collapse automatically.
        self.clauses_ = set()
        if sentence:
            self.tell(sentence)

    def tell(self, sentence):
        """Add *sentence* to the knowledge base (abstract)."""
        raise NotImplementedError

    def ask(self, query):
        """Query the knowledge base (abstract)."""
        raise NotImplementedError

    def retract(self, sentence):
        """Remove *sentence* from the knowledge base (abstract)."""
        raise NotImplementedError

    @property
    def clauses(self):
        # Expose the stored clauses in canonical (ordered) form.
        return list(ordered(self.clauses_))
271
+
272
+
273
class PropKB(KB):
    """A KB for Propositional Logic. Inefficient, with no indexing."""

    def tell(self, sentence):
        """Add the sentence's clauses to the KB

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.clauses
        []

        >>> l.tell(x | y)
        >>> l.clauses
        [x | y]

        >>> l.tell(y)
        >>> l.clauses
        [y, x | y]

        """
        # Sentences are stored in CNF, one clause at a time.
        for c in conjuncts(to_cnf(sentence)):
            self.clauses_.add(c)

    def ask(self, query):
        """Checks if the query is true given the set of clauses.

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.tell(x & ~y)
        >>> l.ask(x)
        True
        >>> l.ask(y)
        False

        """
        # A query holds iff the stored clauses entail it.
        return entails(query, self.clauses_)

    def retract(self, sentence):
        """Remove the sentence's clauses from the KB

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.clauses
        []

        >>> l.tell(x | y)
        >>> l.clauses
        [x | y]

        >>> l.retract(x | y)
        >>> l.clauses
        []

        """
        # discard (not remove) so retracting unknown clauses is a no-op.
        for c in conjuncts(to_cnf(sentence)):
            self.clauses_.discard(c)
vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (167 Bytes). View file
 
vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/logic/utilities/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .dimacs import load_file
2
+
3
+ __all__ = ['load_file']
vllm/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (233 Bytes). View file
 
vllm/lib/python3.10/site-packages/sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc ADDED
Binary file (1.53 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/logic/utilities/dimacs.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """For reading in DIMACS file format
2
+
3
+ www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps
4
+
5
+ """
6
+
7
+ from sympy.core import Symbol
8
+ from sympy.logic.boolalg import And, Or
9
+ import re
10
+
11
+
12
def load(s):
    """Loads a boolean expression from a string.

    Examples
    ========

    >>> from sympy.logic.utilities.dimacs import load
    >>> load('1')
    cnf_1
    >>> load('1 2')
    cnf_1 | cnf_2
    >>> load('1 \\n 2')
    cnf_1 & cnf_2
    >>> load('1 2 \\n 3')
    cnf_3 & (cnf_1 | cnf_2)
    """
    clauses = []

    pComment = re.compile(r'c.*')
    pStats = re.compile(r'p\s*cnf\s*(\d*)\s*(\d*)')

    for line in s.split('\n'):
        # Skip DIMACS comment lines ('c ...') and the problem statement
        # line ('p cnf <nvars> <nclauses>').
        if pComment.match(line) or pStats.match(line):
            continue
        # Previously a local named ``list`` shadowed the builtin and each
        # literal was converted with int() three times; fixed here.
        literals = []
        for lit in line.rstrip('\n').split(' '):
            if not lit:
                continue
            num = int(lit)
            if num == 0:
                # 0 is the DIMACS clause terminator, not a literal.
                continue
            sym = Symbol("cnf_%s" % abs(num))
            # Negative numbers denote negated literals.
            literals.append(sym if num > 0 else ~sym)

        if literals:
            clauses.append(Or(*literals))

    return And(*clauses)
63
+
64
+
65
def load_file(location):
    """Loads a boolean expression from a file."""
    # Read the whole file and delegate parsing to load().
    with open(location) as fp:
        contents = fp.read()
    return load(contents)
vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (737 Bytes). View file
 
vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-310.pyc ADDED
Binary file (9.24 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-310.pyc ADDED
Binary file (3.05 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-310.pyc ADDED
Binary file (9.58 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png ADDED

Git LFS Details

  • SHA256: 115d0b9b81ed40f93fe9e216b4f6384cf71093e3bbb64a5d648b8b9858c645a0
  • Pointer size: 129 Bytes
  • Size of remote file: 6.86 kB
vllm/lib/python3.10/site-packages/sympy/series/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A module that handles series: find a limit, order the series etc.
2
+ """
3
+ from .order import Order
4
+ from .limits import limit, Limit
5
+ from .gruntz import gruntz
6
+ from .series import series
7
+ from .approximants import approximants
8
+ from .residues import residue
9
+ from .sequences import SeqPer, SeqFormula, sequence, SeqAdd, SeqMul
10
+ from .fourier import fourier_series
11
+ from .formal import fps
12
+ from .limitseq import difference_delta, limit_seq
13
+
14
+ from sympy.core.singleton import S
15
+ EmptySequence = S.EmptySequence
16
+
17
+ O = Order
18
+
19
+ __all__ = ['Order', 'O', 'limit', 'Limit', 'gruntz', 'series', 'approximants',
20
+ 'residue', 'EmptySequence', 'SeqPer', 'SeqFormula', 'sequence',
21
+ 'SeqAdd', 'SeqMul', 'fourier_series', 'fps', 'difference_delta',
22
+ 'limit_seq'
23
+ ]
vllm/lib/python3.10/site-packages/sympy/series/__pycache__/acceleration.cpython-310.pyc ADDED
Binary file (3.81 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/series/__pycache__/approximants.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/series/__pycache__/fourier.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/series/__pycache__/gruntz.cpython-310.pyc ADDED
Binary file (21.4 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/series/__pycache__/kauers.cpython-310.pyc ADDED
Binary file (1.9 kB). View file
 
vllm/lib/python3.10/site-packages/sympy/series/__pycache__/limits.cpython-310.pyc ADDED
Binary file (9.3 kB). View file